diff --git a/.grype.yaml b/.grype.yaml
new file mode 100644
index 00000000000..3113d6c38eb
--- /dev/null
+++ b/.grype.yaml
@@ -0,0 +1,3 @@
+ignore:
+  - package:
+      type: gem
diff --git a/go.mod b/go.mod
index eb5aff012ab..1ce8853dfd9 100644
--- a/go.mod
+++ b/go.mod
@@ -1,11 +1,11 @@
module code.cloudfoundry.org/cli
-go 1.22
+go 1.22.5
require (
code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4
code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6
- code.cloudfoundry.org/cli-plugin-repo v0.0.0-20200304195157-af98c4be9b85
+ code.cloudfoundry.org/cli-plugin-repo v0.0.0-20240701140235-0ab1c0e216c8
code.cloudfoundry.org/cli/integration/assets/hydrabroker v0.0.0-20201002233634-81722a1144e4
code.cloudfoundry.org/clock v1.1.0
code.cloudfoundry.org/diego-ssh v0.0.0-20230810200140-af9d79fe9c82
diff --git a/go.sum b/go.sum
index 62bbf6a5c58..7e8670f37ae 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,8 @@ code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4 h1:9G5F8zgma5v0
code.cloudfoundry.org/bytefmt v0.0.0-20230612151507-41ef4d1f67a4/go.mod h1:wYHCXH/gI19ujoFVuMkY48qPpPCoHLKBKXPkn67h/Yc=
code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6 h1:Yc9r1p21kEpni9WlG4mwOZw87TB2QlyS9sAEebZ3+ak=
code.cloudfoundry.org/cfnetworking-cli-api v0.0.0-20190103195135-4b04f26287a6/go.mod h1:u5FovqC5GGAEbFPz+IdjycDA+gIjhUwqxnu0vbHwVeM=
-code.cloudfoundry.org/cli-plugin-repo v0.0.0-20200304195157-af98c4be9b85 h1:jaHWw9opYjKPrDT19uydBBWSxl+g5F4Hv030fqMsalo=
-code.cloudfoundry.org/cli-plugin-repo v0.0.0-20200304195157-af98c4be9b85/go.mod h1:R1EiyOAr7lW0l/YkZNqItUNZ01Q/dYUfbTn4X4Z+82M=
+code.cloudfoundry.org/cli-plugin-repo v0.0.0-20240701140235-0ab1c0e216c8 h1:U9t+5gylASZzKYjaLhW4azDnGvSH4GKVhmWoMAbaLRo=
+code.cloudfoundry.org/cli-plugin-repo v0.0.0-20240701140235-0ab1c0e216c8/go.mod h1:cf3zRgfxIBUHx8FmX9SgiQ0QWVxRsH+begnYUdigZag=
code.cloudfoundry.org/cli/integration/assets/hydrabroker v0.0.0-20201002233634-81722a1144e4 h1:O+j8afQWaDuxzKGcculsjgHGK+biBTn6iMjgFpBf54c=
code.cloudfoundry.org/cli/integration/assets/hydrabroker v0.0.0-20201002233634-81722a1144e4/go.mod h1:dVTgo9kQbYns/QM4A1C2GtxqUnFSvJTk2Qhw+M0/uzk=
code.cloudfoundry.org/clock v1.1.0 h1:XLzC6W3Ah/Y7ht1rmZ6+QfPdt1iGWEAAtIZXgiaj57c=
@@ -227,6 +227,7 @@ github.com/vito/go-interact v0.0.0-20171111012221-fa338ed9e9ec h1:Klu98tQ9Z1t23g
github.com/vito/go-interact v0.0.0-20171111012221-fa338ed9e9ec/go.mod h1:wPlfmglZmRWMYv/qJy3P+fK/UnoQB5ISk4txfNd9tDo=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.step.sm/crypto v0.44.8 h1:jDSHL6FdB1UTA0d56ECNx9XtLVkewzeg38Vy3HWB3N8=
go.step.sm/crypto v0.44.8/go.mod h1:QEmu4T9YewrDuaJnrV1I0zWZ15aJ/mqRUfL5w3R2WgU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -234,16 +235,19 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180418062111-d41e8174641f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -256,15 +260,21 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20180419222023-a2a45943ae67/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -285,14 +295,24 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -300,6 +320,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -309,6 +332,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.11-0.20220316014157-77aa08bb151a/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/integration/assets/hydrabroker/go.mod b/integration/assets/hydrabroker/go.mod
index b6db35c4b65..bb6997c54f2 100644
--- a/integration/assets/hydrabroker/go.mod
+++ b/integration/assets/hydrabroker/go.mod
@@ -3,10 +3,10 @@ module code.cloudfoundry.org/cli/integration/assets/hydrabroker
go 1.13
require (
- github.com/go-playground/validator/v10 v10.2.0
- github.com/gorilla/mux v1.7.4
+ github.com/go-playground/validator/v10 v10.22.0
+ github.com/gorilla/mux v1.8.1
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d
- github.com/onsi/ginkgo v1.11.0
- github.com/onsi/gomega v1.8.1
- github.com/pivotal-cf/brokerapi/v7 v7.2.0
+ github.com/onsi/ginkgo/v2 v2.19.0
+ github.com/onsi/gomega v1.33.1
+ github.com/pivotal-cf/brokerapi/v7 v7.5.0
)
diff --git a/integration/assets/hydrabroker/go.sum b/integration/assets/hydrabroker/go.sum
index 8f89d4800a8..dce581cb561 100644
--- a/integration/assets/hydrabroker/go.sum
+++ b/integration/assets/hydrabroker/go.sum
@@ -1,37 +1,82 @@
code.cloudfoundry.org/lager v1.1.1-0.20191008172124-a9afc05ee5be h1:rnGRgbKlOPKbI9N/PscJ78Ug5Iw+o1kE7aDW01V+0FM=
code.cloudfoundry.org/lager v1.1.1-0.20191008172124-a9afc05ee5be/go.mod h1:O2sS7gKP3HM2iemG+EnwvyNQK7pTSC6Foi4QiMp9sSk=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
+github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
+github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/drewolson/testflight v1.0.0 h1:jgA0pHcFIPnXoBmyFzrdoR2ka4UvReMDsjYc7Jcvl80=
github.com/drewolson/testflight v1.0.0/go.mod h1:t9oKuuEohRGLb80SWX+uxJHuhX98B7HnojqtW+Ryq30=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY=
-github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
+github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
+github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
+github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
+github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.0 h1:Jf4mxPC/ziBnoPIdpQdPJ9OeiomAUHLvxmPRSPH9m4s=
github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
-github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.0.0-20160823170715-cfb55aafdaf3/go.mod h1:Bvhd+E3laJ0AVkG0c9rmtZcnhV0HQ3+c3YxxqTvc/gA=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -40,68 +85,283 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.0.0-20160504234017-7cafcd837844/go.mod h1:sjUstKUATFIcff4qlB53Kml0wQPtJVc/3fWrmuUmcfA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
-github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.3/go.mod h1:1ftk08SazyElaaNvmqAfZWGwJzshjCfBXDLoQtPAMNk=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
+github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0=
+github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
+github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw=
+github.com/onsi/ginkgo/v2 v2.7.0/go.mod h1:yjiuMwPokqY1XauOgju45q3sJt6VzQ/Fict1LFVcsAo=
+github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc=
+github.com/onsi/ginkgo/v2 v2.9.0/go.mod h1:4xkjoL/tZv4SMWeww56BU5kAt19mVB47gTWxmrTcxyk=
+github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
+github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
+github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
+github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
+github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM=
+github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
+github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=
+github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM=
+github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
+github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM=
+github.com/onsi/gomega v1.26.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
+github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw=
+github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557cZ6Gw=
+github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
+github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
+github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709 h1:zNBQb37RGLmJybyMcs983HfUfpkw9OTFD9tbBfAViHE=
github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
-github.com/pivotal-cf/brokerapi/v7 v7.2.0 h1:LL/OS3H2p+K30YG1ppB7Fr1YFQ669My00icLkxYqdwU=
-github.com/pivotal-cf/brokerapi/v7 v7.2.0/go.mod h1:5QRQ8vJmav91F+AvY5NA/QoDOq70XgBVxXKUK4N/cNE=
+github.com/pivotal-cf/brokerapi/v7 v7.5.0 h1:l7kAjlTL4bGIkl2m4dljc1xqgQYbU+4BjJskfkUn6Vc=
+github.com/pivotal-cf/brokerapi/v7 v7.5.0/go.mod h1:+z5BKkzLViNax5Q8S3Z6e6dkDpAJl4ZJIu9mP1fhyZM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
+github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db h1:9hRk1xeL9LTT3yX/941DqeBz87XgHAQuj+TbimYJuiw=
-golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200301222351-066e0c02454c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
+golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
+golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
diff --git a/integration/assets/hydrabroker/integrationtest/integrationtest_test.go b/integration/assets/hydrabroker/integrationtest/integrationtest_test.go
index 896358dfeb9..eccba639564 100644
--- a/integration/assets/hydrabroker/integrationtest/integrationtest_test.go
+++ b/integration/assets/hydrabroker/integrationtest/integrationtest_test.go
@@ -15,6 +15,7 @@ import (
uuid2 "github.com/nu7hatch/gouuid"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
+ . "github.com/onsi/gomega/gstruct"
"github.com/pivotal-cf/brokerapi/v7/domain/apiresponses"
)
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/.gitattributes b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/.gitattributes
new file mode 100644
index 00000000000..0cc26ec01c6
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/.gitattributes
@@ -0,0 +1 @@
+testdata/* linguist-vendored
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/CODE_OF_CONDUCT.md b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000000..8479cd87d67
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/CODE_OF_CONDUCT.md
@@ -0,0 +1,76 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and expression,
+level of experience, education, socio-economic status, nationality, personal
+appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at vasile.gabriel@email.com. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see
+https://www.contributor-covenant.org/faq
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/CONTRIBUTING.md b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/CONTRIBUTING.md
new file mode 100644
index 00000000000..56ae4e57c6c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+## Contribute
+Contributions to **mimetype** are welcome. If you find an issue and you consider
+contributing, you can use the [GitHub issues tracker](https://github.com/gabriel-vasile/mimetype/issues)
+in order to report it, or better yet, open a pull request.
+
+Code contributions must respect these rules:
+ - code must be test covered
+ - code must be formatted using gofmt tool
+ - exported names must be documented
+
+**Important**: By submitting a pull request, you agree to allow the project
+owner to license your work under the same license as that used by the project.
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/LICENSE
similarity index 94%
rename from integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
rename to integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/LICENSE
index 91b5cef30eb..6aac070c78f 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/LICENSE
@@ -1,6 +1,6 @@
-The MIT License (MIT)
+MIT License
-Copyright (c) 2016 Yasuhiro Matsumoto
+Copyright (c) 2018-2020 Gabriel Vasile
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/README.md b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/README.md
new file mode 100644
index 00000000000..231b29190fd
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/README.md
@@ -0,0 +1,105 @@
+
+ mimetype
+
+
+
+ A package for detecting MIME types and extensions based on magic numbers
+
+
+ Goroutine safe, extensible, no C bindings
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+## Features
+- fast and precise MIME type and file extension detection
+- long list of [supported MIME types](supported_mimes.md)
+- possibility to [extend](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-Extend) with other file formats
+- common file formats are prioritized
+- [text vs. binary files differentiation](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#example-package-TextVsBinary)
+- safe for concurrent usage
+
+## Install
+```bash
+go get github.com/gabriel-vasile/mimetype
+```
+
+## Usage
+```go
+mtype := mimetype.Detect([]byte)
+// OR
+mtype, err := mimetype.DetectReader(io.Reader)
+// OR
+mtype, err := mimetype.DetectFile("/path/to/file")
+fmt.Println(mtype.String(), mtype.Extension())
+```
+See the [runnable Go Playground examples](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#pkg-overview).
+
+## Use with caution
+Only use libraries like **mimetype** as a last resort. Content type detection
+using magic numbers is slow, inaccurate, and non-standard. Most of the time,
+protocols have methods for specifying such metadata; e.g., `Content-Type` header
+in HTTP and SMTP.
+
+## FAQ
+Q: My file is in the list of [supported MIME types](supported_mimes.md) but
+it is not correctly detected. What should I do?
+
+A: Some file formats (often Microsoft Office documents) keep their signatures
+towards the end of the file. Try increasing the number of bytes used for detection
+with:
+```go
+mimetype.SetLimit(1024*1024) // Set limit to 1MB.
+// or
+mimetype.SetLimit(0) // No limit, whole file content used.
+mimetype.DetectFile("file.doc")
+```
+If increasing the limit does not help, please
+[open an issue](https://github.com/gabriel-vasile/mimetype/issues/new?assignees=&labels=&template=mismatched-mime-type-detected.md&title=).
+
+## Structure
+**mimetype** uses a hierarchical structure to keep the MIME type detection logic.
+This reduces the number of calls needed for detecting the file type. The reason
+behind this choice is that there are file formats used as containers for other
+file formats. For example, Microsoft Office files are just zip archives,
+containing specific metadata files. Once a file has been identified as a
+zip, there is no need to check if it is a text file, but it is worth checking if
+it is a Microsoft Office file.
+
+To prevent loading entire files into memory, when detecting from a
+[reader](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectReader)
+or from a [file](https://pkg.go.dev/github.com/gabriel-vasile/mimetype#DetectFile)
+**mimetype** limits itself to reading only the header of the input.
+
+
+
+
+## Performance
+Thanks to the hierarchical structure, searching for common formats first,
+and limiting itself to file headers, **mimetype** matches the performance of
+stdlib `http.DetectContentType` while outperforming the alternative package.
+
+```bash
+ mimetype http.DetectContentType filetype
+BenchmarkMatchTar-24 250 ns/op 400 ns/op 3778 ns/op
+BenchmarkMatchZip-24 524 ns/op 351 ns/op 4884 ns/op
+BenchmarkMatchJpeg-24 103 ns/op 228 ns/op 839 ns/op
+BenchmarkMatchGif-24 139 ns/op 202 ns/op 751 ns/op
+BenchmarkMatchPng-24 165 ns/op 221 ns/op 1176 ns/op
+```
+
+## Contributing
+See [CONTRIBUTING.md](CONTRIBUTING.md).
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/go.mod b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/go.mod
new file mode 100644
index 00000000000..9fc4890d921
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/go.mod
@@ -0,0 +1,5 @@
+module github.com/gabriel-vasile/mimetype
+
+go 1.20
+
+require golang.org/x/net v0.17.0
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/go.sum b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/go.sum
new file mode 100644
index 00000000000..0d6f45486d3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
new file mode 100644
index 00000000000..0647f730e53
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
@@ -0,0 +1,309 @@
+package charset
+
+import (
+ "bytes"
+ "encoding/xml"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/net/html"
+)
+
+const (
+ F = 0 /* character never appears in text */
+ T = 1 /* character appears in plain ASCII text */
+ I = 2 /* character appears in ISO-8859 text */
+ X = 3 /* character appears in non-ISO extended ASCII (Mac, IBM PC) */
+)
+
+var (
+ boms = []struct {
+ bom []byte
+ enc string
+ }{
+ {[]byte{0xEF, 0xBB, 0xBF}, "utf-8"},
+ {[]byte{0x00, 0x00, 0xFE, 0xFF}, "utf-32be"},
+ {[]byte{0xFF, 0xFE, 0x00, 0x00}, "utf-32le"},
+ {[]byte{0xFE, 0xFF}, "utf-16be"},
+ {[]byte{0xFF, 0xFE}, "utf-16le"},
+ }
+
+ // https://github.com/file/file/blob/fa93fb9f7d21935f1c7644c47d2975d31f12b812/src/encoding.c#L241
+ textChars = [256]byte{
+ /* BEL BS HT LF VT FF CR */
+ F, F, F, F, F, F, F, T, T, T, T, T, T, T, F, F, /* 0x0X */
+ /* ESC */
+ F, F, F, F, F, F, F, F, F, F, F, T, F, F, F, F, /* 0x1X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x2X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x3X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x4X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x5X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x6X */
+ T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, F, /* 0x7X */
+ /* NEL */
+ X, X, X, X, X, T, X, X, X, X, X, X, X, X, X, X, /* 0x8X */
+ X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 0x9X */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xaX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xbX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xcX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xdX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xeX */
+ I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xfX */
+ }
+)
+
+// FromBOM returns the charset declared in the BOM of content.
+func FromBOM(content []byte) string {
+ for _, b := range boms {
+ if bytes.HasPrefix(content, b.bom) {
+ return b.enc
+ }
+ }
+ return ""
+}
+
+// FromPlain returns the charset of a plain text. It relies on BOM presence
+// and it falls back on checking each byte in content.
+func FromPlain(content []byte) string {
+ if len(content) == 0 {
+ return ""
+ }
+ if cset := FromBOM(content); cset != "" {
+ return cset
+ }
+ origContent := content
+ // Try to detect UTF-8.
+ // First eliminate any partial rune at the end.
+ for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
+ b := content[i]
+ if b < 0x80 {
+ break
+ }
+ if utf8.RuneStart(b) {
+ content = content[:i]
+ break
+ }
+ }
+ hasHighBit := false
+ for _, c := range content {
+ if c >= 0x80 {
+ hasHighBit = true
+ break
+ }
+ }
+ if hasHighBit && utf8.Valid(content) {
+ return "utf-8"
+ }
+
+ // ASCII is a subset of UTF8. Follow W3C recommendation and replace with UTF8.
+ if ascii(origContent) {
+ return "utf-8"
+ }
+
+ return latin(origContent)
+}
+
+func latin(content []byte) string {
+ hasControlBytes := false
+ for _, b := range content {
+ t := textChars[b]
+ if t != T && t != I {
+ return ""
+ }
+ if b >= 0x80 && b <= 0x9F {
+ hasControlBytes = true
+ }
+ }
+ // Code range 0x80 to 0x9F is reserved for control characters in ISO-8859-1
+ // (so-called C1 Controls). Windows 1252, however, has printable punctuation
+ // characters in this range.
+ if hasControlBytes {
+ return "windows-1252"
+ }
+ return "iso-8859-1"
+}
+
+func ascii(content []byte) bool {
+ for _, b := range content {
+ if textChars[b] != T {
+ return false
+ }
+ }
+ return true
+}
+
+// FromXML returns the charset of an XML document. It relies on the XML
+// header and falls back on the plain
+// text content.
+func FromXML(content []byte) string {
+ if cset := fromXML(content); cset != "" {
+ return cset
+ }
+ return FromPlain(content)
+}
+func fromXML(content []byte) string {
+ content = trimLWS(content)
+ dec := xml.NewDecoder(bytes.NewReader(content))
+ rawT, err := dec.RawToken()
+ if err != nil {
+ return ""
+ }
+
+ t, ok := rawT.(xml.ProcInst)
+ if !ok {
+ return ""
+ }
+
+ return strings.ToLower(xmlEncoding(string(t.Inst)))
+}
+
+// FromHTML returns the charset of an HTML document. It first looks if a BOM is
+// present and if so uses it to determine the charset. If no BOM is present,
+// it relies on the meta tag and falls back on the
+// plain text content.
+func FromHTML(content []byte) string {
+ if cset := FromBOM(content); cset != "" {
+ return cset
+ }
+ if cset := fromHTML(content); cset != "" {
+ return cset
+ }
+ return FromPlain(content)
+}
+
+func fromHTML(content []byte) string {
+ z := html.NewTokenizer(bytes.NewReader(content))
+ for {
+ switch z.Next() {
+ case html.ErrorToken:
+ return ""
+
+ case html.StartTagToken, html.SelfClosingTagToken:
+ tagName, hasAttr := z.TagName()
+ if !bytes.Equal(tagName, []byte("meta")) {
+ continue
+ }
+ attrList := make(map[string]bool)
+ gotPragma := false
+
+ const (
+ dontKnow = iota
+ doNeedPragma
+ doNotNeedPragma
+ )
+ needPragma := dontKnow
+
+ name := ""
+ for hasAttr {
+ var key, val []byte
+ key, val, hasAttr = z.TagAttr()
+ ks := string(key)
+ if attrList[ks] {
+ continue
+ }
+ attrList[ks] = true
+ for i, c := range val {
+ if 'A' <= c && c <= 'Z' {
+ val[i] = c + 0x20
+ }
+ }
+
+ switch ks {
+ case "http-equiv":
+ if bytes.Equal(val, []byte("content-type")) {
+ gotPragma = true
+ }
+
+ case "content":
+ name = fromMetaElement(string(val))
+ if name != "" {
+ needPragma = doNeedPragma
+ }
+
+ case "charset":
+ name = string(val)
+ needPragma = doNotNeedPragma
+ }
+ }
+
+ if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
+ continue
+ }
+
+ if strings.HasPrefix(name, "utf-16") {
+ name = "utf-8"
+ }
+
+ return name
+ }
+ }
+}
+
+func fromMetaElement(s string) string {
+ for s != "" {
+ csLoc := strings.Index(s, "charset")
+ if csLoc == -1 {
+ return ""
+ }
+ s = s[csLoc+len("charset"):]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if !strings.HasPrefix(s, "=") {
+ continue
+ }
+ s = s[1:]
+ s = strings.TrimLeft(s, " \t\n\f\r")
+ if s == "" {
+ return ""
+ }
+ if q := s[0]; q == '"' || q == '\'' {
+ s = s[1:]
+ closeQuote := strings.IndexRune(s, rune(q))
+ if closeQuote == -1 {
+ return ""
+ }
+ return s[:closeQuote]
+ }
+
+ end := strings.IndexAny(s, "; \t\n\f\r")
+ if end == -1 {
+ end = len(s)
+ }
+ return s[:end]
+ }
+ return ""
+}
+
+func xmlEncoding(s string) string {
+ param := "encoding="
+ idx := strings.Index(s, param)
+ if idx == -1 {
+ return ""
+ }
+ v := s[idx+len(param):]
+ if v == "" {
+ return ""
+ }
+ if v[0] != '\'' && v[0] != '"' {
+ return ""
+ }
+ idx = strings.IndexRune(v[1:], rune(v[0]))
+ if idx == -1 {
+ return ""
+ }
+ return v[1 : idx+1]
+}
+
+// trimLWS trims whitespace from beginning of the input.
+// TODO: find a way to call trimLWS once per detection instead of once in each
+// detector which needs the trimmed input.
+func trimLWS(in []byte) []byte {
+ firstNonWS := 0
+ for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
+ }
+
+ return in[firstNonWS:]
+}
+
+func isWS(b byte) bool {
+ return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
new file mode 100644
index 00000000000..ee39349aefe
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
@@ -0,0 +1,544 @@
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package json provides a JSON value parser state machine.
+// This package is almost entirely copied from the Go stdlib.
+// Changes made to it permit users of the package to tell
+// if some slice of bytes is a valid beginning of a json string.
+package json
+
+import (
+ "fmt"
+)
+
+type (
+ scanStatus int
+)
+
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+
+ scanContinue scanStatus = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+
+ // This limits the max nesting depth to prevent stack overflow.
+ // This is permitted by https://tools.ietf.org/html/rfc7159#section-9
+ maxNestingDepth = 10000
+)
+
+type (
+ scanner struct {
+ step func(*scanner, byte) scanStatus
+ parseState []int
+ endTop bool
+ err error
+ index int
+ }
+)
+
+// Scan returns the number of bytes scanned and if there was any error
+// in trying to reach the end of data.
+func Scan(data []byte) (int, error) {
+ s := &scanner{}
+ _ = checkValid(data, s)
+ return s.index, s.err
+}
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.index++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+func isSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() scanStatus {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = fmt.Errorf("unexpected end of JSON input")
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
+func (s *scanner) pushParseState(c byte, newParseState int, successState scanStatus) scanStatus {
+ s.parseState = append(s.parseState, newParseState)
+ if len(s.parseState) <= maxNestingDepth {
+ return successState
+ }
+ return s.error(c, "exceeded max depth")
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ return s.pushParseState(c, parseObjectKey, scanBeginObject)
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ return s.pushParseState(c, parseArrayValue, scanBeginArray)
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) scanStatus {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) scanStatus {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) scanStatus {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) scanStatus {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) scanStatus {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) scanStatus {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) scanStatus {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) scanStatus {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) scanStatus {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) scanStatus {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) scanStatus {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) scanStatus {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) scanStatus {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) scanStatus {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) scanStatus {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) scanStatus {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) scanStatus {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) scanStatus {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) scanStatus {
+ s.step = stateError
+ s.err = fmt.Errorf("invalid character <<%c>> %s", c, context)
+ return scanError
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
new file mode 100644
index 00000000000..fec11f080a6
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
@@ -0,0 +1,124 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ // SevenZ matches a 7z archive.
+ SevenZ = prefix([]byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C})
+ // Gzip matches gzip files based on http://www.zlib.org/rfc-gzip.html#header-trailer.
+ Gzip = prefix([]byte{0x1f, 0x8b})
 // Fits matches a Flexible Image Transport System file.
+ Fits = prefix([]byte{
+ 0x53, 0x49, 0x4D, 0x50, 0x4C, 0x45, 0x20, 0x20, 0x3D, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x54,
+ })
+ // Xar matches an eXtensible ARchive format file.
+ Xar = prefix([]byte{0x78, 0x61, 0x72, 0x21})
+ // Bz2 matches a bzip2 file.
+ Bz2 = prefix([]byte{0x42, 0x5A, 0x68})
+ // Ar matches an ar (Unix) archive file.
+ Ar = prefix([]byte{0x21, 0x3C, 0x61, 0x72, 0x63, 0x68, 0x3E})
+ // Deb matches a Debian package file.
+ Deb = offset([]byte{
+ 0x64, 0x65, 0x62, 0x69, 0x61, 0x6E, 0x2D,
+ 0x62, 0x69, 0x6E, 0x61, 0x72, 0x79,
+ }, 8)
+ // Warc matches a Web ARChive file.
+ Warc = prefix([]byte("WARC/1.0"), []byte("WARC/1.1"))
+ // Cab matches a Microsoft Cabinet archive file.
+ Cab = prefix([]byte("MSCF\x00\x00\x00\x00"))
+ // Xz matches an xz compressed stream based on https://tukaani.org/xz/xz-file-format.txt.
+ Xz = prefix([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00})
+ // Lzip matches an Lzip compressed file.
+ Lzip = prefix([]byte{0x4c, 0x5a, 0x49, 0x50})
+ // RPM matches an RPM or Delta RPM package file.
+ RPM = prefix([]byte{0xed, 0xab, 0xee, 0xdb}, []byte("drpm"))
+ // Cpio matches a cpio archive file.
+ Cpio = prefix([]byte("070707"), []byte("070701"), []byte("070702"))
+ // RAR matches a RAR archive file.
+ RAR = prefix([]byte("Rar!\x1A\x07\x00"), []byte("Rar!\x1A\x07\x01\x00"))
+)
+
+// InstallShieldCab matches an InstallShield Cabinet archive file.
+func InstallShieldCab(raw []byte, _ uint32) bool {
+ return len(raw) > 7 &&
+ bytes.Equal(raw[0:4], []byte("ISc(")) &&
+ raw[6] == 0 &&
+ (raw[7] == 1 || raw[7] == 2 || raw[7] == 4)
+}
+
+// Zstd matches a Zstandard archive file.
+func Zstd(raw []byte, limit uint32) bool {
+ return len(raw) >= 4 &&
+ (0x22 <= raw[0] && raw[0] <= 0x28 || raw[0] == 0x1E) && // Different Zstandard versions.
+ bytes.HasPrefix(raw[1:], []byte{0xB5, 0x2F, 0xFD})
+}
+
+// CRX matches a Chrome extension file: a zip archive prepended by a package header.
+func CRX(raw []byte, limit uint32) bool {
+ const minHeaderLen = 16
+ if len(raw) < minHeaderLen || !bytes.HasPrefix(raw, []byte("Cr24")) {
+ return false
+ }
+ pubkeyLen := binary.LittleEndian.Uint32(raw[8:12])
+ sigLen := binary.LittleEndian.Uint32(raw[12:16])
+ zipOffset := minHeaderLen + pubkeyLen + sigLen
+ if uint32(len(raw)) < zipOffset {
+ return false
+ }
+ return Zip(raw[zipOffset:], limit)
+}
+
+// Tar matches a (t)ape (ar)chive file.
+func Tar(raw []byte, _ uint32) bool {
 // The "magic" header field for files in UStar (POSIX IEEE P1003.1) archives
+ // has the prefix "ustar". The values of the remaining bytes in this field vary
+ // by archiver implementation.
+ if len(raw) >= 512 && bytes.HasPrefix(raw[257:], []byte{0x75, 0x73, 0x74, 0x61, 0x72}) {
+ return true
+ }
+
+ if len(raw) < 256 {
+ return false
+ }
+
+ // The older v7 format has no "magic" field, and therefore must be identified
+ // with heuristics based on legal ranges of values for other header fields:
+ // https://www.nationalarchives.gov.uk/PRONOM/Format/proFormatSearch.aspx?status=detailReport&id=385&strPageToDisplay=signatures
+ rules := []struct {
+ min, max uint8
+ i int
+ }{
+ {0x21, 0xEF, 0},
+ {0x30, 0x37, 105},
+ {0x20, 0x37, 106},
+ {0x00, 0x00, 107},
+ {0x30, 0x37, 113},
+ {0x20, 0x37, 114},
+ {0x00, 0x00, 115},
+ {0x30, 0x37, 121},
+ {0x20, 0x37, 122},
+ {0x00, 0x00, 123},
+ {0x30, 0x37, 134},
+ {0x30, 0x37, 146},
+ {0x30, 0x37, 153},
+ {0x00, 0x37, 154},
+ }
+ for _, r := range rules {
+ if raw[r.i] < r.min || raw[r.i] > r.max {
+ return false
+ }
+ }
+
+ for _, i := range []uint8{135, 147, 155} {
+ if raw[i] != 0x00 && raw[i] != 0x20 {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
new file mode 100644
index 00000000000..d17e32482c9
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
@@ -0,0 +1,76 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+var (
+ // Flac matches a Free Lossless Audio Codec file.
+ Flac = prefix([]byte("\x66\x4C\x61\x43\x00\x00\x00\x22"))
+ // Midi matches a Musical Instrument Digital Interface file.
+ Midi = prefix([]byte("\x4D\x54\x68\x64"))
+ // Ape matches a Monkey's Audio file.
+ Ape = prefix([]byte("\x4D\x41\x43\x20\x96\x0F\x00\x00\x34\x00\x00\x00\x18\x00\x00\x00\x90\xE3"))
+ // MusePack matches a Musepack file.
+ MusePack = prefix([]byte("MPCK"))
+ // Au matches a Sun Microsystems au file.
+ Au = prefix([]byte("\x2E\x73\x6E\x64"))
+ // Amr matches an Adaptive Multi-Rate file.
+ Amr = prefix([]byte("\x23\x21\x41\x4D\x52"))
+ // Voc matches a Creative Voice file.
+ Voc = prefix([]byte("Creative Voice File"))
+ // M3u matches a Playlist file.
+ M3u = prefix([]byte("#EXTM3U"))
+ // AAC matches an Advanced Audio Coding file.
+ AAC = prefix([]byte{0xFF, 0xF1}, []byte{0xFF, 0xF9})
+)
+
+// Mp3 matches an mp3 file.
+func Mp3(raw []byte, limit uint32) bool {
+ if len(raw) < 3 {
+ return false
+ }
+
+ if bytes.HasPrefix(raw, []byte("ID3")) {
+ // MP3s with an ID3v2 tag will start with "ID3"
 // ID3v1 tags, however, appear at the end of the file.
+ return true
+ }
+
+ // Match MP3 files without tags
+ switch binary.BigEndian.Uint16(raw[:2]) & 0xFFFE {
+ case 0xFFFA:
+ // MPEG ADTS, layer III, v1
+ return true
+ case 0xFFF2:
+ // MPEG ADTS, layer III, v2
+ return true
+ case 0xFFE2:
+ // MPEG ADTS, layer III, v2.5
+ return true
+ }
+
+ return false
+}
+
+// Wav matches a Waveform Audio File Format file.
+func Wav(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte{0x57, 0x41, 0x56, 0x45})
+}
+
+// Aiff matches Audio Interchange File Format file.
+func Aiff(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte{0x46, 0x4F, 0x52, 0x4D}) &&
+ bytes.Equal(raw[8:12], []byte{0x41, 0x49, 0x46, 0x46})
+}
+
+// Qcp matches a Qualcomm Pure Voice file.
+func Qcp(raw []byte, limit uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte("QLCM"))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
new file mode 100644
index 00000000000..f1e944987c3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
@@ -0,0 +1,198 @@
+package magic
+
+import (
+ "bytes"
+ "debug/macho"
+ "encoding/binary"
+)
+
+var (
+ // Lnk matches Microsoft lnk binary format.
+ Lnk = prefix([]byte{0x4C, 0x00, 0x00, 0x00, 0x01, 0x14, 0x02, 0x00})
+ // Wasm matches a web assembly File Format file.
+ Wasm = prefix([]byte{0x00, 0x61, 0x73, 0x6D})
+ // Exe matches a Windows/DOS executable file.
+ Exe = prefix([]byte{0x4D, 0x5A})
+ // Elf matches an Executable and Linkable Format file.
+ Elf = prefix([]byte{0x7F, 0x45, 0x4C, 0x46})
+ // Nes matches a Nintendo Entertainment system ROM file.
+ Nes = prefix([]byte{0x4E, 0x45, 0x53, 0x1A})
+ // SWF matches an Adobe Flash swf file.
+ SWF = prefix([]byte("CWS"), []byte("FWS"), []byte("ZWS"))
+ // Torrent has bencoded text in the beginning.
+ Torrent = prefix([]byte("d8:announce"))
+)
+
+// Java bytecode and Mach-O binaries share the same magic number.
+// More info here https://github.com/threatstack/libmagic/blob/master/magic/Magdir/cafebabe
+func classOrMachOFat(in []byte) bool {
+ // There should be at least 8 bytes for both of them because the only way to
+ // quickly distinguish them is by comparing byte at position 7
+ if len(in) < 8 {
+ return false
+ }
+
+ return bytes.HasPrefix(in, []byte{0xCA, 0xFE, 0xBA, 0xBE})
+}
+
+// Class matches a java class file.
+func Class(raw []byte, limit uint32) bool {
+ return classOrMachOFat(raw) && raw[7] > 30
+}
+
+// MachO matches Mach-O binaries format.
+func MachO(raw []byte, limit uint32) bool {
+ if classOrMachOFat(raw) && raw[7] < 20 {
+ return true
+ }
+
+ if len(raw) < 4 {
+ return false
+ }
+
+ be := binary.BigEndian.Uint32(raw)
+ le := binary.LittleEndian.Uint32(raw)
+
+ return be == macho.Magic32 ||
+ le == macho.Magic32 ||
+ be == macho.Magic64 ||
+ le == macho.Magic64
+}
+
+// Dbf matches a dBase file.
+// https://www.dbase.com/Knowledgebase/INT/db7_file_fmt.htm
+func Dbf(raw []byte, limit uint32) bool {
+ if len(raw) < 68 {
+ return false
+ }
+
+ // 3rd and 4th bytes contain the last update month and day of month.
+ if !(0 < raw[2] && raw[2] < 13 && 0 < raw[3] && raw[3] < 32) {
+ return false
+ }
+
+ // 12, 13, 30, 31 are reserved bytes and always filled with 0x00.
+ if raw[12] != 0x00 || raw[13] != 0x00 || raw[30] != 0x00 || raw[31] != 0x00 {
+ return false
+ }
+ // Production MDX flag;
+ // 0x01 if a production .MDX file exists for this table;
+ // 0x00 if no .MDX file exists.
+ if raw[28] > 0x01 {
+ return false
+ }
+
+ // dbf type is dictated by the first byte.
+ dbfTypes := []byte{
+ 0x02, 0x03, 0x04, 0x05, 0x30, 0x31, 0x32, 0x42, 0x62, 0x7B, 0x82,
+ 0x83, 0x87, 0x8A, 0x8B, 0x8E, 0xB3, 0xCB, 0xE5, 0xF5, 0xF4, 0xFB,
+ }
+ for _, b := range dbfTypes {
+ if raw[0] == b {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ElfObj matches an object file.
+func ElfObj(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x01 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x01))
+}
+
+// ElfExe matches an executable file.
+func ElfExe(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x02 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x02))
+}
+
+// ElfLib matches a shared library file.
+func ElfLib(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x03 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x03))
+}
+
+// ElfDump matches a core dump file.
+func ElfDump(raw []byte, limit uint32) bool {
+ return len(raw) > 17 && ((raw[16] == 0x04 && raw[17] == 0x00) ||
+ (raw[16] == 0x00 && raw[17] == 0x04))
+}
+
+// Dcm matches a DICOM medical format file.
+func Dcm(raw []byte, limit uint32) bool {
+ return len(raw) > 131 &&
+ bytes.Equal(raw[128:132], []byte{0x44, 0x49, 0x43, 0x4D})
+}
+
+// Marc matches a MARC21 (MAchine-Readable Cataloging) file.
+func Marc(raw []byte, limit uint32) bool {
+ // File is at least 24 bytes ("leader" field size).
+ if len(raw) < 24 {
+ return false
+ }
+
+ // Fixed bytes at offset 20.
+ if !bytes.Equal(raw[20:24], []byte("4500")) {
+ return false
+ }
+
+ // First 5 bytes are ASCII digits.
+ for i := 0; i < 5; i++ {
+ if raw[i] < '0' || raw[i] > '9' {
+ return false
+ }
+ }
+
+ // Field terminator is present in first 2048 bytes.
+ return bytes.Contains(raw[:min(2048, len(raw))], []byte{0x1E})
+}
+
+// Glb matches a glTF model format file.
+// GLB is the binary file format representation of 3D models saved in
+// the GL transmission Format (glTF).
+// GLB uses little endian and its header structure is as follows:
+//
+// <-- 12-byte header -->
+// | magic | version | length |
+// | (uint32) | (uint32) | (uint32) |
+// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... |
+// | g l T F | 1 | ... |
+//
+// Visit [glTF specification] and [IANA glTF entry] for more details.
+//
+// [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html
+// [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary
+var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"),
+ []byte("\x67\x6C\x54\x46\x01\x00\x00\x00"))
+
+// TzIf matches a Time Zone Information Format (TZif) file.
+// See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3
+// Its header structure is shown below:
+// +---------------+---+
+// | magic (4) | <-+-- version (1)
+// +---------------+---+---------------------------------------+
+// | [unused - reserved for future use] (15) |
+// +---------------+---------------+---------------+-----------+
+// | isutccnt (4) | isstdcnt (4) | leapcnt (4) |
+// +---------------+---------------+---------------+
+// | timecnt (4) | typecnt (4) | charcnt (4) |
+func TzIf(raw []byte, limit uint32) bool {
+ // File is at least 44 bytes (header size).
+ if len(raw) < 44 {
+ return false
+ }
+
+ if !bytes.HasPrefix(raw, []byte("TZif")) {
+ return false
+ }
+
+ // Field "typecnt" MUST not be zero.
+ if binary.BigEndian.Uint32(raw[36:40]) == 0 {
+ return false
+ }
+
+ // Version has to be NUL (0x00), '2' (0x32) or '3' (0x33).
+ return raw[4] == 0x00 || raw[4] == 0x32 || raw[4] == 0x33
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
new file mode 100644
index 00000000000..cb1fed12f7a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
@@ -0,0 +1,13 @@
+package magic
+
+var (
+ // Sqlite matches an SQLite database file.
+ Sqlite = prefix([]byte{
+ 0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00,
+ })
 // MsAccessAce matches Microsoft Access database file.
+ MsAccessAce = offset([]byte("Standard ACE DB"), 4)
+ // MsAccessMdb matches legacy Microsoft Access database file (JET, 2003 and earlier).
+ MsAccessMdb = offset([]byte("Standard Jet DB"), 4)
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
new file mode 100644
index 00000000000..b3b26d5a12b
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
@@ -0,0 +1,62 @@
+package magic
+
+import "bytes"
+
+var (
+ // Pdf matches a Portable Document Format file.
+ // https://github.com/file/file/blob/11010cc805546a3e35597e67e1129a481aed40e8/magic/Magdir/pdf
+ Pdf = prefix(
+ // usual pdf signature
+ []byte("%PDF-"),
+ // new-line prefixed signature
+ []byte("\012%PDF-"),
+ // UTF-8 BOM prefixed signature
+ []byte("\xef\xbb\xbf%PDF-"),
+ )
+ // Fdf matches a Forms Data Format file.
+ Fdf = prefix([]byte("%FDF"))
+ // Mobi matches a Mobi file.
+ Mobi = offset([]byte("BOOKMOBI"), 60)
+ // Lit matches a Microsoft Lit file.
+ Lit = prefix([]byte("ITOLITLS"))
+)
+
+// DjVu matches a DjVu file.
+func DjVu(raw []byte, limit uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ if !bytes.HasPrefix(raw, []byte{0x41, 0x54, 0x26, 0x54, 0x46, 0x4F, 0x52, 0x4D}) {
+ return false
+ }
+ return bytes.HasPrefix(raw[12:], []byte("DJVM")) ||
+ bytes.HasPrefix(raw[12:], []byte("DJVU")) ||
+ bytes.HasPrefix(raw[12:], []byte("DJVI")) ||
+ bytes.HasPrefix(raw[12:], []byte("THUM"))
+}
+
+// P7s matches an .p7s signature File (PEM, Base64).
+func P7s(raw []byte, limit uint32) bool {
+ // Check for PEM Encoding.
+ if bytes.HasPrefix(raw, []byte("-----BEGIN PKCS7")) {
+ return true
+ }
+ // Check if DER Encoding is long enough.
+ if len(raw) < 20 {
+ return false
+ }
+ // Magic Bytes for the signedData ASN.1 encoding.
+ startHeader := [][]byte{{0x30, 0x80}, {0x30, 0x81}, {0x30, 0x82}, {0x30, 0x83}, {0x30, 0x84}}
+ signedDataMatch := []byte{0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07}
+ // Check if Header is correct. There are multiple valid headers.
+ for i, match := range startHeader {
+ // If first bytes match, then check for ASN.1 Object Type.
+ if bytes.HasPrefix(raw, match) {
+ if bytes.HasPrefix(raw[i+2:], signedDataMatch) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
new file mode 100644
index 00000000000..43af28212e7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
@@ -0,0 +1,39 @@
+package magic
+
+import (
+ "bytes"
+)
+
+var (
+ // Woff matches a Web Open Font Format file.
+ Woff = prefix([]byte("wOFF"))
+ // Woff2 matches a Web Open Font Format version 2 file.
+ Woff2 = prefix([]byte("wOF2"))
+ // Otf matches an OpenType font file.
+ Otf = prefix([]byte{0x4F, 0x54, 0x54, 0x4F, 0x00})
+)
+
+// Ttf matches a TrueType font file.
+func Ttf(raw []byte, limit uint32) bool {
+ if !bytes.HasPrefix(raw, []byte{0x00, 0x01, 0x00, 0x00}) {
+ return false
+ }
+ return !MsAccessAce(raw, limit) && !MsAccessMdb(raw, limit)
+}
+
+// Eot matches an Embedded OpenType font file.
+func Eot(raw []byte, limit uint32) bool {
+ return len(raw) > 35 &&
+ bytes.Equal(raw[34:36], []byte{0x4C, 0x50}) &&
+ (bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x01}) ||
+ bytes.Equal(raw[8:11], []byte{0x01, 0x00, 0x00}) ||
+ bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x02}))
+}
+
+// Ttc matches a TrueType Collection font file.
+func Ttc(raw []byte, limit uint32) bool {
+ return len(raw) > 7 &&
+ bytes.HasPrefix(raw, []byte("ttcf")) &&
+ (bytes.Equal(raw[4:8], []byte{0x00, 0x01, 0x00, 0x00}) ||
+ bytes.Equal(raw[4:8], []byte{0x00, 0x02, 0x00, 0x00}))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
new file mode 100644
index 00000000000..6575b4aecd6
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
@@ -0,0 +1,88 @@
+package magic
+
+import "bytes"
+
+var (
+ // AVIF matches an AV1 Image File Format still or animated.
+ // Wikipedia page seems outdated listing image/avif-sequence for animations.
+ // https://github.com/AOMediaCodec/av1-avif/issues/59
+ AVIF = ftyp([]byte("avif"), []byte("avis"))
+ // Mp4 matches an MP4 file.
+ Mp4 = ftyp(
+ []byte("avc1"), []byte("dash"), []byte("iso2"), []byte("iso3"),
+ []byte("iso4"), []byte("iso5"), []byte("iso6"), []byte("isom"),
+ []byte("mmp4"), []byte("mp41"), []byte("mp42"), []byte("mp4v"),
+ []byte("mp71"), []byte("MSNV"), []byte("NDAS"), []byte("NDSC"),
+ []byte("NSDC"), []byte("NSDH"), []byte("NDSM"), []byte("NDSP"),
+ []byte("NDSS"), []byte("NDXC"), []byte("NDXH"), []byte("NDXM"),
+ []byte("NDXP"), []byte("NDXS"), []byte("F4V "), []byte("F4P "),
+ )
+ // ThreeGP matches a 3GPP file.
+ ThreeGP = ftyp(
+ []byte("3gp1"), []byte("3gp2"), []byte("3gp3"), []byte("3gp4"),
+ []byte("3gp5"), []byte("3gp6"), []byte("3gp7"), []byte("3gs7"),
+ []byte("3ge6"), []byte("3ge7"), []byte("3gg6"),
+ )
+ // ThreeG2 matches a 3GPP2 file.
+ ThreeG2 = ftyp(
+ []byte("3g24"), []byte("3g25"), []byte("3g26"), []byte("3g2a"),
+ []byte("3g2b"), []byte("3g2c"), []byte("KDDI"),
+ )
+ // AMp4 matches an audio MP4 file.
+ AMp4 = ftyp(
+ // audio for Adobe Flash Player 9+
+ []byte("F4A "), []byte("F4B "),
+ // Apple iTunes AAC-LC (.M4A) Audio
+ []byte("M4B "), []byte("M4P "),
+ // MPEG-4 (.MP4) for SonyPSP
+ []byte("MSNV"),
+ // Nero Digital AAC Audio
+ []byte("NDAS"),
+ )
+ // Mqv matches a Sony / Mobile QuickTime file.
+ Mqv = ftyp([]byte("mqt "))
+ // M4a matches an audio M4A file.
+ M4a = ftyp([]byte("M4A "))
 // M4v matches an Apple M4V video file.
+ M4v = ftyp([]byte("M4V "), []byte("M4VH"), []byte("M4VP"))
+ // Heic matches a High Efficiency Image Coding (HEIC) file.
+ Heic = ftyp([]byte("heic"), []byte("heix"))
+ // HeicSequence matches a High Efficiency Image Coding (HEIC) file sequence.
+ HeicSequence = ftyp([]byte("hevc"), []byte("hevx"))
+ // Heif matches a High Efficiency Image File Format (HEIF) file.
+ Heif = ftyp([]byte("mif1"), []byte("heim"), []byte("heis"), []byte("avic"))
+ // HeifSequence matches a High Efficiency Image File Format (HEIF) file sequence.
+ HeifSequence = ftyp([]byte("msf1"), []byte("hevm"), []byte("hevs"), []byte("avcs"))
+ // TODO: add support for remaining video formats at ftyps.com.
+)
+
+// QuickTime matches a QuickTime File Format file.
+// https://www.loc.gov/preservation/digital/formats/fdd/fdd000052.shtml
+// https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html#//apple_ref/doc/uid/TP40000939-CH203-38190
+// https://github.com/apache/tika/blob/0f5570691133c75ac4472c3340354a6c4080b104/tika-core/src/main/resources/org/apache/tika/mime/tika-mimetypes.xml#L7758-L7777
+func QuickTime(raw []byte, _ uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ // First 4 bytes represent the size of the atom as unsigned int.
+ // Next 4 bytes are the type of the atom.
+ // For `ftyp` atoms check if first byte in size is 0, otherwise, a text file
+ // which happens to contain 'ftypqt ' at index 4 will trigger a false positive.
+ if bytes.Equal(raw[4:12], []byte("ftypqt ")) ||
+ bytes.Equal(raw[4:12], []byte("ftypmoov")) {
+ return raw[0] == 0x00
+ }
+ basicAtomTypes := [][]byte{
+ []byte("moov\x00"),
+ []byte("mdat\x00"),
+ []byte("free\x00"),
+ []byte("skip\x00"),
+ []byte("pnot\x00"),
+ }
+ for _, a := range basicAtomTypes {
+ if bytes.Equal(raw[4:9], a) {
+ return true
+ }
+ }
+ return bytes.Equal(raw[:8], []byte("\x00\x00\x00\x08wide"))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
new file mode 100644
index 00000000000..f077e16724d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
@@ -0,0 +1,55 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// Shp matches a shape format file.
+// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
+func Shp(raw []byte, limit uint32) bool {
+ if len(raw) < 112 {
+ return false
+ }
+
+ if !(binary.BigEndian.Uint32(raw[0:4]) == 9994 &&
+ binary.BigEndian.Uint32(raw[4:8]) == 0 &&
+ binary.BigEndian.Uint32(raw[8:12]) == 0 &&
+ binary.BigEndian.Uint32(raw[12:16]) == 0 &&
+ binary.BigEndian.Uint32(raw[16:20]) == 0 &&
+ binary.BigEndian.Uint32(raw[20:24]) == 0 &&
+ binary.LittleEndian.Uint32(raw[28:32]) == 1000) {
+ return false
+ }
+
+ shapeTypes := []int{
+ 0, // Null shape
+ 1, // Point
+ 3, // Polyline
+ 5, // Polygon
+ 8, // MultiPoint
+ 11, // PointZ
+ 13, // PolylineZ
+ 15, // PolygonZ
+ 18, // MultiPointZ
+ 21, // PointM
+ 23, // PolylineM
+ 25, // PolygonM
+ 28, // MultiPointM
+ 31, // MultiPatch
+ }
+
+ for _, st := range shapeTypes {
+ if st == int(binary.LittleEndian.Uint32(raw[108:112])) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Shx matches a shape index format file.
+// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
+func Shx(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x27, 0x0A})
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
new file mode 100644
index 00000000000..0eb7e95f375
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
@@ -0,0 +1,110 @@
+package magic
+
+import "bytes"
+
+var (
+ // Png matches a Portable Network Graphics file.
+ // https://www.w3.org/TR/PNG/
+ Png = prefix([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A})
+ // Apng matches an Animated Portable Network Graphics file.
+ // https://wiki.mozilla.org/APNG_Specification
+ Apng = offset([]byte("acTL"), 37)
+ // Jpg matches a Joint Photographic Experts Group file.
+ Jpg = prefix([]byte{0xFF, 0xD8, 0xFF})
+ // Jp2 matches a JPEG 2000 Image file (ISO 15444-1).
+ Jp2 = jpeg2k([]byte{0x6a, 0x70, 0x32, 0x20})
+ // Jpx matches a JPEG 2000 Image file (ISO 15444-2).
+ Jpx = jpeg2k([]byte{0x6a, 0x70, 0x78, 0x20})
+ // Jpm matches a JPEG 2000 Image file (ISO 15444-6).
+ Jpm = jpeg2k([]byte{0x6a, 0x70, 0x6D, 0x20})
+ // Gif matches a Graphics Interchange Format file.
+ Gif = prefix([]byte("GIF87a"), []byte("GIF89a"))
+ // Bmp matches a bitmap image file.
+ Bmp = prefix([]byte{0x42, 0x4D})
+ // Ps matches a PostScript file.
+ Ps = prefix([]byte("%!PS-Adobe-"))
+ // Psd matches a Photoshop Document file.
+ Psd = prefix([]byte("8BPS"))
+ // Ico matches an ICO file.
+ Ico = prefix([]byte{0x00, 0x00, 0x01, 0x00}, []byte{0x00, 0x00, 0x02, 0x00})
+ // Icns matches an ICNS (Apple Icon Image format) file.
+ Icns = prefix([]byte("icns"))
+ // Tiff matches a Tagged Image File Format file.
+ Tiff = prefix([]byte{0x49, 0x49, 0x2A, 0x00}, []byte{0x4D, 0x4D, 0x00, 0x2A})
+ // Bpg matches a Better Portable Graphics file.
+ Bpg = prefix([]byte{0x42, 0x50, 0x47, 0xFB})
+ // Xcf matches GIMP image data.
+ Xcf = prefix([]byte("gimp xcf"))
+ // Pat matches GIMP pattern data.
+ Pat = offset([]byte("GPAT"), 20)
+ // Gbr matches GIMP brush data.
+ Gbr = offset([]byte("GIMP"), 20)
+ // Hdr matches Radiance HDR image.
+ // https://web.archive.org/web/20060913152809/http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/
+ Hdr = prefix([]byte("#?RADIANCE\n"))
+ // Xpm matches X PixMap image data.
+ Xpm = prefix([]byte{0x2F, 0x2A, 0x20, 0x58, 0x50, 0x4D, 0x20, 0x2A, 0x2F})
+ // Jxs matches a JPEG XS coded image file (ISO/IEC 21122-3).
+ Jxs = prefix([]byte{0x00, 0x00, 0x00, 0x0C, 0x4A, 0x58, 0x53, 0x20, 0x0D, 0x0A, 0x87, 0x0A})
+ // Jxr matches Microsoft HD JXR photo file.
+ Jxr = prefix([]byte{0x49, 0x49, 0xBC, 0x01})
+)
+
+func jpeg2k(sig []byte) Detector {
+ return func(raw []byte, _ uint32) bool {
+ if len(raw) < 24 {
+ return false
+ }
+
+ if !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x20, 0x20}) &&
+ !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x32, 0x20}) {
+ return false
+ }
+ return bytes.Equal(raw[20:24], sig)
+ }
+}
+
+// Webp matches a WebP file.
+func Webp(raw []byte, _ uint32) bool {
+ return len(raw) > 12 &&
+ bytes.Equal(raw[0:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:12], []byte{0x57, 0x45, 0x42, 0x50})
+}
+
+// Dwg matches a CAD drawing file.
+func Dwg(raw []byte, _ uint32) bool {
+ if len(raw) < 6 || raw[0] != 0x41 || raw[1] != 0x43 {
+ return false
+ }
+ dwgVersions := [][]byte{
+ {0x31, 0x2E, 0x34, 0x30},
+ {0x31, 0x2E, 0x35, 0x30},
+ {0x32, 0x2E, 0x31, 0x30},
+ {0x31, 0x30, 0x30, 0x32},
+ {0x31, 0x30, 0x30, 0x33},
+ {0x31, 0x30, 0x30, 0x34},
+ {0x31, 0x30, 0x30, 0x36},
+ {0x31, 0x30, 0x30, 0x39},
+ {0x31, 0x30, 0x31, 0x32},
+ {0x31, 0x30, 0x31, 0x34},
+ {0x31, 0x30, 0x31, 0x35},
+ {0x31, 0x30, 0x31, 0x38},
+ {0x31, 0x30, 0x32, 0x31},
+ {0x31, 0x30, 0x32, 0x34},
+ {0x31, 0x30, 0x33, 0x32},
+ }
+
+ for _, d := range dwgVersions {
+ if bytes.Equal(raw[2:6], d) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Jxl matches JPEG XL image file.
+func Jxl(raw []byte, _ uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0xFF, 0x0A}) ||
+ bytes.HasPrefix(raw, []byte("\x00\x00\x00\x0cJXL\x20\x0d\x0a\x87\x0a"))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
new file mode 100644
index 00000000000..34b84f401bd
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
@@ -0,0 +1,241 @@
+// Package magic holds the matching functions used to find MIME types.
+package magic
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type (
 // Detector receives the raw data of a file and returns whether the data
+ // meets any conditions. The limit parameter is an upper limit to the number
+ // of bytes received and is used to tell if the byte slice represents the
+ // whole file or is just the header of a file: len(raw) < limit or len(raw)>limit.
+ Detector func(raw []byte, limit uint32) bool
+ xmlSig struct {
+ // the local name of the root tag
+ localName []byte
+ // the namespace of the XML document
+ xmlns []byte
+ }
+)
+
+// prefix creates a Detector which returns true if any of the provided signatures
+// is the prefix of the raw input.
+func prefix(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if bytes.HasPrefix(raw, s) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// offset creates a Detector which returns true if the provided signature can be
+// found at offset in the raw input.
+func offset(sig []byte, offset int) Detector {
+ return func(raw []byte, limit uint32) bool {
+ return len(raw) > offset && bytes.HasPrefix(raw[offset:], sig)
+ }
+}
+
+// ciPrefix is like prefix but the check is case insensitive.
+func ciPrefix(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if ciCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func ciCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+1 {
+ return false
+ }
+ // perform case insensitive check
+ for i, b := range sig {
+ db := raw[i]
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return false
+ }
+ }
+
+ return true
+}
+
+// xml creates a Detector which returns true if any of the provided XML signatures
+// matches the raw input.
+func xml(sigs ...xmlSig) Detector {
+ return func(raw []byte, limit uint32) bool {
+ raw = trimLWS(raw)
+ if len(raw) == 0 {
+ return false
+ }
+ for _, s := range sigs {
+ if xmlCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func xmlCheck(sig xmlSig, raw []byte) bool {
+ raw = raw[:min(len(raw), 512)]
+
+ if len(sig.localName) == 0 {
+ return bytes.Index(raw, sig.xmlns) > 0
+ }
+ if len(sig.xmlns) == 0 {
+ return bytes.Index(raw, sig.localName) > 0
+ }
+
+ localNameIndex := bytes.Index(raw, sig.localName)
+ return localNameIndex != -1 && localNameIndex < bytes.Index(raw, sig.xmlns)
+}
+
+// markup creates a Detector which returns true if any of the HTML signatures
+// matches the raw input.
+func markup(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ if bytes.HasPrefix(raw, []byte{0xEF, 0xBB, 0xBF}) {
+ // We skip the UTF-8 BOM if present to ensure we correctly
+ // process any leading whitespace. The presence of the BOM
+ // is taken into account during charset detection in charset.go.
+ raw = trimLWS(raw[3:])
+ } else {
+ raw = trimLWS(raw)
+ }
+ if len(raw) == 0 {
+ return false
+ }
+ for _, s := range sigs {
+ if markupCheck(s, raw) {
+ return true
+ }
+ }
+ return false
+ }
+}
+func markupCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+1 {
+ return false
+ }
+
+ // perform case insensitive check
+ for i, b := range sig {
+ db := raw[i]
+ if 'A' <= b && b <= 'Z' {
+ db &= 0xDF
+ }
+ if b != db {
+ return false
+ }
+ }
+ // Next byte must be space or right angle bracket.
+ if db := raw[len(sig)]; db != ' ' && db != '>' {
+ return false
+ }
+
+ return true
+}
+
+// ftyp creates a Detector which returns true if any of the FTYP signatures
+// matches the raw input.
+func ftyp(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ if len(raw) < 12 {
+ return false
+ }
+ for _, s := range sigs {
+ if bytes.Equal(raw[4:12], append([]byte("ftyp"), s...)) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func newXMLSig(localName, xmlns string) xmlSig {
+ ret := xmlSig{xmlns: []byte(xmlns)}
+ if localName != "" {
+ ret.localName = []byte(fmt.Sprintf("<%s", localName))
+ }
+
+ return ret
+}
+
+// A valid shebang starts with the "#!" characters,
+// followed by any number of spaces,
+// followed by the path to the interpreter,
+// and, optionally, followed by the arguments for the interpreter.
+//
+// Ex:
+//
+// #! /usr/bin/env php
+//
+// /usr/bin/env is the interpreter, php is the first and only argument.
+func shebang(sigs ...[]byte) Detector {
+ return func(raw []byte, limit uint32) bool {
+ for _, s := range sigs {
+ if shebangCheck(s, firstLine(raw)) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func shebangCheck(sig, raw []byte) bool {
+ if len(raw) < len(sig)+2 {
+ return false
+ }
+ if raw[0] != '#' || raw[1] != '!' {
+ return false
+ }
+
+ return bytes.Equal(trimLWS(trimRWS(raw[2:])), sig)
+}
+
+// trimLWS trims whitespace from beginning of the input.
+func trimLWS(in []byte) []byte {
+ firstNonWS := 0
+ for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
+ }
+
+ return in[firstNonWS:]
+}
+
+// trimRWS trims whitespace from the end of the input.
+func trimRWS(in []byte) []byte {
+ lastNonWS := len(in) - 1
+ for ; lastNonWS > 0 && isWS(in[lastNonWS]); lastNonWS-- {
+ }
+
+ return in[:lastNonWS+1]
+}
+
+func firstLine(in []byte) []byte {
+ lineEnd := 0
+ for ; lineEnd < len(in) && in[lineEnd] != '\n'; lineEnd++ {
+ }
+
+ return in[:lineEnd]
+}
+
+func isWS(b byte) bool {
+ return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
new file mode 100644
index 00000000000..5964ce596c1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
@@ -0,0 +1,225 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+)
+
+// File-name prefixes searched for in the zip local file headers (via
+// zipContains) to tell the Office Open XML container formats apart.
+var (
+ xlsxSigFiles = []string{
+ "xl/worksheets/",
+ "xl/drawings/",
+ "xl/theme/",
+ "xl/_rels/",
+ "xl/styles.xml",
+ "xl/workbook.xml",
+ "xl/sharedStrings.xml",
+ }
+ docxSigFiles = []string{
+ "word/media/",
+ "word/_rels/document.xml.rels",
+ "word/document.xml",
+ "word/styles.xml",
+ "word/fontTable.xml",
+ "word/settings.xml",
+ "word/numbering.xml",
+ "word/header",
+ "word/footer",
+ }
+ pptxSigFiles = []string{
+ "ppt/slides/",
+ "ppt/media/",
+ "ppt/slideLayouts/",
+ "ppt/theme/",
+ "ppt/slideMasters/",
+ "ppt/tags/",
+ "ppt/notesMasters/",
+ "ppt/_rels/",
+ "ppt/handoutMasters/",
+ "ppt/notesSlides/",
+ "ppt/presentation.xml",
+ "ppt/tableStyles.xml",
+ "ppt/presProps.xml",
+ "ppt/viewProps.xml",
+ }
+)
+
+// Xlsx matches a Microsoft Excel 2007 file.
+// Detection looks for xl/ entries in the zip local file headers.
+func Xlsx(raw []byte, limit uint32) bool {
+ return zipContains(raw, xlsxSigFiles...)
+}
+
+// Docx matches a Microsoft Word 2007 file.
+// Detection looks for word/ entries in the zip local file headers.
+func Docx(raw []byte, limit uint32) bool {
+ return zipContains(raw, docxSigFiles...)
+}
+
+// Pptx matches a Microsoft PowerPoint 2007 file.
+// Detection looks for ppt/ entries in the zip local file headers.
+func Pptx(raw []byte, limit uint32) bool {
+ return zipContains(raw, pptxSigFiles...)
+}
+
+// Ole matches an Object Linking and Embedding (compound) file,
+// identified by the 8-byte OLE/CFB magic number.
+//
+// https://en.wikipedia.org/wiki/Object_Linking_and_Embedding
+func Ole(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1})
+}
+
+// Aaf matches an Advanced Authoring Format file.
+// The "AAFB\x0D\x00OM" signature is checked at offset 8, plus a
+// discriminating byte at offset 30.
+// See: https://pyaaf.readthedocs.io/en/latest/about.html
+// See: https://en.wikipedia.org/wiki/Advanced_Authoring_Format
+func Aaf(raw []byte, limit uint32) bool {
+ if len(raw) < 31 {
+ return false
+ }
+ return bytes.HasPrefix(raw[8:], []byte{0x41, 0x41, 0x46, 0x42, 0x0D, 0x00, 0x4F, 0x4D}) &&
+ (raw[30] == 0x09 || raw[30] == 0x0C)
+}
+
+// Doc matches a Microsoft Word 97-2003 file by testing the compound-file
+// root storage object against the known Word CLSIDs.
+// See: https://github.com/decalage2/oletools/blob/412ee36ae45e70f42123e835871bac956d958461/oletools/common/clsid.py
+func Doc(raw []byte, _ uint32) bool {
+ clsids := [][]byte{
+ // Microsoft Word 97-2003 Document (Word.Document.8)
+ {0x06, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ // Microsoft Word 6.0-7.0 Document (Word.Document.6)
+ {0x00, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ // Microsoft Word Picture (Word.Picture.8)
+ {0x07, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
+ }
+
+ for _, clsid := range clsids {
+ if matchOleClsid(raw, clsid) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Ppt matches a Microsoft PowerPoint 97-2003 file or a PowerPoint 95 presentation.
+func Ppt(raw []byte, limit uint32) bool {
+ // Root CLSID test is the safest way to identify OLE; however, the format
+ // often places the root CLSID at the end of the file.
+ if matchOleClsid(raw, []byte{
+ 0x10, 0x8d, 0x81, 0x64, 0x9b, 0x4f, 0xcf, 0x11,
+ 0x86, 0xea, 0x00, 0xaa, 0x00, 0xb9, 0x29, 0xe8,
+ }) || matchOleClsid(raw, []byte{
+ 0x70, 0xae, 0x7b, 0xea, 0x3b, 0xfb, 0xcd, 0x11,
+ 0xa9, 0x03, 0x00, 0xaa, 0x00, 0x51, 0x0e, 0xa3,
+ }) {
+ return true
+ }
+
+ lin := len(raw)
+ if lin < 520 {
+ return false
+ }
+ // Fallback: look for PowerPoint sub-headers at offset 512, the start of
+ // the first sector after the compound-file header.
+ pptSubHeaders := [][]byte{
+ {0xA0, 0x46, 0x1D, 0xF0},
+ {0x00, 0x6E, 0x1E, 0xF0},
+ {0x0F, 0x00, 0xE8, 0x03},
+ }
+ for _, h := range pptSubHeaders {
+ if bytes.HasPrefix(raw[512:], h) {
+ return true
+ }
+ }
+
+ if bytes.HasPrefix(raw[512:], []byte{0xFD, 0xFF, 0xFF, 0xFF}) &&
+ raw[518] == 0x00 && raw[519] == 0x00 {
+ return true
+ }
+
+ // Last resort: search for the UTF-16LE "PowerPoint Document" marker in
+ // the 1152..4096 byte window.
+ return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
+ []byte("P\x00o\x00w\x00e\x00r\x00P\x00o\x00i\x00n\x00t\x00 D\x00o\x00c\x00u\x00m\x00e\x00n\x00t"))
+}
+
+// Xls matches a Microsoft Excel 97-2003 file.
+func Xls(raw []byte, limit uint32) bool {
+ // Root CLSID test is the safest way to identify OLE; however, the format
+ // often places the root CLSID at the end of the file.
+ if matchOleClsid(raw, []byte{
+ 0x10, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }) || matchOleClsid(raw, []byte{
+ 0x20, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ }) {
+ return true
+ }
+
+ lin := len(raw)
+ if lin < 520 {
+ return false
+ }
+ // Fallback: look for Excel sub-headers at offset 512, the start of the
+ // first sector after the compound-file header.
+ xlsSubHeaders := [][]byte{
+ {0x09, 0x08, 0x10, 0x00, 0x00, 0x06, 0x05, 0x00},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x10},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x1F},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x22},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x23},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x28},
+ {0xFD, 0xFF, 0xFF, 0xFF, 0x29},
+ }
+ for _, h := range xlsSubHeaders {
+ if bytes.HasPrefix(raw[512:], h) {
+ return true
+ }
+ }
+
+ // Last resort: search for the UTF-16LE workbook marker in the
+ // 1152..4096 byte window.
+ return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
+ []byte("W\x00k\x00s\x00S\x00S\x00W\x00o\x00r\x00k\x00B\x00o\x00o\x00k"))
+}
+
+// Pub matches a Microsoft Publisher file by its compound-file root CLSID.
+func Pub(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x01, 0x12, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Msg matches a Microsoft Outlook email file by its compound-file root CLSID.
+func Msg(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x0B, 0x0D, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// Msi matches a Microsoft Windows Installer file by its compound-file root CLSID.
+// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
+func Msi(raw []byte, limit uint32) bool {
+ return matchOleClsid(raw, []byte{
+ 0x84, 0x10, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
+ })
+}
+
+// matchOleClsid reports whether the compound file in has a root storage
+// object whose CLSID starts with clsid (a prefix match, so callers may pass
+// fewer than 16 bytes).
+//
+// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
+func matchOleClsid(in []byte, clsid []byte) bool {
+ // Microsoft Compound files v3 have a sector length of 512, while v4 has 4096.
+ // Change sector offset depending on file version.
+ // https://www.loc.gov/preservation/digital/formats/fdd/fdd000392.shtml
+ sectorLength := 512
+ if len(in) < sectorLength {
+ return false
+ }
+ // Bytes 26-27 hold the major version; 0x0004 means a v4 file.
+ if in[26] == 0x04 && in[27] == 0x00 {
+ sectorLength = 4096
+ }
+
+ // SecID of first sector of the directory stream.
+ firstSecID := int(binary.LittleEndian.Uint32(in[48:52]))
+
+ // Expected offset of CLSID for root storage object.
+ clsidOffset := sectorLength*(1+firstSecID) + 80
+
+ // NOTE(review): "<=" is stricter than needed by one byte — an input
+ // ending exactly at clsidOffset+16 is rejected. Kept as-is (vendored code).
+ if len(in) <= clsidOffset+16 {
+ return false
+ }
+
+ return bytes.HasPrefix(in[clsidOffset:], clsid)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
new file mode 100644
index 00000000000..bb4cd781b6e
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
@@ -0,0 +1,42 @@
+package magic
+
+import (
+ "bytes"
+)
+
+/*
+ NOTE:
+
+ In May 2003, two Internet RFCs were published relating to the format.
+ The Ogg bitstream was defined in RFC 3533 (which is classified as
+ 'informative') and its Internet content type (application/ogg) in RFC
+ 3534 (which is, as of 2006, a proposed standard protocol). In
+ September 2008, RFC 3534 was obsoleted by RFC 5334, which added
+ content types video/ogg, audio/ogg and filename extensions .ogx, .ogv,
+ .oga, .spx.
+
+ See:
+ https://tools.ietf.org/html/rfc3533
+ https://developer.mozilla.org/en-US/docs/Web/HTTP/Configuring_servers_for_Ogg_media#Serve_media_with_the_correct_MIME_type
+ https://github.com/file/file/blob/master/magic/Magdir/vorbis
+*/
+
+// Ogg matches an Ogg file by the "OggS\x00" capture-pattern of the first page.
+func Ogg(raw []byte, limit uint32) bool {
+ return bytes.HasPrefix(raw, []byte("\x4F\x67\x67\x53\x00"))
+}
+
+// OggAudio matches an audio ogg file.
+// The codec identifier of the first page's payload is checked at offset 28.
+func OggAudio(raw []byte, limit uint32) bool {
+ return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x7fFLAC")) ||
+ bytes.HasPrefix(raw[28:], []byte("\x01vorbis")) ||
+ bytes.HasPrefix(raw[28:], []byte("OpusHead")) ||
+ bytes.HasPrefix(raw[28:], []byte("Speex\x20\x20\x20")))
+}
+
+// OggVideo matches a video ogg file.
+// The codec identifier of the first page's payload is checked at offset 28.
+func OggVideo(raw []byte, limit uint32) bool {
+ return len(raw) >= 37 && (bytes.HasPrefix(raw[28:], []byte("\x80theora")) ||
+ bytes.HasPrefix(raw[28:], []byte("fishead\x00")) ||
+ bytes.HasPrefix(raw[28:], []byte("\x01video\x00\x00\x00"))) // OGM video
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
new file mode 100644
index 00000000000..e2a03caf50a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
@@ -0,0 +1,375 @@
+package magic
+
+import (
+ "bufio"
+ "bytes"
+ "strings"
+ "time"
+
+ "github.com/gabriel-vasile/mimetype/internal/charset"
+ "github.com/gabriel-vasile/mimetype/internal/json"
+)
+
+var (
+ // HTML matches a Hypertext Markup Language file.
+ HTML = markup(
+ []byte(" 0
+}
+
+// GeoJSON matches a RFC 7946 GeoJSON file.
+//
+// GeoJSON detection implies searching for key:value pairs like: `"type": "Feature"`
+// in the input.
+// BUG(gabriel-vasile): The "type" key should be searched for in the root object.
+func GeoJSON(raw []byte, limit uint32) bool {
+ raw = trimLWS(raw)
+ if len(raw) == 0 {
+ return false
+ }
+ // GeoJSON is always a JSON object, not a JSON array or any other JSON value.
+ if raw[0] != '{' {
+ return false
+ }
+
+ s := []byte(`"type"`)
+ si, sl := bytes.Index(raw, s), len(s)
+
+ if si == -1 {
+ return false
+ }
+
+ // If the "type" string is the suffix of the input,
+ // there is no need to search for the value of the key.
+ if si+sl == len(raw) {
+ return false
+ }
+ // Skip the "type" part.
+ raw = raw[si+sl:]
+ // Skip any whitespace before the colon.
+ raw = trimLWS(raw)
+ // Check for colon.
+ if len(raw) == 0 || raw[0] != ':' {
+ return false
+ }
+ // Skip any whitespace after the colon.
+ raw = trimLWS(raw[1:])
+
+ // The value of "type" must be one of the RFC 7946 GeoJSON type names.
+ geoJSONTypes := [][]byte{
+ []byte(`"Feature"`),
+ []byte(`"FeatureCollection"`),
+ []byte(`"Point"`),
+ []byte(`"LineString"`),
+ []byte(`"Polygon"`),
+ []byte(`"MultiPoint"`),
+ []byte(`"MultiLineString"`),
+ []byte(`"MultiPolygon"`),
+ []byte(`"GeometryCollection"`),
+ }
+ for _, t := range geoJSONTypes {
+ if bytes.HasPrefix(raw, t) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// NdJSON matches a Newline delimited JSON file. All complete lines from raw
+// must be valid JSON documents meaning they contain one of the valid JSON data
+// types. At least two non-empty lines are required, and at least one of them
+// must be a JSON object or array.
+func NdJSON(raw []byte, limit uint32) bool {
+ lCount, hasObjOrArr := 0, false
+ // dropLastLine discards a trailing line that may have been truncated
+ // by the read limit.
+ sc := bufio.NewScanner(dropLastLine(raw, limit))
+ for sc.Scan() {
+ l := sc.Bytes()
+ // Empty lines are allowed in NDJSON.
+ if l = trimRWS(trimLWS(l)); len(l) == 0 {
+ continue
+ }
+ _, err := json.Scan(l)
+ if err != nil {
+ return false
+ }
+ if l[0] == '[' || l[0] == '{' {
+ hasObjOrArr = true
+ }
+ lCount++
+ }
+
+ return lCount > 1 && hasObjOrArr
+}
+
+// HAR matches a HAR Spec file.
+// It looks for a `"log"` key followed (after a colon) by any of the HAR
+// sub-keys anywhere in the remaining input.
+// Spec: http://www.softwareishard.com/blog/har-12-spec/
+func HAR(raw []byte, limit uint32) bool {
+ s := []byte(`"log"`)
+ si, sl := bytes.Index(raw, s), len(s)
+
+ if si == -1 {
+ return false
+ }
+
+ // If the "log" string is the suffix of the input,
+ // there is no need to search for the value of the key.
+ if si+sl == len(raw) {
+ return false
+ }
+ // Skip the "log" part.
+ raw = raw[si+sl:]
+ // Skip any whitespace before the colon.
+ raw = trimLWS(raw)
+ // Check for colon.
+ if len(raw) == 0 || raw[0] != ':' {
+ return false
+ }
+ // Skip any whitespace after the colon.
+ raw = trimLWS(raw[1:])
+
+ harJSONTypes := [][]byte{
+ []byte(`"version"`),
+ []byte(`"creator"`),
+ []byte(`"entries"`),
+ }
+ for _, t := range harJSONTypes {
+ si := bytes.Index(raw, t)
+ if si > -1 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Svg matches a SVG file.
+func Svg(raw []byte, limit uint32) bool {
+ return bytes.Contains(raw, []byte(" 00:02:19,376) limits secondLine
+ // length to exactly 29 characters.
+ if len(secondLine) != 29 {
+ return false
+ }
+ // Decimal separator of fractional seconds in the timestamps must be a
+ // comma, not a period.
+ if strings.Contains(secondLine, ".") {
+ return false
+ }
+ // For Go <1.17, comma is not recognised as a decimal separator by `time.Parse`.
+ secondLine = strings.ReplaceAll(secondLine, ",", ".")
+ // Second line must be a time range.
+ ts := strings.Split(secondLine, " --> ")
+ if len(ts) != 2 {
+ return false
+ }
+ const layout = "15:04:05.000"
+ t0, err := time.Parse(layout, ts[0])
+ if err != nil {
+ return false
+ }
+ t1, err := time.Parse(layout, ts[1])
+ if err != nil {
+ return false
+ }
+ if t0.After(t1) {
+ return false
+ }
+
+ // A third line must exist and not be empty. This is the actual subtitle text.
+ return s.Scan() && len(s.Bytes()) != 0
+}
+
+// Vtt matches a Web Video Text Tracks (WebVTT) file. See
+// https://www.iana.org/assignments/media-types/text/vtt.
+// Per spec, "WEBVTT" (with optional UTF-8 BOM) must be followed by a line
+// terminator, space, tab, or end of file.
+func Vtt(raw []byte, limit uint32) bool {
+ // Prefix match.
+ prefixes := [][]byte{
+ {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0A}, // UTF-8 BOM, "WEBVTT" and a line feed
+ {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0D}, // UTF-8 BOM, "WEBVTT" and a carriage return
+ {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x20}, // UTF-8 BOM, "WEBVTT" and a space
+ {0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x09}, // UTF-8 BOM, "WEBVTT" and a horizontal tab
+ {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0A}, // "WEBVTT" and a line feed
+ {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0D}, // "WEBVTT" and a carriage return
+ {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x20}, // "WEBVTT" and a space
+ {0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x09}, // "WEBVTT" and a horizontal tab
+ }
+ for _, p := range prefixes {
+ if bytes.HasPrefix(raw, p) {
+ return true
+ }
+ }
+
+ // Exact match.
+ return bytes.Equal(raw, []byte{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) || // UTF-8 BOM and "WEBVTT"
+ bytes.Equal(raw, []byte{0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) // "WEBVTT"
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go
new file mode 100644
index 00000000000..84ed6492840
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go
@@ -0,0 +1,63 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/csv"
+ "errors"
+ "io"
+)
+
+// Csv matches a comma-separated values file.
+func Csv(raw []byte, limit uint32) bool {
+ return sv(raw, ',', limit)
+}
+
+// Tsv matches a tab-separated values file.
+func Tsv(raw []byte, limit uint32) bool {
+ return sv(raw, '\t', limit)
+}
+
+// sv reports whether in parses as a separated-values file using comma as
+// the delimiter. It requires more than one record and more than one field
+// per record so single-column text files do not match.
+func sv(in []byte, comma rune, limit uint32) bool {
+ r := csv.NewReader(dropLastLine(in, limit))
+ r.Comma = comma
+ r.ReuseRecord = true
+ r.LazyQuotes = true
+ r.Comment = '#'
+
+ lines := 0
+ for {
+ _, err := r.Read()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return false
+ }
+ lines++
+ }
+
+ return r.FieldsPerRecord > 1 && lines > 1
+}
+
+// dropLastLine drops the last incomplete line from b.
+//
+// mimetype limits itself to ReadLimit bytes when performing a detection.
+// This means, for file formats like CSV or NDJSON, the last line of the input
+// can be an incomplete line. A cutAt of 0 means the input was not limited,
+// so b is returned whole.
+func dropLastLine(b []byte, cutAt uint32) io.Reader {
+ if cutAt == 0 {
+ return bytes.NewReader(b)
+ }
+ if uint32(len(b)) >= cutAt {
+ for i := cutAt - 1; i > 0; i-- {
+ if b[i] == '\n' {
+ return bytes.NewReader(b[:i])
+ }
+ }
+
+ // No newline was found between the 0 index and cutAt.
+ return bytes.NewReader(b[:cutAt])
+ }
+
+ return bytes.NewReader(b)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go
new file mode 100644
index 00000000000..9caf55538a3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go
@@ -0,0 +1,85 @@
+package magic
+
+import (
+ "bytes"
+)
+
+// Simple magic-number prefix detectors for video container formats.
+var (
+ // Flv matches a Flash video file.
+ Flv = prefix([]byte("\x46\x4C\x56\x01"))
+ // Asf matches an Advanced Systems Format file.
+ Asf = prefix([]byte{
+ 0x30, 0x26, 0xB2, 0x75, 0x8E, 0x66, 0xCF, 0x11,
+ 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C,
+ })
+ // Rmvb matches a RealMedia Variable Bitrate file.
+ Rmvb = prefix([]byte{0x2E, 0x52, 0x4D, 0x46})
+)
+
+// WebM matches a WebM file.
+func WebM(raw []byte, limit uint32) bool {
+ return isMatroskaFileTypeMatched(raw, "webm")
+}
+
+// Mkv matches a mkv file.
+func Mkv(raw []byte, limit uint32) bool {
+ return isMatroskaFileTypeMatched(raw, "matroska")
+}
+
+// isMatroskaFileTypeMatched is used for webm and mkv file matching.
+// It checks for the EBML magic bytes \x1A\x45\xDF\xA3. If the sequence is
+// found, then it means it is a Matroska media container, including WebM.
+// Then it verifies which of the file types it is representing by matching the
+// file-specific DocType string.
+func isMatroskaFileTypeMatched(in []byte, flType string) bool {
+ if bytes.HasPrefix(in, []byte("\x1A\x45\xDF\xA3")) {
+ return isFileTypeNamePresent(in, flType)
+ }
+ return false
+}
+
+// isFileTypeNamePresent accepts the matroska input data stream and searches
+// for the given file type in the stream. Return whether a match is found.
+// The logic of search is: find first instance of \x42\x82 (the EBML DocType
+// element ID) and then search for the given string after n bytes of the above
+// instance, where n is the width of the element's length field.
+func isFileTypeNamePresent(in []byte, flType string) bool {
+ ind, maxInd, lenIn := 0, 4096, len(in)
+ if lenIn < maxInd { // restricting length to 4096
+ maxInd = lenIn
+ }
+ ind = bytes.Index(in[:maxInd], []byte("\x42\x82"))
+ if ind > 0 && lenIn > ind+2 {
+ ind += 2
+
+ // filetype name will be present exactly
+ // n bytes after the match of the two bytes "\x42\x82"
+ n := vintWidth(int(in[ind]))
+ if lenIn > ind+n {
+ return bytes.HasPrefix(in[ind+n:], []byte(flType))
+ }
+ }
+ return false
+}
+
+// vintWidth parses the variable-integer width in matroska containers:
+// the number of leading zero bits (plus one) of v determines how many
+// bytes the variable-length integer occupies.
+func vintWidth(v int) int {
+ mask, max, num := 128, 8, 1
+ for num < max && v&mask == 0 {
+ mask = mask >> 1
+ num++
+ }
+ return num
+}
+
+// Mpeg matches a Moving Picture Experts Group file: a \x00\x00\x01 start
+// code followed by a start-code byte in the 0xB0-0xBF range.
+func Mpeg(raw []byte, limit uint32) bool {
+ return len(raw) > 3 && bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x01}) &&
+ raw[3] >= 0xB0 && raw[3] <= 0xBF
+}
+
+// Avi matches an Audio Video Interleaved file: a RIFF container whose
+// form type at offset 8 is "AVI LIST".
+func Avi(raw []byte, limit uint32) bool {
+ return len(raw) > 16 &&
+ bytes.Equal(raw[:4], []byte("RIFF")) &&
+ bytes.Equal(raw[8:16], []byte("AVI LIST"))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go
new file mode 100644
index 00000000000..dabee947b9d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go
@@ -0,0 +1,92 @@
+package magic
+
+import (
+ "bytes"
+ "encoding/binary"
+ "strings"
+)
+
+// OpenDocument and related zip-based formats are identified by the
+// uncompressed "mimetype" entry, whose content starts at offset 30
+// (immediately after the first zip local file header).
+var (
+ // Odt matches an OpenDocument Text file.
+ Odt = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text"), 30)
+ // Ott matches an OpenDocument Text Template file.
+ Ott = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text-template"), 30)
+ // Ods matches an OpenDocument Spreadsheet file.
+ Ods = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet"), 30)
+ // Ots matches an OpenDocument Spreadsheet Template file.
+ Ots = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet-template"), 30)
+ // Odp matches an OpenDocument Presentation file.
+ Odp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation"), 30)
+ // Otp matches an OpenDocument Presentation Template file.
+ Otp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation-template"), 30)
+ // Odg matches an OpenDocument Drawing file.
+ Odg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics"), 30)
+ // Otg matches an OpenDocument Drawing Template file.
+ Otg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics-template"), 30)
+ // Odf matches an OpenDocument Formula file.
+ Odf = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.formula"), 30)
+ // Odc matches an OpenDocument Chart file.
+ Odc = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.chart"), 30)
+ // Epub matches an EPUB file.
+ Epub = offset([]byte("mimetypeapplication/epub+zip"), 30)
+ // Sxc matches an OpenOffice Spreadsheet file.
+ Sxc = offset([]byte("mimetypeapplication/vnd.sun.xml.calc"), 30)
+)
+
+// Zip matches a zip archive. Bytes 2-3 distinguish the local file header
+// (\x03\x04), the empty archive (\x05\x06), and the spanned archive
+// (\x07\x08) record signatures.
+func Zip(raw []byte, limit uint32) bool {
+ return len(raw) > 3 &&
+ raw[0] == 0x50 && raw[1] == 0x4B &&
+ (raw[2] == 0x3 || raw[2] == 0x5 || raw[2] == 0x7) &&
+ (raw[3] == 0x4 || raw[3] == 0x6 || raw[3] == 0x8)
+}
+
+// Jar matches a Java archive file by looking for the manifest entry in the
+// zip local file headers.
+func Jar(raw []byte, limit uint32) bool {
+ return zipContains(raw, "META-INF/MANIFEST.MF")
+}
+
+// zipTokenizer holds the source zip file and scanned index.
+type zipTokenizer struct {
+ in []byte
+ i int // current index
+}
+
+// next returns the next file name from the zip local file headers, or the
+// empty string when no further header can be parsed within the input.
+// https://web.archive.org/web/20191129114319/https://users.cs.jmu.edu/buchhofp/forensics/formats/pkzip.html
+func (t *zipTokenizer) next() (fileName string) {
+ if t.i > len(t.in) {
+ return
+ }
+ in := t.in[t.i:]
+ // pkSig is the signature of the zip local file header.
+ pkSig := []byte("PK\003\004")
+ pkIndex := bytes.Index(in, pkSig)
+ // 30 is the offset of the file name in the header.
+ fNameOffset := pkIndex + 30
+ // end if signature not found or file name offset outside of file.
+ if pkIndex == -1 || fNameOffset > len(in) {
+ return
+ }
+
+ // Bytes 26-27 of the header hold the file name length.
+ fNameLen := int(binary.LittleEndian.Uint16(in[pkIndex+26 : pkIndex+28]))
+ if fNameLen <= 0 || fNameOffset+fNameLen > len(in) {
+ return
+ }
+ t.i += fNameOffset + fNameLen
+ return string(in[fNameOffset : fNameOffset+fNameLen])
+}
+
+// zipContains returns true if any file name from the zip local file headers
+// of in starts with any of the given paths.
+func zipContains(in []byte, paths ...string) bool {
+ t := zipTokenizer{in: in}
+ for i, tok := 0, t.next(); tok != ""; i, tok = i+1, t.next() {
+ for p := range paths {
+ if strings.HasPrefix(tok, paths[p]) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mime.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mime.go
new file mode 100644
index 00000000000..62cb15f5932
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mime.go
@@ -0,0 +1,186 @@
+package mimetype
+
+import (
+ "mime"
+
+ "github.com/gabriel-vasile/mimetype/internal/charset"
+ "github.com/gabriel-vasile/mimetype/internal/magic"
+)
+
+// MIME struct holds information about a file format: the string representation
+// of the MIME type, the extension and the parent file format.
+type MIME struct {
+ mime string // canonical "type/subtype" representation
+ aliases []string // alternative type/subtype spellings
+ extension string // file extension, including the leading dot
+ // detector receives the raw input and a limit for the number of bytes it is
+ // allowed to check. It returns whether the input matches a signature or not.
+ detector magic.Detector
+ children []*MIME // more specific formats; checked before falling back to this one
+ parent *MIME // more generic format; nil only for the root MIME
+}
+
+// String returns the string representation of the MIME type, e.g., "application/zip".
+func (m *MIME) String() string {
+ return m.mime
+}
+
+// Extension returns the file extension associated with the MIME type.
+// It includes the leading dot, as in ".html". When the file format does not
+// have an extension, the empty string is returned.
+func (m *MIME) Extension() string {
+ return m.extension
+}
+
+// Parent returns the parent MIME type from the hierarchy.
+// Each MIME type has a non-nil parent, except for the root MIME type.
+//
+// For example, the application/json and text/html MIME types have text/plain as
+// their parent because they are text files who happen to contain JSON or HTML.
+// Another example is the ZIP format, which is used as container
+// for Microsoft Office files, EPUB files, JAR files, and others.
+func (m *MIME) Parent() *MIME {
+ return m.parent
+}
+
+// Is checks whether this MIME type, or any of its aliases, is equal to the
+// expected MIME type. MIME type equality test is done on the "type/subtype"
+// section, ignores any optional MIME parameters, ignores any leading and
+// trailing whitespace, and is case insensitive.
+func (m *MIME) Is(expectedMIME string) bool {
+ // Parsing is needed because some detected MIME types contain parameters
+ // that need to be stripped for the comparison.
+ expectedMIME, _, _ = mime.ParseMediaType(expectedMIME)
+ found, _, _ := mime.ParseMediaType(m.mime)
+
+ if expectedMIME == found {
+ return true
+ }
+
+ // Aliases are stored in canonical form, so they are compared directly
+ // against the parsed expected MIME type.
+ for _, alias := range m.aliases {
+ if alias == expectedMIME {
+ return true
+ }
+ }
+
+ return false
+}
+
+// newMIME creates a MIME node with the given string representation,
+// extension, detector, and children, and wires each child's parent
+// pointer back to the new node.
+func newMIME(
+ mime, extension string,
+ detector magic.Detector,
+ children ...*MIME) *MIME {
+ m := &MIME{
+ mime: mime,
+ extension: extension,
+ detector: detector,
+ children: children,
+ }
+
+ for _, c := range children {
+ c.parent = m
+ }
+
+ return m
+}
+
+// alias sets the aliases of m and returns m to allow chaining.
+func (m *MIME) alias(aliases ...string) *MIME {
+ m.aliases = aliases
+ return m
+}
+
+// match does a depth-first search on the signature tree. It returns the deepest
+// successful node for which all the children detection functions fail.
+func (m *MIME) match(in []byte, readLimit uint32) *MIME {
+ for _, c := range m.children {
+ if c.detector(in, readLimit) {
+ return c.match(in, readLimit)
+ }
+ }
+
+ // For text formats, also detect the character set and expose it as the
+ // optional "charset" MIME parameter on the returned clone.
+ needsCharset := map[string]func([]byte) string{
+ "text/plain": charset.FromPlain,
+ "text/html": charset.FromHTML,
+ "text/xml": charset.FromXML,
+ }
+ // ps holds optional MIME parameters.
+ ps := map[string]string{}
+ if f, ok := needsCharset[m.mime]; ok {
+ if cset := f(in); cset != "" {
+ ps["charset"] = cset
+ }
+ }
+
+ // A clone is returned (not m itself) so the shared tree is never mutated.
+ return m.cloneHierarchy(ps)
+}
+
+// flatten transforms a hierarchy of MIMEs into a slice of MIMEs,
+// listing m first, followed by its descendants in depth-first order.
+func (m *MIME) flatten() []*MIME {
+ out := []*MIME{m}
+ for _, c := range m.children {
+ out = append(out, c.flatten()...)
+ }
+
+ return out
+}
+
+// clone creates a new MIME with the provided optional MIME parameters.
+// The clone carries no detector, children, or parent.
+func (m *MIME) clone(ps map[string]string) *MIME {
+ clonedMIME := m.mime
+ if len(ps) > 0 {
+ clonedMIME = mime.FormatMediaType(m.mime, ps)
+ }
+
+ return &MIME{
+ mime: clonedMIME,
+ aliases: m.aliases,
+ extension: m.extension,
+ }
+}
+
+// cloneHierarchy creates a clone of m and all its ancestors. The optional MIME
+// parameters are set on the last child of the hierarchy.
+func (m *MIME) cloneHierarchy(ps map[string]string) *MIME {
+ ret := m.clone(ps)
+ lastChild := ret
+ for p := m.Parent(); p != nil; p = p.Parent() {
+ pClone := p.clone(nil)
+ lastChild.parent = pClone
+ lastChild = pClone
+ }
+
+ return ret
+}
+
+// lookup searches m and its descendants for a MIME whose string
+// representation or any alias equals mime; it returns nil when not found.
+func (m *MIME) lookup(mime string) *MIME {
+ for _, n := range append(m.aliases, m.mime) {
+ if n == mime {
+ return m
+ }
+ }
+
+ for _, c := range m.children {
+ if m := c.lookup(mime); m != nil {
+ return m
+ }
+ }
+ return nil
+}
+
+// Extend adds detection for a sub-format. The detector is a function
+// returning true when the raw input file satisfies a signature.
+// The sub-format will be detected if all the detectors in the parent chain return true.
+// The extension should include the leading dot, as in ".html".
+func (m *MIME) Extend(detector func(raw []byte, limit uint32) bool, mime, extension string, aliases ...string) {
+ c := &MIME{
+ mime: mime,
+ extension: extension,
+ detector: detector,
+ parent: m,
+ aliases: aliases,
+ }
+
+ // The new child is prepended so it takes priority over the existing
+ // children during detection. Guarded by mu against concurrent Detect calls.
+ mu.Lock()
+ m.children = append([]*MIME{c}, m.children...)
+ mu.Unlock()
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mimetype.gif b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mimetype.gif
new file mode 100644
index 00000000000..c3e80876738
Binary files /dev/null and b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mimetype.gif differ
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mimetype.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mimetype.go
new file mode 100644
index 00000000000..1b5909b751b
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/mimetype.go
@@ -0,0 +1,124 @@
+// Package mimetype uses magic number signatures to detect the MIME type of a file.
+//
+// File formats are stored in a hierarchy with application/octet-stream at its root.
+// For example, the hierarchy for HTML format is application/octet-stream ->
+// text/plain -> text/html.
+package mimetype
+
+import (
+ "io"
+ "io/ioutil"
+ "mime"
+ "os"
+ "sync/atomic"
+)
+
+// readLimit is the maximum number of bytes from the input used when detecting.
+// Accessed atomically because SetLimit may run concurrently with detection.
+var readLimit uint32 = 3072
+
+// Detect returns the MIME type found from the provided byte slice.
+//
+// The result is always a valid MIME type, with application/octet-stream
+// returned when identification failed.
+func Detect(in []byte) *MIME {
+ // Using atomic because readLimit can be written at the same time in other goroutine.
+ l := atomic.LoadUint32(&readLimit)
+ if l > 0 && len(in) > int(l) {
+ in = in[:l]
+ }
+ // Read-lock guards the tree against concurrent Extend calls.
+ mu.RLock()
+ defer mu.RUnlock()
+ return root.match(in, l)
+}
+
+// DetectReader returns the MIME type of the provided reader.
+//
+// The result is always a valid MIME type, with application/octet-stream
+// returned when identification failed with or without an error.
+// Any error returned is related to the reading from the input reader.
+//
+// DetectReader assumes the reader offset is at the start. If the input is an
+// io.ReadSeeker you previously read from, it should be rewound before detection:
+//
+//	reader.Seek(0, io.SeekStart)
+func DetectReader(r io.Reader) (*MIME, error) {
+ var in []byte
+ var err error
+
+ // Using atomic because readLimit can be written at the same time in other goroutine.
+ l := atomic.LoadUint32(&readLimit)
+ if l == 0 {
+ // A limit of 0 means the whole input is used for detection.
+ in, err = ioutil.ReadAll(r)
+ if err != nil {
+ return errMIME, err
+ }
+ } else {
+ var n int
+ in = make([]byte, l)
+ // io.UnexpectedEOF means len(r) < len(in). It is not an error in this case,
+ // it just means the input file is smaller than the allocated bytes slice.
+ n, err = io.ReadFull(r, in)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return errMIME, err
+ }
+ in = in[:n]
+ }
+
+ mu.RLock()
+ defer mu.RUnlock()
+ return root.match(in, l), nil
+}
+
+// DetectFile returns the MIME type of the provided file.
+//
+// The result is always a valid MIME type, with application/octet-stream
+// returned when identification failed with or without an error.
+// Any error returned is related to the opening and reading from the input file.
+func DetectFile(path string) (*MIME, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return errMIME, err
+ }
+ defer f.Close()
+
+ return DetectReader(f)
+}
+
+// EqualsAny reports whether s MIME type is equal to any MIME type in mimes.
+// MIME type equality test is done on the "type/subtype" section, ignores
+// any optional MIME parameters, ignores any leading and trailing whitespace,
+// and is case insensitive.
+func EqualsAny(s string, mimes ...string) bool {
+ // mime.ParseMediaType normalizes case and strips parameters/whitespace.
+ s, _, _ = mime.ParseMediaType(s)
+ for _, m := range mimes {
+ m, _, _ = mime.ParseMediaType(m)
+ if s == m {
+ return true
+ }
+ }
+
+ return false
+}
+
+// SetLimit sets the maximum number of bytes read from input when detecting the MIME type.
+// Increasing the limit provides better detection for file formats which store
+// their magical numbers towards the end of the file: docx, pptx, xlsx, etc.
+// A limit of 0 means the whole input file will be used.
+func SetLimit(limit uint32) {
+ // Using atomic because readLimit can be read at the same time in other goroutine.
+ atomic.StoreUint32(&readLimit, limit)
+}
+
+// Extend adds detection for other file formats.
+// It is equivalent to calling Extend() on the root mime type "application/octet-stream".
+func Extend(detector func(raw []byte, limit uint32) bool, mime, extension string, aliases ...string) {
+ root.Extend(detector, mime, extension, aliases...)
+}
+
+// Lookup finds a MIME object by its string representation.
+// The representation can be the main mime type, or any of its aliases.
+// It returns nil when no match exists in the tree.
+func Lookup(mime string) *MIME {
+ mu.RLock()
+ defer mu.RUnlock()
+ return root.lookup(mime)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
new file mode 100644
index 00000000000..5ec6f6b6502
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
@@ -0,0 +1,178 @@
+## 173 Supported MIME types
+This file is automatically generated when running tests. Do not edit manually.
+
+Extension | MIME type | Aliases
+--------- | --------- | -------
+**n/a** | application/octet-stream | -
+**.xpm** | image/x-xpixmap | -
+**.7z** | application/x-7z-compressed | -
+**.zip** | application/zip | application/x-zip, application/x-zip-compressed
+**.xlsx** | application/vnd.openxmlformats-officedocument.spreadsheetml.sheet | -
+**.docx** | application/vnd.openxmlformats-officedocument.wordprocessingml.document | -
+**.pptx** | application/vnd.openxmlformats-officedocument.presentationml.presentation | -
+**.epub** | application/epub+zip | -
+**.jar** | application/jar | -
+**.odt** | application/vnd.oasis.opendocument.text | application/x-vnd.oasis.opendocument.text
+**.ott** | application/vnd.oasis.opendocument.text-template | application/x-vnd.oasis.opendocument.text-template
+**.ods** | application/vnd.oasis.opendocument.spreadsheet | application/x-vnd.oasis.opendocument.spreadsheet
+**.ots** | application/vnd.oasis.opendocument.spreadsheet-template | application/x-vnd.oasis.opendocument.spreadsheet-template
+**.odp** | application/vnd.oasis.opendocument.presentation | application/x-vnd.oasis.opendocument.presentation
+**.otp** | application/vnd.oasis.opendocument.presentation-template | application/x-vnd.oasis.opendocument.presentation-template
+**.odg** | application/vnd.oasis.opendocument.graphics | application/x-vnd.oasis.opendocument.graphics
+**.otg** | application/vnd.oasis.opendocument.graphics-template | application/x-vnd.oasis.opendocument.graphics-template
+**.odf** | application/vnd.oasis.opendocument.formula | application/x-vnd.oasis.opendocument.formula
+**.odc** | application/vnd.oasis.opendocument.chart | application/x-vnd.oasis.opendocument.chart
+**.sxc** | application/vnd.sun.xml.calc | -
+**.pdf** | application/pdf | application/x-pdf
+**.fdf** | application/vnd.fdf | -
+**n/a** | application/x-ole-storage | -
+**.msi** | application/x-ms-installer | application/x-windows-installer, application/x-msi
+**.aaf** | application/octet-stream | -
+**.msg** | application/vnd.ms-outlook | -
+**.xls** | application/vnd.ms-excel | application/msexcel
+**.pub** | application/vnd.ms-publisher | -
+**.ppt** | application/vnd.ms-powerpoint | application/mspowerpoint
+**.doc** | application/msword | application/vnd.ms-word
+**.ps** | application/postscript | -
+**.psd** | image/vnd.adobe.photoshop | image/x-psd, application/photoshop
+**.p7s** | application/pkcs7-signature | -
+**.ogg** | application/ogg | application/x-ogg
+**.oga** | audio/ogg | -
+**.ogv** | video/ogg | -
+**.png** | image/png | -
+**.png** | image/vnd.mozilla.apng | -
+**.jpg** | image/jpeg | -
+**.jxl** | image/jxl | -
+**.jp2** | image/jp2 | -
+**.jpf** | image/jpx | -
+**.jpm** | image/jpm | video/jpm
+**.jxs** | image/jxs | -
+**.gif** | image/gif | -
+**.webp** | image/webp | -
+**.exe** | application/vnd.microsoft.portable-executable | -
+**n/a** | application/x-elf | -
+**n/a** | application/x-object | -
+**n/a** | application/x-executable | -
+**.so** | application/x-sharedlib | -
+**n/a** | application/x-coredump | -
+**.a** | application/x-archive | application/x-unix-archive
+**.deb** | application/vnd.debian.binary-package | -
+**.tar** | application/x-tar | -
+**.xar** | application/x-xar | -
+**.bz2** | application/x-bzip2 | -
+**.fits** | application/fits | -
+**.tiff** | image/tiff | -
+**.bmp** | image/bmp | image/x-bmp, image/x-ms-bmp
+**.ico** | image/x-icon | -
+**.mp3** | audio/mpeg | audio/x-mpeg, audio/mp3
+**.flac** | audio/flac | -
+**.midi** | audio/midi | audio/mid, audio/sp-midi, audio/x-mid, audio/x-midi
+**.ape** | audio/ape | -
+**.mpc** | audio/musepack | -
+**.amr** | audio/amr | audio/amr-nb
+**.wav** | audio/wav | audio/x-wav, audio/vnd.wave, audio/wave
+**.aiff** | audio/aiff | audio/x-aiff
+**.au** | audio/basic | -
+**.mpeg** | video/mpeg | -
+**.mov** | video/quicktime | -
+**.mqv** | video/quicktime | -
+**.mp4** | video/mp4 | -
+**.webm** | video/webm | audio/webm
+**.3gp** | video/3gpp | video/3gp, audio/3gpp
+**.3g2** | video/3gpp2 | video/3g2, audio/3gpp2
+**.avi** | video/x-msvideo | video/avi, video/msvideo
+**.flv** | video/x-flv | -
+**.mkv** | video/x-matroska | -
+**.asf** | video/x-ms-asf | video/asf, video/x-ms-wmv
+**.aac** | audio/aac | -
+**.voc** | audio/x-unknown | -
+**.mp4** | audio/mp4 | audio/x-m4a, audio/x-mp4a
+**.m4a** | audio/x-m4a | -
+**.m3u** | application/vnd.apple.mpegurl | audio/mpegurl
+**.m4v** | video/x-m4v | -
+**.rmvb** | application/vnd.rn-realmedia-vbr | -
+**.gz** | application/gzip | application/x-gzip, application/x-gunzip, application/gzipped, application/gzip-compressed, application/x-gzip-compressed, gzip/document
+**.class** | application/x-java-applet | -
+**.swf** | application/x-shockwave-flash | -
+**.crx** | application/x-chrome-extension | -
+**.ttf** | font/ttf | font/sfnt, application/x-font-ttf, application/font-sfnt
+**.woff** | font/woff | -
+**.woff2** | font/woff2 | -
+**.otf** | font/otf | -
+**.ttc** | font/collection | -
+**.eot** | application/vnd.ms-fontobject | -
+**.wasm** | application/wasm | -
+**.shx** | application/vnd.shx | -
+**.shp** | application/vnd.shp | -
+**.dbf** | application/x-dbf | -
+**.dcm** | application/dicom | -
+**.rar** | application/x-rar-compressed | application/x-rar
+**.djvu** | image/vnd.djvu | -
+**.mobi** | application/x-mobipocket-ebook | -
+**.lit** | application/x-ms-reader | -
+**.bpg** | image/bpg | -
+**.sqlite** | application/vnd.sqlite3 | application/x-sqlite3
+**.dwg** | image/vnd.dwg | image/x-dwg, application/acad, application/x-acad, application/autocad_dwg, application/dwg, application/x-dwg, application/x-autocad, drawing/dwg
+**.nes** | application/vnd.nintendo.snes.rom | -
+**.lnk** | application/x-ms-shortcut | -
+**.macho** | application/x-mach-binary | -
+**.qcp** | audio/qcelp | -
+**.icns** | image/x-icns | -
+**.heic** | image/heic | -
+**.heic** | image/heic-sequence | -
+**.heif** | image/heif | -
+**.heif** | image/heif-sequence | -
+**.hdr** | image/vnd.radiance | -
+**.mrc** | application/marc | -
+**.mdb** | application/x-msaccess | -
+**.accdb** | application/x-msaccess | -
+**.zst** | application/zstd | -
+**.cab** | application/vnd.ms-cab-compressed | -
+**.rpm** | application/x-rpm | -
+**.xz** | application/x-xz | -
+**.lz** | application/lzip | application/x-lzip
+**.torrent** | application/x-bittorrent | -
+**.cpio** | application/x-cpio | -
+**n/a** | application/tzif | -
+**.xcf** | image/x-xcf | -
+**.pat** | image/x-gimp-pat | -
+**.gbr** | image/x-gimp-gbr | -
+**.glb** | model/gltf-binary | -
+**.avif** | image/avif | -
+**.cab** | application/x-installshield | -
+**.jxr** | image/jxr | image/vnd.ms-photo
+**.txt** | text/plain | -
+**.html** | text/html | -
+**.svg** | image/svg+xml | -
+**.xml** | text/xml | -
+**.rss** | application/rss+xml | text/rss
+**.atom** | application/atom+xml | -
+**.x3d** | model/x3d+xml | -
+**.kml** | application/vnd.google-earth.kml+xml | -
+**.xlf** | application/x-xliff+xml | -
+**.dae** | model/vnd.collada+xml | -
+**.gml** | application/gml+xml | -
+**.gpx** | application/gpx+xml | -
+**.tcx** | application/vnd.garmin.tcx+xml | -
+**.amf** | application/x-amf | -
+**.3mf** | application/vnd.ms-package.3dmanufacturing-3dmodel+xml | -
+**.xfdf** | application/vnd.adobe.xfdf | -
+**.owl** | application/owl+xml | -
+**.php** | text/x-php | -
+**.js** | application/javascript | application/x-javascript, text/javascript
+**.lua** | text/x-lua | -
+**.pl** | text/x-perl | -
+**.py** | text/x-python | text/x-script.python, application/x-python
+**.json** | application/json | -
+**.geojson** | application/geo+json | -
+**.har** | application/json | -
+**.ndjson** | application/x-ndjson | -
+**.rtf** | text/rtf | -
+**.srt** | application/x-subrip | application/x-srt, text/x-srt
+**.tcl** | text/x-tcl | application/x-tcl
+**.csv** | text/csv | -
+**.tsv** | text/tab-separated-values | -
+**.vcf** | text/vcard | -
+**.ics** | text/calendar | -
+**.warc** | application/warc | -
+**.vtt** | text/vtt | -
diff --git a/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/tree.go b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/tree.go
new file mode 100644
index 00000000000..253bd006499
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gabriel-vasile/mimetype/tree.go
@@ -0,0 +1,260 @@
+package mimetype
+
+import (
+ "sync"
+
+ "github.com/gabriel-vasile/mimetype/internal/magic"
+)
+
+// mimetype stores the list of MIME types in a tree structure with
+// "application/octet-stream" at the root of the hierarchy. The hierarchy
+// approach minimizes the number of checks that need to be done on the input
+// and allows for more precise results once the base type of file has been
+// identified.
+//
+// root is a detector which passes for any slice of bytes.
+// When a detector passes the check, the children detectors
+// are tried in order to find a more accurate MIME type.
+var root = newMIME("application/octet-stream", "",
+ func([]byte, uint32) bool { return true },
+ xpm, sevenZ, zip, pdf, fdf, ole, ps, psd, p7s, ogg, png, jpg, jxl, jp2, jpx,
+ jpm, jxs, gif, webp, exe, elf, ar, tar, xar, bz2, fits, tiff, bmp, ico, mp3, flac,
+ midi, ape, musePack, amr, wav, aiff, au, mpeg, quickTime, mqv, mp4, webM,
+ threeGP, threeG2, avi, flv, mkv, asf, aac, voc, aMp4, m4a, m3u, m4v, rmvb,
+ gzip, class, swf, crx, ttf, woff, woff2, otf, ttc, eot, wasm, shx, dbf, dcm, rar,
+ djvu, mobi, lit, bpg, sqlite3, dwg, nes, lnk, macho, qcp, icns, heic,
+ heicSeq, heif, heifSeq, hdr, mrc, mdb, accdb, zstd, cab, rpm, xz, lzip,
+ torrent, cpio, tzif, xcf, pat, gbr, glb, avif, cabIS, jxr,
+ // Keep text last because it is the slowest check
+ text,
+)
+
+// errMIME is returned from Detect functions when err is not nil.
+// Detect could return root for erroneous cases, but it needs to lock mu in order to do so.
+// errMIME is same as root but it does not require locking.
+var errMIME = newMIME("application/octet-stream", "", func([]byte, uint32) bool { return false })
+
+// mu guards access to the root MIME tree. Access to root must be synchronized with this lock.
+var mu = &sync.RWMutex{}
+
+// The list of nodes appended to the root node.
+var (
+ xz = newMIME("application/x-xz", ".xz", magic.Xz)
+ gzip = newMIME("application/gzip", ".gz", magic.Gzip).alias(
+ "application/x-gzip", "application/x-gunzip", "application/gzipped",
+ "application/gzip-compressed", "application/x-gzip-compressed",
+ "gzip/document")
+ sevenZ = newMIME("application/x-7z-compressed", ".7z", magic.SevenZ)
+ zip = newMIME("application/zip", ".zip", magic.Zip, xlsx, docx, pptx, epub, jar, odt, ods, odp, odg, odf, odc, sxc).
+ alias("application/x-zip", "application/x-zip-compressed")
+ tar = newMIME("application/x-tar", ".tar", magic.Tar)
+ xar = newMIME("application/x-xar", ".xar", magic.Xar)
+ bz2 = newMIME("application/x-bzip2", ".bz2", magic.Bz2)
+ pdf = newMIME("application/pdf", ".pdf", magic.Pdf).
+ alias("application/x-pdf")
+ fdf = newMIME("application/vnd.fdf", ".fdf", magic.Fdf)
+ xlsx = newMIME("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx", magic.Xlsx)
+ docx = newMIME("application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx", magic.Docx)
+ pptx = newMIME("application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx", magic.Pptx)
+ epub = newMIME("application/epub+zip", ".epub", magic.Epub)
+ jar = newMIME("application/jar", ".jar", magic.Jar)
+ ole = newMIME("application/x-ole-storage", "", magic.Ole, msi, aaf, msg, xls, pub, ppt, doc)
+ msi = newMIME("application/x-ms-installer", ".msi", magic.Msi).
+ alias("application/x-windows-installer", "application/x-msi")
+ aaf = newMIME("application/octet-stream", ".aaf", magic.Aaf)
+ doc = newMIME("application/msword", ".doc", magic.Doc).
+ alias("application/vnd.ms-word")
+ ppt = newMIME("application/vnd.ms-powerpoint", ".ppt", magic.Ppt).
+ alias("application/mspowerpoint")
+ pub = newMIME("application/vnd.ms-publisher", ".pub", magic.Pub)
+ xls = newMIME("application/vnd.ms-excel", ".xls", magic.Xls).
+ alias("application/msexcel")
+ msg = newMIME("application/vnd.ms-outlook", ".msg", magic.Msg)
+ ps = newMIME("application/postscript", ".ps", magic.Ps)
+ fits = newMIME("application/fits", ".fits", magic.Fits)
+ ogg = newMIME("application/ogg", ".ogg", magic.Ogg, oggAudio, oggVideo).
+ alias("application/x-ogg")
+ oggAudio = newMIME("audio/ogg", ".oga", magic.OggAudio)
+ oggVideo = newMIME("video/ogg", ".ogv", magic.OggVideo)
+ text = newMIME("text/plain", ".txt", magic.Text, html, svg, xml, php, js, lua, perl, python, json, ndJSON, rtf, srt, tcl, csv, tsv, vCard, iCalendar, warc, vtt)
+ xml = newMIME("text/xml", ".xml", magic.XML, rss, atom, x3d, kml, xliff, collada, gml, gpx, tcx, amf, threemf, xfdf, owl2)
+ json = newMIME("application/json", ".json", magic.JSON, geoJSON, har)
+ har = newMIME("application/json", ".har", magic.HAR)
+ csv = newMIME("text/csv", ".csv", magic.Csv)
+ tsv = newMIME("text/tab-separated-values", ".tsv", magic.Tsv)
+ geoJSON = newMIME("application/geo+json", ".geojson", magic.GeoJSON)
+ ndJSON = newMIME("application/x-ndjson", ".ndjson", magic.NdJSON)
+ html = newMIME("text/html", ".html", magic.HTML)
+ php = newMIME("text/x-php", ".php", magic.Php)
+ rtf = newMIME("text/rtf", ".rtf", magic.Rtf)
+ js = newMIME("application/javascript", ".js", magic.Js).
+ alias("application/x-javascript", "text/javascript")
+ srt = newMIME("application/x-subrip", ".srt", magic.Srt).
+ alias("application/x-srt", "text/x-srt")
+ vtt = newMIME("text/vtt", ".vtt", magic.Vtt)
+ lua = newMIME("text/x-lua", ".lua", magic.Lua)
+ perl = newMIME("text/x-perl", ".pl", magic.Perl)
+ python = newMIME("text/x-python", ".py", magic.Python).
+ alias("text/x-script.python", "application/x-python")
+ tcl = newMIME("text/x-tcl", ".tcl", magic.Tcl).
+ alias("application/x-tcl")
+ vCard = newMIME("text/vcard", ".vcf", magic.VCard)
+ iCalendar = newMIME("text/calendar", ".ics", magic.ICalendar)
+ svg = newMIME("image/svg+xml", ".svg", magic.Svg)
+ rss = newMIME("application/rss+xml", ".rss", magic.Rss).
+ alias("text/rss")
+ owl2 = newMIME("application/owl+xml", ".owl", magic.Owl2)
+ atom = newMIME("application/atom+xml", ".atom", magic.Atom)
+ x3d = newMIME("model/x3d+xml", ".x3d", magic.X3d)
+ kml = newMIME("application/vnd.google-earth.kml+xml", ".kml", magic.Kml)
+ xliff = newMIME("application/x-xliff+xml", ".xlf", magic.Xliff)
+ collada = newMIME("model/vnd.collada+xml", ".dae", magic.Collada)
+ gml = newMIME("application/gml+xml", ".gml", magic.Gml)
+ gpx = newMIME("application/gpx+xml", ".gpx", magic.Gpx)
+ tcx = newMIME("application/vnd.garmin.tcx+xml", ".tcx", magic.Tcx)
+ amf = newMIME("application/x-amf", ".amf", magic.Amf)
+ threemf = newMIME("application/vnd.ms-package.3dmanufacturing-3dmodel+xml", ".3mf", magic.Threemf)
+ png = newMIME("image/png", ".png", magic.Png, apng)
+ apng = newMIME("image/vnd.mozilla.apng", ".png", magic.Apng)
+ jpg = newMIME("image/jpeg", ".jpg", magic.Jpg)
+ jxl = newMIME("image/jxl", ".jxl", magic.Jxl)
+ jp2 = newMIME("image/jp2", ".jp2", magic.Jp2)
+ jpx = newMIME("image/jpx", ".jpf", magic.Jpx)
+ jpm = newMIME("image/jpm", ".jpm", magic.Jpm).
+ alias("video/jpm")
+ jxs = newMIME("image/jxs", ".jxs", magic.Jxs)
+ xpm = newMIME("image/x-xpixmap", ".xpm", magic.Xpm)
+ bpg = newMIME("image/bpg", ".bpg", magic.Bpg)
+ gif = newMIME("image/gif", ".gif", magic.Gif)
+ webp = newMIME("image/webp", ".webp", magic.Webp)
+ tiff = newMIME("image/tiff", ".tiff", magic.Tiff)
+ bmp = newMIME("image/bmp", ".bmp", magic.Bmp).
+ alias("image/x-bmp", "image/x-ms-bmp")
+ ico = newMIME("image/x-icon", ".ico", magic.Ico)
+ icns = newMIME("image/x-icns", ".icns", magic.Icns)
+ psd = newMIME("image/vnd.adobe.photoshop", ".psd", magic.Psd).
+ alias("image/x-psd", "application/photoshop")
+ heic = newMIME("image/heic", ".heic", magic.Heic)
+ heicSeq = newMIME("image/heic-sequence", ".heic", magic.HeicSequence)
+ heif = newMIME("image/heif", ".heif", magic.Heif)
+ heifSeq = newMIME("image/heif-sequence", ".heif", magic.HeifSequence)
+ hdr = newMIME("image/vnd.radiance", ".hdr", magic.Hdr)
+ avif = newMIME("image/avif", ".avif", magic.AVIF)
+ mp3 = newMIME("audio/mpeg", ".mp3", magic.Mp3).
+ alias("audio/x-mpeg", "audio/mp3")
+ flac = newMIME("audio/flac", ".flac", magic.Flac)
+ midi = newMIME("audio/midi", ".midi", magic.Midi).
+ alias("audio/mid", "audio/sp-midi", "audio/x-mid", "audio/x-midi")
+ ape = newMIME("audio/ape", ".ape", magic.Ape)
+ musePack = newMIME("audio/musepack", ".mpc", magic.MusePack)
+ wav = newMIME("audio/wav", ".wav", magic.Wav).
+ alias("audio/x-wav", "audio/vnd.wave", "audio/wave")
+ aiff = newMIME("audio/aiff", ".aiff", magic.Aiff).alias("audio/x-aiff")
+ au = newMIME("audio/basic", ".au", magic.Au)
+ amr = newMIME("audio/amr", ".amr", magic.Amr).
+ alias("audio/amr-nb")
+ aac = newMIME("audio/aac", ".aac", magic.AAC)
+ voc = newMIME("audio/x-unknown", ".voc", magic.Voc)
+ aMp4 = newMIME("audio/mp4", ".mp4", magic.AMp4).
+ alias("audio/x-m4a", "audio/x-mp4a")
+ m4a = newMIME("audio/x-m4a", ".m4a", magic.M4a)
+ m3u = newMIME("application/vnd.apple.mpegurl", ".m3u", magic.M3u).
+ alias("audio/mpegurl")
+ m4v = newMIME("video/x-m4v", ".m4v", magic.M4v)
+ mp4 = newMIME("video/mp4", ".mp4", magic.Mp4)
+ webM = newMIME("video/webm", ".webm", magic.WebM).
+ alias("audio/webm")
+ mpeg = newMIME("video/mpeg", ".mpeg", magic.Mpeg)
+ quickTime = newMIME("video/quicktime", ".mov", magic.QuickTime)
+ mqv = newMIME("video/quicktime", ".mqv", magic.Mqv)
+ threeGP = newMIME("video/3gpp", ".3gp", magic.ThreeGP).
+ alias("video/3gp", "audio/3gpp")
+ threeG2 = newMIME("video/3gpp2", ".3g2", magic.ThreeG2).
+ alias("video/3g2", "audio/3gpp2")
+ avi = newMIME("video/x-msvideo", ".avi", magic.Avi).
+ alias("video/avi", "video/msvideo")
+ flv = newMIME("video/x-flv", ".flv", magic.Flv)
+ mkv = newMIME("video/x-matroska", ".mkv", magic.Mkv)
+ asf = newMIME("video/x-ms-asf", ".asf", magic.Asf).
+ alias("video/asf", "video/x-ms-wmv")
+ rmvb = newMIME("application/vnd.rn-realmedia-vbr", ".rmvb", magic.Rmvb)
+ class = newMIME("application/x-java-applet", ".class", magic.Class)
+ swf = newMIME("application/x-shockwave-flash", ".swf", magic.SWF)
+ crx = newMIME("application/x-chrome-extension", ".crx", magic.CRX)
+ ttf = newMIME("font/ttf", ".ttf", magic.Ttf).
+ alias("font/sfnt", "application/x-font-ttf", "application/font-sfnt")
+ woff = newMIME("font/woff", ".woff", magic.Woff)
+ woff2 = newMIME("font/woff2", ".woff2", magic.Woff2)
+ otf = newMIME("font/otf", ".otf", magic.Otf)
+ ttc = newMIME("font/collection", ".ttc", magic.Ttc)
+ eot = newMIME("application/vnd.ms-fontobject", ".eot", magic.Eot)
+ wasm = newMIME("application/wasm", ".wasm", magic.Wasm)
+ shp = newMIME("application/vnd.shp", ".shp", magic.Shp)
+ shx = newMIME("application/vnd.shx", ".shx", magic.Shx, shp)
+ dbf = newMIME("application/x-dbf", ".dbf", magic.Dbf)
+ exe = newMIME("application/vnd.microsoft.portable-executable", ".exe", magic.Exe)
+ elf = newMIME("application/x-elf", "", magic.Elf, elfObj, elfExe, elfLib, elfDump)
+ elfObj = newMIME("application/x-object", "", magic.ElfObj)
+ elfExe = newMIME("application/x-executable", "", magic.ElfExe)
+ elfLib = newMIME("application/x-sharedlib", ".so", magic.ElfLib)
+ elfDump = newMIME("application/x-coredump", "", magic.ElfDump)
+ ar = newMIME("application/x-archive", ".a", magic.Ar, deb).
+ alias("application/x-unix-archive")
+ deb = newMIME("application/vnd.debian.binary-package", ".deb", magic.Deb)
+ rpm = newMIME("application/x-rpm", ".rpm", magic.RPM)
+ dcm = newMIME("application/dicom", ".dcm", magic.Dcm)
+ odt = newMIME("application/vnd.oasis.opendocument.text", ".odt", magic.Odt, ott).
+ alias("application/x-vnd.oasis.opendocument.text")
+ ott = newMIME("application/vnd.oasis.opendocument.text-template", ".ott", magic.Ott).
+ alias("application/x-vnd.oasis.opendocument.text-template")
+ ods = newMIME("application/vnd.oasis.opendocument.spreadsheet", ".ods", magic.Ods, ots).
+ alias("application/x-vnd.oasis.opendocument.spreadsheet")
+ ots = newMIME("application/vnd.oasis.opendocument.spreadsheet-template", ".ots", magic.Ots).
+ alias("application/x-vnd.oasis.opendocument.spreadsheet-template")
+ odp = newMIME("application/vnd.oasis.opendocument.presentation", ".odp", magic.Odp, otp).
+ alias("application/x-vnd.oasis.opendocument.presentation")
+ otp = newMIME("application/vnd.oasis.opendocument.presentation-template", ".otp", magic.Otp).
+ alias("application/x-vnd.oasis.opendocument.presentation-template")
+ odg = newMIME("application/vnd.oasis.opendocument.graphics", ".odg", magic.Odg, otg).
+ alias("application/x-vnd.oasis.opendocument.graphics")
+ otg = newMIME("application/vnd.oasis.opendocument.graphics-template", ".otg", magic.Otg).
+ alias("application/x-vnd.oasis.opendocument.graphics-template")
+ odf = newMIME("application/vnd.oasis.opendocument.formula", ".odf", magic.Odf).
+ alias("application/x-vnd.oasis.opendocument.formula")
+ odc = newMIME("application/vnd.oasis.opendocument.chart", ".odc", magic.Odc).
+ alias("application/x-vnd.oasis.opendocument.chart")
+ sxc = newMIME("application/vnd.sun.xml.calc", ".sxc", magic.Sxc)
+ rar = newMIME("application/x-rar-compressed", ".rar", magic.RAR).
+ alias("application/x-rar")
+ djvu = newMIME("image/vnd.djvu", ".djvu", magic.DjVu)
+ mobi = newMIME("application/x-mobipocket-ebook", ".mobi", magic.Mobi)
+ lit = newMIME("application/x-ms-reader", ".lit", magic.Lit)
+ sqlite3 = newMIME("application/vnd.sqlite3", ".sqlite", magic.Sqlite).
+ alias("application/x-sqlite3")
+ dwg = newMIME("image/vnd.dwg", ".dwg", magic.Dwg).
+ alias("image/x-dwg", "application/acad", "application/x-acad",
+ "application/autocad_dwg", "application/dwg", "application/x-dwg",
+ "application/x-autocad", "drawing/dwg")
+ warc = newMIME("application/warc", ".warc", magic.Warc)
+ nes = newMIME("application/vnd.nintendo.snes.rom", ".nes", magic.Nes)
+ lnk = newMIME("application/x-ms-shortcut", ".lnk", magic.Lnk)
+ macho = newMIME("application/x-mach-binary", ".macho", magic.MachO)
+ qcp = newMIME("audio/qcelp", ".qcp", magic.Qcp)
+ mrc = newMIME("application/marc", ".mrc", magic.Marc)
+ mdb = newMIME("application/x-msaccess", ".mdb", magic.MsAccessMdb)
+ accdb = newMIME("application/x-msaccess", ".accdb", magic.MsAccessAce)
+ zstd = newMIME("application/zstd", ".zst", magic.Zstd)
+ cab = newMIME("application/vnd.ms-cab-compressed", ".cab", magic.Cab)
+ cabIS = newMIME("application/x-installshield", ".cab", magic.InstallShieldCab)
+ lzip = newMIME("application/lzip", ".lz", magic.Lzip).alias("application/x-lzip")
+ torrent = newMIME("application/x-bittorrent", ".torrent", magic.Torrent)
+ cpio = newMIME("application/x-cpio", ".cpio", magic.Cpio)
+ tzif = newMIME("application/tzif", "", magic.TzIf)
+ p7s = newMIME("application/pkcs7-signature", ".p7s", magic.P7s)
+ xcf = newMIME("image/x-xcf", ".xcf", magic.Xcf)
+ pat = newMIME("image/x-gimp-pat", ".pat", magic.Pat)
+ gbr = newMIME("image/x-gimp-gbr", ".gbr", magic.Gbr)
+ xfdf = newMIME("application/vnd.adobe.xfdf", ".xfdf", magic.Xfdf)
+ glb = newMIME("model/gltf-binary", ".glb", magic.Glb)
+ jxr = newMIME("image/jxr", ".jxr", magic.Jxr).alias("image/vnd.ms-photo")
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/.golangci.yaml b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/.golangci.yaml
new file mode 100644
index 00000000000..0cffafa7bf9
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -0,0 +1,26 @@
+run:
+ timeout: 1m
+ tests: true
+
+linters:
+ disable-all: true
+ enable:
+ - asciicheck
+ - errcheck
+ - forcetypeassert
+ - gocritic
+ - gofmt
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - revive
+ - staticcheck
+ - typecheck
+ - unused
+
+issues:
+ exclude-use-default: false
+ max-issues-per-linter: 0
+ max-same-issues: 10
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/CHANGELOG.md b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/CHANGELOG.md
new file mode 100644
index 00000000000..c3569600463
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/CHANGELOG.md
@@ -0,0 +1,6 @@
+# CHANGELOG
+
+## v1.0.0-rc1
+
+This is the first logged release. Major changes (including breaking changes)
+have occurred since earlier tags.
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/CONTRIBUTING.md
new file mode 100644
index 00000000000..5d37e294c5f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+Logr is open to pull-requests, provided they fit within the intended scope of
+the project. Specifically, this library aims to be VERY small and minimalist,
+with no external dependencies.
+
+## Compatibility
+
+This project intends to follow [semantic versioning](http://semver.org) and
+is very strict about compatibility. Any proposed changes MUST follow those
+rules.
+
+## Performance
+
+As a logging library, logr must be as light-weight as possible. Any proposed
+code change must include results of running the [benchmark](./benchmark)
+before and after the change.
diff --git a/integration/assets/hydrabroker/vendor/gopkg.in/yaml.v2/LICENSE b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/LICENSE
similarity index 100%
rename from integration/assets/hydrabroker/vendor/gopkg.in/yaml.v2/LICENSE
rename to integration/assets/hydrabroker/vendor/github.com/go-logr/logr/LICENSE
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/README.md b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 00000000000..8969526a6e5
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,406 @@
+# A minimal logging API for Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
+
+logr offers an(other) opinion on how Go programs and libraries can do logging
+without becoming coupled to a particular logging implementation. This is not
+an implementation of logging - it is an API. In fact it is two APIs with two
+different sets of users.
+
+The `Logger` type is intended for application and library authors. It provides
+a relatively small API which can be used everywhere you want to emit logs. It
+defers the actual act of writing logs (to files, to stdout, or whatever) to the
+`LogSink` interface.
+
+The `LogSink` interface is intended for logging library implementers. It is a
+pure interface which can be implemented by logging frameworks to provide the actual logging
+functionality.
+
+This decoupling allows application and library developers to write code in
+terms of `logr.Logger` (which has very low dependency fan-out) while the
+implementation of logging is managed "up stack" (e.g. in or near `main()`.)
+Application developers can then switch out implementations as necessary.
+
+Many people assert that libraries should not be logging, and as such efforts
+like this are pointless. Those people are welcome to convince the authors of
+the tens-of-thousands of libraries that *DO* write logs that they are all
+wrong. In the meantime, logr takes a more practical approach.
+
+## Typical usage
+
+Somewhere, early in an application's life, it will make a decision about which
+logging library (implementation) it actually wants to use. Something like:
+
+```
+ func main() {
+ // ... other setup code ...
+
+ // Create the "root" logger. We have chosen the "logimpl" implementation,
+ // which takes some initial parameters and returns a logr.Logger.
+ logger := logimpl.New(param1, param2)
+
+ // ... other setup code ...
+```
+
+Most apps will call into other libraries, create structures to govern the flow,
+etc. The `logr.Logger` object can be passed to these other libraries, stored
+in structs, or even used as a package-global variable, if needed. For example:
+
+```
+ app := createTheAppObject(logger)
+ app.Run()
+```
+
+Outside of this early setup, no other packages need to know about the choice of
+implementation. They write logs in terms of the `logr.Logger` that they
+received:
+
+```
+ type appObject struct {
+ // ... other fields ...
+ logger logr.Logger
+ // ... other fields ...
+ }
+
+ func (app *appObject) Run() {
+ app.logger.Info("starting up", "timestamp", time.Now())
+
+ // ... app code ...
+```
+
+## Background
+
+If the Go standard library had defined an interface for logging, this project
+probably would not be needed. Alas, here we are.
+
+When the Go developers started developing such an interface with
+[slog](https://github.com/golang/go/issues/56345), they adopted some of the
+logr design but also left out some parts and changed others:
+
+| Feature | logr | slog |
+|---------|------|------|
+| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
+| Low-level API | `LogSink` | `Handler` |
+| Stack unwinding | done by `LogSink` | done by `Logger` |
+| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
+| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
+| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
+| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
+| Passing logger via context | `NewContext`, `FromContext` | no API |
+| Adding a name to a logger | `WithName` | no API |
+| Modify verbosity of log entries in a call chain | `V` | no API |
+| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
+
+The high-level slog API is explicitly meant to be one of many different APIs
+that can be layered on top of a shared `slog.Handler`. logr is one such
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
+
+### Inspiration
+
+Before you consider this package, please read [this blog post by the
+inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
+he has to say, and it largely aligns with our own experiences.
+
+### Differences from Dave's ideas
+
+The main differences are:
+
+1. Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`. We disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. This
+package restricts the logging API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2. Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug." Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API.
+
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.Sink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+ through `slog.Logger`, but may be wrong in other cases. That's because a
+ `logr.Sink` does its own stack unwinding instead of using the program counter
+ provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
+ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+ because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+ pair with the group names, separated by a dot. For structured output like
+ JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+ by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+ counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+ attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+ and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.Valuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+ key-value pairs, it's much easier to query your structured logs for
+ particular values by filtering on the contents of a particular key --
+ think searching request logs for error codes, Kubernetes reconcilers for
+ the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+ Similarly to searchability, if you maintain conventions around your
+ keys, it becomes easy to gather all log lines related to a particular
+ concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+ structure to your logs, you've got more precise control over how much
+ information is logged -- you might choose in a particular configuration
+ to log certain keys but not others, only log lines where a certain key
+ matches a certain value, etc., instead of just having v-levels and names
+ to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+ data that you want to log is inherently structured (think tuple-link
+ objects.) Structured logs allow you to preserve that structure when
+ outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+ regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+ a string.
+
+- They're not cross-referenceable.
+
+- They don't compress easily, since the message is not constant.
+
+(Unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys.)
+
+### Practical
+
+#### Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+#### What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` method to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+#### But I really want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+ and use that as a message.
+
+2. For every place you'd write a format specifier, look to the word before
+ it, and add that as a key value pair.
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+ responseCode, err)` becomes `logger.Error(err, "client returned an
+ error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+ seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+ response when requesting url", "attempt", retries, "after
+ seconds", seconds, "url", url)`
+
+If you *really* must use a format string, use it in a key's value, and
+call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
+reflect over type %T")` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
+this is necessary should be few and far between.
+
+#### How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack."
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
+
+#### How do I choose my keys?
+
+Keys are fairly flexible, and can hold more or less any string
+value. For best compatibility with implementations and consistency
+with existing code in other projects, there are a few conventions you
+should consider.
+
+- Make your keys human-readable.
+- Constant keys are generally a good idea.
+- Be consistent across your codebase.
+- Keys should naturally match parts of the message string.
+- Use lower case for simple keys and
+ [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
+ more complex ones. Kubernetes is one example of a project that has
+ [adopted that
+ convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+#### Why should keys be constant values?
+
+The point of structured logging is to make later log processing easier. Your
+keys are, effectively, the schema of each log message. If you use different
+keys across instances of the same log line, you will make your structured logs
+much harder to use. `Sprintf()` is for values, not for keys!
+
+#### Why is this not a pure interface?
+
+The Logger type is implemented as a struct in order to allow the Go compiler to
+optimize things like high-V `Info` logs that are not triggered. Not all of
+these implementations are implemented yet, but this structure was suggested as
+a way to ensure they *can* be implemented. All of the real work is behind the
+`LogSink` interface.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/SECURITY.md b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/SECURITY.md
new file mode 100644
index 00000000000..1ca756fc7b3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to go-logr-security@googlegroups.com
+- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context.go
new file mode 100644
index 00000000000..de8bcc3ad89
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+ return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+ return true
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context_noslog.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 00000000000..f012f9a18e8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v, nil
+ }
+
+ return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v
+ }
+
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context_slog.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 00000000000..065ef0b8280
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return Logger{}, notFoundError{}
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return v, nil
+ case *slog.Logger:
+ return FromSlogHandler(v.Handler()), nil
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return nil
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return slog.New(ToSlogHandler(v))
+ case *slog.Logger:
+ return v
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if logger, err := FromContext(ctx); err == nil {
+ return logger
+ }
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/discard.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/discard.go
new file mode 100644
index 00000000000..99fe8be93c1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/discard.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2020 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// Discard returns a Logger that discards all messages logged to it. It can be
+// used whenever the caller is not interested in the logs. Logger instances
+// produced by this function always compare as equal.
+func Discard() Logger {
+ return New(nil)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/funcr/funcr.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 00000000000..fb2f866f4b7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,911 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function. See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged. When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
// New returns a logr.Logger which is implemented by an arbitrary function.
// The prefix argument carries the accumulated logger name(s) and the args
// argument carries the rendered key-value output.
func New(fn func(prefix, args string), opts Options) logr.Logger {
	return logr.New(newSink(fn, NewFormatter(opts)))
}

// NewJSON returns a logr.Logger which is implemented by an arbitrary function
// and produces JSON output. In JSON mode the logger name is folded into the
// object as a "logger" key (see FormatInfo/FormatError), so the wrapper
// discards the prefix argument.
func NewJSON(fn func(obj string), opts Options) logr.Logger {
	fnWrapper := func(_, obj string) {
		fn(obj)
	}
	return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
}

// Underlier exposes access to the underlying logging function. Since
// callers only have a logr.Logger, they have to know which
// implementation is in use, so this interface is less of an
// abstraction and more of a way to test type conversion.
// It is satisfied by fnlogger via GetUnderlying.
type Underlier interface {
	GetUnderlying() func(prefix, args string)
}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+ l := &fnlogger{
+ Formatter: formatter,
+ write: fn,
+ }
+ // For skipping fnlogger.Info and fnlogger.Error.
+ l.Formatter.AddCallDepth(1)
+ return l
+}
+
// Options carries parameters which influence the way logs are generated.
// The zero value is usable as-is; newFormatter fills in defaults for
// TimestampFormat, MaxLogDepth, and LogInfoLevel.
type Options struct {
	// LogCaller tells funcr to add a "caller" key to some or all log lines.
	// This has some overhead, so some users might not want it.
	LogCaller MessageClass

	// LogCallerFunc tells funcr to also log the calling function name. This
	// has no effect if caller logging is not enabled (see Options.LogCaller).
	LogCallerFunc bool

	// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
	// overhead, so some users might not want it.
	LogTimestamp bool

	// TimestampFormat tells funcr how to render timestamps when LogTimestamp
	// is enabled. If not specified, a default format will be used. For more
	// details, see docs for Go's time.Layout.
	TimestampFormat string

	// LogInfoLevel tells funcr what key to use to log the info level.
	// If not specified, the info level will be logged as "level".
	// If this is set to "", the info level will not be logged at all.
	LogInfoLevel *string

	// Verbosity tells funcr which V logs to produce. Higher values enable
	// more logs. Info logs at or below this level will be written, while logs
	// above this level will be discarded.
	Verbosity int

	// RenderBuiltinsHook allows users to mutate the list of key-value pairs
	// while a log line is being rendered. The kvList argument follows logr
	// conventions - each pair of slice elements is comprised of a string key
	// and an arbitrary value (verified and sanitized before calling this
	// hook). The value returned must follow the same conventions. This hook
	// can be used to audit or modify logged data. For example, you might want
	// to prefix all of funcr's built-in keys with some string. This hook is
	// only called for built-in (provided by funcr itself) key-value pairs.
	// Equivalent hooks are offered for key-value pairs saved via
	// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
	// for user-provided pairs (see RenderArgsHook).
	RenderBuiltinsHook func(kvList []any) []any

	// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
	// only called for key-value pairs saved via logr.Logger.WithValues. See
	// RenderBuiltinsHook for more details.
	RenderValuesHook func(kvList []any) []any

	// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
	// called for key-value pairs passed directly to Info and Error. See
	// RenderBuiltinsHook for more details.
	RenderArgsHook func(kvList []any) []any

	// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
	// that contains a struct, etc.) it may log. Every time it finds a struct,
	// slice, array, or map the depth is increased by one. When the maximum is
	// reached, the value will be converted to a string indicating that the max
	// depth has been exceeded. If this field is not specified, a default
	// value will be used.
	MaxLogDepth int
}

// MessageClass indicates which category or categories of messages to consider.
type MessageClass int

const (
	// None ignores all message classes (the zero value, and thus the default).
	None MessageClass = iota
	// All considers all message classes.
	All
	// Info only considers info messages.
	Info
	// Error only considers error messages.
	Error
)
+
// fnlogger inherits some of its LogSink implementation from Formatter
// and just needs to add some glue code.
type fnlogger struct {
	Formatter
	write func(prefix, args string)
}

// WithName returns a copy of the sink with name appended. The value
// receiver means l is already a copy, so mutating it and returning &l
// leaves the original sink unchanged.
func (l fnlogger) WithName(name string) logr.LogSink {
	l.Formatter.AddName(name)
	return &l
}

// WithValues returns a copy of the sink with the key-value pairs saved
// (and pre-rendered by the embedded Formatter).
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
	l.Formatter.AddValues(kvList)
	return &l
}

// WithCallDepth returns a copy of the sink that skips depth additional
// stack frames when attributing the call site.
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
	l.Formatter.AddCallDepth(depth)
	return &l
}

// Info formats the message and hands the result to the write function.
func (l fnlogger) Info(level int, msg string, kvList ...any) {
	prefix, args := l.FormatInfo(level, msg, kvList)
	l.write(prefix, args)
}

// Error formats the message and error and hands the result to the write
// function.
func (l fnlogger) Error(err error, msg string, kvList ...any) {
	prefix, args := l.FormatError(err, msg, kvList)
	l.write(prefix, args)
}

// GetUnderlying implements Underlier, exposing the raw write function.
func (l fnlogger) GetUnderlying() func(prefix, args string) {
	return l.write
}

// Assert conformance to the interfaces.
var _ logr.LogSink = &fnlogger{}
var _ logr.CallDepthLogSink = &fnlogger{}
var _ Underlier = &fnlogger{}
+
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
func NewFormatter(opts Options) Formatter {
	return newFormatter(opts, outputKeyValue)
}

// NewFormatterJSON constructs a Formatter which emits strict JSON.
func NewFormatterJSON(opts Options) Formatter {
	return newFormatter(opts, outputJSON)
}

// Defaults for Options.
// defaultTimestampFormat uses Go's reference time (see time.Layout).
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+ if opts.TimestampFormat == "" {
+ opts.TimestampFormat = defaultTimestampFormat
+ }
+ if opts.MaxLogDepth == 0 {
+ opts.MaxLogDepth = defaultMaxLogDepth
+ }
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
+ f := Formatter{
+ outputFormat: outfmt,
+ prefix: "",
+ values: nil,
+ depth: 0,
+ opts: &opts,
+ }
+ return f
+}
+
// Formatter is an opaque struct which can be embedded in a LogSink
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
	outputFormat    outputFormat
	prefix          string // accumulated names, joined with '/' (see AddName)
	values          []any  // saved key-value pairs from AddValues
	valuesStr       string // pre-rendered form of values
	parentValuesStr string // values locked in by startGroup, already rendered
	depth           int    // stack frames to skip when locating the caller
	opts            *Options
	group           string // for slog groups
	groupDepth      int    // open group scopes still awaiting a closing brace
}

// outputFormat indicates which outputFormat to use.
type outputFormat int

const (
	// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
	outputKeyValue outputFormat = iota
	// outputJSON emits strict JSON.
	outputJSON
)

// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []any
+
// render produces a log line, ready to use. The output order is:
// builtins, then values locked in by groups (parentValuesStr), then the
// current group's saved values, then the per-call args — finally closing
// any group braces that were left open.
func (f Formatter) render(builtins, args []any) string {
	// Empirically bytes.Buffer is faster than strings.Builder for this.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	if f.outputFormat == outputJSON {
		buf.WriteByte('{') // for the whole line
	}

	// Built-in pairs (logger, ts, caller, level, msg, error).
	vals := builtins
	if hook := f.opts.RenderBuiltinsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
	continuing := len(builtins) > 0

	if f.parentValuesStr != "" {
		if continuing {
			buf.WriteByte(f.comma())
		}
		buf.WriteString(f.parentValuesStr)
		continuing = true
	}

	groupDepth := f.groupDepth
	if f.group != "" {
		// Only open the group if it will contain something.
		if f.valuesStr != "" || len(args) != 0 {
			if continuing {
				buf.WriteByte(f.comma())
			}
			buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
			buf.WriteByte(f.colon())
			buf.WriteByte('{') // for the group
			continuing = false
		} else {
			// The group was empty
			groupDepth--
		}
	}

	if f.valuesStr != "" {
		if continuing {
			buf.WriteByte(f.comma())
		}
		buf.WriteString(f.valuesStr)
		continuing = true
	}

	// Per-call key-value pairs come last.
	vals = args
	if hook := f.opts.RenderArgsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, continuing, true) // escape user-provided keys

	for i := 0; i < groupDepth; i++ {
		buf.WriteByte('}') // for the groups
	}

	if f.outputFormat == outputJSON {
		buf.WriteByte('}') // for the whole line
	}

	return buf.String()
}
+
// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
	// This logic overlaps with sanitize() but saves one type-cast per key,
	// which can be measurable.
	if len(kvList)%2 != 0 {
		// Pad an odd-length list so every key has a value.
		kvList = append(kvList, noValue)
	}
	copied := false
	for i := 0; i < len(kvList); i += 2 {
		k, ok := kvList[i].(string)
		if !ok {
			// Copy-on-write: only duplicate the list the first time we
			// need to substitute a non-string key.
			if !copied {
				newList := make([]any, len(kvList))
				copy(newList, kvList)
				kvList = newList
				copied = true
			}
			k = f.nonStringKey(kvList[i])
			kvList[i] = k
		}
		v := kvList[i+1]

		if i > 0 || continuing {
			if f.outputFormat == outputJSON {
				buf.WriteByte(f.comma())
			} else {
				// In theory the format could be something we don't understand. In
				// practice, we control it, so it won't be.
				buf.WriteByte(' ')
			}
		}

		buf.WriteString(f.quoted(k, escapeKeys))
		buf.WriteByte(f.colon())
		buf.WriteString(f.pretty(v))
	}
	return kvList
}
+
// quoted wraps str in double quotes, escaping its contents only when
// escape is true (user-provided keys may need it; funcr's own keys don't).
func (f Formatter) quoted(str string, escape bool) string {
	if escape {
		return prettyString(str)
	}
	// this is faster
	return `"` + str + `"`
}

// comma returns the pair separator for the current output format.
func (f Formatter) comma() byte {
	if f.outputFormat == outputJSON {
		return ','
	}
	return ' '
}

// colon returns the key/value separator for the current output format.
func (f Formatter) colon() byte {
	if f.outputFormat == outputJSON {
		return ':'
	}
	return '='
}

// pretty renders value with default flags at the top nesting level.
func (f Formatter) pretty(value any) string {
	return f.prettyWithFlags(value, 0, 0)
}

const (
	flagRawStruct = 0x1 // do not print braces on structs
)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `""`
+ }
+
+ // Handle types that take full control of logging.
+ if v, ok := value.(logr.Marshaler); ok {
+ // Replace the value with what the type wants to get logged.
+ // That then gets handled below via reflection.
+ value = invokeMarshaler(v)
+ }
+
+ // Handle types that want to format themselves.
+ switch v := value.(type) {
+ case fmt.Stringer:
+ value = invokeStringer(v)
+ case error:
+ value = invokeError(v)
+ }
+
+ // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case string:
+ return prettyString(v)
+ case int:
+ return strconv.FormatInt(int64(v), 10)
+ case int8:
+ return strconv.FormatInt(int64(v), 10)
+ case int16:
+ return strconv.FormatInt(int64(v), 10)
+ case int32:
+ return strconv.FormatInt(int64(v), 10)
+ case int64:
+ return strconv.FormatInt(int64(v), 10)
+ case uint:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case uintptr:
+ return strconv.FormatUint(uint64(v), 10)
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ case complex64:
+ return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+ case complex128:
+ return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+ case PseudoStruct:
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ v = f.sanitize(v)
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ for i := 0; i < len(v); i += 2 {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ k, _ := v[i].(string) // sanitize() above means no need to check success
+ // arbitrary keys might need escaping
+ buf.WriteString(prettyString(k))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 256))
+ t := reflect.TypeOf(value)
+ if t == nil {
+ return "null"
+ }
+ v := reflect.ValueOf(value)
+ switch t.Kind() {
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool())
+ case reflect.String:
+ return prettyString(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case reflect.Float32:
+ return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.Complex64:
+ return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+ case reflect.Complex128:
+ return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+ case reflect.Struct:
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+ // reflect says this field is only defined for non-exported fields.
+ continue
+ }
+ if !v.Field(i).CanInterface() {
+ // reflect isn't clear exactly what this means, but we can't use it.
+ continue
+ }
+ name := ""
+ omitempty := false
+ if tag, found := fld.Tag.Lookup("json"); found {
+ if tag == "-" {
+ continue
+ }
+ if comma := strings.Index(tag, ","); comma != -1 {
+ if n := tag[:comma]; n != "" {
+ name = n
+ }
+ rest := tag[comma:]
+ if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+ omitempty = true
+ }
+ } else {
+ name = tag
+ }
+ }
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+ if printComma {
+ buf.WriteByte(f.comma())
+ }
+ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+ }
+ if name == "" {
+ name = fld.Name
+ }
+ // field names can't contain characters which need escaping
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
+ // If this is outputing as JSON make sure this isn't really a json.RawMessage.
+ // If so just emit "as-is" and don't pretty it as that will just print
+ // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ e := v.Index(i)
+ buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+ }
+ buf.WriteByte(']')
+ return buf.String()
+ case reflect.Map:
+ buf.WriteByte('{')
+ // This does not sort the map keys, for best perf.
+ it := v.MapRange()
+ i := 0
+ for it.Next() {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ // If a map key supports TextMarshaler, use it.
+ keystr := ""
+ if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+ txt, err := m.MarshalText()
+ if err != nil {
+ keystr = fmt.Sprintf("", err.Error())
+ } else {
+ keystr = string(txt)
+ }
+ keystr = prettyString(keystr)
+ } else {
+ // prettyWithFlags will produce already-escaped values
+ keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+ if t.Key().Kind() != reflect.String {
+ // JSON only does string keys. Unlike Go's standard JSON, we'll
+ // convert just about anything to a string.
+ keystr = prettyString(keystr)
+ }
+ }
+ buf.WriteString(keystr)
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+ i++
+ }
+ buf.WriteByte('}')
+ return buf.String()
+ case reflect.Ptr, reflect.Interface:
+ if v.IsNil() {
+ return "null"
+ }
+ return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+ }
+ return fmt.Sprintf(`""`, t.Kind().String())
+}
+
// prettyString renders s as a double-quoted string. Strings that need no
// escape sequences take a fast, allocation-light path that simply wraps
// them in quotes; everything else goes through strconv.Quote.
func prettyString(s string) string {
	if !needsEscape(s) {
		return `"` + s + `"`
	}
	return strconv.Quote(s)
}

// needsEscape reports whether quoting s would require escape sequences.
// It does no allocations.
func needsEscape(s string) bool {
	for _, c := range s {
		if c == '"' || c == '\\' || !strconv.IsPrint(c) {
			return true
		}
	}
	return false
}
+
+func isEmpty(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("", r)
+ }
+ }()
+ return m.MarshalLog()
+}
+
// invokeStringer calls s.String(), converting any panic into a
// "<panic: ...>" string so that a broken Stringer cannot crash the
// logging call. (Restores the stripped format string.)
func invokeStringer(s fmt.Stringer) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return s.String()
}
+
// invokeError calls e.Error(), converting any panic into a
// "<panic: ...>" string so that a broken error implementation cannot
// crash the logging call. (Restores the stripped format string.)
func invokeError(e error) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return e.Error()
}
+
// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
//
// The json tags control how the value renders via prettyWithFlags.
type Caller struct {
	// File is the basename of the file for this call site.
	File string `json:"file"`
	// Line is the line number in the file for this call site.
	Line int `json:"line"`
	// Func is the function name for this call site, or empty if
	// Options.LogCallerFunc is not enabled.
	Func string `json:"function,omitempty"`
}
+
+func (f Formatter) caller() Caller {
+ // +1 for this frame, +1 for Info/Error.
+ pc, file, line, ok := runtime.Caller(f.depth + 2)
+ if !ok {
+ return Caller{"", 0, ""}
+ }
+ fn := ""
+ if f.opts.LogCallerFunc {
+ if fp := runtime.FuncForPC(pc); fp != nil {
+ fn = fp.Name()
+ }
+ }
+
+ return Caller{filepath.Base(file), line, fn}
+}
+
// noValue is substituted as the value for a key that arrived without one
// (i.e. an odd-length key-value list). Restores the upstream marker,
// which had been stripped to an empty string.
const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+ return fmt.Sprintf("", f.snippet(v))
+}
+
// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v any) string {
	const snipLen = 16

	snip := f.pretty(v)
	if len(snip) > snipLen {
		// Truncation is byte-wise; it may split an escape sequence or a
		// multi-byte rune, which is acceptable for a diagnostic snippet.
		snip = snip[:snipLen]
	}
	return snip
}
+
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
//
// Unlike flatten, this mutates kvList in place (no copy-on-write); callers
// pass lists they own or are about to hand to a render hook.
func (f Formatter) sanitize(kvList []any) []any {
	if len(kvList)%2 != 0 {
		kvList = append(kvList, noValue)
	}
	for i := 0; i < len(kvList); i += 2 {
		_, ok := kvList[i].(string)
		if !ok {
			kvList[i] = f.nonStringKey(kvList[i])
		}
	}
	return kvList
}
+
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
//
// The previously-open group (if any) and its saved values are rendered into
// parentValuesStr; render() later emits that string verbatim and closes one
// brace per groupDepth.
func (f *Formatter) startGroup(group string) {
	// Unnamed groups are just inlined.
	if group == "" {
		return
	}

	// Any saved values can no longer be changed.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	continuing := false

	if f.parentValuesStr != "" {
		buf.WriteString(f.parentValuesStr)
		continuing = true
	}

	if f.group != "" && f.valuesStr != "" {
		if continuing {
			buf.WriteByte(f.comma())
		}
		buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
		buf.WriteByte(f.colon())
		buf.WriteByte('{') // for the group
		continuing = false
	}

	if f.valuesStr != "" {
		if continuing {
			buf.WriteByte(f.comma())
		}
		buf.WriteString(f.valuesStr)
	}

	// NOTE: We don't close the scope here - that's done later, when a log line
	// is actually rendered (because we have N scopes to close).

	f.parentValuesStr = buf.String()

	// Start collecting new values.
	f.group = group
	f.groupDepth++
	f.valuesStr = ""
	f.values = nil
}
+
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
func (f *Formatter) Init(info logr.RuntimeInfo) {
	f.depth += info.CallDepth
}

// Enabled checks whether an info message at the given level should be logged.
// Levels at or below Options.Verbosity are enabled.
func (f Formatter) Enabled(level int) bool {
	return level <= f.opts.Verbosity
}

// GetDepth returns the current depth of this Formatter. This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
	return f.depth
}
+
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		// In JSON mode the logger name becomes a "logger" key instead of
		// a line prefix.
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Info {
		args = append(args, "caller", f.caller())
	}
	// An explicitly-empty LogInfoLevel key suppresses the level entirely.
	if key := *f.opts.LogInfoLevel; key != "" {
		args = append(args, key, level)
	}
	args = append(args, "msg", msg)
	return prefix, f.render(args, kvList)
}
+
// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
	args := make([]any, 0, 64) // using a constant here impacts perf
	prefix = f.prefix
	if f.outputFormat == outputJSON {
		// In JSON mode the logger name becomes a "logger" key instead of
		// a line prefix.
		args = append(args, "logger", prefix)
		prefix = ""
	}
	if f.opts.LogTimestamp {
		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
	}
	if policy := f.opts.LogCaller; policy == All || policy == Error {
		args = append(args, "caller", f.caller())
	}
	args = append(args, "msg", msg)
	// The "error" key is always emitted: err.Error() when non-nil,
	// otherwise a nil (rendered as null) value.
	var loggableErr any
	if err != nil {
		loggableErr = err.Error()
	}
	args = append(args, "error", loggableErr)
	return prefix, f.render(args, kvList)
}
+
// AddName appends the specified name. funcr uses '/' characters to separate
// name elements. Callers should not pass '/' in the provided name string, but
// this library does not actually enforce that.
func (f *Formatter) AddName(name string) {
	if len(f.prefix) > 0 {
		f.prefix += "/"
	}
	f.prefix += name
}

// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line. The pairs are rendered once here (into valuesStr) rather
// than on every Info/Error call.
func (f *Formatter) AddValues(kvList []any) {
	// Three slice args forces a copy.
	n := len(f.values)
	f.values = append(f.values[:n:n], kvList...)

	vals := f.values
	if hook := f.opts.RenderValuesHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}

	// Pre-render values, so we don't have to do it on each Info/Error call.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	f.flatten(buf, vals, false, true) // escape user-provided keys
	f.valuesStr = buf.String()
}

// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line.
func (f *Formatter) AddCallDepth(depth int) {
	f.depth += depth
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/funcr/slogsink.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 00000000000..7bd84761e2d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
var _ logr.SlogSink = &fnlogger{}

// extraSlogSinkDepth accounts for the stack frames between the user's slog
// call and this sink's Info/Error call.
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink

// Handle implements logr.SlogSink. Records at or above slog.LevelError are
// routed to Error (with a nil error value); everything else becomes an Info
// call at the verbosity derived by levelFromSlog.
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
	kvList := make([]any, 0, 2*record.NumAttrs())
	record.Attrs(func(attr slog.Attr) bool {
		kvList = attrToKVs(attr, kvList)
		return true
	})

	if record.Level >= slog.LevelError {
		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
	} else {
		level := l.levelFromSlog(record.Level)
		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
	}
	return nil
}
+
// WithAttrs implements logr.SlogSink. The attrs are converted to logr
// key-value pairs and saved on a copy of the sink (value receiver).
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
	kvList := make([]any, 0, 2*len(attrs))
	for _, attr := range attrs {
		kvList = attrToKVs(attr, kvList)
	}
	l.AddValues(kvList)
	return &l
}

// WithGroup implements logr.SlogSink by opening a new group scope on a copy
// of the sink.
func (l fnlogger) WithGroup(name string) logr.SlogSink {
	l.startGroup(name)
	return &l
}
+
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog
// Groups and other details of slog: group values render as a PseudoStruct
// under the group key, groups with an empty key are inlined, and non-group
// attrs with an empty key are dropped entirely.
func attrToKVs(attr slog.Attr, kvList []any) []any {
	attrVal := attr.Value.Resolve()
	if attrVal.Kind() == slog.KindGroup {
		groupVal := attrVal.Group()
		grpKVs := make([]any, 0, 2*len(groupVal))
		for _, attr := range groupVal {
			grpKVs = attrToKVs(attr, grpKVs)
		}
		if attr.Key == "" {
			// slog says we have to inline these
			kvList = append(kvList, grpKVs...)
		} else {
			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
		}
	} else if attr.Key != "" {
		kvList = append(kvList, attr.Key, attrVal.Any())
	}

	return kvList
}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/go.mod b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/go.mod
new file mode 100644
index 00000000000..b4f52a41744
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-logr/logr
+
+go 1.18
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/logr.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 00000000000..b4428e105b4
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,520 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API. Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// # Usage
+//
+// Logging is done using a Logger instance. Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface. The main
+// methods of Logger are Info() and Error(). Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+//
+// log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+//
+// logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same. Instead of:
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity. If there is no error
+// instance available, passing nil is valid.
+//
+// # Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode". To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written. Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
+//
+// We can write:
+//
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context. For example, you might want to add
+// a subsystem name:
+//
+// logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use. Repeated use of WithName()
+// will accumulate name "segments". These name segments will be joined in some
+// way by the LogSink implementation. It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// # Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance. For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+//
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
+//
+// # Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate. There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string. Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values. Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call it, the methods
+// will never crash. For cases where passing a logger is optional, a pointer to Logger
+// should be used.
+//
+// # Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// # Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// // and more of a way to test type conversion.
+// type Underlier interface {
+// GetUnderlying()
+// }
+//
+// Logger grants access to the sink to enable type assertions like this:
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
+func New(sink LogSink) Logger {
+ logger := Logger{}
+ logger.setSink(sink)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
+ return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+ l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+ return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+ l.setSink(sink)
+ return l
+}
+
+// Logger is an interface to an abstract logging implementation. This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink. Implementations of LogSink should provide their own constructors
+// that return Logger, not LogSink.
+//
+// The underlying sink can be accessed through GetSink and be modified through
+// WithSink. This enables the implementation of custom extensions (see "Break
+// Glass" in the package documentation). Normally the sink should be used only
+// indirectly.
+type Logger struct {
+ sink LogSink
+ level int
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+ // Some implementations of LogSink look at the caller in Enabled (e.g.
+ // different verbosity levels per package or file), but we only pass one
+ // CallDepth in (via Init). This means that all calls from Logger to the
+ // LogSink's Enabled, Info, and Error methods must have the same number of
+ // frames. In other words, Logger methods can't call other Logger methods
+ // which call these LogSink methods unless we do it the same in all paths.
+ return l.sink != nil && l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line. The key/value pairs can then be used to add additional variable
+// information. The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if l.sink.Enabled(l.level) { // see comment in Enabled
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Info(l.level, msg, keysAndValues...)
+ }
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentations for more
+// information). The log message will always be emitted, regardless of
+// verbosity level.
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present. The err parameter is optional
+// and nil may be passed instead of an error instance.
+func (l Logger) Error(err error, msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger. In other words, V-levels are additive. A higher verbosity
+// level means a log message is less important. Negative V-levels are treated
+// as 0.
+func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if level < 0 {
+ level = 0
+ }
+ l.level += level
+ return l
+}
+
+// GetV returns the verbosity level of the logger. If the logger's LogSink is
+// nil as in the Discard logger, this will always return 0.
+func (l Logger) GetV() int {
+ // 0 if l.sink nil because of the if check in V above.
+ return l.level
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...any) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithValues(keysAndValues...))
+ return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls with WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithName(name))
+ return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(depth))
+ }
+ return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible. This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will be also called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
+func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
+ var helper func()
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(1))
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ helper = withHelper.GetCallStackHelper()
+ } else {
+ helper = func() {}
+ }
+ return helper, l
+}
+
+// IsZero returns true if this logger is an uninitialized zero value
+func (l Logger) IsZero() bool {
+ return l.sink == nil
+}
+
+// RuntimeInfo holds information that the logr "core" library knows which
+// LogSinks might want to know.
+type RuntimeInfo struct {
+ // CallDepth is the number of call frames the logr library adds between the
+ // end-user and the LogSink. LogSink implementations which choose to print
+ // the original logging site (e.g. file & line) should climb this many
+ // additional frames to find it.
+ CallDepth int
+}
+
+// runtimeInfo is a static global. It must not be changed at run time.
+var runtimeInfo = RuntimeInfo{
+ CallDepth: 1,
+}
+
+// LogSink represents a logging implementation. End-users will generally not
+// interact with this type.
+type LogSink interface {
+ // Init receives optional information about the logr library for LogSink
+ // implementations that need it.
+ Init(info RuntimeInfo)
+
+ // Enabled tests whether this LogSink is enabled at the specified V-level.
+ // For example, commandline flags might be used to set the logging
+ // verbosity and disable some info logs.
+ Enabled(level int) bool
+
+ // Info logs a non-error message with the given key/value pairs as context.
+ // The level argument is provided for optional logging. This method will
+ // only be called when Enabled(level) is true. See Logger.Info for more
+ // details.
+ Info(level int, msg string, keysAndValues ...any)
+
+ // Error logs an error, with the given message and key/value pairs as
+ // context. See Logger.Error for more details.
+ Error(err error, msg string, keysAndValues ...any)
+
+ // WithValues returns a new LogSink with additional key/value pairs. See
+ // Logger.WithValues for more details.
+ WithValues(keysAndValues ...any) LogSink
+
+ // WithName returns a new LogSink with the specified name appended. See
+ // Logger.WithName for more details.
+ WithName(name string) LogSink
+}
+
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
+// to identify the original call site and can offset the depth by a specified
+// number of frames. This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogSink interface {
+ // WithCallDepth returns a LogSink that will offset the call
+ // stack by the specified number of frames when logging call
+ // site information.
+ //
+ // If depth is 0, the LogSink should skip exactly the number
+ // of call frames defined in RuntimeInfo.CallDepth when Info
+ // or Error are called, i.e. the attribution should be to the
+ // direct caller of Logger.Info or Logger.Error.
+ //
+ // If depth is 1 the attribution should skip 1 call frame, and so on.
+ // Successive calls to this are additive.
+ WithCallDepth(depth int) LogSink
+}
+
+// CallStackHelperLogSink represents a LogSink that knows how to climb
+// the call stack to identify the original call site and can skip
+// intermediate helper functions if they mark themselves as
+// helper. Go's testing package uses that approach.
+//
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
+//
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+ // GetCallStackHelper returns a function that must be called
+ // to mark the direct caller as helper function when logging
+ // call site information.
+ GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+ // MarshalLog can be used to:
+ // - ensure that structs are not logged as strings when the original
+ // value has a String method: return a different type without a
+ // String method
+ // - select which fields of a complex type should get logged:
+ // return a simpler struct with fewer fields
+ // - log unexported fields: return a different struct
+ // with exported fields
+ //
+ // It may return any value of any type.
+ MarshalLog() any
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/sloghandler.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 00000000000..82d1ba49481
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+type slogHandler struct {
+ // May be nil, in which case all logs get discarded.
+ sink LogSink
+ // Non-nil if sink is non-nil and implements SlogSink.
+ slogSink SlogSink
+
+ // groupPrefix collects values from WithGroup calls. It gets added as
+ // prefix to value keys when handling a log record.
+ groupPrefix string
+
+ // levelBias can be set when constructing the handler to influence the
+ // slog.Level of log records. A positive levelBias reduces the
+ // slog.Level value. slog has no API to influence this value after the
+ // handler got created, so it can only be set indirectly through
+ // Logger.V.
+ levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+ return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+ return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+ if l.slogSink != nil {
+ // Only adjust verbosity level of log entries < slog.LevelError.
+ if record.Level < slog.LevelError {
+ record.Level -= l.levelBias
+ }
+ return l.slogSink.Handle(ctx, record)
+ }
+
+ // No need to check for nil sink here because Handle will only be called
+ // when Enabled returned true.
+
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ return true
+ })
+ if record.Level >= slog.LevelError {
+ l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all....
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+ if sink, ok := l.sink.(CallDepthLogSink); ok {
+ return sink.WithCallDepth(2)
+ }
+ return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if l.sink == nil || len(attrs) == 0 {
+ return l
+ }
+
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithAttrs(attrs)
+ clone.sink = clone.slogSink
+ } else {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ }
+ clone.sink = l.sink.WithValues(kvList...)
+ }
+ return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+ if l.sink == nil {
+ return l
+ }
+ if name == "" {
+ // slog says to inline empty groups
+ return l
+ }
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithGroup(name)
+ clone.sink = clone.slogSink
+ } else {
+ clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+ }
+ return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ prefix := groupPrefix
+ if attr.Key != "" {
+ prefix = addPrefix(groupPrefix, attr.Key)
+ }
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, prefix, grpKVs)
+ }
+ kvList = append(kvList, grpKVs...)
+ } else if attr.Key != "" {
+ kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+ }
+
+ return kvList
+}
+
+func addPrefix(prefix, name string) string {
+ if prefix == "" {
+ return name
+ }
+ if name == "" {
+ return prefix
+ }
+ return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+ result := -level
+ result += l.levelBias // in case the original Logger had a V level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/slogr.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 00000000000..28a83d02439
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+ if handler, ok := handler.(*slogHandler); ok {
+ if handler.sink == nil {
+ return Discard()
+ }
+ return New(handler.sink).V(int(handler.levelBias))
+ }
+ return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned logger writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+// logger :=
+// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+ if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+ return sink.handler
+ }
+
+ handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+ if slogSink, ok := handler.sink.(SlogSink); ok {
+ handler.slogSink = slogSink
+ }
+ return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+// as intended by slog
+// - proper grouping of key/value pairs via WithGroup
+// - verbosity levels > slog.LevelInfo can be recorded
+// - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+ LogSink
+
+ Handle(ctx context.Context, record slog.Record) error
+ WithAttrs(attrs []slog.Attr) SlogSink
+ WithGroup(name string) SlogSink
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/slogsink.go b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 00000000000..4060fcbc2b0
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+ "runtime"
+ "time"
+)
+
+var (
+ _ LogSink = &slogSink{}
+ _ CallDepthLogSink = &slogSink{}
+ _ Underlier = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by NewFromLogHandler.
+type Underlier interface {
+ // GetUnderlying returns the Handler used by the LogSink.
+ GetUnderlying() slog.Handler
+}
+
+const (
+ // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger"
+
+ // errKey is used to log the error parameter of Error as an additional attribute.
+ errKey = "err"
+)
+
+type slogSink struct {
+ callDepth int
+ name string
+ handler slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+ l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+ return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+ newLogger := *l
+ newLogger.callDepth += depth
+ return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+ return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+ l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+ l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+ var pcs [1]uintptr
+ // skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+ runtime.Callers(3+l.callDepth, pcs[:])
+
+ record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+ if l.name != "" {
+ record.AddAttrs(slog.String(nameKey, l.name))
+ }
+ if err != nil {
+ record.AddAttrs(slog.Any(errKey, err))
+ }
+ record.Add(kvList...)
+ _ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+ if l.name != "" {
+ l.name += "/"
+ }
+ l.name += name
+ return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+ l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+ return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+ // We don't need the record itself, only its Add method.
+ record := slog.NewRecord(time.Time{}, 0, "", 0)
+ record.Add(kvList...)
+ attrs := make([]slog.Attr, 0, record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ attrs = append(attrs, attr)
+ return true
+ })
+ return attrs
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/README.md b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/README.md
index ba1b0680c97..7b6be2c6478 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/README.md
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/README.md
@@ -1,17 +1,15 @@
## locales
- ![Project status](https://img.shields.io/badge/version-0.13.0-green.svg)
+ ![Project status](https://img.shields.io/badge/version-0.14.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales)
-[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales)
[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales)
![License](https://img.shields.io/dub/l/vibe-d.svg)
-[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
Features
--------
-- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1
+- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/currency/currency.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/currency/currency.go
index cdaba596b62..b5a95fb0743 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/currency/currency.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/currency/currency.go
@@ -176,6 +176,7 @@ const (
MNT
MOP
MRO
+ MRU
MTL
MTP
MUR
@@ -262,9 +263,11 @@ const (
UYI
UYP
UYU
+ UYW
UZS
VEB
VEF
+ VES
VND
VNN
VUV
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.mod b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.mod
index 34ab6f2387d..9d4b9aedb44 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.mod
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.mod
@@ -1,5 +1,5 @@
module github.com/go-playground/locales
-go 1.13
+go 1.17
-require golang.org/x/text v0.3.2
+require golang.org/x/text v0.3.8
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.sum b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.sum
index 63c9200f5ee..b691fa8e1ce 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.sum
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/locales/go.sum
@@ -1,3 +1,25 @@
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/Makefile b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/Makefile
new file mode 100644
index 00000000000..ec3455bd59c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/Makefile
@@ -0,0 +1,18 @@
+GOCMD=GO111MODULE=on go
+
+linters-install:
+ @golangci-lint --version >/dev/null 2>&1 || { \
+ echo "installing linting tools..."; \
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \
+ }
+
+lint: linters-install
+ golangci-lint run
+
+test:
+ $(GOCMD) test -cover -race ./...
+
+bench:
+ $(GOCMD) test -bench=. -benchmem ./...
+
+.PHONY: test lint linters-install
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/README.md b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/README.md
index 071f33ab25a..d9b6654741b 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/README.md
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/README.md
@@ -1,11 +1,9 @@
## universal-translator
- ![Project status](https://img.shields.io/badge/version-0.17.0-green.svg)
-[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator)
+ ![Project status](https://img.shields.io/badge/version-0.18.1-green.svg)
[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator)
[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator)
![License](https://img.shields.io/dub/l/vibe-d.svg)
-[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules
@@ -18,7 +16,7 @@ use in your applications.
Features
--------
-- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3
+- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
@@ -51,7 +49,7 @@ Please see https://godoc.org/github.com/go-playground/universal-translator for u
File formatting
--------------
-All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained withing the same file(s);
+All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained within the same file(s);
they are only separated for easy viewing.
##### Examples:
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.mod b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.mod
index 8079590fe6b..4063e7161e3 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.mod
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.mod
@@ -1,5 +1,5 @@
module github.com/go-playground/universal-translator
-go 1.13
+go 1.18
-require github.com/go-playground/locales v0.13.0
+require github.com/go-playground/locales v0.14.1
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.sum b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.sum
index cbbf32416ec..63c3fd3dca3 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.sum
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/go.sum
@@ -1,4 +1,2 @@
-github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/import_export.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/import_export.go
index 7bd76f26b6f..87a1b465cba 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/import_export.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/import_export.go
@@ -3,7 +3,6 @@ package ut
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -41,7 +40,6 @@ const (
func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
_, err := os.Stat(dirname)
- fmt.Println(dirname, err, os.IsNotExist(err))
if err != nil {
if !os.IsNotExist(err) {
@@ -138,7 +136,7 @@ func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string)
return err
}
- err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
+ err = os.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
if err != nil {
return err
}
@@ -200,7 +198,7 @@ func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilenam
// NOTE: generally used when assets have been embedded into the binary and are already in memory.
func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
- b, err := ioutil.ReadAll(reader)
+ b, err := io.ReadAll(reader)
if err != nil {
return err
}
@@ -257,6 +255,8 @@ func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader i
func stringToPR(s string) locales.PluralRule {
switch s {
+ case "Zero":
+ return locales.PluralRuleZero
case "One":
return locales.PluralRuleOne
case "Two":
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/translator.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/translator.go
index cfafce8a09d..24b18db92a1 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/translator.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/universal-translator/translator.go
@@ -159,13 +159,13 @@ func (t *translator) AddCardinal(key interface{}, text string, rule locales.Plur
}
} else {
- tarr = make([]*transText, 7, 7)
+ tarr = make([]*transText, 7)
t.cardinalTanslations[key] = tarr
}
trans := &transText{
text: text,
- indexes: make([]int, 2, 2),
+ indexes: make([]int, 2),
}
tarr[rule] = trans
@@ -211,13 +211,13 @@ func (t *translator) AddOrdinal(key interface{}, text string, rule locales.Plura
}
} else {
- tarr = make([]*transText, 7, 7)
+ tarr = make([]*transText, 7)
t.ordinalTanslations[key] = tarr
}
trans := &transText{
text: text,
- indexes: make([]int, 2, 2),
+ indexes: make([]int, 2),
}
tarr[rule] = trans
@@ -261,13 +261,13 @@ func (t *translator) AddRange(key interface{}, text string, rule locales.PluralR
}
} else {
- tarr = make([]*transText, 7, 7)
+ tarr = make([]*transText, 7)
t.rangeTanslations[key] = tarr
}
trans := &transText{
text: text,
- indexes: make([]int, 4, 4),
+ indexes: make([]int, 4),
}
tarr[rule] = trans
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/.gitignore b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/.gitignore
index 792ca00d285..6305e52900a 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/.gitignore
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/.gitignore
@@ -6,6 +6,7 @@
# Folders
_obj
_test
+bin
# Architecture specific extensions/prefixes
*.[568vq]
@@ -25,5 +26,7 @@ _testmain.go
*.test
*.out
*.txt
+/**/*.DS_Store
cover.html
-README.html
\ No newline at end of file
+README.html
+.idea
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/.travis.yml b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/.travis.yml
deleted file mode 100644
index b88bb9fc35a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/.travis.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-language: go
-go:
- - 1.13.4
- - tip
-matrix:
- allow_failures:
- - go: tip
-
-notifications:
- email:
- recipients: dean.karn@gmail.com
- on_success: change
- on_failure: always
-
-before_install:
- - go install github.com/mattn/goveralls
- - mkdir -p $GOPATH/src/gopkg.in
- - ln -s $GOPATH/src/github.com/$TRAVIS_REPO_SLUG $GOPATH/src/gopkg.in/validator.v9
-
-# Only clone the most recent commit.
-git:
- depth: 1
-
-script:
- - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
-
-after_success: |
- [ $TRAVIS_GO_VERSION = 1.13.4 ] &&
- goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md
new file mode 100644
index 00000000000..b809c4ce128
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/MAINTAINERS.md
@@ -0,0 +1,16 @@
+## Maintainers Guide
+
+### Semantic Versioning
+Semantic versioning as defined [here](https://semver.org) must be strictly adhered to.
+
+### External Dependencies
+Any new external dependencies MUST:
+- Have a compatible LICENSE present.
+- Be actively maintained.
+- Be approved by @go-playground/admins
+
+### PR Merge Requirements
+- Up-to-date branch.
+- Passing tests and linting.
+- CODEOWNERS approval.
+- Tests that cover both the Happy and Unhappy paths.
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/Makefile b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/Makefile
index 19c91ed73e9..e097dfaf2f5 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/Makefile
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/Makefile
@@ -1,18 +1,18 @@
-GOCMD=GO111MODULE=on go
+GOCMD=go
linters-install:
@golangci-lint --version >/dev/null 2>&1 || { \
echo "installing linting tools..."; \
- curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.21.0; \
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \
}
lint: linters-install
- $(PWD)/bin/golangci-lint run
+ golangci-lint run
test:
$(GOCMD) test -cover -race ./...
bench:
- $(GOCMD) test -bench=. -benchmem ./...
+ $(GOCMD) test -run=NONE -bench=. -benchmem ./...
-.PHONY: test lint linters-install
\ No newline at end of file
+.PHONY: test lint linters-install
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/README.md b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/README.md
index daf28f1da88..9ab0705aeb7 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/README.md
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/README.md
@@ -1,20 +1,20 @@
Package validator
-================
- [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-![Project status](https://img.shields.io/badge/version-10.1.0-green.svg)
+=================
+ [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+![Project status](https://img.shields.io/badge/version-10.22.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
-[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://godoc.org/github.com/go-playground/validator)
+[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Package validator implements value validations for structs and individual fields based on tags.
It has the following **unique** features:
-- Cross Field and Cross Struct validations by using validation tags or custom validators.
+- Cross Field and Cross Struct validations by using validation tags or custom validators.
- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
-- Ability to dive into both map keys and values for validation
+- Ability to dive into both map keys and values for validation
- Handles type interface by determining it's underlying type prior to validation.
- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs
@@ -43,7 +43,7 @@ They return type error to avoid the issue discussed in the following, where err
* http://stackoverflow.com/a/29138676/3158232
* https://github.com/go-playground/validator/issues/134
-Validator only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so:
+Validator returns only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so:
```go
err := validate.Struct(mystruct)
@@ -53,7 +53,7 @@ validationErrors := err.(validator.ValidationErrors)
Usage and documentation
------
-Please see https://godoc.org/github.com/go-playground/validator for detailed usage docs.
+Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detailed usage docs.
##### Examples:
@@ -64,75 +64,276 @@ Please see https://godoc.org/github.com/go-playground/validator for detailed usa
- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding)
- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash)
+Baked-in Validations
+------
+
+### Special Notes:
+- If new to using validator it is highly recommended to initialize it using the `WithRequiredStructEnabled` option which is opt-in to new behaviour that will become the default behaviour in v11+. See documentation for more details.
+```go
+validate := validator.New(validator.WithRequiredStructEnabled())
+```
+
+### Fields:
+
+| Tag | Description |
+| - | - |
+| eqcsfield | Field Equals Another Field (relative)|
+| eqfield | Field Equals Another Field |
+| fieldcontains | Check the indicated characters are present in the Field |
+| fieldexcludes | Check the indicated characters are not present in the field |
+| gtcsfield | Field Greater Than Another Relative Field |
+| gtecsfield | Field Greater Than or Equal To Another Relative Field |
+| gtefield | Field Greater Than or Equal To Another Field |
+| gtfield | Field Greater Than Another Field |
+| ltcsfield | Less Than Another Relative Field |
+| ltecsfield | Less Than or Equal To Another Relative Field |
+| ltefield | Less Than or Equal To Another Field |
+| ltfield | Less Than Another Field |
+| necsfield | Field Does Not Equal Another Field (relative) |
+| nefield | Field Does Not Equal Another Field |
+
+### Network:
+
+| Tag | Description |
+| - | - |
+| cidr | Classless Inter-Domain Routing CIDR |
+| cidrv4 | Classless Inter-Domain Routing CIDRv4 |
+| cidrv6 | Classless Inter-Domain Routing CIDRv6 |
+| datauri | Data URL |
+| fqdn | Full Qualified Domain Name (FQDN) |
+| hostname | Hostname RFC 952 |
+| hostname_port | HostPort |
+| hostname_rfc1123 | Hostname RFC 1123 |
+| ip | Internet Protocol Address IP |
+| ip4_addr | Internet Protocol Address IPv4 |
+| ip6_addr | Internet Protocol Address IPv6 |
+| ip_addr | Internet Protocol Address IP |
+| ipv4 | Internet Protocol Address IPv4 |
+| ipv6 | Internet Protocol Address IPv6 |
+| mac | Media Access Control Address MAC |
+| tcp4_addr | Transmission Control Protocol Address TCPv4 |
+| tcp6_addr | Transmission Control Protocol Address TCPv6 |
+| tcp_addr | Transmission Control Protocol Address TCP |
+| udp4_addr | User Datagram Protocol Address UDPv4 |
+| udp6_addr | User Datagram Protocol Address UDPv6 |
+| udp_addr | User Datagram Protocol Address UDP |
+| unix_addr | Unix domain socket end point Address |
+| uri | URI String |
+| url | URL String |
+| http_url | HTTP URL String |
+| url_encoded | URL Encoded |
+| urn_rfc2141 | Urn RFC 2141 String |
+
+### Strings:
+
+| Tag | Description |
+| - | - |
+| alpha | Alpha Only |
+| alphanum | Alphanumeric |
+| alphanumunicode | Alphanumeric Unicode |
+| alphaunicode | Alpha Unicode |
+| ascii | ASCII |
+| boolean | Boolean |
+| contains | Contains |
+| containsany | Contains Any |
+| containsrune | Contains Rune |
+| endsnotwith | Ends Not With |
+| endswith | Ends With |
+| excludes | Excludes |
+| excludesall | Excludes All |
+| excludesrune | Excludes Rune |
+| lowercase | Lowercase |
+| multibyte | Multi-Byte Characters |
+| number | Number |
+| numeric | Numeric |
+| printascii | Printable ASCII |
+| startsnotwith | Starts Not With |
+| startswith | Starts With |
+| uppercase | Uppercase |
+
+### Format:
+| Tag | Description |
+| - | - |
+| base64 | Base64 String |
+| base64url | Base64URL String |
+| base64rawurl | Base64RawURL String |
+| bic | Business Identifier Code (ISO 9362) |
+| bcp47_language_tag | Language tag (BCP 47) |
+| btc_addr | Bitcoin Address |
+| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
+| credit_card | Credit Card Number |
+| mongodb | MongoDB ObjectID |
+| mongodb_connection_string | MongoDB Connection String |
+| cron | Cron |
+| spicedb | SpiceDb ObjectID/Permission/Type |
+| datetime | Datetime |
+| e164 | e164 formatted phone number |
+| email | E-mail String
+| eth_addr | Ethereum Address |
+| hexadecimal | Hexadecimal String |
+| hexcolor | Hexcolor String |
+| hsl | HSL String |
+| hsla | HSLA String |
+| html | HTML Tags |
+| html_encoded | HTML Encoded |
+| isbn | International Standard Book Number |
+| isbn10 | International Standard Book Number 10 |
+| isbn13 | International Standard Book Number 13 |
+| issn | International Standard Serial Number |
+| iso3166_1_alpha2 | Two-letter country code (ISO 3166-1 alpha-2) |
+| iso3166_1_alpha3 | Three-letter country code (ISO 3166-1 alpha-3) |
+| iso3166_1_alpha_numeric | Numeric country code (ISO 3166-1 numeric) |
+| iso3166_2 | Country subdivision code (ISO 3166-2) |
+| iso4217 | Currency code (ISO 4217) |
+| json | JSON |
+| jwt | JSON Web Token (JWT) |
+| latitude | Latitude |
+| longitude | Longitude |
+| luhn_checksum | Luhn Algorithm Checksum (for strings and (u)int) |
+| postcode_iso3166_alpha2 | Postcode |
+| postcode_iso3166_alpha2_field | Postcode |
+| rgb | RGB String |
+| rgba | RGBA String |
+| ssn | Social Security Number SSN |
+| timezone | Timezone |
+| uuid | Universally Unique Identifier UUID |
+| uuid3 | Universally Unique Identifier UUID v3 |
+| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 |
+| uuid4 | Universally Unique Identifier UUID v4 |
+| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 |
+| uuid5 | Universally Unique Identifier UUID v5 |
+| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 |
+| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 |
+| md4 | MD4 hash |
+| md5 | MD5 hash |
+| sha256 | SHA256 hash |
+| sha384 | SHA384 hash |
+| sha512 | SHA512 hash |
+| ripemd128 | RIPEMD-128 hash |
+| ripemd128 | RIPEMD-160 hash |
+| tiger128 | TIGER128 hash |
+| tiger160 | TIGER160 hash |
+| tiger192 | TIGER192 hash |
+| semver | Semantic Versioning 2.0.0 |
+| ulid | Universally Unique Lexicographically Sortable Identifier ULID |
+| cve | Common Vulnerabilities and Exposures Identifier (CVE id) |
+
+### Comparisons:
+| Tag | Description |
+| - | - |
+| eq | Equals |
+| eq_ignore_case | Equals ignoring case |
+| gt | Greater than|
+| gte | Greater than or equal |
+| lt | Less Than |
+| lte | Less Than or Equal |
+| ne | Not Equal |
+| ne_ignore_case | Not Equal ignoring case |
+
+### Other:
+| Tag | Description |
+| - | - |
+| dir | Existing Directory |
+| dirpath | Directory Path |
+| file | Existing File |
+| filepath | File Path |
+| image | Image |
+| isdefault | Is Default |
+| len | Length |
+| max | Maximum |
+| min | Minimum |
+| oneof | One Of |
+| required | Required |
+| required_if | Required If |
+| required_unless | Required Unless |
+| required_with | Required With |
+| required_with_all | Required With All |
+| required_without | Required Without |
+| required_without_all | Required Without All |
+| excluded_if | Excluded If |
+| excluded_unless | Excluded Unless |
+| excluded_with | Excluded With |
+| excluded_with_all | Excluded With All |
+| excluded_without | Excluded Without |
+| excluded_without_all | Excluded Without All |
+| unique | Unique |
+
+#### Aliases:
+| Tag | Description |
+| - | - |
+| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla |
+| country_code | iso3166_1_alpha2\|iso3166_1_alpha3\|iso3166_1_alpha_numeric |
+
Benchmarks
------
###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64
```go
+go version go1.21.0 darwin/arm64
goos: darwin
-goarch: amd64
-pkg: github.com/go-playground/validator
-BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op
-BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op
-BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op
-BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op
-BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op
-BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op
-BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op
-BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op
-BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op
-BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op
-BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op
-BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op
-BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op
-BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op
-BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op
-BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op
-BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op
-BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op
-BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op
-BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op
-BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op
-BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op
-BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op
-BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op
-BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op
-BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op
-BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op
-BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op
-BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op
-BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op
-BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op
-BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op
-BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op
-BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op
-BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op
-BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op
-BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op
-BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op
-BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op
-BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op
-BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op
-BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op
-BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op
-BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op
-BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op
-BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op
-BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op
-BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op
-BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op
-BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op
-BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op
-BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op
-BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op
-BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op
-BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op
-BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op
+goarch: arm64
+pkg: github.com/go-playground/validator/v10
+BenchmarkFieldSuccess-8 33142266 35.94 ns/op 0 B/op 0 allocs/op
+BenchmarkFieldSuccessParallel-8 200816191 6.568 ns/op 0 B/op 0 allocs/op
+BenchmarkFieldFailure-8 6779707 175.1 ns/op 200 B/op 4 allocs/op
+BenchmarkFieldFailureParallel-8 11044147 108.4 ns/op 200 B/op 4 allocs/op
+BenchmarkFieldArrayDiveSuccess-8 6054232 194.4 ns/op 97 B/op 5 allocs/op
+BenchmarkFieldArrayDiveSuccessParallel-8 12523388 94.07 ns/op 97 B/op 5 allocs/op
+BenchmarkFieldArrayDiveFailure-8 3587043 334.3 ns/op 300 B/op 10 allocs/op
+BenchmarkFieldArrayDiveFailureParallel-8 5816665 200.8 ns/op 300 B/op 10 allocs/op
+BenchmarkFieldMapDiveSuccess-8 2217910 540.1 ns/op 288 B/op 14 allocs/op
+BenchmarkFieldMapDiveSuccessParallel-8 4446698 258.7 ns/op 288 B/op 14 allocs/op
+BenchmarkFieldMapDiveFailure-8 2392759 504.6 ns/op 376 B/op 13 allocs/op
+BenchmarkFieldMapDiveFailureParallel-8 4244199 286.9 ns/op 376 B/op 13 allocs/op
+BenchmarkFieldMapDiveWithKeysSuccess-8 2005857 592.1 ns/op 288 B/op 14 allocs/op
+BenchmarkFieldMapDiveWithKeysSuccessParallel-8 4400850 296.9 ns/op 288 B/op 14 allocs/op
+BenchmarkFieldMapDiveWithKeysFailure-8 1850227 643.8 ns/op 553 B/op 16 allocs/op
+BenchmarkFieldMapDiveWithKeysFailureParallel-8 3293233 375.1 ns/op 553 B/op 16 allocs/op
+BenchmarkFieldCustomTypeSuccess-8 12174412 98.25 ns/op 32 B/op 2 allocs/op
+BenchmarkFieldCustomTypeSuccessParallel-8 34389907 35.49 ns/op 32 B/op 2 allocs/op
+BenchmarkFieldCustomTypeFailure-8 7582524 156.6 ns/op 184 B/op 3 allocs/op
+BenchmarkFieldCustomTypeFailureParallel-8 13019902 92.79 ns/op 184 B/op 3 allocs/op
+BenchmarkFieldOrTagSuccess-8 3427260 349.4 ns/op 16 B/op 1 allocs/op
+BenchmarkFieldOrTagSuccessParallel-8 15144128 81.25 ns/op 16 B/op 1 allocs/op
+BenchmarkFieldOrTagFailure-8 5913546 201.9 ns/op 216 B/op 5 allocs/op
+BenchmarkFieldOrTagFailureParallel-8 9810212 113.7 ns/op 216 B/op 5 allocs/op
+BenchmarkStructLevelValidationSuccess-8 13456327 87.66 ns/op 16 B/op 1 allocs/op
+BenchmarkStructLevelValidationSuccessParallel-8 41818888 27.77 ns/op 16 B/op 1 allocs/op
+BenchmarkStructLevelValidationFailure-8 4166284 272.6 ns/op 264 B/op 7 allocs/op
+BenchmarkStructLevelValidationFailureParallel-8 7594581 152.1 ns/op 264 B/op 7 allocs/op
+BenchmarkStructSimpleCustomTypeSuccess-8 6508082 182.6 ns/op 32 B/op 2 allocs/op
+BenchmarkStructSimpleCustomTypeSuccessParallel-8 23078605 54.78 ns/op 32 B/op 2 allocs/op
+BenchmarkStructSimpleCustomTypeFailure-8 3118352 381.0 ns/op 416 B/op 9 allocs/op
+BenchmarkStructSimpleCustomTypeFailureParallel-8 5300738 224.1 ns/op 432 B/op 10 allocs/op
+BenchmarkStructFilteredSuccess-8 4761807 251.1 ns/op 216 B/op 5 allocs/op
+BenchmarkStructFilteredSuccessParallel-8 8792598 128.6 ns/op 216 B/op 5 allocs/op
+BenchmarkStructFilteredFailure-8 5202573 232.1 ns/op 216 B/op 5 allocs/op
+BenchmarkStructFilteredFailureParallel-8 9591267 121.4 ns/op 216 B/op 5 allocs/op
+BenchmarkStructPartialSuccess-8 5188512 231.6 ns/op 224 B/op 4 allocs/op
+BenchmarkStructPartialSuccessParallel-8 9179776 123.1 ns/op 224 B/op 4 allocs/op
+BenchmarkStructPartialFailure-8 3071212 392.5 ns/op 440 B/op 9 allocs/op
+BenchmarkStructPartialFailureParallel-8 5344261 223.7 ns/op 440 B/op 9 allocs/op
+BenchmarkStructExceptSuccess-8 3184230 375.0 ns/op 424 B/op 8 allocs/op
+BenchmarkStructExceptSuccessParallel-8 10090130 108.9 ns/op 208 B/op 3 allocs/op
+BenchmarkStructExceptFailure-8 3347226 357.7 ns/op 424 B/op 8 allocs/op
+BenchmarkStructExceptFailureParallel-8 5654923 209.5 ns/op 424 B/op 8 allocs/op
+BenchmarkStructSimpleCrossFieldSuccess-8 5232265 229.1 ns/op 56 B/op 3 allocs/op
+BenchmarkStructSimpleCrossFieldSuccessParallel-8 17436674 64.75 ns/op 56 B/op 3 allocs/op
+BenchmarkStructSimpleCrossFieldFailure-8 3128613 383.6 ns/op 272 B/op 8 allocs/op
+BenchmarkStructSimpleCrossFieldFailureParallel-8 6994113 168.8 ns/op 272 B/op 8 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3506487 340.9 ns/op 64 B/op 4 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 13431300 91.77 ns/op 64 B/op 4 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2410566 500.9 ns/op 288 B/op 9 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 6344510 188.2 ns/op 288 B/op 9 allocs/op
+BenchmarkStructSimpleSuccess-8 8922726 133.8 ns/op 0 B/op 0 allocs/op
+BenchmarkStructSimpleSuccessParallel-8 55291153 23.63 ns/op 0 B/op 0 allocs/op
+BenchmarkStructSimpleFailure-8 3171553 378.4 ns/op 416 B/op 9 allocs/op
+BenchmarkStructSimpleFailureParallel-8 5571692 212.0 ns/op 416 B/op 9 allocs/op
+BenchmarkStructComplexSuccess-8 1683750 714.5 ns/op 224 B/op 5 allocs/op
+BenchmarkStructComplexSuccessParallel-8 4578046 257.0 ns/op 224 B/op 5 allocs/op
+BenchmarkStructComplexFailure-8 481585 2547 ns/op 3041 B/op 48 allocs/op
+BenchmarkStructComplexFailureParallel-8 965764 1577 ns/op 3040 B/op 48 allocs/op
+BenchmarkOneof-8 17380881 68.50 ns/op 0 B/op 0 allocs/op
+BenchmarkOneofParallel-8 8084733 153.5 ns/op 0 B/op 0 allocs/op
```
Complementary Software
@@ -149,5 +350,10 @@ How to Contribute
Make a pull request...
License
-------
+-------
Distributed under MIT License, please see license file within the code for more details.
+
+Maintainers
+-----------
+This project has grown large enough that more than one person is required to properly support the community.
+If you are interested in becoming a maintainer please reach out to me https://github.com/deankarn
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/baked_in.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/baked_in.go
index 4124c621a02..b6fbaafad97 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/baked_in.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -4,8 +4,10 @@ import (
"bytes"
"context"
"crypto/sha256"
+ "encoding/hex"
"encoding/json"
"fmt"
+ "io/fs"
"net"
"net/url"
"os"
@@ -13,9 +15,14 @@ import (
"strconv"
"strings"
"sync"
+ "syscall"
"time"
"unicode/utf8"
+ "golang.org/x/crypto/sha3"
+ "golang.org/x/text/language"
+
+ "github.com/gabriel-vasile/mimetype"
urn "github.com/leodido/go-urn"
)
@@ -27,7 +34,7 @@ type Func func(fl FieldLevel) bool
// validation needs. The return value should be true when validation succeeds.
type FuncCtx func(ctx context.Context, fl FieldLevel) bool
-// wrapFunc wraps noramal Func makes it compatible with FuncCtx
+// wrapFunc wraps normal Func makes it compatible with FuncCtx
func wrapFunc(fn Func) FuncCtx {
if fn == nil {
return nil // be sure not to wrap a bad function.
@@ -44,6 +51,7 @@ var (
endKeysTag: {},
structOnlyTag: {},
omitempty: {},
+ omitnil: {},
skipValidationTag: {},
utf8HexComma: {},
utf8Pipe: {},
@@ -52,131 +60,192 @@ var (
isdefault: {},
}
- // BakedInAliasValidators is a default mapping of a single validation tag that
+ // bakedInAliases is a default mapping of a single validation tag that
// defines a common or complex set of validation(s) to simplify
// adding validation to structs.
bakedInAliases = map[string]string{
- "iscolor": "hexcolor|rgb|rgba|hsl|hsla",
+ "iscolor": "hexcolor|rgb|rgba|hsl|hsla",
+ "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric",
+ "eu_country_code": "iso3166_1_alpha2_eu|iso3166_1_alpha3_eu|iso3166_1_alpha_numeric_eu",
}
- // BakedInValidators is the default map of ValidationFunc
+ // bakedInValidators is the default map of ValidationFunc
// you can add, remove or even replace items to suite your needs,
// or even disregard and use your own map if so desired.
bakedInValidators = map[string]Func{
- "required": hasValue,
- "required_with": requiredWith,
- "required_with_all": requiredWithAll,
- "required_without": requiredWithout,
- "required_without_all": requiredWithoutAll,
- "isdefault": isDefault,
- "len": hasLengthOf,
- "min": hasMinOf,
- "max": hasMaxOf,
- "eq": isEq,
- "ne": isNe,
- "lt": isLt,
- "lte": isLte,
- "gt": isGt,
- "gte": isGte,
- "eqfield": isEqField,
- "eqcsfield": isEqCrossStructField,
- "necsfield": isNeCrossStructField,
- "gtcsfield": isGtCrossStructField,
- "gtecsfield": isGteCrossStructField,
- "ltcsfield": isLtCrossStructField,
- "ltecsfield": isLteCrossStructField,
- "nefield": isNeField,
- "gtefield": isGteField,
- "gtfield": isGtField,
- "ltefield": isLteField,
- "ltfield": isLtField,
- "fieldcontains": fieldContains,
- "fieldexcludes": fieldExcludes,
- "alpha": isAlpha,
- "alphanum": isAlphanum,
- "alphaunicode": isAlphaUnicode,
- "alphanumunicode": isAlphanumUnicode,
- "numeric": isNumeric,
- "number": isNumber,
- "hexadecimal": isHexadecimal,
- "hexcolor": isHEXColor,
- "rgb": isRGB,
- "rgba": isRGBA,
- "hsl": isHSL,
- "hsla": isHSLA,
- "e164": isE164,
- "email": isEmail,
- "url": isURL,
- "uri": isURI,
- "urn_rfc2141": isUrnRFC2141, // RFC 2141
- "file": isFile,
- "base64": isBase64,
- "base64url": isBase64URL,
- "contains": contains,
- "containsany": containsAny,
- "containsrune": containsRune,
- "excludes": excludes,
- "excludesall": excludesAll,
- "excludesrune": excludesRune,
- "startswith": startsWith,
- "endswith": endsWith,
- "isbn": isISBN,
- "isbn10": isISBN10,
- "isbn13": isISBN13,
- "eth_addr": isEthereumAddress,
- "btc_addr": isBitcoinAddress,
- "btc_addr_bech32": isBitcoinBech32Address,
- "uuid": isUUID,
- "uuid3": isUUID3,
- "uuid4": isUUID4,
- "uuid5": isUUID5,
- "uuid_rfc4122": isUUIDRFC4122,
- "uuid3_rfc4122": isUUID3RFC4122,
- "uuid4_rfc4122": isUUID4RFC4122,
- "uuid5_rfc4122": isUUID5RFC4122,
- "ascii": isASCII,
- "printascii": isPrintableASCII,
- "multibyte": hasMultiByteCharacter,
- "datauri": isDataURI,
- "latitude": isLatitude,
- "longitude": isLongitude,
- "ssn": isSSN,
- "ipv4": isIPv4,
- "ipv6": isIPv6,
- "ip": isIP,
- "cidrv4": isCIDRv4,
- "cidrv6": isCIDRv6,
- "cidr": isCIDR,
- "tcp4_addr": isTCP4AddrResolvable,
- "tcp6_addr": isTCP6AddrResolvable,
- "tcp_addr": isTCPAddrResolvable,
- "udp4_addr": isUDP4AddrResolvable,
- "udp6_addr": isUDP6AddrResolvable,
- "udp_addr": isUDPAddrResolvable,
- "ip4_addr": isIP4AddrResolvable,
- "ip6_addr": isIP6AddrResolvable,
- "ip_addr": isIPAddrResolvable,
- "unix_addr": isUnixAddrResolvable,
- "mac": isMAC,
- "hostname": isHostnameRFC952, // RFC 952
- "hostname_rfc1123": isHostnameRFC1123, // RFC 1123
- "fqdn": isFQDN,
- "unique": isUnique,
- "oneof": isOneOf,
- "html": isHTML,
- "html_encoded": isHTMLEncoded,
- "url_encoded": isURLEncoded,
- "dir": isDir,
- "json": isJSON,
- "hostname_port": isHostnamePort,
- "lowercase": isLowercase,
- "uppercase": isUppercase,
- "datetime": isDatetime,
+ "required": hasValue,
+ "required_if": requiredIf,
+ "required_unless": requiredUnless,
+ "skip_unless": skipUnless,
+ "required_with": requiredWith,
+ "required_with_all": requiredWithAll,
+ "required_without": requiredWithout,
+ "required_without_all": requiredWithoutAll,
+ "excluded_if": excludedIf,
+ "excluded_unless": excludedUnless,
+ "excluded_with": excludedWith,
+ "excluded_with_all": excludedWithAll,
+ "excluded_without": excludedWithout,
+ "excluded_without_all": excludedWithoutAll,
+ "isdefault": isDefault,
+ "len": hasLengthOf,
+ "min": hasMinOf,
+ "max": hasMaxOf,
+ "eq": isEq,
+ "eq_ignore_case": isEqIgnoreCase,
+ "ne": isNe,
+ "ne_ignore_case": isNeIgnoreCase,
+ "lt": isLt,
+ "lte": isLte,
+ "gt": isGt,
+ "gte": isGte,
+ "eqfield": isEqField,
+ "eqcsfield": isEqCrossStructField,
+ "necsfield": isNeCrossStructField,
+ "gtcsfield": isGtCrossStructField,
+ "gtecsfield": isGteCrossStructField,
+ "ltcsfield": isLtCrossStructField,
+ "ltecsfield": isLteCrossStructField,
+ "nefield": isNeField,
+ "gtefield": isGteField,
+ "gtfield": isGtField,
+ "ltefield": isLteField,
+ "ltfield": isLtField,
+ "fieldcontains": fieldContains,
+ "fieldexcludes": fieldExcludes,
+ "alpha": isAlpha,
+ "alphanum": isAlphanum,
+ "alphaunicode": isAlphaUnicode,
+ "alphanumunicode": isAlphanumUnicode,
+ "boolean": isBoolean,
+ "numeric": isNumeric,
+ "number": isNumber,
+ "hexadecimal": isHexadecimal,
+ "hexcolor": isHEXColor,
+ "rgb": isRGB,
+ "rgba": isRGBA,
+ "hsl": isHSL,
+ "hsla": isHSLA,
+ "e164": isE164,
+ "email": isEmail,
+ "url": isURL,
+ "http_url": isHttpURL,
+ "uri": isURI,
+ "urn_rfc2141": isUrnRFC2141, // RFC 2141
+ "file": isFile,
+ "filepath": isFilePath,
+ "base32": isBase32,
+ "base64": isBase64,
+ "base64url": isBase64URL,
+ "base64rawurl": isBase64RawURL,
+ "contains": contains,
+ "containsany": containsAny,
+ "containsrune": containsRune,
+ "excludes": excludes,
+ "excludesall": excludesAll,
+ "excludesrune": excludesRune,
+ "startswith": startsWith,
+ "endswith": endsWith,
+ "startsnotwith": startsNotWith,
+ "endsnotwith": endsNotWith,
+ "image": isImage,
+ "isbn": isISBN,
+ "isbn10": isISBN10,
+ "isbn13": isISBN13,
+ "issn": isISSN,
+ "eth_addr": isEthereumAddress,
+ "eth_addr_checksum": isEthereumAddressChecksum,
+ "btc_addr": isBitcoinAddress,
+ "btc_addr_bech32": isBitcoinBech32Address,
+ "uuid": isUUID,
+ "uuid3": isUUID3,
+ "uuid4": isUUID4,
+ "uuid5": isUUID5,
+ "uuid_rfc4122": isUUIDRFC4122,
+ "uuid3_rfc4122": isUUID3RFC4122,
+ "uuid4_rfc4122": isUUID4RFC4122,
+ "uuid5_rfc4122": isUUID5RFC4122,
+ "ulid": isULID,
+ "md4": isMD4,
+ "md5": isMD5,
+ "sha256": isSHA256,
+ "sha384": isSHA384,
+ "sha512": isSHA512,
+ "ripemd128": isRIPEMD128,
+ "ripemd160": isRIPEMD160,
+ "tiger128": isTIGER128,
+ "tiger160": isTIGER160,
+ "tiger192": isTIGER192,
+ "ascii": isASCII,
+ "printascii": isPrintableASCII,
+ "multibyte": hasMultiByteCharacter,
+ "datauri": isDataURI,
+ "latitude": isLatitude,
+ "longitude": isLongitude,
+ "ssn": isSSN,
+ "ipv4": isIPv4,
+ "ipv6": isIPv6,
+ "ip": isIP,
+ "cidrv4": isCIDRv4,
+ "cidrv6": isCIDRv6,
+ "cidr": isCIDR,
+ "tcp4_addr": isTCP4AddrResolvable,
+ "tcp6_addr": isTCP6AddrResolvable,
+ "tcp_addr": isTCPAddrResolvable,
+ "udp4_addr": isUDP4AddrResolvable,
+ "udp6_addr": isUDP6AddrResolvable,
+ "udp_addr": isUDPAddrResolvable,
+ "ip4_addr": isIP4AddrResolvable,
+ "ip6_addr": isIP6AddrResolvable,
+ "ip_addr": isIPAddrResolvable,
+ "unix_addr": isUnixAddrResolvable,
+ "mac": isMAC,
+ "hostname": isHostnameRFC952, // RFC 952
+ "hostname_rfc1123": isHostnameRFC1123, // RFC 1123
+ "fqdn": isFQDN,
+ "unique": isUnique,
+ "oneof": isOneOf,
+ "html": isHTML,
+ "html_encoded": isHTMLEncoded,
+ "url_encoded": isURLEncoded,
+ "dir": isDir,
+ "dirpath": isDirPath,
+ "json": isJSON,
+ "jwt": isJWT,
+ "hostname_port": isHostnamePort,
+ "lowercase": isLowercase,
+ "uppercase": isUppercase,
+ "datetime": isDatetime,
+ "timezone": isTimeZone,
+ "iso3166_1_alpha2": isIso3166Alpha2,
+ "iso3166_1_alpha2_eu": isIso3166Alpha2EU,
+ "iso3166_1_alpha3": isIso3166Alpha3,
+ "iso3166_1_alpha3_eu": isIso3166Alpha3EU,
+ "iso3166_1_alpha_numeric": isIso3166AlphaNumeric,
+ "iso3166_1_alpha_numeric_eu": isIso3166AlphaNumericEU,
+ "iso3166_2": isIso31662,
+ "iso4217": isIso4217,
+ "iso4217_numeric": isIso4217Numeric,
+ "bcp47_language_tag": isBCP47LanguageTag,
+ "postcode_iso3166_alpha2": isPostcodeByIso3166Alpha2,
+ "postcode_iso3166_alpha2_field": isPostcodeByIso3166Alpha2Field,
+ "bic": isIsoBicFormat,
+ "semver": isSemverFormat,
+ "dns_rfc1035_label": isDnsRFC1035LabelFormat,
+ "credit_card": isCreditCard,
+ "cve": isCveFormat,
+ "luhn_checksum": hasLuhnChecksum,
+ "mongodb": isMongoDBObjectId,
+ "mongodb_connection_string": isMongoDBConnectionString,
+ "cron": isCron,
+ "spicedb": isSpiceDB,
}
)
-var oneofValsCache = map[string][]string{}
-var oneofValsCacheRWLock = sync.RWMutex{}
+var (
+ oneofValsCache = map[string][]string{}
+ oneofValsCacheRWLock = sync.RWMutex{}
+)
func parseOneOfParam2(s string) []string {
oneofValsCacheRWLock.RLock()
@@ -184,7 +253,7 @@ func parseOneOfParam2(s string) []string {
oneofValsCacheRWLock.RUnlock()
if !ok {
oneofValsCacheRWLock.Lock()
- vals = splitParamsRegex.FindAllString(s, -1)
+ vals = splitParamsRegex().FindAllString(s, -1)
for i := 0; i < len(vals); i++ {
vals[i] = strings.Replace(vals[i], "'", "", -1)
}
@@ -195,15 +264,15 @@ func parseOneOfParam2(s string) []string {
}
func isURLEncoded(fl FieldLevel) bool {
- return uRLEncodedRegex.MatchString(fl.Field().String())
+ return uRLEncodedRegex().MatchString(fl.Field().String())
}
func isHTMLEncoded(fl FieldLevel) bool {
- return hTMLEncodedRegex.MatchString(fl.Field().String())
+ return hTMLEncodedRegex().MatchString(fl.Field().String())
}
func isHTML(fl FieldLevel) bool {
- return hTMLRegex.MatchString(fl.Field().String())
+ return hTMLRegex().MatchString(fl.Field().String())
}
func isOneOf(fl FieldLevel) bool {
@@ -232,113 +301,138 @@ func isOneOf(fl FieldLevel) bool {
// isUnique is the validation function for validating if each array|slice|map value is unique
func isUnique(fl FieldLevel) bool {
-
field := fl.Field()
param := fl.Param()
v := reflect.ValueOf(struct{}{})
switch field.Kind() {
case reflect.Slice, reflect.Array:
+ elem := field.Type().Elem()
+ if elem.Kind() == reflect.Ptr {
+ elem = elem.Elem()
+ }
+
if param == "" {
- m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
+ m := reflect.MakeMap(reflect.MapOf(elem, v.Type()))
for i := 0; i < field.Len(); i++ {
- m.SetMapIndex(field.Index(i), v)
+ m.SetMapIndex(reflect.Indirect(field.Index(i)), v)
}
return field.Len() == m.Len()
}
- sf, ok := field.Type().Elem().FieldByName(param)
+ sf, ok := elem.FieldByName(param)
if !ok {
panic(fmt.Sprintf("Bad field name %s", param))
}
- m := reflect.MakeMap(reflect.MapOf(sf.Type, v.Type()))
+ sfTyp := sf.Type
+ if sfTyp.Kind() == reflect.Ptr {
+ sfTyp = sfTyp.Elem()
+ }
+
+ m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
+ var fieldlen int
for i := 0; i < field.Len(); i++ {
- m.SetMapIndex(field.Index(i).FieldByName(param), v)
+ key := reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param))
+ if key.IsValid() {
+ fieldlen++
+ m.SetMapIndex(key, v)
+ }
}
- return field.Len() == m.Len()
+ return fieldlen == m.Len()
case reflect.Map:
- m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
+ var m reflect.Value
+ if field.Type().Elem().Kind() == reflect.Ptr {
+ m = reflect.MakeMap(reflect.MapOf(field.Type().Elem().Elem(), v.Type()))
+ } else {
+ m = reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
+ }
for _, k := range field.MapKeys() {
- m.SetMapIndex(field.MapIndex(k), v)
+ m.SetMapIndex(reflect.Indirect(field.MapIndex(k)), v)
}
+
return field.Len() == m.Len()
default:
+ if parent := fl.Parent(); parent.Kind() == reflect.Struct {
+ uniqueField := parent.FieldByName(param)
+ if uniqueField == reflect.ValueOf(nil) {
+ panic(fmt.Sprintf("Bad field name provided %s", param))
+ }
+
+ if uniqueField.Kind() != field.Kind() {
+ panic(fmt.Sprintf("Bad field type %T:%T", field.Interface(), uniqueField.Interface()))
+ }
+
+ return field.Interface() != uniqueField.Interface()
+ }
+
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
}
-// IsMAC is the validation function for validating if the field's value is a valid MAC address.
+// isMAC is the validation function for validating if the field's value is a valid MAC address.
func isMAC(fl FieldLevel) bool {
-
_, err := net.ParseMAC(fl.Field().String())
return err == nil
}
-// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
+// isCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
func isCIDRv4(fl FieldLevel) bool {
+ ip, net, err := net.ParseCIDR(fl.Field().String())
- ip, _, err := net.ParseCIDR(fl.Field().String())
-
- return err == nil && ip.To4() != nil
+ return err == nil && ip.To4() != nil && net.IP.Equal(ip)
}
-// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
+// isCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
func isCIDRv6(fl FieldLevel) bool {
-
ip, _, err := net.ParseCIDR(fl.Field().String())
return err == nil && ip.To4() == nil
}
-// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
+// isCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
func isCIDR(fl FieldLevel) bool {
-
_, _, err := net.ParseCIDR(fl.Field().String())
return err == nil
}
-// IsIPv4 is the validation function for validating if a value is a valid v4 IP address.
+// isIPv4 is the validation function for validating if a value is a valid v4 IP address.
func isIPv4(fl FieldLevel) bool {
-
ip := net.ParseIP(fl.Field().String())
return ip != nil && ip.To4() != nil
}
-// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
+// isIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
func isIPv6(fl FieldLevel) bool {
-
ip := net.ParseIP(fl.Field().String())
return ip != nil && ip.To4() == nil
}
-// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
+// isIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
func isIP(fl FieldLevel) bool {
-
ip := net.ParseIP(fl.Field().String())
return ip != nil
}
-// IsSSN is the validation function for validating if the field's value is a valid SSN.
+// isSSN is the validation function for validating if the field's value is a valid SSN.
func isSSN(fl FieldLevel) bool {
-
field := fl.Field()
if field.Len() != 11 {
return false
}
- return sSNRegex.MatchString(field.String())
+ return sSNRegex().MatchString(field.String())
}
-// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate.
+// isLongitude is the validation function for validating if the field's value is a valid longitude coordinate.
func isLongitude(fl FieldLevel) bool {
field := fl.Field()
@@ -358,10 +452,10 @@ func isLongitude(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
- return longitudeRegex.MatchString(v)
+ return longitudeRegex().MatchString(v)
}
-// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate.
+// isLatitude is the validation function for validating if the field's value is a valid latitude coordinate.
func isLatitude(fl FieldLevel) bool {
field := fl.Field()
@@ -381,98 +475,150 @@ func isLatitude(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
- return latitudeRegex.MatchString(v)
+ return latitudeRegex().MatchString(v)
}
-// IsDataURI is the validation function for validating if the field's value is a valid data URI.
+// isDataURI is the validation function for validating if the field's value is a valid data URI.
func isDataURI(fl FieldLevel) bool {
-
uri := strings.SplitN(fl.Field().String(), ",", 2)
if len(uri) != 2 {
return false
}
- if !dataURIRegex.MatchString(uri[0]) {
+ if !dataURIRegex().MatchString(uri[0]) {
return false
}
- return base64Regex.MatchString(uri[1])
+ return base64Regex().MatchString(uri[1])
}
-// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
+// hasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
func hasMultiByteCharacter(fl FieldLevel) bool {
-
field := fl.Field()
if field.Len() == 0 {
return true
}
- return multibyteRegex.MatchString(field.String())
+ return multibyteRegex().MatchString(field.String())
}
-// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
+// isPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
func isPrintableASCII(fl FieldLevel) bool {
- return printableASCIIRegex.MatchString(fl.Field().String())
+ return printableASCIIRegex().MatchString(fl.Field().String())
}
-// IsASCII is the validation function for validating if the field's value is a valid ASCII character.
+// isASCII is the validation function for validating if the field's value is a valid ASCII character.
func isASCII(fl FieldLevel) bool {
- return aSCIIRegex.MatchString(fl.Field().String())
+ return aSCIIRegex().MatchString(fl.Field().String())
}
-// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
+// isUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
func isUUID5(fl FieldLevel) bool {
- return uUID5Regex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUID5Regex, fl)
}
-// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
+// isUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
func isUUID4(fl FieldLevel) bool {
- return uUID4Regex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUID4Regex, fl)
}
-// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
+// isUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
func isUUID3(fl FieldLevel) bool {
- return uUID3Regex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUID3Regex, fl)
}
-// IsUUID is the validation function for validating if the field's value is a valid UUID of any version.
+// isUUID is the validation function for validating if the field's value is a valid UUID of any version.
func isUUID(fl FieldLevel) bool {
- return uUIDRegex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUIDRegex, fl)
}
-// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID.
+// isUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID.
func isUUID5RFC4122(fl FieldLevel) bool {
- return uUID5RFC4122Regex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUID5RFC4122Regex, fl)
}
-// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID.
+// isUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID.
func isUUID4RFC4122(fl FieldLevel) bool {
- return uUID4RFC4122Regex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUID4RFC4122Regex, fl)
}
-// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID.
+// isUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID.
func isUUID3RFC4122(fl FieldLevel) bool {
- return uUID3RFC4122Regex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUID3RFC4122Regex, fl)
}
-// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version.
+// isUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version.
func isUUIDRFC4122(fl FieldLevel) bool {
- return uUIDRFC4122Regex.MatchString(fl.Field().String())
+ return fieldMatchesRegexByStringerValOrString(uUIDRFC4122Regex, fl)
+}
+
+// isULID is the validation function for validating if the field's value is a valid ULID.
+func isULID(fl FieldLevel) bool {
+ return fieldMatchesRegexByStringerValOrString(uLIDRegex, fl)
+}
+
+// isMD4 is the validation function for validating if the field's value is a valid MD4.
+func isMD4(fl FieldLevel) bool {
+ return md4Regex().MatchString(fl.Field().String())
+}
+
+// isMD5 is the validation function for validating if the field's value is a valid MD5.
+func isMD5(fl FieldLevel) bool {
+ return md5Regex().MatchString(fl.Field().String())
+}
+
+// isSHA256 is the validation function for validating if the field's value is a valid SHA256.
+func isSHA256(fl FieldLevel) bool {
+ return sha256Regex().MatchString(fl.Field().String())
+}
+
+// isSHA384 is the validation function for validating if the field's value is a valid SHA384.
+func isSHA384(fl FieldLevel) bool {
+ return sha384Regex().MatchString(fl.Field().String())
+}
+
+// isSHA512 is the validation function for validating if the field's value is a valid SHA512.
+func isSHA512(fl FieldLevel) bool {
+ return sha512Regex().MatchString(fl.Field().String())
+}
+
+// isRIPEMD128 is the validation function for validating if the field's value is a valid RIPEMD128.
+func isRIPEMD128(fl FieldLevel) bool {
+ return ripemd128Regex().MatchString(fl.Field().String())
+}
+
+// isRIPEMD160 is the validation function for validating if the field's value is a valid RIPEMD160.
+func isRIPEMD160(fl FieldLevel) bool {
+ return ripemd160Regex().MatchString(fl.Field().String())
+}
+
+// isTIGER128 is the validation function for validating if the field's value is a valid TIGER128.
+func isTIGER128(fl FieldLevel) bool {
+ return tiger128Regex().MatchString(fl.Field().String())
+}
+
+// isTIGER160 is the validation function for validating if the field's value is a valid TIGER160.
+func isTIGER160(fl FieldLevel) bool {
+ return tiger160Regex().MatchString(fl.Field().String())
+}
+
+// isTIGER192 is the validation function for validating if the field's value is a valid TIGER192.
+func isTIGER192(fl FieldLevel) bool {
+ return tiger192Regex().MatchString(fl.Field().String())
}
-// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN.
+// isISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN.
func isISBN(fl FieldLevel) bool {
return isISBN10(fl) || isISBN13(fl)
}
-// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN.
+// isISBN13 is the validation function for validating if the field's value is a valid v13 ISBN.
func isISBN13(fl FieldLevel) bool {
-
s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4)
- if !iSBN13Regex.MatchString(s) {
+ if !iSBN13Regex().MatchString(s) {
return false
}
@@ -488,12 +634,11 @@ func isISBN13(fl FieldLevel) bool {
return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0
}
-// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
+// isISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
func isISBN10(fl FieldLevel) bool {
-
s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3)
- if !iSBN10Regex.MatchString(s) {
+ if !iSBN10Regex().MatchString(s) {
return false
}
@@ -513,28 +658,70 @@ func isISBN10(fl FieldLevel) bool {
return checksum%11 == 0
}
-// IsEthereumAddress is the validation function for validating if the field's value is a valid ethereum address based currently only on the format
+// isISSN is the validation function for validating if the field's value is a valid ISSN.
+func isISSN(fl FieldLevel) bool {
+ s := fl.Field().String()
+
+ if !iSSNRegex().MatchString(s) {
+ return false
+ }
+ s = strings.ReplaceAll(s, "-", "")
+
+ pos := 8
+ checksum := 0
+
+ for i := 0; i < 7; i++ {
+ checksum += pos * int(s[i]-'0')
+ pos--
+ }
+
+ if s[7] == 'X' {
+ checksum += 10
+ } else {
+ checksum += int(s[7] - '0')
+ }
+
+ return checksum%11 == 0
+}
+
+// isEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address.
func isEthereumAddress(fl FieldLevel) bool {
address := fl.Field().String()
- if !ethAddressRegex.MatchString(address) {
+ return ethAddressRegex().MatchString(address)
+}
+
+// isEthereumAddressChecksum is the validation function for validating if the field's value is a valid checksummed Ethereum address.
+func isEthereumAddressChecksum(fl FieldLevel) bool {
+ address := fl.Field().String()
+
+ if !ethAddressRegex().MatchString(address) {
return false
}
+ // Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
+ address = address[2:] // Skip "0x" prefix.
+ h := sha3.NewLegacyKeccak256()
+ // hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash
+ _, _ = h.Write([]byte(strings.ToLower(address)))
+ hash := hex.EncodeToString(h.Sum(nil))
- if ethaddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) {
- return true
+ for i := 0; i < len(address); i++ {
+ if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case.
+ continue
+ }
+ if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
+ return false
+ }
}
- // checksum validation is blocked by https://github.com/golang/crypto/pull/28
-
return true
}
-// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address
+// isBitcoinAddress is the validation function for validating if the field's value is a valid btc address
func isBitcoinAddress(fl FieldLevel) bool {
address := fl.Field().String()
- if !btcAddressRegex.MatchString(address) {
+ if !btcAddressRegex().MatchString(address) {
return false
}
@@ -567,11 +754,11 @@ func isBitcoinAddress(fl FieldLevel) bool {
return validchecksum == computedchecksum
}
-// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
+// isBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
func isBitcoinBech32Address(fl FieldLevel) bool {
address := fl.Field().String()
- if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) {
+ if !btcLowerAddressRegexBech32().MatchString(address) && !btcUpperAddressRegexBech32().MatchString(address) {
return false
}
@@ -647,50 +834,59 @@ func isBitcoinBech32Address(fl FieldLevel) bool {
return true
}
-// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
+// excludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
func excludesRune(fl FieldLevel) bool {
return !containsRune(fl)
}
-// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
+// excludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
func excludesAll(fl FieldLevel) bool {
return !containsAny(fl)
}
-// Excludes is the validation function for validating that the field's value does not contain the text specified within the param.
+// excludes is the validation function for validating that the field's value does not contain the text specified within the param.
func excludes(fl FieldLevel) bool {
return !contains(fl)
}
-// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param.
+// containsRune is the validation function for validating that the field's value contains the rune specified within the param.
func containsRune(fl FieldLevel) bool {
-
r, _ := utf8.DecodeRuneInString(fl.Param())
return strings.ContainsRune(fl.Field().String(), r)
}
-// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
+// containsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
func containsAny(fl FieldLevel) bool {
return strings.ContainsAny(fl.Field().String(), fl.Param())
}
-// Contains is the validation function for validating that the field's value contains the text specified within the param.
+// contains is the validation function for validating that the field's value contains the text specified within the param.
func contains(fl FieldLevel) bool {
return strings.Contains(fl.Field().String(), fl.Param())
}
-// StartsWith is the validation function for validating that the field's value starts with the text specified within the param.
+// startsWith is the validation function for validating that the field's value starts with the text specified within the param.
func startsWith(fl FieldLevel) bool {
return strings.HasPrefix(fl.Field().String(), fl.Param())
}
-// EndsWith is the validation function for validating that the field's value ends with the text specified within the param.
+// endsWith is the validation function for validating that the field's value ends with the text specified within the param.
func endsWith(fl FieldLevel) bool {
return strings.HasSuffix(fl.Field().String(), fl.Param())
}
-// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value.
+// startsNotWith is the validation function for validating that the field's value does not start with the text specified within the param.
+func startsNotWith(fl FieldLevel) bool {
+ return !startsWith(fl)
+}
+
+// endsNotWith is the validation function for validating that the field's value does not end with the text specified within the param.
+func endsNotWith(fl FieldLevel) bool {
+ return !endsWith(fl)
+}
+
+// fieldContains is the validation function for validating if the current field's value contains the field specified by the param's value.
func fieldContains(fl FieldLevel) bool {
field := fl.Field()
@@ -703,7 +899,7 @@ func fieldContains(fl FieldLevel) bool {
return strings.Contains(field.String(), currentField.String())
}
-// FieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value.
+// fieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value.
func fieldExcludes(fl FieldLevel) bool {
field := fl.Field()
@@ -715,9 +911,8 @@ func fieldExcludes(fl FieldLevel) bool {
return !strings.Contains(field.String(), currentField.String())
}
-// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
+// isNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
func isNeField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -741,16 +936,14 @@ func isNeField(fl FieldLevel) bool {
case reflect.Slice, reflect.Map, reflect.Array:
return int64(field.Len()) != int64(currentField.Len())
+ case reflect.Bool:
+ return field.Bool() != currentField.Bool()
+
case reflect.Struct:
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return true
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
t := currentField.Interface().(time.Time)
fieldTime := field.Interface().(time.Time)
@@ -758,20 +951,29 @@ func isNeField(fl FieldLevel) bool {
return !fieldTime.Equal(t)
}
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return true
+ }
}
// default reflect.String:
return field.String() != currentField.String()
}
-// IsNe is the validation function for validating that the field's value does not equal the provided param value.
+// isNe is the validation function for validating that the field's value does not equal the provided param value.
func isNe(fl FieldLevel) bool {
return !isEq(fl)
}
-// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
-func isLteCrossStructField(fl FieldLevel) bool {
+// isNeIgnoreCase is the validation function for validating that the field's string value does not equal the
+// provided param value. The comparison is case-insensitive
+func isNeIgnoreCase(fl FieldLevel) bool {
+ return !isEqIgnoreCase(fl)
+}
+// isLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
+func isLteCrossStructField(fl FieldLevel) bool {
field := fl.Field()
kind := field.Kind()
@@ -798,28 +1000,27 @@ func isLteCrossStructField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
- fieldTime := field.Interface().(time.Time)
- topTime := topField.Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
+ topTime := topField.Convert(timeType).Interface().(time.Time)
return fieldTime.Before(topTime) || fieldTime.Equal(topTime)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
}
// default reflect.String:
return field.String() <= topField.String()
}
-// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
+// isLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
// NOTE: This is exposed for use within your own custom functions and not intended to be called directly.
func isLtCrossStructField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -846,27 +1047,26 @@ func isLtCrossStructField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
- fieldTime := field.Interface().(time.Time)
- topTime := topField.Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
+ topTime := topField.Convert(timeType).Interface().(time.Time)
return fieldTime.Before(topTime)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
}
// default reflect.String:
return field.String() < topField.String()
}
-// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
+// isGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
func isGteCrossStructField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -893,27 +1093,26 @@ func isGteCrossStructField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
- fieldTime := field.Interface().(time.Time)
- topTime := topField.Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
+ topTime := topField.Convert(timeType).Interface().(time.Time)
return fieldTime.After(topTime) || fieldTime.Equal(topTime)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
}
// default reflect.String:
return field.String() >= topField.String()
}
-// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
+// isGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
func isGtCrossStructField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -940,27 +1139,26 @@ func isGtCrossStructField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
- fieldTime := field.Interface().(time.Time)
- topTime := topField.Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
+ topTime := topField.Convert(timeType).Interface().(time.Time)
return fieldTime.After(topTime)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
}
// default reflect.String:
return field.String() > topField.String()
}
-// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
+// isNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
func isNeCrossStructField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -983,31 +1181,33 @@ func isNeCrossStructField(fl FieldLevel) bool {
case reflect.Slice, reflect.Map, reflect.Array:
return int64(topField.Len()) != int64(field.Len())
+ case reflect.Bool:
+ return topField.Bool() != field.Bool()
+
case reflect.Struct:
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return true
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
- t := field.Interface().(time.Time)
- fieldTime := topField.Interface().(time.Time)
+ t := field.Convert(timeType).Interface().(time.Time)
+ fieldTime := topField.Convert(timeType).Interface().(time.Time)
return !fieldTime.Equal(t)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return true
+ }
}
// default reflect.String:
return topField.String() != field.String()
}
-// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
+// isEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
func isEqCrossStructField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -1030,31 +1230,33 @@ func isEqCrossStructField(fl FieldLevel) bool {
case reflect.Slice, reflect.Map, reflect.Array:
return int64(topField.Len()) == int64(field.Len())
+ case reflect.Bool:
+ return topField.Bool() == field.Bool()
+
case reflect.Struct:
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != topField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && topField.Type().ConvertibleTo(timeType) {
- t := field.Interface().(time.Time)
- fieldTime := topField.Interface().(time.Time)
+ t := field.Convert(timeType).Interface().(time.Time)
+ fieldTime := topField.Convert(timeType).Interface().(time.Time)
return fieldTime.Equal(t)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
}
// default reflect.String:
return topField.String() == field.String()
}
-// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
+// isEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
func isEqField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -1077,32 +1279,33 @@ func isEqField(fl FieldLevel) bool {
case reflect.Slice, reflect.Map, reflect.Array:
return int64(field.Len()) == int64(currentField.Len())
+ case reflect.Bool:
+ return field.Bool() == currentField.Bool()
+
case reflect.Struct:
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
- t := currentField.Interface().(time.Time)
- fieldTime := field.Interface().(time.Time)
+ t := currentField.Convert(timeType).Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
return fieldTime.Equal(t)
}
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
}
// default reflect.String:
return field.String() == currentField.String()
}
-// IsEq is the validation function for validating if the current field's value is equal to the param's value.
+// isEq is the validation function for validating if the current field's value is equal to the param's value.
func isEq(fl FieldLevel) bool {
-
field := fl.Field()
param := fl.Param()
@@ -1117,7 +1320,7 @@ func isEq(fl FieldLevel) bool {
return int64(field.Len()) == p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asInt(param)
+ p := asIntFromType(field.Type(), param)
return field.Int() == p
@@ -1126,8 +1329,13 @@ func isEq(fl FieldLevel) bool {
return field.Uint() == p
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
+ case reflect.Float32:
+ p := asFloat32(param)
+
+ return field.Float() == p
+
+ case reflect.Float64:
+ p := asFloat64(param)
return field.Float() == p
@@ -1140,23 +1348,89 @@ func isEq(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// IsBase64 is the validation function for validating if the current field's value is a valid base 64.
+// isEqIgnoreCase is the validation function for validating if the current field's string value is
+// equal to the param's value.
+// The comparison is case-insensitive.
+func isEqIgnoreCase(fl FieldLevel) bool {
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ return strings.EqualFold(field.String(), param)
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isPostcodeByIso3166Alpha2 validates by value which is country code in iso 3166 alpha 2
+// example: `postcode_iso3166_alpha2=US`
+func isPostcodeByIso3166Alpha2(fl FieldLevel) bool {
+ field := fl.Field()
+ param := fl.Param()
+
+ postcodeRegexInit.Do(initPostcodes)
+ reg, found := postCodeRegexDict[param]
+ if !found {
+ return false
+ }
+
+ return reg.MatchString(field.String())
+}
+
+// isPostcodeByIso3166Alpha2Field validates by a field whose value is a country code in iso 3166 alpha 2
+// example: `postcode_iso3166_alpha2_field=CountryCode`
+func isPostcodeByIso3166Alpha2Field(fl FieldLevel) bool {
+ field := fl.Field()
+ params := parseOneOfParam2(fl.Param())
+
+ if len(params) != 1 {
+ return false
+ }
+
+ currentField, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), params[0])
+ if !found {
+ return false
+ }
+
+ if kind != reflect.String {
+ panic(fmt.Sprintf("Bad field type %T", currentField.Interface()))
+ }
+
+ reg, found := postCodeRegexDict[currentField.String()]
+ if !found {
+ return false
+ }
+
+ return reg.MatchString(field.String())
+}
+
+// isBase32 is the validation function for validating if the current field's value is a valid base 32.
+func isBase32(fl FieldLevel) bool {
+ return base32Regex().MatchString(fl.Field().String())
+}
+
+// isBase64 is the validation function for validating if the current field's value is a valid base 64.
func isBase64(fl FieldLevel) bool {
- return base64Regex.MatchString(fl.Field().String())
+ return base64Regex().MatchString(fl.Field().String())
}
-// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
+// isBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
func isBase64URL(fl FieldLevel) bool {
- return base64URLRegex.MatchString(fl.Field().String())
+ return base64URLRegex().MatchString(fl.Field().String())
}
-// IsURI is the validation function for validating if the current field's value is a valid URI.
-func isURI(fl FieldLevel) bool {
+// isBase64RawURL is the validation function for validating if the current field's value is a valid base64 URL safe string without '=' padding.
+func isBase64RawURL(fl FieldLevel) bool {
+ return base64RawURLRegex().MatchString(fl.Field().String())
+}
+// isURI is the validation function for validating if the current field's value is a valid URI.
+func isURI(fl FieldLevel) bool {
field := fl.Field()
switch field.Kind() {
-
case reflect.String:
s := field.String()
@@ -1179,46 +1453,75 @@ func isURI(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// IsURL is the validation function for validating if the current field's value is a valid URL.
-func isURL(fl FieldLevel) bool {
+// isFileURL is the helper function for validating if the `path` valid file URL as per RFC8089
+func isFileURL(path string) bool {
+ if !strings.HasPrefix(path, "file:/") {
+ return false
+ }
+ _, err := url.ParseRequestURI(path)
+ return err == nil
+}
+// isURL is the validation function for validating if the current field's value is a valid URL.
+func isURL(fl FieldLevel) bool {
field := fl.Field()
switch field.Kind() {
-
case reflect.String:
- var i int
- s := field.String()
-
- // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
- // emulate browser and strip the '#' suffix prior to validation. see issue-#237
- if i = strings.Index(s, "#"); i > -1 {
- s = s[:i]
- }
+ s := strings.ToLower(field.String())
if len(s) == 0 {
return false
}
- url, err := url.ParseRequestURI(s)
+ if isFileURL(s) {
+ return true
+ }
+ url, err := url.Parse(s)
if err != nil || url.Scheme == "" {
return false
}
+ if url.Host == "" && url.Fragment == "" && url.Opaque == "" {
+ return false
+ }
+
return true
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141.
-func isUrnRFC2141(fl FieldLevel) bool {
- field := fl.Field()
+// isHttpURL is the validation function for validating if the current field's value is a valid HTTP(s) URL.
+func isHttpURL(fl FieldLevel) bool {
+ if !isURL(fl) {
+ return false
+ }
+
+ field := fl.Field()
+ switch field.Kind() {
+ case reflect.String:
+
+ s := strings.ToLower(field.String())
+
+ url, err := url.Parse(s)
+ if err != nil || url.Host == "" {
+ return false
+ }
+
+ return url.Scheme == "http" || url.Scheme == "https"
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141.
+func isUrnRFC2141(fl FieldLevel) bool {
+ field := fl.Field()
switch field.Kind() {
-
case reflect.String:
str := field.String()
@@ -1231,7 +1534,7 @@ func isUrnRFC2141(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// IsFile is the validation function for validating if the current field's value is a valid file path.
+// isFile is the validation function for validating if the current field's value is a valid existing file path.
func isFile(fl FieldLevel) bool {
field := fl.Field()
@@ -1248,84 +1551,211 @@ func isFile(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// IsE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number.
+// isImage is the validation function for validating if the current field's value contains the path to a valid image file
+func isImage(fl FieldLevel) bool {
+ mimetypes := map[string]bool{
+ "image/bmp": true,
+ "image/cis-cod": true,
+ "image/gif": true,
+ "image/ief": true,
+ "image/jpeg": true,
+ "image/jp2": true,
+ "image/jpx": true,
+ "image/jpm": true,
+ "image/pipeg": true,
+ "image/png": true,
+ "image/svg+xml": true,
+ "image/tiff": true,
+ "image/webp": true,
+ "image/x-cmu-raster": true,
+ "image/x-cmx": true,
+ "image/x-icon": true,
+ "image/x-portable-anymap": true,
+ "image/x-portable-bitmap": true,
+ "image/x-portable-graymap": true,
+ "image/x-portable-pixmap": true,
+ "image/x-rgb": true,
+ "image/x-xbitmap": true,
+ "image/x-xpixmap": true,
+ "image/x-xwindowdump": true,
+ }
+ field := fl.Field()
+
+ switch field.Kind() {
+ case reflect.String:
+ filePath := field.String()
+ fileInfo, err := os.Stat(filePath)
+
+ if err != nil {
+ return false
+ }
+
+ if fileInfo.IsDir() {
+ return false
+ }
+
+ file, err := os.Open(filePath)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+
+ mime, err := mimetype.DetectReader(file)
+ if err != nil {
+ return false
+ }
+
+ if _, ok := mimetypes[mime.String()]; ok {
+ return true
+ }
+ }
+ return false
+}
+
+// isFilePath is the validation function for validating if the current field's value is a valid file path.
+func isFilePath(fl FieldLevel) bool {
+
+ var exists bool
+ var err error
+
+ field := fl.Field()
+
+ // Not valid if it is a directory.
+ if isDir(fl) {
+ return false
+ }
+ // If it exists, it obviously is valid.
+ // This is done first to avoid code duplication and unnecessary additional logic.
+ if exists = isFile(fl); exists {
+ return true
+ }
+
+ // It does not exist but may still be a valid filepath.
+ switch field.Kind() {
+ case reflect.String:
+ // Every OS allows for whitespace, but none
+ // let you use a file with no filename (to my knowledge).
+ // Unless you're dealing with raw inodes, but I digress.
+ if strings.TrimSpace(field.String()) == "" {
+ return false
+ }
+ // We make sure it isn't a directory.
+ if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
+ return false
+ }
+ if _, err = os.Stat(field.String()); err != nil {
+ switch t := err.(type) {
+ case *fs.PathError:
+ if t.Err == syscall.EINVAL {
+ // It's definitely an invalid character in the filepath.
+ return false
+ }
+ // It could be a permission error, a does-not-exist error, etc.
+ // Out-of-scope for this validation, though.
+ return true
+ default:
+ // Something went *seriously* wrong.
+ /*
+ Per https://pkg.go.dev/os#Stat:
+ "If there is an error, it will be of type *PathError."
+ */
+ panic(err)
+ }
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number.
func isE164(fl FieldLevel) bool {
- return e164Regex.MatchString(fl.Field().String())
+ return e164Regex().MatchString(fl.Field().String())
}
-// IsEmail is the validation function for validating if the current field's value is a valid email address.
+// isEmail is the validation function for validating if the current field's value is a valid email address.
func isEmail(fl FieldLevel) bool {
- return emailRegex.MatchString(fl.Field().String())
+ return emailRegex().MatchString(fl.Field().String())
}
-// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color.
+// isHSLA is the validation function for validating if the current field's value is a valid HSLA color.
func isHSLA(fl FieldLevel) bool {
- return hslaRegex.MatchString(fl.Field().String())
+ return hslaRegex().MatchString(fl.Field().String())
}
-// IsHSL is the validation function for validating if the current field's value is a valid HSL color.
+// isHSL is the validation function for validating if the current field's value is a valid HSL color.
func isHSL(fl FieldLevel) bool {
- return hslRegex.MatchString(fl.Field().String())
+ return hslRegex().MatchString(fl.Field().String())
}
-// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color.
+// isRGBA is the validation function for validating if the current field's value is a valid RGBA color.
func isRGBA(fl FieldLevel) bool {
- return rgbaRegex.MatchString(fl.Field().String())
+ return rgbaRegex().MatchString(fl.Field().String())
}
-// IsRGB is the validation function for validating if the current field's value is a valid RGB color.
+// isRGB is the validation function for validating if the current field's value is a valid RGB color.
func isRGB(fl FieldLevel) bool {
- return rgbRegex.MatchString(fl.Field().String())
+ return rgbRegex().MatchString(fl.Field().String())
}
-// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color.
+// isHEXColor is the validation function for validating if the current field's value is a valid HEX color.
func isHEXColor(fl FieldLevel) bool {
- return hexcolorRegex.MatchString(fl.Field().String())
+ return hexColorRegex().MatchString(fl.Field().String())
}
-// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
+// isHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
func isHexadecimal(fl FieldLevel) bool {
- return hexadecimalRegex.MatchString(fl.Field().String())
+ return hexadecimalRegex().MatchString(fl.Field().String())
}
-// IsNumber is the validation function for validating if the current field's value is a valid number.
+// isNumber is the validation function for validating if the current field's value is a valid number.
func isNumber(fl FieldLevel) bool {
switch fl.Field().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
return true
default:
- return numberRegex.MatchString(fl.Field().String())
+ return numberRegex().MatchString(fl.Field().String())
}
}
-// IsNumeric is the validation function for validating if the current field's value is a valid numeric value.
+// isNumeric is the validation function for validating if the current field's value is a valid numeric value.
func isNumeric(fl FieldLevel) bool {
switch fl.Field().Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
return true
default:
- return numericRegex.MatchString(fl.Field().String())
+ return numericRegex().MatchString(fl.Field().String())
}
}
-// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value.
+// isAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value.
func isAlphanum(fl FieldLevel) bool {
- return alphaNumericRegex.MatchString(fl.Field().String())
+ return alphaNumericRegex().MatchString(fl.Field().String())
}
-// IsAlpha is the validation function for validating if the current field's value is a valid alpha value.
+// isAlpha is the validation function for validating if the current field's value is a valid alpha value.
func isAlpha(fl FieldLevel) bool {
- return alphaRegex.MatchString(fl.Field().String())
+ return alphaRegex().MatchString(fl.Field().String())
}
-// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value.
+// isAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value.
func isAlphanumUnicode(fl FieldLevel) bool {
- return alphaUnicodeNumericRegex.MatchString(fl.Field().String())
+ return alphaUnicodeNumericRegex().MatchString(fl.Field().String())
}
-// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value.
+// isAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value.
func isAlphaUnicode(fl FieldLevel) bool {
- return alphaUnicodeRegex.MatchString(fl.Field().String())
+ return alphaUnicodeRegex().MatchString(fl.Field().String())
+}
+
+// isBoolean is the validation function for validating if the current field's value is a valid boolean value or can be safely converted to a boolean value.
+func isBoolean(fl FieldLevel) bool {
+ switch fl.Field().Kind() {
+ case reflect.Bool:
+ return true
+ default:
+ _, err := strconv.ParseBool(fl.Field().String())
+ return err == nil
+ }
}
// isDefault is the opposite of required aka hasValue
@@ -1333,7 +1763,7 @@ func isDefault(fl FieldLevel) bool {
return !hasValue(fl)
}
-// HasValue is the validation function for validating if the current field's value is not the default static value.
+// hasValue is the validation function for validating if the current field's value is not the default static value.
func hasValue(fl FieldLevel) bool {
field := fl.Field()
switch field.Kind() {
@@ -1343,11 +1773,11 @@ func hasValue(fl FieldLevel) bool {
if fl.(*validate).fldIsPointer && field.Interface() != nil {
return true
}
- return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
+ return field.IsValid() && !field.IsZero()
}
}
-// requireCheckField is a func for check field kind
+// requireCheckFieldKind is a func for check field kind
func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool {
field := fl.Field()
kind := field.Kind()
@@ -1367,11 +1797,134 @@ func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue boo
if nullable && field.Interface() != nil {
return false
}
- return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface()
+ return field.IsValid() && field.IsZero()
+ }
+}
+
+// requireCheckFieldValue is a func for check field value
+func requireCheckFieldValue(
+ fl FieldLevel, param string, value string, defaultNotFoundValue bool,
+) bool {
+ field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
+ if !found {
+ return defaultNotFoundValue
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() == asInt(value)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() == asUint(value)
+
+ case reflect.Float32:
+ return field.Float() == asFloat32(value)
+
+ case reflect.Float64:
+ return field.Float() == asFloat64(value)
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) == asInt(value)
+
+ case reflect.Bool:
+ return field.Bool() == asBool(value)
+ }
+
+ // default reflect.String:
+ return field.String() == value
+}
+
+// requiredIf is the validation function
+// The field under validation must be present and not empty only if all the other specified fields are equal to the value following with the specified field.
+func requiredIf(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName()))
+ }
+ for i := 0; i < len(params); i += 2 {
+ if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// excludedIf is the validation function
+// The field under validation must not be present or is empty only if all the other specified fields are equal to the value following with the specified field.
+func excludedIf(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for excluded_if %s", fl.FieldName()))
+ }
+
+ for i := 0; i < len(params); i += 2 {
+ if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return !hasValue(fl)
+}
+
+// requiredUnless is the validation function
+// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field.
+func requiredUnless(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName()))
+ }
+
+ for i := 0; i < len(params); i += 2 {
+ if requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// skipUnless is the validation function
+// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field.
+func skipUnless(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for skip_unless %s", fl.FieldName()))
+ }
+ for i := 0; i < len(params); i += 2 {
+ if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// excludedUnless is the validation function
+// The field under validation must not be present or is empty unless all the other specified fields are equal to the value following with the specified field.
+func excludedUnless(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for excluded_unless %s", fl.FieldName()))
+ }
+ for i := 0; i < len(params); i += 2 {
+ if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return !hasValue(fl)
+ }
}
+ return true
}
-// RequiredWith is the validation function
+// excludedWith is the validation function
+// The field under validation must not be present or is empty if any of the other specified fields are present.
+func excludedWith(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return !hasValue(fl)
+ }
+ }
+ return true
+}
+
+// requiredWith is the validation function
// The field under validation must be present and not empty only if any of the other specified fields are present.
func requiredWith(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
@@ -1383,7 +1936,19 @@ func requiredWith(fl FieldLevel) bool {
return true
}
-// RequiredWithAll is the validation function
+// excludedWithAll is the validation function
+// The field under validation must not be present or is empty if all of the other specified fields are present.
+func excludedWithAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return !hasValue(fl)
+}
+
+// requiredWithAll is the validation function
// The field under validation must be present and not empty only if all of the other specified fields are present.
func requiredWithAll(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
@@ -1395,7 +1960,16 @@ func requiredWithAll(fl FieldLevel) bool {
return hasValue(fl)
}
-// RequiredWithout is the validation function
+// excludedWithout is the validation function
+// The field under validation must not be present or is empty when any of the other specified fields are not present.
+func excludedWithout(fl FieldLevel) bool {
+ if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
+ return !hasValue(fl)
+ }
+ return true
+}
+
+// requiredWithout is the validation function
// The field under validation must be present and not empty only when any of the other specified fields are not present.
func requiredWithout(fl FieldLevel) bool {
if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
@@ -1404,7 +1978,19 @@ func requiredWithout(fl FieldLevel) bool {
return true
}
-// RequiredWithoutAll is the validation function
+// excludedWithoutAll is the validation function
+// The field under validation must not be present or is empty when all of the other specified fields are not present.
+func excludedWithoutAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return !hasValue(fl)
+}
+
+// requiredWithoutAll is the validation function
// The field under validation must be present and not empty only when all of the other specified fields are not present.
func requiredWithoutAll(fl FieldLevel) bool {
params := parseOneOfParam2(fl.Param())
@@ -1416,9 +2002,8 @@ func requiredWithoutAll(fl FieldLevel) bool {
return hasValue(fl)
}
-// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
+// isGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
func isGteField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -1445,27 +2030,26 @@ func isGteField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
- t := currentField.Interface().(time.Time)
- fieldTime := field.Interface().(time.Time)
+ t := currentField.Convert(timeType).Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
return fieldTime.After(t) || fieldTime.Equal(t)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
}
// default reflect.String
return len(field.String()) >= len(currentField.String())
}
-// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
+// isGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
func isGtField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -1492,27 +2076,26 @@ func isGtField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
+ if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
- if fieldType == timeType {
-
- t := currentField.Interface().(time.Time)
- fieldTime := field.Interface().(time.Time)
+ t := currentField.Convert(timeType).Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
return fieldTime.After(t)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
}
// default reflect.String
return len(field.String()) > len(currentField.String())
}
-// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
+// isGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
func isGte(fl FieldLevel) bool {
-
field := fl.Field()
param := fl.Param()
@@ -1529,7 +2112,7 @@ func isGte(fl FieldLevel) bool {
return int64(field.Len()) >= p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asInt(param)
+ p := asIntFromType(field.Type(), param)
return field.Int() >= p
@@ -1538,17 +2121,22 @@ func isGte(fl FieldLevel) bool {
return field.Uint() >= p
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
+ case reflect.Float32:
+ p := asFloat32(param)
+
+ return field.Float() >= p
+
+ case reflect.Float64:
+ p := asFloat64(param)
return field.Float() >= p
case reflect.Struct:
- if field.Type() == timeType {
+ if field.Type().ConvertibleTo(timeType) {
now := time.Now().UTC()
- t := field.Interface().(time.Time)
+ t := field.Convert(timeType).Interface().(time.Time)
return t.After(now) || t.Equal(now)
}
@@ -1557,9 +2145,8 @@ func isGte(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// IsGt is the validation function for validating if the current field's value is greater than the param's value.
+// isGt is the validation function for validating if the current field's value is greater than the param's value.
func isGt(fl FieldLevel) bool {
-
field := fl.Field()
param := fl.Param()
@@ -1576,7 +2163,7 @@ func isGt(fl FieldLevel) bool {
return int64(field.Len()) > p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asInt(param)
+ p := asIntFromType(field.Type(), param)
return field.Int() > p
@@ -1585,24 +2172,29 @@ func isGt(fl FieldLevel) bool {
return field.Uint() > p
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
+ case reflect.Float32:
+ p := asFloat32(param)
return field.Float() > p
+
+ case reflect.Float64:
+ p := asFloat64(param)
+
+ return field.Float() > p
+
case reflect.Struct:
- if field.Type() == timeType {
+ if field.Type().ConvertibleTo(timeType) {
- return field.Interface().(time.Time).After(time.Now().UTC())
+ return field.Convert(timeType).Interface().(time.Time).After(time.Now().UTC())
}
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
+// hasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
func hasLengthOf(fl FieldLevel) bool {
-
field := fl.Field()
param := fl.Param()
@@ -1619,7 +2211,7 @@ func hasLengthOf(fl FieldLevel) bool {
return int64(field.Len()) == p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asInt(param)
+ p := asIntFromType(field.Type(), param)
return field.Int() == p
@@ -1628,8 +2220,13 @@ func hasLengthOf(fl FieldLevel) bool {
return field.Uint() == p
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
+ case reflect.Float32:
+ p := asFloat32(param)
+
+ return field.Float() == p
+
+ case reflect.Float64:
+ p := asFloat64(param)
return field.Float() == p
}
@@ -1637,14 +2234,13 @@ func hasLengthOf(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// hasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
// It delegates to isGte, so `min` and `gte` share semantics for every supported kind.
func hasMinOf(fl FieldLevel) bool {
	return isGte(fl)
}
-// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
+// isLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
func isLteField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -1671,27 +2267,26 @@ func isLteField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
+ if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
- if fieldType == timeType {
-
- t := currentField.Interface().(time.Time)
- fieldTime := field.Interface().(time.Time)
+ t := currentField.Convert(timeType).Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
return fieldTime.Before(t) || fieldTime.Equal(t)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
}
// default reflect.String
return len(field.String()) <= len(currentField.String())
}
-// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
+// isLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
func isLtField(fl FieldLevel) bool {
-
field := fl.Field()
kind := field.Kind()
@@ -1718,27 +2313,26 @@ func isLtField(fl FieldLevel) bool {
fieldType := field.Type()
- // Not Same underlying type i.e. struct and time
- if fieldType != currentField.Type() {
- return false
- }
-
- if fieldType == timeType {
+ if fieldType.ConvertibleTo(timeType) && currentField.Type().ConvertibleTo(timeType) {
- t := currentField.Interface().(time.Time)
- fieldTime := field.Interface().(time.Time)
+ t := currentField.Convert(timeType).Interface().(time.Time)
+ fieldTime := field.Convert(timeType).Interface().(time.Time)
return fieldTime.Before(t)
}
+
+ // Not Same underlying type i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
}
// default reflect.String
return len(field.String()) < len(currentField.String())
}
-// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value.
+// isLte is the validation function for validating if the current field's value is less than or equal to the param's value.
func isLte(fl FieldLevel) bool {
-
field := fl.Field()
param := fl.Param()
@@ -1755,7 +2349,7 @@ func isLte(fl FieldLevel) bool {
return int64(field.Len()) <= p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asInt(param)
+ p := asIntFromType(field.Type(), param)
return field.Int() <= p
@@ -1764,17 +2358,22 @@ func isLte(fl FieldLevel) bool {
return field.Uint() <= p
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
+ case reflect.Float32:
+ p := asFloat32(param)
+
+ return field.Float() <= p
+
+ case reflect.Float64:
+ p := asFloat64(param)
return field.Float() <= p
case reflect.Struct:
- if field.Type() == timeType {
+ if field.Type().ConvertibleTo(timeType) {
now := time.Now().UTC()
- t := field.Interface().(time.Time)
+ t := field.Convert(timeType).Interface().(time.Time)
return t.Before(now) || t.Equal(now)
}
@@ -1783,9 +2382,8 @@ func isLte(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
-// IsLt is the validation function for validating if the current field's value is less than the param's value.
+// isLt is the validation function for validating if the current field's value is less than the param's value.
func isLt(fl FieldLevel) bool {
-
field := fl.Field()
param := fl.Param()
@@ -1802,7 +2400,7 @@ func isLt(fl FieldLevel) bool {
return int64(field.Len()) < p
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- p := asInt(param)
+ p := asIntFromType(field.Type(), param)
return field.Int() < p
@@ -1811,30 +2409,34 @@ func isLt(fl FieldLevel) bool {
return field.Uint() < p
- case reflect.Float32, reflect.Float64:
- p := asFloat(param)
+ case reflect.Float32:
+ p := asFloat32(param)
+
+ return field.Float() < p
+
+ case reflect.Float64:
+ p := asFloat64(param)
return field.Float() < p
case reflect.Struct:
- if field.Type() == timeType {
+ if field.Type().ConvertibleTo(timeType) {
- return field.Interface().(time.Time).Before(time.Now().UTC())
+ return field.Convert(timeType).Interface().(time.Time).Before(time.Now().UTC())
}
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// hasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
// It delegates to isLte, so `max` and `lte` share semantics for every supported kind.
func hasMaxOf(fl FieldLevel) bool {
	return isLte(fl)
}
-// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
+// isTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
func isTCP4AddrResolvable(fl FieldLevel) bool {
-
if !isIP4Addr(fl) {
return false
}
@@ -1843,9 +2445,8 @@ func isTCP4AddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
+// isTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
func isTCP6AddrResolvable(fl FieldLevel) bool {
-
if !isIP6Addr(fl) {
return false
}
@@ -1855,9 +2456,8 @@ func isTCP6AddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
+// isTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
func isTCPAddrResolvable(fl FieldLevel) bool {
-
if !isIP4Addr(fl) && !isIP6Addr(fl) {
return false
}
@@ -1867,9 +2467,8 @@ func isTCPAddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
+// isUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
func isUDP4AddrResolvable(fl FieldLevel) bool {
-
if !isIP4Addr(fl) {
return false
}
@@ -1879,9 +2478,8 @@ func isUDP4AddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
+// isUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
func isUDP6AddrResolvable(fl FieldLevel) bool {
-
if !isIP6Addr(fl) {
return false
}
@@ -1891,9 +2489,8 @@ func isUDP6AddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
+// isUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
func isUDPAddrResolvable(fl FieldLevel) bool {
-
if !isIP4Addr(fl) && !isIP6Addr(fl) {
return false
}
@@ -1903,9 +2500,8 @@ func isUDPAddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
+// isIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
func isIP4AddrResolvable(fl FieldLevel) bool {
-
if !isIPv4(fl) {
return false
}
@@ -1915,9 +2511,8 @@ func isIP4AddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
+// isIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
func isIP6AddrResolvable(fl FieldLevel) bool {
-
if !isIPv6(fl) {
return false
}
@@ -1927,9 +2522,8 @@ func isIP6AddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
+// isIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
func isIPAddrResolvable(fl FieldLevel) bool {
-
if !isIP(fl) {
return false
}
@@ -1939,16 +2533,14 @@ func isIPAddrResolvable(fl FieldLevel) bool {
return err == nil
}
-// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
+// isUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
func isUnixAddrResolvable(fl FieldLevel) bool {
-
_, err := net.ResolveUnixAddr("unix", fl.Field().String())
return err == nil
}
func isIP4Addr(fl FieldLevel) bool {
-
val := fl.Field().String()
if idx := strings.LastIndex(val, ":"); idx != -1 {
@@ -1961,7 +2553,6 @@ func isIP4Addr(fl FieldLevel) bool {
}
func isIP6Addr(fl FieldLevel) bool {
-
val := fl.Field().String()
if idx := strings.LastIndex(val, ":"); idx != -1 {
@@ -1976,11 +2567,11 @@ func isIP6Addr(fl FieldLevel) bool {
}
func isHostnameRFC952(fl FieldLevel) bool {
- return hostnameRegexRFC952.MatchString(fl.Field().String())
+ return hostnameRegexRFC952().MatchString(fl.Field().String())
}
func isHostnameRFC1123(fl FieldLevel) bool {
- return hostnameRegexRFC1123.MatchString(fl.Field().String())
+ return hostnameRegexRFC1123().MatchString(fl.Field().String())
}
func isFQDN(fl FieldLevel) bool {
@@ -1990,15 +2581,10 @@ func isFQDN(fl FieldLevel) bool {
return false
}
- if val[len(val)-1] == '.' {
- val = val[0 : len(val)-1]
- }
-
- return strings.ContainsAny(val, ".") &&
- hostnameRegexRFC952.MatchString(val)
+ return fqdnRegexRFC1123().MatchString(val)
}
-// IsDir is the validation function for validating if the current field's value is a valid directory.
+// isDir is the validation function for validating if the current field's value is a valid existing directory.
func isDir(fl FieldLevel) bool {
field := fl.Field()
@@ -2014,18 +2600,89 @@ func isDir(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isDirPath is the validation function for validating if the current field's value is a valid directory.
// Unlike isDir, the directory does not need to exist: a non-existent path is accepted as long as it
// is syntactically plausible and ends with the OS path separator (i.e. is explicitly a directory).
func isDirPath(fl FieldLevel) bool {

	var exists bool
	var err error

	field := fl.Field()

	// If it exists, it obviously is valid.
	// This is done first to avoid code duplication and unnecessary additional logic.
	if exists = isDir(fl); exists {
		return true
	}

	// It does not exist but may still be a valid path.
	switch field.Kind() {
	case reflect.String:
		// Every OS allows for whitespace, but none
		// let you use a dir with no name (to my knowledge).
		// Unless you're dealing with raw inodes, but I digress.
		if strings.TrimSpace(field.String()) == "" {
			return false
		}
		if _, err = os.Stat(field.String()); err != nil {
			switch t := err.(type) {
			case *fs.PathError:
				if t.Err == syscall.EINVAL {
					// It's definitely an invalid character in the path.
					return false
				}
				// It could be a permission error, a does-not-exist error, etc.
				// Out-of-scope for this validation, though.
				// Lastly, we make sure it is a directory.
				if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
					return true
				} else {
					return false
				}
			default:
				// Something went *seriously* wrong.
				/*
					Per https://pkg.go.dev/os#Stat:
					"If there is an error, it will be of type *PathError."
				*/
				panic(err)
			}
		}
		// We repeat the check here to make sure it is an explicit directory in case the above os.Stat didn't trigger an error.
		if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
			return true
		} else {
			return false
		}
	}

	// Reached only for non-string kinds.
	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
+
// isJSON is the validation function for validating if the current field's value is a valid json string.
func isJSON(fl FieldLevel) bool {
field := fl.Field()
- if field.Kind() == reflect.String {
+ switch field.Kind() {
+ case reflect.String:
val := field.String()
return json.Valid([]byte(val))
+ case reflect.Slice:
+ fieldType := field.Type()
+
+ if fieldType.ConvertibleTo(byteSliceType) {
+ b := field.Convert(byteSliceType).Interface().([]byte)
+ return json.Valid(b)
+ }
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
+// isJWT is the validation function for validating if the current field's value is a valid JWT string.
+func isJWT(fl FieldLevel) bool {
+ return jWTRegex().MatchString(fl.Field().String())
+}
+
// isHostnamePort validates a : combination for fields typically used for socket address.
func isHostnamePort(fl FieldLevel) bool {
val := fl.Field().String()
@@ -2034,13 +2691,15 @@ func isHostnamePort(fl FieldLevel) bool {
return false
}
// Port must be a iny <= 65535.
- if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 1 {
+ if portNum, err := strconv.ParseInt(
+ port, 10, 32,
+ ); err != nil || portNum > 65535 || portNum < 1 {
return false
}
// If host is specified, it should match a DNS name
if host != "" {
- return hostnameRegexRFC1123.MatchString(host)
+ return hostnameRegexRFC1123().MatchString(host)
}
return true
}
@@ -2080,12 +2739,275 @@ func isDatetime(fl FieldLevel) bool {
if field.Kind() == reflect.String {
_, err := time.Parse(param, field.String())
+
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isTimeZone is the validation function for validating if the current field's value is a valid time zone string.
+func isTimeZone(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ // empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name
+ if field.String() == "" {
+ return false
+ }
+
+ // Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name
+ if strings.ToLower(field.String()) == "local" {
+ return false
+ }
+
+ _, err := time.LoadLocation(field.String())
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code.
+func isIso3166Alpha2(fl FieldLevel) bool {
+ _, ok := iso3166_1_alpha2[fl.Field().String()]
+ return ok
+}
+
+// isIso3166Alpha2EU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 European Union country code.
+func isIso3166Alpha2EU(fl FieldLevel) bool {
+ _, ok := iso3166_1_alpha2_eu[fl.Field().String()]
+ return ok
+}
+
+// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
+func isIso3166Alpha3(fl FieldLevel) bool {
+ _, ok := iso3166_1_alpha3[fl.Field().String()]
+ return ok
+}
+
+// isIso3166Alpha3EU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 European Union country code.
+func isIso3166Alpha3EU(fl FieldLevel) bool {
+ _, ok := iso3166_1_alpha3_eu[fl.Field().String()]
+ return ok
+}
+
+// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
+func isIso3166AlphaNumeric(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var code int
+ switch field.Kind() {
+ case reflect.String:
+ i, err := strconv.Atoi(field.String())
if err != nil {
return false
}
+ code = i % 1000
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ code = int(field.Int() % 1000)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ code = int(field.Uint() % 1000)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
- return true
+ _, ok := iso3166_1_alpha_numeric[code]
+ return ok
+}
+
+// isIso3166AlphaNumericEU is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric European Union country code.
+func isIso3166AlphaNumericEU(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var code int
+ switch field.Kind() {
+ case reflect.String:
+ i, err := strconv.Atoi(field.String())
+ if err != nil {
+ return false
+ }
+ code = i % 1000
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ code = int(field.Int() % 1000)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ code = int(field.Uint() % 1000)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+
+ _, ok := iso3166_1_alpha_numeric_eu[code]
+ return ok
+}
+
+// isIso31662 is the validation function for validating if the current field's value is a valid iso3166-2 code.
+func isIso31662(fl FieldLevel) bool {
+ _, ok := iso3166_2[fl.Field().String()]
+ return ok
+}
+
+// isIso4217 is the validation function for validating if the current field's value is a valid iso4217 currency code.
+func isIso4217(fl FieldLevel) bool {
+ _, ok := iso4217[fl.Field().String()]
+ return ok
+}
+
+// isIso4217Numeric is the validation function for validating if the current field's value is a valid iso4217 numeric currency code.
+func isIso4217Numeric(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var code int
+ switch field.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ code = int(field.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ code = int(field.Uint())
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+
+ _, ok := iso4217_numeric[code]
+ return ok
+}
+
+// isBCP47LanguageTag is the validation function for validating if the current field's value is a valid BCP 47 language tag, as parsed by language.Parse
+func isBCP47LanguageTag(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ _, err := language.Parse(field.String())
+ return err == nil
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
+
+// isIsoBicFormat is the validation function for validating if the current field's value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362
+func isIsoBicFormat(fl FieldLevel) bool {
+ bicString := fl.Field().String()
+
+ return bicRegex().MatchString(bicString)
+}
+
+// isSemverFormat is the validation function for validating if the current field's value is a valid semver version, defined in Semantic Versioning 2.0.0
+func isSemverFormat(fl FieldLevel) bool {
+ semverString := fl.Field().String()
+
+ return semverRegex().MatchString(semverString)
+}
+
+// isCveFormat is the validation function for validating if the current field's value is a valid cve id, defined in CVE mitre org
+func isCveFormat(fl FieldLevel) bool {
+ cveString := fl.Field().String()
+
+ return cveRegex().MatchString(cveString)
+}
+
+// isDnsRFC1035LabelFormat is the validation function
+// for validating if the current field's value is
+// a valid dns RFC 1035 label, defined in RFC 1035.
+func isDnsRFC1035LabelFormat(fl FieldLevel) bool {
+ val := fl.Field().String()
+ return dnsRegexRFC1035Label().MatchString(val)
+}
+
+// digitsHaveLuhnChecksum returns true if and only if the last element of the given digits slice is the Luhn checksum of the previous elements
+func digitsHaveLuhnChecksum(digits []string) bool {
+ size := len(digits)
+ sum := 0
+ for i, digit := range digits {
+ value, err := strconv.Atoi(digit)
+ if err != nil {
+ return false
+ }
+ if size%2 == 0 && i%2 == 0 || size%2 == 1 && i%2 == 1 {
+ v := value * 2
+ if v >= 10 {
+ sum += 1 + (v % 10)
+ } else {
+ sum += v
+ }
+ } else {
+ sum += value
+ }
+ }
+ return (sum % 10) == 0
+}
+
+// isMongoDBObjectId is the validation function for validating if the current field's value is valid MongoDB ObjectID
+func isMongoDBObjectId(fl FieldLevel) bool {
+ val := fl.Field().String()
+ return mongodbIdRegex().MatchString(val)
+}
+
+// isMongoDBConnectionString is the validation function for validating if the current field's value is valid MongoDB Connection String
+func isMongoDBConnectionString(fl FieldLevel) bool {
+ val := fl.Field().String()
+ return mongodbConnectionRegex().MatchString(val)
+}
+
+// isSpiceDB is the validation function for validating if the current field's value is valid for use with Authzed SpiceDB in the indicated way
+func isSpiceDB(fl FieldLevel) bool {
+ val := fl.Field().String()
+ param := fl.Param()
+
+ switch param {
+ case "permission":
+ return spicedbPermissionRegex().MatchString(val)
+ case "type":
+ return spicedbTypeRegex().MatchString(val)
+ case "id", "":
+ return spicedbIDRegex().MatchString(val)
+ }
+
+ panic("Unrecognized parameter: " + param)
+}
+
+// isCreditCard is the validation function for validating if the current field's value is a valid credit card number
+func isCreditCard(fl FieldLevel) bool {
+ val := fl.Field().String()
+ var creditCard bytes.Buffer
+ segments := strings.Split(val, " ")
+ for _, segment := range segments {
+ if len(segment) < 3 {
+ return false
+ }
+ creditCard.WriteString(segment)
+ }
+
+ ccDigits := strings.Split(creditCard.String(), "")
+ size := len(ccDigits)
+ if size < 12 || size > 19 {
+ return false
+ }
+
+ return digitsHaveLuhnChecksum(ccDigits)
+}
+
+// hasLuhnChecksum is the validation for validating if the current field's value has a valid Luhn checksum
+func hasLuhnChecksum(fl FieldLevel) bool {
+ field := fl.Field()
+ var str string // convert to a string which will then be split into single digits; easier and more readable than shifting/extracting single digits from a number
+ switch field.Kind() {
+ case reflect.String:
+ str = field.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ str = strconv.FormatInt(field.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ str = strconv.FormatUint(field.Uint(), 10)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+ size := len(str)
+ if size < 2 { // there has to be at least one digit that carries a meaning + the checksum
+ return false
+ }
+ digits := strings.Split(str, "")
+ return digitsHaveLuhnChecksum(digits)
+}
+
+// isCron is the validation function for validating if the current field's value is a valid cron expression
+func isCron(fl FieldLevel) bool {
+ cronString := fl.Field().String()
+ return cronRegex().MatchString(cronString)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/cache.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/cache.go
index 0d18d6ec49c..2063e1b795d 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/cache.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/cache.go
@@ -20,6 +20,7 @@ const (
typeOr
typeKeys
typeEndKeys
+ typeOmitNil
)
const (
@@ -114,6 +115,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr
cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]}
numFields := current.NumField()
+ rules := v.rules[typ]
var ctag *cTag
var fld reflect.StructField
@@ -124,11 +126,15 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr
fld = typ.Field(i)
- if !fld.Anonymous && len(fld.PkgPath) > 0 {
+ if !v.privateFieldValidation && !fld.Anonymous && len(fld.PkgPath) > 0 {
continue
}
- tag = fld.Tag.Get(v.tagName)
+ if rtag, ok := rules[fld.Name]; ok {
+ tag = rtag
+ } else {
+ tag = fld.Tag.Get(v.tagName)
+ }
if tag == skipValidationTag {
continue
@@ -247,6 +253,10 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s
current.typeof = typeOmitEmpty
continue
+ case omitnil:
+ current.typeof = typeOmitNil
+ continue
+
case structOnlyTag:
current.typeof = typeStructOnly
continue
@@ -284,7 +294,7 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s
if wrapper, ok := v.validations[current.tag]; ok {
current.fn = wrapper.fn
- current.runValidationWhenNil = wrapper.runValidatinOnNil
+ current.runValidationWhenNil = wrapper.runValidationOnNil
} else {
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/country_codes.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/country_codes.go
new file mode 100644
index 00000000000..b5f10d3c113
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/country_codes.go
@@ -0,0 +1,1177 @@
+package validator
+
+var iso3166_1_alpha2 = map[string]struct{}{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ "AF": {}, "AX": {}, "AL": {}, "DZ": {}, "AS": {},
+ "AD": {}, "AO": {}, "AI": {}, "AQ": {}, "AG": {},
+ "AR": {}, "AM": {}, "AW": {}, "AU": {}, "AT": {},
+ "AZ": {}, "BS": {}, "BH": {}, "BD": {}, "BB": {},
+ "BY": {}, "BE": {}, "BZ": {}, "BJ": {}, "BM": {},
+ "BT": {}, "BO": {}, "BQ": {}, "BA": {}, "BW": {},
+ "BV": {}, "BR": {}, "IO": {}, "BN": {}, "BG": {},
+ "BF": {}, "BI": {}, "KH": {}, "CM": {}, "CA": {},
+ "CV": {}, "KY": {}, "CF": {}, "TD": {}, "CL": {},
+ "CN": {}, "CX": {}, "CC": {}, "CO": {}, "KM": {},
+ "CG": {}, "CD": {}, "CK": {}, "CR": {}, "CI": {},
+ "HR": {}, "CU": {}, "CW": {}, "CY": {}, "CZ": {},
+ "DK": {}, "DJ": {}, "DM": {}, "DO": {}, "EC": {},
+ "EG": {}, "SV": {}, "GQ": {}, "ER": {}, "EE": {},
+ "ET": {}, "FK": {}, "FO": {}, "FJ": {}, "FI": {},
+ "FR": {}, "GF": {}, "PF": {}, "TF": {}, "GA": {},
+ "GM": {}, "GE": {}, "DE": {}, "GH": {}, "GI": {},
+ "GR": {}, "GL": {}, "GD": {}, "GP": {}, "GU": {},
+ "GT": {}, "GG": {}, "GN": {}, "GW": {}, "GY": {},
+ "HT": {}, "HM": {}, "VA": {}, "HN": {}, "HK": {},
+ "HU": {}, "IS": {}, "IN": {}, "ID": {}, "IR": {},
+ "IQ": {}, "IE": {}, "IM": {}, "IL": {}, "IT": {},
+ "JM": {}, "JP": {}, "JE": {}, "JO": {}, "KZ": {},
+ "KE": {}, "KI": {}, "KP": {}, "KR": {}, "KW": {},
+ "KG": {}, "LA": {}, "LV": {}, "LB": {}, "LS": {},
+ "LR": {}, "LY": {}, "LI": {}, "LT": {}, "LU": {},
+ "MO": {}, "MK": {}, "MG": {}, "MW": {}, "MY": {},
+ "MV": {}, "ML": {}, "MT": {}, "MH": {}, "MQ": {},
+ "MR": {}, "MU": {}, "YT": {}, "MX": {}, "FM": {},
+ "MD": {}, "MC": {}, "MN": {}, "ME": {}, "MS": {},
+ "MA": {}, "MZ": {}, "MM": {}, "NA": {}, "NR": {},
+ "NP": {}, "NL": {}, "NC": {}, "NZ": {}, "NI": {},
+ "NE": {}, "NG": {}, "NU": {}, "NF": {}, "MP": {},
+ "NO": {}, "OM": {}, "PK": {}, "PW": {}, "PS": {},
+ "PA": {}, "PG": {}, "PY": {}, "PE": {}, "PH": {},
+ "PN": {}, "PL": {}, "PT": {}, "PR": {}, "QA": {},
+ "RE": {}, "RO": {}, "RU": {}, "RW": {}, "BL": {},
+ "SH": {}, "KN": {}, "LC": {}, "MF": {}, "PM": {},
+ "VC": {}, "WS": {}, "SM": {}, "ST": {}, "SA": {},
+ "SN": {}, "RS": {}, "SC": {}, "SL": {}, "SG": {},
+ "SX": {}, "SK": {}, "SI": {}, "SB": {}, "SO": {},
+ "ZA": {}, "GS": {}, "SS": {}, "ES": {}, "LK": {},
+ "SD": {}, "SR": {}, "SJ": {}, "SZ": {}, "SE": {},
+ "CH": {}, "SY": {}, "TW": {}, "TJ": {}, "TZ": {},
+ "TH": {}, "TL": {}, "TG": {}, "TK": {}, "TO": {},
+ "TT": {}, "TN": {}, "TR": {}, "TM": {}, "TC": {},
+ "TV": {}, "UG": {}, "UA": {}, "AE": {}, "GB": {},
+ "US": {}, "UM": {}, "UY": {}, "UZ": {}, "VU": {},
+ "VE": {}, "VN": {}, "VG": {}, "VI": {}, "WF": {},
+ "EH": {}, "YE": {}, "ZM": {}, "ZW": {}, "XK": {},
+}
+
+var iso3166_1_alpha2_eu = map[string]struct{}{
+ "AT": {}, "BE": {}, "BG": {}, "HR": {}, "CY": {},
+ "CZ": {}, "DK": {}, "EE": {}, "FI": {}, "FR": {},
+ "DE": {}, "GR": {}, "HU": {}, "IE": {}, "IT": {},
+ "LV": {}, "LT": {}, "LU": {}, "MT": {}, "NL": {},
+ "PL": {}, "PT": {}, "RO": {}, "SK": {}, "SI": {},
+ "ES": {}, "SE": {},
+}
+
+var iso3166_1_alpha3 = map[string]struct{}{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ "AFG": {}, "ALB": {}, "DZA": {}, "ASM": {}, "AND": {},
+ "AGO": {}, "AIA": {}, "ATA": {}, "ATG": {}, "ARG": {},
+ "ARM": {}, "ABW": {}, "AUS": {}, "AUT": {}, "AZE": {},
+ "BHS": {}, "BHR": {}, "BGD": {}, "BRB": {}, "BLR": {},
+ "BEL": {}, "BLZ": {}, "BEN": {}, "BMU": {}, "BTN": {},
+ "BOL": {}, "BES": {}, "BIH": {}, "BWA": {}, "BVT": {},
+ "BRA": {}, "IOT": {}, "BRN": {}, "BGR": {}, "BFA": {},
+ "BDI": {}, "CPV": {}, "KHM": {}, "CMR": {}, "CAN": {},
+ "CYM": {}, "CAF": {}, "TCD": {}, "CHL": {}, "CHN": {},
+ "CXR": {}, "CCK": {}, "COL": {}, "COM": {}, "COD": {},
+ "COG": {}, "COK": {}, "CRI": {}, "HRV": {}, "CUB": {},
+ "CUW": {}, "CYP": {}, "CZE": {}, "CIV": {}, "DNK": {},
+ "DJI": {}, "DMA": {}, "DOM": {}, "ECU": {}, "EGY": {},
+ "SLV": {}, "GNQ": {}, "ERI": {}, "EST": {}, "SWZ": {},
+ "ETH": {}, "FLK": {}, "FRO": {}, "FJI": {}, "FIN": {},
+ "FRA": {}, "GUF": {}, "PYF": {}, "ATF": {}, "GAB": {},
+ "GMB": {}, "GEO": {}, "DEU": {}, "GHA": {}, "GIB": {},
+ "GRC": {}, "GRL": {}, "GRD": {}, "GLP": {}, "GUM": {},
+ "GTM": {}, "GGY": {}, "GIN": {}, "GNB": {}, "GUY": {},
+ "HTI": {}, "HMD": {}, "VAT": {}, "HND": {}, "HKG": {},
+ "HUN": {}, "ISL": {}, "IND": {}, "IDN": {}, "IRN": {},
+ "IRQ": {}, "IRL": {}, "IMN": {}, "ISR": {}, "ITA": {},
+ "JAM": {}, "JPN": {}, "JEY": {}, "JOR": {}, "KAZ": {},
+ "KEN": {}, "KIR": {}, "PRK": {}, "KOR": {}, "KWT": {},
+ "KGZ": {}, "LAO": {}, "LVA": {}, "LBN": {}, "LSO": {},
+ "LBR": {}, "LBY": {}, "LIE": {}, "LTU": {}, "LUX": {},
+ "MAC": {}, "MDG": {}, "MWI": {}, "MYS": {}, "MDV": {},
+ "MLI": {}, "MLT": {}, "MHL": {}, "MTQ": {}, "MRT": {},
+ "MUS": {}, "MYT": {}, "MEX": {}, "FSM": {}, "MDA": {},
+ "MCO": {}, "MNG": {}, "MNE": {}, "MSR": {}, "MAR": {},
+ "MOZ": {}, "MMR": {}, "NAM": {}, "NRU": {}, "NPL": {},
+ "NLD": {}, "NCL": {}, "NZL": {}, "NIC": {}, "NER": {},
+ "NGA": {}, "NIU": {}, "NFK": {}, "MKD": {}, "MNP": {},
+ "NOR": {}, "OMN": {}, "PAK": {}, "PLW": {}, "PSE": {},
+ "PAN": {}, "PNG": {}, "PRY": {}, "PER": {}, "PHL": {},
+ "PCN": {}, "POL": {}, "PRT": {}, "PRI": {}, "QAT": {},
+ "ROU": {}, "RUS": {}, "RWA": {}, "REU": {}, "BLM": {},
+ "SHN": {}, "KNA": {}, "LCA": {}, "MAF": {}, "SPM": {},
+ "VCT": {}, "WSM": {}, "SMR": {}, "STP": {}, "SAU": {},
+ "SEN": {}, "SRB": {}, "SYC": {}, "SLE": {}, "SGP": {},
+ "SXM": {}, "SVK": {}, "SVN": {}, "SLB": {}, "SOM": {},
+ "ZAF": {}, "SGS": {}, "SSD": {}, "ESP": {}, "LKA": {},
+ "SDN": {}, "SUR": {}, "SJM": {}, "SWE": {}, "CHE": {},
+ "SYR": {}, "TWN": {}, "TJK": {}, "TZA": {}, "THA": {},
+ "TLS": {}, "TGO": {}, "TKL": {}, "TON": {}, "TTO": {},
+ "TUN": {}, "TUR": {}, "TKM": {}, "TCA": {}, "TUV": {},
+ "UGA": {}, "UKR": {}, "ARE": {}, "GBR": {}, "UMI": {},
+ "USA": {}, "URY": {}, "UZB": {}, "VUT": {}, "VEN": {},
+ "VNM": {}, "VGB": {}, "VIR": {}, "WLF": {}, "ESH": {},
+ "YEM": {}, "ZMB": {}, "ZWE": {}, "ALA": {}, "UNK": {},
+}
+
+var iso3166_1_alpha3_eu = map[string]struct{}{
+ "AUT": {}, "BEL": {}, "BGR": {}, "HRV": {}, "CYP": {},
+ "CZE": {}, "DNK": {}, "EST": {}, "FIN": {}, "FRA": {},
+ "DEU": {}, "GRC": {}, "HUN": {}, "IRL": {}, "ITA": {},
+ "LVA": {}, "LTU": {}, "LUX": {}, "MLT": {}, "NLD": {},
+ "POL": {}, "PRT": {}, "ROU": {}, "SVK": {}, "SVN": {},
+ "ESP": {}, "SWE": {},
+}
+var iso3166_1_alpha_numeric = map[int]struct{}{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ 4: {}, 8: {}, 12: {}, 16: {}, 20: {},
+ 24: {}, 660: {}, 10: {}, 28: {}, 32: {},
+ 51: {}, 533: {}, 36: {}, 40: {}, 31: {},
+ 44: {}, 48: {}, 50: {}, 52: {}, 112: {},
+ 56: {}, 84: {}, 204: {}, 60: {}, 64: {},
+ 68: {}, 535: {}, 70: {}, 72: {}, 74: {},
+ 76: {}, 86: {}, 96: {}, 100: {}, 854: {},
+ 108: {}, 132: {}, 116: {}, 120: {}, 124: {},
+ 136: {}, 140: {}, 148: {}, 152: {}, 156: {},
+ 162: {}, 166: {}, 170: {}, 174: {}, 180: {},
+ 178: {}, 184: {}, 188: {}, 191: {}, 192: {},
+ 531: {}, 196: {}, 203: {}, 384: {}, 208: {},
+ 262: {}, 212: {}, 214: {}, 218: {}, 818: {},
+ 222: {}, 226: {}, 232: {}, 233: {}, 748: {},
+ 231: {}, 238: {}, 234: {}, 242: {}, 246: {},
+ 250: {}, 254: {}, 258: {}, 260: {}, 266: {},
+ 270: {}, 268: {}, 276: {}, 288: {}, 292: {},
+ 300: {}, 304: {}, 308: {}, 312: {}, 316: {},
+ 320: {}, 831: {}, 324: {}, 624: {}, 328: {},
+ 332: {}, 334: {}, 336: {}, 340: {}, 344: {},
+ 348: {}, 352: {}, 356: {}, 360: {}, 364: {},
+ 368: {}, 372: {}, 833: {}, 376: {}, 380: {},
+ 388: {}, 392: {}, 832: {}, 400: {}, 398: {},
+ 404: {}, 296: {}, 408: {}, 410: {}, 414: {},
+ 417: {}, 418: {}, 428: {}, 422: {}, 426: {},
+ 430: {}, 434: {}, 438: {}, 440: {}, 442: {},
+ 446: {}, 450: {}, 454: {}, 458: {}, 462: {},
+ 466: {}, 470: {}, 584: {}, 474: {}, 478: {},
+ 480: {}, 175: {}, 484: {}, 583: {}, 498: {},
+ 492: {}, 496: {}, 499: {}, 500: {}, 504: {},
+ 508: {}, 104: {}, 516: {}, 520: {}, 524: {},
+ 528: {}, 540: {}, 554: {}, 558: {}, 562: {},
+ 566: {}, 570: {}, 574: {}, 807: {}, 580: {},
+ 578: {}, 512: {}, 586: {}, 585: {}, 275: {},
+ 591: {}, 598: {}, 600: {}, 604: {}, 608: {},
+ 612: {}, 616: {}, 620: {}, 630: {}, 634: {},
+ 642: {}, 643: {}, 646: {}, 638: {}, 652: {},
+ 654: {}, 659: {}, 662: {}, 663: {}, 666: {},
+ 670: {}, 882: {}, 674: {}, 678: {}, 682: {},
+ 686: {}, 688: {}, 690: {}, 694: {}, 702: {},
+ 534: {}, 703: {}, 705: {}, 90: {}, 706: {},
+ 710: {}, 239: {}, 728: {}, 724: {}, 144: {},
+ 729: {}, 740: {}, 744: {}, 752: {}, 756: {},
+ 760: {}, 158: {}, 762: {}, 834: {}, 764: {},
+ 626: {}, 768: {}, 772: {}, 776: {}, 780: {},
+ 788: {}, 792: {}, 795: {}, 796: {}, 798: {},
+ 800: {}, 804: {}, 784: {}, 826: {}, 581: {},
+ 840: {}, 858: {}, 860: {}, 548: {}, 862: {},
+ 704: {}, 92: {}, 850: {}, 876: {}, 732: {},
+ 887: {}, 894: {}, 716: {}, 248: {}, 153: {},
+}
+
+var iso3166_1_alpha_numeric_eu = map[int]struct{}{
+ 40: {}, 56: {}, 100: {}, 191: {}, 196: {},
+ 200: {}, 208: {}, 233: {}, 246: {}, 250: {},
+ 276: {}, 300: {}, 348: {}, 372: {}, 380: {},
+ 428: {}, 440: {}, 442: {}, 470: {}, 528: {},
+ 616: {}, 620: {}, 642: {}, 703: {}, 705: {},
+ 724: {}, 752: {},
+}
+
+var iso3166_2 = map[string]struct{}{
+ "AD-02": {}, "AD-03": {}, "AD-04": {}, "AD-05": {}, "AD-06": {},
+ "AD-07": {}, "AD-08": {}, "AE-AJ": {}, "AE-AZ": {}, "AE-DU": {},
+ "AE-FU": {}, "AE-RK": {}, "AE-SH": {}, "AE-UQ": {}, "AF-BAL": {},
+ "AF-BAM": {}, "AF-BDG": {}, "AF-BDS": {}, "AF-BGL": {}, "AF-DAY": {},
+ "AF-FRA": {}, "AF-FYB": {}, "AF-GHA": {}, "AF-GHO": {}, "AF-HEL": {},
+ "AF-HER": {}, "AF-JOW": {}, "AF-KAB": {}, "AF-KAN": {}, "AF-KAP": {},
+ "AF-KDZ": {}, "AF-KHO": {}, "AF-KNR": {}, "AF-LAG": {}, "AF-LOG": {},
+ "AF-NAN": {}, "AF-NIM": {}, "AF-NUR": {}, "AF-PAN": {}, "AF-PAR": {},
+ "AF-PIA": {}, "AF-PKA": {}, "AF-SAM": {}, "AF-SAR": {}, "AF-TAK": {},
+ "AF-URU": {}, "AF-WAR": {}, "AF-ZAB": {}, "AG-03": {}, "AG-04": {},
+ "AG-05": {}, "AG-06": {}, "AG-07": {}, "AG-08": {}, "AG-10": {},
+ "AG-11": {}, "AL-01": {}, "AL-02": {}, "AL-03": {}, "AL-04": {},
+ "AL-05": {}, "AL-06": {}, "AL-07": {}, "AL-08": {}, "AL-09": {},
+ "AL-10": {}, "AL-11": {}, "AL-12": {}, "AL-BR": {}, "AL-BU": {},
+ "AL-DI": {}, "AL-DL": {}, "AL-DR": {}, "AL-DV": {}, "AL-EL": {},
+ "AL-ER": {}, "AL-FR": {}, "AL-GJ": {}, "AL-GR": {}, "AL-HA": {},
+ "AL-KA": {}, "AL-KB": {}, "AL-KC": {}, "AL-KO": {}, "AL-KR": {},
+ "AL-KU": {}, "AL-LB": {}, "AL-LE": {}, "AL-LU": {}, "AL-MK": {},
+ "AL-MM": {}, "AL-MR": {}, "AL-MT": {}, "AL-PG": {}, "AL-PQ": {},
+ "AL-PR": {}, "AL-PU": {}, "AL-SH": {}, "AL-SK": {}, "AL-SR": {},
+ "AL-TE": {}, "AL-TP": {}, "AL-TR": {}, "AL-VL": {}, "AM-AG": {},
+ "AM-AR": {}, "AM-AV": {}, "AM-ER": {}, "AM-GR": {}, "AM-KT": {},
+ "AM-LO": {}, "AM-SH": {}, "AM-SU": {}, "AM-TV": {}, "AM-VD": {},
+ "AO-BGO": {}, "AO-BGU": {}, "AO-BIE": {}, "AO-CAB": {}, "AO-CCU": {},
+ "AO-CNN": {}, "AO-CNO": {}, "AO-CUS": {}, "AO-HUA": {}, "AO-HUI": {},
+ "AO-LNO": {}, "AO-LSU": {}, "AO-LUA": {}, "AO-MAL": {}, "AO-MOX": {},
+ "AO-NAM": {}, "AO-UIG": {}, "AO-ZAI": {}, "AR-A": {}, "AR-B": {},
+ "AR-C": {}, "AR-D": {}, "AR-E": {}, "AR-F": {}, "AR-G": {}, "AR-H": {},
+ "AR-J": {}, "AR-K": {}, "AR-L": {}, "AR-M": {}, "AR-N": {},
+ "AR-P": {}, "AR-Q": {}, "AR-R": {}, "AR-S": {}, "AR-T": {},
+ "AR-U": {}, "AR-V": {}, "AR-W": {}, "AR-X": {}, "AR-Y": {},
+ "AR-Z": {}, "AT-1": {}, "AT-2": {}, "AT-3": {}, "AT-4": {},
+ "AT-5": {}, "AT-6": {}, "AT-7": {}, "AT-8": {}, "AT-9": {},
+ "AU-ACT": {}, "AU-NSW": {}, "AU-NT": {}, "AU-QLD": {}, "AU-SA": {},
+ "AU-TAS": {}, "AU-VIC": {}, "AU-WA": {}, "AZ-ABS": {}, "AZ-AGA": {},
+ "AZ-AGC": {}, "AZ-AGM": {}, "AZ-AGS": {}, "AZ-AGU": {}, "AZ-AST": {},
+ "AZ-BA": {}, "AZ-BAB": {}, "AZ-BAL": {}, "AZ-BAR": {}, "AZ-BEY": {},
+ "AZ-BIL": {}, "AZ-CAB": {}, "AZ-CAL": {}, "AZ-CUL": {}, "AZ-DAS": {},
+ "AZ-FUZ": {}, "AZ-GA": {}, "AZ-GAD": {}, "AZ-GOR": {}, "AZ-GOY": {},
+ "AZ-GYG": {}, "AZ-HAC": {}, "AZ-IMI": {}, "AZ-ISM": {}, "AZ-KAL": {},
+ "AZ-KAN": {}, "AZ-KUR": {}, "AZ-LA": {}, "AZ-LAC": {}, "AZ-LAN": {},
+ "AZ-LER": {}, "AZ-MAS": {}, "AZ-MI": {}, "AZ-NA": {}, "AZ-NEF": {},
+ "AZ-NV": {}, "AZ-NX": {}, "AZ-OGU": {}, "AZ-ORD": {}, "AZ-QAB": {},
+ "AZ-QAX": {}, "AZ-QAZ": {}, "AZ-QBA": {}, "AZ-QBI": {}, "AZ-QOB": {},
+ "AZ-QUS": {}, "AZ-SA": {}, "AZ-SAB": {}, "AZ-SAD": {}, "AZ-SAH": {},
+ "AZ-SAK": {}, "AZ-SAL": {}, "AZ-SAR": {}, "AZ-SAT": {}, "AZ-SBN": {},
+ "AZ-SIY": {}, "AZ-SKR": {}, "AZ-SM": {}, "AZ-SMI": {}, "AZ-SMX": {},
+ "AZ-SR": {}, "AZ-SUS": {}, "AZ-TAR": {}, "AZ-TOV": {}, "AZ-UCA": {},
+ "AZ-XA": {}, "AZ-XAC": {}, "AZ-XCI": {}, "AZ-XIZ": {}, "AZ-XVD": {},
+ "AZ-YAR": {}, "AZ-YE": {}, "AZ-YEV": {}, "AZ-ZAN": {}, "AZ-ZAQ": {},
+ "AZ-ZAR": {}, "BA-01": {}, "BA-02": {}, "BA-03": {}, "BA-04": {},
+ "BA-05": {}, "BA-06": {}, "BA-07": {}, "BA-08": {}, "BA-09": {},
+ "BA-10": {}, "BA-BIH": {}, "BA-BRC": {}, "BA-SRP": {}, "BB-01": {},
+ "BB-02": {}, "BB-03": {}, "BB-04": {}, "BB-05": {}, "BB-06": {},
+ "BB-07": {}, "BB-08": {}, "BB-09": {}, "BB-10": {}, "BB-11": {},
+ "BD-01": {}, "BD-02": {}, "BD-03": {}, "BD-04": {}, "BD-05": {},
+ "BD-06": {}, "BD-07": {}, "BD-08": {}, "BD-09": {}, "BD-10": {},
+ "BD-11": {}, "BD-12": {}, "BD-13": {}, "BD-14": {}, "BD-15": {},
+ "BD-16": {}, "BD-17": {}, "BD-18": {}, "BD-19": {}, "BD-20": {},
+ "BD-21": {}, "BD-22": {}, "BD-23": {}, "BD-24": {}, "BD-25": {},
+ "BD-26": {}, "BD-27": {}, "BD-28": {}, "BD-29": {}, "BD-30": {},
+ "BD-31": {}, "BD-32": {}, "BD-33": {}, "BD-34": {}, "BD-35": {},
+ "BD-36": {}, "BD-37": {}, "BD-38": {}, "BD-39": {}, "BD-40": {},
+ "BD-41": {}, "BD-42": {}, "BD-43": {}, "BD-44": {}, "BD-45": {},
+ "BD-46": {}, "BD-47": {}, "BD-48": {}, "BD-49": {}, "BD-50": {},
+ "BD-51": {}, "BD-52": {}, "BD-53": {}, "BD-54": {}, "BD-55": {},
+ "BD-56": {}, "BD-57": {}, "BD-58": {}, "BD-59": {}, "BD-60": {},
+ "BD-61": {}, "BD-62": {}, "BD-63": {}, "BD-64": {}, "BD-A": {},
+ "BD-B": {}, "BD-C": {}, "BD-D": {}, "BD-E": {}, "BD-F": {},
+ "BD-G": {}, "BE-BRU": {}, "BE-VAN": {}, "BE-VBR": {}, "BE-VLG": {},
+ "BE-VLI": {}, "BE-VOV": {}, "BE-VWV": {}, "BE-WAL": {}, "BE-WBR": {},
+ "BE-WHT": {}, "BE-WLG": {}, "BE-WLX": {}, "BE-WNA": {}, "BF-01": {},
+ "BF-02": {}, "BF-03": {}, "BF-04": {}, "BF-05": {}, "BF-06": {},
+ "BF-07": {}, "BF-08": {}, "BF-09": {}, "BF-10": {}, "BF-11": {},
+ "BF-12": {}, "BF-13": {}, "BF-BAL": {}, "BF-BAM": {}, "BF-BAN": {},
+ "BF-BAZ": {}, "BF-BGR": {}, "BF-BLG": {}, "BF-BLK": {}, "BF-COM": {},
+ "BF-GAN": {}, "BF-GNA": {}, "BF-GOU": {}, "BF-HOU": {}, "BF-IOB": {},
+ "BF-KAD": {}, "BF-KEN": {}, "BF-KMD": {}, "BF-KMP": {}, "BF-KOP": {},
+ "BF-KOS": {}, "BF-KOT": {}, "BF-KOW": {}, "BF-LER": {}, "BF-LOR": {},
+ "BF-MOU": {}, "BF-NAM": {}, "BF-NAO": {}, "BF-NAY": {}, "BF-NOU": {},
+ "BF-OUB": {}, "BF-OUD": {}, "BF-PAS": {}, "BF-PON": {}, "BF-SEN": {},
+ "BF-SIS": {}, "BF-SMT": {}, "BF-SNG": {}, "BF-SOM": {}, "BF-SOR": {},
+ "BF-TAP": {}, "BF-TUI": {}, "BF-YAG": {}, "BF-YAT": {}, "BF-ZIR": {},
+ "BF-ZON": {}, "BF-ZOU": {}, "BG-01": {}, "BG-02": {}, "BG-03": {},
+ "BG-04": {}, "BG-05": {}, "BG-06": {}, "BG-07": {}, "BG-08": {},
+ "BG-09": {}, "BG-10": {}, "BG-11": {}, "BG-12": {}, "BG-13": {},
+ "BG-14": {}, "BG-15": {}, "BG-16": {}, "BG-17": {}, "BG-18": {},
+ "BG-19": {}, "BG-20": {}, "BG-21": {}, "BG-22": {}, "BG-23": {},
+ "BG-24": {}, "BG-25": {}, "BG-26": {}, "BG-27": {}, "BG-28": {},
+ "BH-13": {}, "BH-14": {}, "BH-15": {}, "BH-16": {}, "BH-17": {},
+ "BI-BB": {}, "BI-BL": {}, "BI-BM": {}, "BI-BR": {}, "BI-CA": {},
+ "BI-CI": {}, "BI-GI": {}, "BI-KI": {}, "BI-KR": {}, "BI-KY": {},
+ "BI-MA": {}, "BI-MU": {}, "BI-MW": {}, "BI-NG": {}, "BI-RM": {}, "BI-RT": {},
+ "BI-RY": {}, "BJ-AK": {}, "BJ-AL": {}, "BJ-AQ": {}, "BJ-BO": {},
+ "BJ-CO": {}, "BJ-DO": {}, "BJ-KO": {}, "BJ-LI": {}, "BJ-MO": {},
+ "BJ-OU": {}, "BJ-PL": {}, "BJ-ZO": {}, "BN-BE": {}, "BN-BM": {},
+ "BN-TE": {}, "BN-TU": {}, "BO-B": {}, "BO-C": {}, "BO-H": {},
+ "BO-L": {}, "BO-N": {}, "BO-O": {}, "BO-P": {}, "BO-S": {},
+ "BO-T": {}, "BQ-BO": {}, "BQ-SA": {}, "BQ-SE": {}, "BR-AC": {},
+ "BR-AL": {}, "BR-AM": {}, "BR-AP": {}, "BR-BA": {}, "BR-CE": {},
+ "BR-DF": {}, "BR-ES": {}, "BR-FN": {}, "BR-GO": {}, "BR-MA": {},
+ "BR-MG": {}, "BR-MS": {}, "BR-MT": {}, "BR-PA": {}, "BR-PB": {},
+ "BR-PE": {}, "BR-PI": {}, "BR-PR": {}, "BR-RJ": {}, "BR-RN": {},
+ "BR-RO": {}, "BR-RR": {}, "BR-RS": {}, "BR-SC": {}, "BR-SE": {},
+ "BR-SP": {}, "BR-TO": {}, "BS-AK": {}, "BS-BI": {}, "BS-BP": {},
+ "BS-BY": {}, "BS-CE": {}, "BS-CI": {}, "BS-CK": {}, "BS-CO": {},
+ "BS-CS": {}, "BS-EG": {}, "BS-EX": {}, "BS-FP": {}, "BS-GC": {},
+ "BS-HI": {}, "BS-HT": {}, "BS-IN": {}, "BS-LI": {}, "BS-MC": {},
+ "BS-MG": {}, "BS-MI": {}, "BS-NE": {}, "BS-NO": {}, "BS-NP": {}, "BS-NS": {},
+ "BS-RC": {}, "BS-RI": {}, "BS-SA": {}, "BS-SE": {}, "BS-SO": {},
+ "BS-SS": {}, "BS-SW": {}, "BS-WG": {}, "BT-11": {}, "BT-12": {},
+ "BT-13": {}, "BT-14": {}, "BT-15": {}, "BT-21": {}, "BT-22": {},
+ "BT-23": {}, "BT-24": {}, "BT-31": {}, "BT-32": {}, "BT-33": {},
+ "BT-34": {}, "BT-41": {}, "BT-42": {}, "BT-43": {}, "BT-44": {},
+ "BT-45": {}, "BT-GA": {}, "BT-TY": {}, "BW-CE": {}, "BW-CH": {}, "BW-GH": {},
+ "BW-KG": {}, "BW-KL": {}, "BW-KW": {}, "BW-NE": {}, "BW-NW": {},
+ "BW-SE": {}, "BW-SO": {}, "BY-BR": {}, "BY-HM": {}, "BY-HO": {},
+ "BY-HR": {}, "BY-MA": {}, "BY-MI": {}, "BY-VI": {}, "BZ-BZ": {},
+ "BZ-CY": {}, "BZ-CZL": {}, "BZ-OW": {}, "BZ-SC": {}, "BZ-TOL": {},
+ "CA-AB": {}, "CA-BC": {}, "CA-MB": {}, "CA-NB": {}, "CA-NL": {},
+ "CA-NS": {}, "CA-NT": {}, "CA-NU": {}, "CA-ON": {}, "CA-PE": {},
+ "CA-QC": {}, "CA-SK": {}, "CA-YT": {}, "CD-BC": {}, "CD-BN": {},
+ "CD-EQ": {}, "CD-HK": {}, "CD-IT": {}, "CD-KA": {}, "CD-KC": {}, "CD-KE": {}, "CD-KG": {}, "CD-KN": {},
+ "CD-KW": {}, "CD-KS": {}, "CD-LU": {}, "CD-MA": {}, "CD-NK": {}, "CD-OR": {}, "CD-SA": {}, "CD-SK": {},
+ "CD-TA": {}, "CD-TO": {}, "CF-AC": {}, "CF-BB": {}, "CF-BGF": {}, "CF-BK": {}, "CF-HK": {}, "CF-HM": {},
+ "CF-HS": {}, "CF-KB": {}, "CF-KG": {}, "CF-LB": {}, "CF-MB": {},
+ "CF-MP": {}, "CF-NM": {}, "CF-OP": {}, "CF-SE": {}, "CF-UK": {},
+ "CF-VK": {}, "CG-11": {}, "CG-12": {}, "CG-13": {}, "CG-14": {},
+ "CG-15": {}, "CG-16": {}, "CG-2": {}, "CG-5": {}, "CG-7": {}, "CG-8": {},
+ "CG-9": {}, "CG-BZV": {}, "CH-AG": {}, "CH-AI": {}, "CH-AR": {},
+ "CH-BE": {}, "CH-BL": {}, "CH-BS": {}, "CH-FR": {}, "CH-GE": {},
+ "CH-GL": {}, "CH-GR": {}, "CH-JU": {}, "CH-LU": {}, "CH-NE": {},
+ "CH-NW": {}, "CH-OW": {}, "CH-SG": {}, "CH-SH": {}, "CH-SO": {},
+ "CH-SZ": {}, "CH-TG": {}, "CH-TI": {}, "CH-UR": {}, "CH-VD": {},
+ "CH-VS": {}, "CH-ZG": {}, "CH-ZH": {}, "CI-AB": {}, "CI-BS": {},
+ "CI-CM": {}, "CI-DN": {}, "CI-GD": {}, "CI-LC": {}, "CI-LG": {},
+ "CI-MG": {}, "CI-SM": {}, "CI-SV": {}, "CI-VB": {}, "CI-WR": {},
+ "CI-YM": {}, "CI-ZZ": {}, "CL-AI": {}, "CL-AN": {}, "CL-AP": {},
+ "CL-AR": {}, "CL-AT": {}, "CL-BI": {}, "CL-CO": {}, "CL-LI": {},
+ "CL-LL": {}, "CL-LR": {}, "CL-MA": {}, "CL-ML": {}, "CL-NB": {}, "CL-RM": {},
+ "CL-TA": {}, "CL-VS": {}, "CM-AD": {}, "CM-CE": {}, "CM-EN": {},
+ "CM-ES": {}, "CM-LT": {}, "CM-NO": {}, "CM-NW": {}, "CM-OU": {},
+ "CM-SU": {}, "CM-SW": {}, "CN-AH": {}, "CN-BJ": {}, "CN-CQ": {},
+ "CN-FJ": {}, "CN-GS": {}, "CN-GD": {}, "CN-GX": {}, "CN-GZ": {},
+ "CN-HI": {}, "CN-HE": {}, "CN-HL": {}, "CN-HA": {}, "CN-HB": {},
+ "CN-HN": {}, "CN-JS": {}, "CN-JX": {}, "CN-JL": {}, "CN-LN": {},
+ "CN-NM": {}, "CN-NX": {}, "CN-QH": {}, "CN-SN": {}, "CN-SD": {}, "CN-SH": {},
+ "CN-SX": {}, "CN-SC": {}, "CN-TJ": {}, "CN-XJ": {}, "CN-XZ": {}, "CN-YN": {},
+ "CN-ZJ": {}, "CO-AMA": {}, "CO-ANT": {}, "CO-ARA": {}, "CO-ATL": {},
+ "CO-BOL": {}, "CO-BOY": {}, "CO-CAL": {}, "CO-CAQ": {}, "CO-CAS": {},
+ "CO-CAU": {}, "CO-CES": {}, "CO-CHO": {}, "CO-COR": {}, "CO-CUN": {},
+ "CO-DC": {}, "CO-GUA": {}, "CO-GUV": {}, "CO-HUI": {}, "CO-LAG": {},
+ "CO-MAG": {}, "CO-MET": {}, "CO-NAR": {}, "CO-NSA": {}, "CO-PUT": {},
+ "CO-QUI": {}, "CO-RIS": {}, "CO-SAN": {}, "CO-SAP": {}, "CO-SUC": {},
+ "CO-TOL": {}, "CO-VAC": {}, "CO-VAU": {}, "CO-VID": {}, "CR-A": {},
+ "CR-C": {}, "CR-G": {}, "CR-H": {}, "CR-L": {}, "CR-P": {},
+ "CR-SJ": {}, "CU-01": {}, "CU-02": {}, "CU-03": {}, "CU-04": {},
+ "CU-05": {}, "CU-06": {}, "CU-07": {}, "CU-08": {}, "CU-09": {},
+ "CU-10": {}, "CU-11": {}, "CU-12": {}, "CU-13": {}, "CU-14": {}, "CU-15": {},
+ "CU-16": {}, "CU-99": {}, "CV-B": {}, "CV-BR": {}, "CV-BV": {}, "CV-CA": {},
+ "CV-CF": {}, "CV-CR": {}, "CV-MA": {}, "CV-MO": {}, "CV-PA": {},
+ "CV-PN": {}, "CV-PR": {}, "CV-RB": {}, "CV-RG": {}, "CV-RS": {},
+ "CV-S": {}, "CV-SD": {}, "CV-SF": {}, "CV-SL": {}, "CV-SM": {},
+ "CV-SO": {}, "CV-SS": {}, "CV-SV": {}, "CV-TA": {}, "CV-TS": {},
+ "CY-01": {}, "CY-02": {}, "CY-03": {}, "CY-04": {}, "CY-05": {},
+ "CY-06": {}, "CZ-10": {}, "CZ-101": {}, "CZ-102": {}, "CZ-103": {},
+ "CZ-104": {}, "CZ-105": {}, "CZ-106": {}, "CZ-107": {}, "CZ-108": {},
+ "CZ-109": {}, "CZ-110": {}, "CZ-111": {}, "CZ-112": {}, "CZ-113": {},
+ "CZ-114": {}, "CZ-115": {}, "CZ-116": {}, "CZ-117": {}, "CZ-118": {},
+ "CZ-119": {}, "CZ-120": {}, "CZ-121": {}, "CZ-122": {}, "CZ-20": {},
+ "CZ-201": {}, "CZ-202": {}, "CZ-203": {}, "CZ-204": {}, "CZ-205": {},
+ "CZ-206": {}, "CZ-207": {}, "CZ-208": {}, "CZ-209": {}, "CZ-20A": {},
+ "CZ-20B": {}, "CZ-20C": {}, "CZ-31": {}, "CZ-311": {}, "CZ-312": {},
+ "CZ-313": {}, "CZ-314": {}, "CZ-315": {}, "CZ-316": {}, "CZ-317": {},
+ "CZ-32": {}, "CZ-321": {}, "CZ-322": {}, "CZ-323": {}, "CZ-324": {},
+ "CZ-325": {}, "CZ-326": {}, "CZ-327": {}, "CZ-41": {}, "CZ-411": {},
+ "CZ-412": {}, "CZ-413": {}, "CZ-42": {}, "CZ-421": {}, "CZ-422": {},
+ "CZ-423": {}, "CZ-424": {}, "CZ-425": {}, "CZ-426": {}, "CZ-427": {},
+ "CZ-51": {}, "CZ-511": {}, "CZ-512": {}, "CZ-513": {}, "CZ-514": {},
+ "CZ-52": {}, "CZ-521": {}, "CZ-522": {}, "CZ-523": {}, "CZ-524": {},
+ "CZ-525": {}, "CZ-53": {}, "CZ-531": {}, "CZ-532": {}, "CZ-533": {},
+ "CZ-534": {}, "CZ-63": {}, "CZ-631": {}, "CZ-632": {}, "CZ-633": {},
+ "CZ-634": {}, "CZ-635": {}, "CZ-64": {}, "CZ-641": {}, "CZ-642": {},
+ "CZ-643": {}, "CZ-644": {}, "CZ-645": {}, "CZ-646": {}, "CZ-647": {},
+ "CZ-71": {}, "CZ-711": {}, "CZ-712": {}, "CZ-713": {}, "CZ-714": {},
+ "CZ-715": {}, "CZ-72": {}, "CZ-721": {}, "CZ-722": {}, "CZ-723": {},
+ "CZ-724": {}, "CZ-80": {}, "CZ-801": {}, "CZ-802": {}, "CZ-803": {},
+ "CZ-804": {}, "CZ-805": {}, "CZ-806": {}, "DE-BB": {}, "DE-BE": {},
+ "DE-BW": {}, "DE-BY": {}, "DE-HB": {}, "DE-HE": {}, "DE-HH": {},
+ "DE-MV": {}, "DE-NI": {}, "DE-NW": {}, "DE-RP": {}, "DE-SH": {},
+ "DE-SL": {}, "DE-SN": {}, "DE-ST": {}, "DE-TH": {}, "DJ-AR": {},
+ "DJ-AS": {}, "DJ-DI": {}, "DJ-DJ": {}, "DJ-OB": {}, "DJ-TA": {},
+ "DK-81": {}, "DK-82": {}, "DK-83": {}, "DK-84": {}, "DK-85": {},
+ "DM-01": {}, "DM-02": {}, "DM-03": {}, "DM-04": {}, "DM-05": {},
+ "DM-06": {}, "DM-07": {}, "DM-08": {}, "DM-09": {}, "DM-10": {},
+ "DO-01": {}, "DO-02": {}, "DO-03": {}, "DO-04": {}, "DO-05": {},
+ "DO-06": {}, "DO-07": {}, "DO-08": {}, "DO-09": {}, "DO-10": {},
+ "DO-11": {}, "DO-12": {}, "DO-13": {}, "DO-14": {}, "DO-15": {},
+ "DO-16": {}, "DO-17": {}, "DO-18": {}, "DO-19": {}, "DO-20": {},
+ "DO-21": {}, "DO-22": {}, "DO-23": {}, "DO-24": {}, "DO-25": {},
+ "DO-26": {}, "DO-27": {}, "DO-28": {}, "DO-29": {}, "DO-30": {}, "DO-31": {},
+ "DZ-01": {}, "DZ-02": {}, "DZ-03": {}, "DZ-04": {}, "DZ-05": {},
+ "DZ-06": {}, "DZ-07": {}, "DZ-08": {}, "DZ-09": {}, "DZ-10": {},
+ "DZ-11": {}, "DZ-12": {}, "DZ-13": {}, "DZ-14": {}, "DZ-15": {},
+ "DZ-16": {}, "DZ-17": {}, "DZ-18": {}, "DZ-19": {}, "DZ-20": {},
+ "DZ-21": {}, "DZ-22": {}, "DZ-23": {}, "DZ-24": {}, "DZ-25": {},
+ "DZ-26": {}, "DZ-27": {}, "DZ-28": {}, "DZ-29": {}, "DZ-30": {},
+ "DZ-31": {}, "DZ-32": {}, "DZ-33": {}, "DZ-34": {}, "DZ-35": {},
+ "DZ-36": {}, "DZ-37": {}, "DZ-38": {}, "DZ-39": {}, "DZ-40": {},
+ "DZ-41": {}, "DZ-42": {}, "DZ-43": {}, "DZ-44": {}, "DZ-45": {},
+ "DZ-46": {}, "DZ-47": {}, "DZ-48": {}, "DZ-49": {}, "DZ-51": {},
+ "DZ-53": {}, "DZ-55": {}, "DZ-56": {}, "DZ-57": {}, "EC-A": {}, "EC-B": {},
+ "EC-C": {}, "EC-D": {}, "EC-E": {}, "EC-F": {}, "EC-G": {},
+ "EC-H": {}, "EC-I": {}, "EC-L": {}, "EC-M": {}, "EC-N": {},
+ "EC-O": {}, "EC-P": {}, "EC-R": {}, "EC-S": {}, "EC-SD": {},
+ "EC-SE": {}, "EC-T": {}, "EC-U": {}, "EC-W": {}, "EC-X": {},
+ "EC-Y": {}, "EC-Z": {}, "EE-37": {}, "EE-39": {}, "EE-44": {}, "EE-45": {},
+ "EE-49": {}, "EE-50": {}, "EE-51": {}, "EE-52": {}, "EE-56": {}, "EE-57": {},
+ "EE-59": {}, "EE-60": {}, "EE-64": {}, "EE-65": {}, "EE-67": {}, "EE-68": {},
+ "EE-70": {}, "EE-71": {}, "EE-74": {}, "EE-78": {}, "EE-79": {}, "EE-81": {}, "EE-82": {},
+ "EE-84": {}, "EE-86": {}, "EE-87": {}, "EG-ALX": {}, "EG-ASN": {}, "EG-AST": {},
+ "EG-BA": {}, "EG-BH": {}, "EG-BNS": {}, "EG-C": {}, "EG-DK": {},
+ "EG-DT": {}, "EG-FYM": {}, "EG-GH": {}, "EG-GZ": {}, "EG-HU": {},
+ "EG-IS": {}, "EG-JS": {}, "EG-KB": {}, "EG-KFS": {}, "EG-KN": {},
+ "EG-LX": {}, "EG-MN": {}, "EG-MNF": {}, "EG-MT": {}, "EG-PTS": {}, "EG-SHG": {},
+ "EG-SHR": {}, "EG-SIN": {}, "EG-SU": {}, "EG-SUZ": {}, "EG-WAD": {},
+ "ER-AN": {}, "ER-DK": {}, "ER-DU": {}, "ER-GB": {}, "ER-MA": {},
+ "ER-SK": {}, "ES-A": {}, "ES-AB": {}, "ES-AL": {}, "ES-AN": {},
+ "ES-AR": {}, "ES-AS": {}, "ES-AV": {}, "ES-B": {}, "ES-BA": {},
+ "ES-BI": {}, "ES-BU": {}, "ES-C": {}, "ES-CA": {}, "ES-CB": {},
+ "ES-CC": {}, "ES-CE": {}, "ES-CL": {}, "ES-CM": {}, "ES-CN": {},
+ "ES-CO": {}, "ES-CR": {}, "ES-CS": {}, "ES-CT": {}, "ES-CU": {},
+ "ES-EX": {}, "ES-GA": {}, "ES-GC": {}, "ES-GI": {}, "ES-GR": {},
+ "ES-GU": {}, "ES-H": {}, "ES-HU": {}, "ES-IB": {}, "ES-J": {},
+ "ES-L": {}, "ES-LE": {}, "ES-LO": {}, "ES-LU": {}, "ES-M": {},
+ "ES-MA": {}, "ES-MC": {}, "ES-MD": {}, "ES-ML": {}, "ES-MU": {},
+ "ES-NA": {}, "ES-NC": {}, "ES-O": {}, "ES-OR": {}, "ES-P": {},
+ "ES-PM": {}, "ES-PO": {}, "ES-PV": {}, "ES-RI": {}, "ES-S": {},
+ "ES-SA": {}, "ES-SE": {}, "ES-SG": {}, "ES-SO": {}, "ES-SS": {},
+ "ES-T": {}, "ES-TE": {}, "ES-TF": {}, "ES-TO": {}, "ES-V": {},
+ "ES-VA": {}, "ES-VC": {}, "ES-VI": {}, "ES-Z": {}, "ES-ZA": {},
+ "ET-AA": {}, "ET-AF": {}, "ET-AM": {}, "ET-BE": {}, "ET-DD": {},
+ "ET-GA": {}, "ET-HA": {}, "ET-OR": {}, "ET-SN": {}, "ET-SO": {},
+ "ET-TI": {}, "FI-01": {}, "FI-02": {}, "FI-03": {}, "FI-04": {},
+ "FI-05": {}, "FI-06": {}, "FI-07": {}, "FI-08": {}, "FI-09": {},
+ "FI-10": {}, "FI-11": {}, "FI-12": {}, "FI-13": {}, "FI-14": {},
+ "FI-15": {}, "FI-16": {}, "FI-17": {}, "FI-18": {}, "FI-19": {},
+ "FJ-C": {}, "FJ-E": {}, "FJ-N": {}, "FJ-R": {}, "FJ-W": {},
+ "FM-KSA": {}, "FM-PNI": {}, "FM-TRK": {}, "FM-YAP": {}, "FR-01": {},
+ "FR-02": {}, "FR-03": {}, "FR-04": {}, "FR-05": {}, "FR-06": {},
+ "FR-07": {}, "FR-08": {}, "FR-09": {}, "FR-10": {}, "FR-11": {},
+ "FR-12": {}, "FR-13": {}, "FR-14": {}, "FR-15": {}, "FR-16": {},
+ "FR-17": {}, "FR-18": {}, "FR-19": {}, "FR-20R": {}, "FR-21": {}, "FR-22": {},
+ "FR-23": {}, "FR-24": {}, "FR-25": {}, "FR-26": {}, "FR-27": {},
+ "FR-28": {}, "FR-29": {}, "FR-2A": {}, "FR-2B": {}, "FR-30": {},
+ "FR-31": {}, "FR-32": {}, "FR-33": {}, "FR-34": {}, "FR-35": {},
+ "FR-36": {}, "FR-37": {}, "FR-38": {}, "FR-39": {}, "FR-40": {},
+ "FR-41": {}, "FR-42": {}, "FR-43": {}, "FR-44": {}, "FR-45": {},
+ "FR-46": {}, "FR-47": {}, "FR-48": {}, "FR-49": {}, "FR-50": {},
+ "FR-51": {}, "FR-52": {}, "FR-53": {}, "FR-54": {}, "FR-55": {},
+ "FR-56": {}, "FR-57": {}, "FR-58": {}, "FR-59": {}, "FR-60": {},
+ "FR-61": {}, "FR-62": {}, "FR-63": {}, "FR-64": {}, "FR-65": {},
+ "FR-66": {}, "FR-67": {}, "FR-68": {}, "FR-69": {}, "FR-70": {},
+ "FR-71": {}, "FR-72": {}, "FR-73": {}, "FR-74": {}, "FR-75": {},
+ "FR-76": {}, "FR-77": {}, "FR-78": {}, "FR-79": {}, "FR-80": {},
+ "FR-81": {}, "FR-82": {}, "FR-83": {}, "FR-84": {}, "FR-85": {},
+ "FR-86": {}, "FR-87": {}, "FR-88": {}, "FR-89": {}, "FR-90": {},
+ "FR-91": {}, "FR-92": {}, "FR-93": {}, "FR-94": {}, "FR-95": {},
+ "FR-ARA": {}, "FR-BFC": {}, "FR-BL": {}, "FR-BRE": {}, "FR-COR": {},
+ "FR-CP": {}, "FR-CVL": {}, "FR-GES": {}, "FR-GF": {}, "FR-GP": {},
+ "FR-GUA": {}, "FR-HDF": {}, "FR-IDF": {}, "FR-LRE": {}, "FR-MAY": {},
+ "FR-MF": {}, "FR-MQ": {}, "FR-NAQ": {}, "FR-NC": {}, "FR-NOR": {},
+ "FR-OCC": {}, "FR-PAC": {}, "FR-PDL": {}, "FR-PF": {}, "FR-PM": {},
+ "FR-RE": {}, "FR-TF": {}, "FR-WF": {}, "FR-YT": {}, "GA-1": {},
+ "GA-2": {}, "GA-3": {}, "GA-4": {}, "GA-5": {}, "GA-6": {},
+ "GA-7": {}, "GA-8": {}, "GA-9": {}, "GB-ABC": {}, "GB-ABD": {},
+ "GB-ABE": {}, "GB-AGB": {}, "GB-AGY": {}, "GB-AND": {}, "GB-ANN": {},
+ "GB-ANS": {}, "GB-BAS": {}, "GB-BBD": {}, "GB-BDF": {}, "GB-BDG": {},
+ "GB-BEN": {}, "GB-BEX": {}, "GB-BFS": {}, "GB-BGE": {}, "GB-BGW": {},
+ "GB-BIR": {}, "GB-BKM": {}, "GB-BMH": {}, "GB-BNE": {}, "GB-BNH": {},
+ "GB-BNS": {}, "GB-BOL": {}, "GB-BPL": {}, "GB-BRC": {}, "GB-BRD": {},
+ "GB-BRY": {}, "GB-BST": {}, "GB-BUR": {}, "GB-CAM": {}, "GB-CAY": {},
+ "GB-CBF": {}, "GB-CCG": {}, "GB-CGN": {}, "GB-CHE": {}, "GB-CHW": {},
+ "GB-CLD": {}, "GB-CLK": {}, "GB-CMA": {}, "GB-CMD": {}, "GB-CMN": {},
+ "GB-CON": {}, "GB-COV": {}, "GB-CRF": {}, "GB-CRY": {}, "GB-CWY": {},
+ "GB-DAL": {}, "GB-DBY": {}, "GB-DEN": {}, "GB-DER": {}, "GB-DEV": {},
+ "GB-DGY": {}, "GB-DNC": {}, "GB-DND": {}, "GB-DOR": {}, "GB-DRS": {},
+ "GB-DUD": {}, "GB-DUR": {}, "GB-EAL": {}, "GB-EAW": {}, "GB-EAY": {},
+ "GB-EDH": {}, "GB-EDU": {}, "GB-ELN": {}, "GB-ELS": {}, "GB-ENF": {},
+ "GB-ENG": {}, "GB-ERW": {}, "GB-ERY": {}, "GB-ESS": {}, "GB-ESX": {},
+ "GB-FAL": {}, "GB-FIF": {}, "GB-FLN": {}, "GB-FMO": {}, "GB-GAT": {},
+ "GB-GBN": {}, "GB-GLG": {}, "GB-GLS": {}, "GB-GRE": {}, "GB-GWN": {},
+ "GB-HAL": {}, "GB-HAM": {}, "GB-HAV": {}, "GB-HCK": {}, "GB-HEF": {},
+ "GB-HIL": {}, "GB-HLD": {}, "GB-HMF": {}, "GB-HNS": {}, "GB-HPL": {},
+ "GB-HRT": {}, "GB-HRW": {}, "GB-HRY": {}, "GB-IOS": {}, "GB-IOW": {},
+ "GB-ISL": {}, "GB-IVC": {}, "GB-KEC": {}, "GB-KEN": {}, "GB-KHL": {},
+ "GB-KIR": {}, "GB-KTT": {}, "GB-KWL": {}, "GB-LAN": {}, "GB-LBC": {},
+ "GB-LBH": {}, "GB-LCE": {}, "GB-LDS": {}, "GB-LEC": {}, "GB-LEW": {},
+ "GB-LIN": {}, "GB-LIV": {}, "GB-LND": {}, "GB-LUT": {}, "GB-MAN": {},
+ "GB-MDB": {}, "GB-MDW": {}, "GB-MEA": {}, "GB-MIK": {}, "GD-01": {},
+ "GB-MLN": {}, "GB-MON": {}, "GB-MRT": {}, "GB-MRY": {}, "GB-MTY": {},
+ "GB-MUL": {}, "GB-NAY": {}, "GB-NBL": {}, "GB-NEL": {}, "GB-NET": {},
+ "GB-NFK": {}, "GB-NGM": {}, "GB-NIR": {}, "GB-NLK": {}, "GB-NLN": {},
+ "GB-NMD": {}, "GB-NSM": {}, "GB-NTH": {}, "GB-NTL": {}, "GB-NTT": {},
+ "GB-NTY": {}, "GB-NWM": {}, "GB-NWP": {}, "GB-NYK": {}, "GB-OLD": {},
+ "GB-ORK": {}, "GB-OXF": {}, "GB-PEM": {}, "GB-PKN": {}, "GB-PLY": {},
+ "GB-POL": {}, "GB-POR": {}, "GB-POW": {}, "GB-PTE": {}, "GB-RCC": {},
+ "GB-RCH": {}, "GB-RCT": {}, "GB-RDB": {}, "GB-RDG": {}, "GB-RFW": {},
+ "GB-RIC": {}, "GB-ROT": {}, "GB-RUT": {}, "GB-SAW": {}, "GB-SAY": {},
+ "GB-SCB": {}, "GB-SCT": {}, "GB-SFK": {}, "GB-SFT": {}, "GB-SGC": {},
+ "GB-SHF": {}, "GB-SHN": {}, "GB-SHR": {}, "GB-SKP": {}, "GB-SLF": {},
+ "GB-SLG": {}, "GB-SLK": {}, "GB-SND": {}, "GB-SOL": {}, "GB-SOM": {},
+ "GB-SOS": {}, "GB-SRY": {}, "GB-STE": {}, "GB-STG": {}, "GB-STH": {},
+ "GB-STN": {}, "GB-STS": {}, "GB-STT": {}, "GB-STY": {}, "GB-SWA": {},
+ "GB-SWD": {}, "GB-SWK": {}, "GB-TAM": {}, "GB-TFW": {}, "GB-THR": {},
+ "GB-TOB": {}, "GB-TOF": {}, "GB-TRF": {}, "GB-TWH": {}, "GB-UKM": {},
+ "GB-VGL": {}, "GB-WAR": {}, "GB-WBK": {}, "GB-WDU": {}, "GB-WFT": {},
+ "GB-WGN": {}, "GB-WIL": {}, "GB-WKF": {}, "GB-WLL": {}, "GB-WLN": {},
+ "GB-WLS": {}, "GB-WLV": {}, "GB-WND": {}, "GB-WNM": {}, "GB-WOK": {},
+ "GB-WOR": {}, "GB-WRL": {}, "GB-WRT": {}, "GB-WRX": {}, "GB-WSM": {},
+ "GB-WSX": {}, "GB-YOR": {}, "GB-ZET": {}, "GD-02": {}, "GD-03": {},
+ "GD-04": {}, "GD-05": {}, "GD-06": {}, "GD-10": {}, "GE-AB": {},
+ "GE-AJ": {}, "GE-GU": {}, "GE-IM": {}, "GE-KA": {}, "GE-KK": {},
+ "GE-MM": {}, "GE-RL": {}, "GE-SJ": {}, "GE-SK": {}, "GE-SZ": {},
+ "GE-TB": {}, "GH-AA": {}, "GH-AH": {}, "GH-AF": {}, "GH-BA": {}, "GH-BO": {}, "GH-BE": {}, "GH-CP": {},
+ "GH-EP": {}, "GH-NP": {}, "GH-TV": {}, "GH-UE": {}, "GH-UW": {},
+ "GH-WP": {}, "GL-AV": {}, "GL-KU": {}, "GL-QA": {}, "GL-QT": {}, "GL-QE": {}, "GL-SM": {},
+ "GM-B": {}, "GM-L": {}, "GM-M": {}, "GM-N": {}, "GM-U": {},
+ "GM-W": {}, "GN-B": {}, "GN-BE": {}, "GN-BF": {}, "GN-BK": {},
+ "GN-C": {}, "GN-CO": {}, "GN-D": {}, "GN-DB": {}, "GN-DI": {},
+ "GN-DL": {}, "GN-DU": {}, "GN-F": {}, "GN-FA": {}, "GN-FO": {},
+ "GN-FR": {}, "GN-GA": {}, "GN-GU": {}, "GN-K": {}, "GN-KA": {},
+ "GN-KB": {}, "GN-KD": {}, "GN-KE": {}, "GN-KN": {}, "GN-KO": {},
+ "GN-KS": {}, "GN-L": {}, "GN-LA": {}, "GN-LE": {}, "GN-LO": {},
+ "GN-M": {}, "GN-MC": {}, "GN-MD": {}, "GN-ML": {}, "GN-MM": {},
+ "GN-N": {}, "GN-NZ": {}, "GN-PI": {}, "GN-SI": {}, "GN-TE": {},
+ "GN-TO": {}, "GN-YO": {}, "GQ-AN": {}, "GQ-BN": {}, "GQ-BS": {},
+ "GQ-C": {}, "GQ-CS": {}, "GQ-I": {}, "GQ-KN": {}, "GQ-LI": {},
+ "GQ-WN": {}, "GR-01": {}, "GR-03": {}, "GR-04": {}, "GR-05": {},
+ "GR-06": {}, "GR-07": {}, "GR-11": {}, "GR-12": {}, "GR-13": {},
+ "GR-14": {}, "GR-15": {}, "GR-16": {}, "GR-17": {}, "GR-21": {},
+ "GR-22": {}, "GR-23": {}, "GR-24": {}, "GR-31": {}, "GR-32": {},
+ "GR-33": {}, "GR-34": {}, "GR-41": {}, "GR-42": {}, "GR-43": {},
+ "GR-44": {}, "GR-51": {}, "GR-52": {}, "GR-53": {}, "GR-54": {},
+ "GR-55": {}, "GR-56": {}, "GR-57": {}, "GR-58": {}, "GR-59": {},
+ "GR-61": {}, "GR-62": {}, "GR-63": {}, "GR-64": {}, "GR-69": {},
+ "GR-71": {}, "GR-72": {}, "GR-73": {}, "GR-81": {}, "GR-82": {},
+ "GR-83": {}, "GR-84": {}, "GR-85": {}, "GR-91": {}, "GR-92": {},
+ "GR-93": {}, "GR-94": {}, "GR-A": {}, "GR-A1": {}, "GR-B": {},
+ "GR-C": {}, "GR-D": {}, "GR-E": {}, "GR-F": {}, "GR-G": {},
+ "GR-H": {}, "GR-I": {}, "GR-J": {}, "GR-K": {}, "GR-L": {},
+ "GR-M": {}, "GT-01": {}, "GT-02": {}, "GT-03": {}, "GT-04": {},
+ "GT-05": {}, "GT-06": {}, "GT-07": {}, "GT-08": {}, "GT-09": {},
+ "GT-10": {}, "GT-11": {}, "GT-12": {}, "GT-13": {}, "GT-14": {},
+ "GT-15": {}, "GT-16": {}, "GT-17": {}, "GT-18": {}, "GT-19": {},
+ "GT-20": {}, "GT-21": {}, "GT-22": {}, "GW-BA": {}, "GW-BL": {},
+ "GW-BM": {}, "GW-BS": {}, "GW-CA": {}, "GW-GA": {}, "GW-L": {},
+ "GW-N": {}, "GW-OI": {}, "GW-QU": {}, "GW-S": {}, "GW-TO": {},
+ "GY-BA": {}, "GY-CU": {}, "GY-DE": {}, "GY-EB": {}, "GY-ES": {},
+ "GY-MA": {}, "GY-PM": {}, "GY-PT": {}, "GY-UD": {}, "GY-UT": {},
+ "HN-AT": {}, "HN-CH": {}, "HN-CL": {}, "HN-CM": {}, "HN-CP": {},
+ "HN-CR": {}, "HN-EP": {}, "HN-FM": {}, "HN-GD": {}, "HN-IB": {},
+ "HN-IN": {}, "HN-LE": {}, "HN-LP": {}, "HN-OC": {}, "HN-OL": {},
+ "HN-SB": {}, "HN-VA": {}, "HN-YO": {}, "HR-01": {}, "HR-02": {},
+ "HR-03": {}, "HR-04": {}, "HR-05": {}, "HR-06": {}, "HR-07": {},
+ "HR-08": {}, "HR-09": {}, "HR-10": {}, "HR-11": {}, "HR-12": {},
+ "HR-13": {}, "HR-14": {}, "HR-15": {}, "HR-16": {}, "HR-17": {},
+ "HR-18": {}, "HR-19": {}, "HR-20": {}, "HR-21": {}, "HT-AR": {},
+ "HT-CE": {}, "HT-GA": {}, "HT-ND": {}, "HT-NE": {}, "HT-NO": {}, "HT-NI": {},
+ "HT-OU": {}, "HT-SD": {}, "HT-SE": {}, "HU-BA": {}, "HU-BC": {},
+ "HU-BE": {}, "HU-BK": {}, "HU-BU": {}, "HU-BZ": {}, "HU-CS": {},
+ "HU-DE": {}, "HU-DU": {}, "HU-EG": {}, "HU-ER": {}, "HU-FE": {},
+ "HU-GS": {}, "HU-GY": {}, "HU-HB": {}, "HU-HE": {}, "HU-HV": {},
+ "HU-JN": {}, "HU-KE": {}, "HU-KM": {}, "HU-KV": {}, "HU-MI": {},
+ "HU-NK": {}, "HU-NO": {}, "HU-NY": {}, "HU-PE": {}, "HU-PS": {},
+ "HU-SD": {}, "HU-SF": {}, "HU-SH": {}, "HU-SK": {}, "HU-SN": {},
+ "HU-SO": {}, "HU-SS": {}, "HU-ST": {}, "HU-SZ": {}, "HU-TB": {},
+ "HU-TO": {}, "HU-VA": {}, "HU-VE": {}, "HU-VM": {}, "HU-ZA": {},
+ "HU-ZE": {}, "ID-AC": {}, "ID-BA": {}, "ID-BB": {}, "ID-BE": {},
+ "ID-BT": {}, "ID-GO": {}, "ID-IJ": {}, "ID-JA": {}, "ID-JB": {},
+ "ID-JI": {}, "ID-JK": {}, "ID-JT": {}, "ID-JW": {}, "ID-KA": {},
+ "ID-KB": {}, "ID-KI": {}, "ID-KU": {}, "ID-KR": {}, "ID-KS": {},
+ "ID-KT": {}, "ID-LA": {}, "ID-MA": {}, "ID-ML": {}, "ID-MU": {},
+ "ID-NB": {}, "ID-NT": {}, "ID-NU": {}, "ID-PA": {}, "ID-PB": {},
+ "ID-PE": {}, "ID-PP": {}, "ID-PS": {}, "ID-PT": {}, "ID-RI": {},
+ "ID-SA": {}, "ID-SB": {}, "ID-SG": {}, "ID-SL": {}, "ID-SM": {},
+ "ID-SN": {}, "ID-SR": {}, "ID-SS": {}, "ID-ST": {}, "ID-SU": {},
+ "ID-YO": {}, "IE-C": {}, "IE-CE": {}, "IE-CN": {}, "IE-CO": {},
+ "IE-CW": {}, "IE-D": {}, "IE-DL": {}, "IE-G": {}, "IE-KE": {},
+ "IE-KK": {}, "IE-KY": {}, "IE-L": {}, "IE-LD": {}, "IE-LH": {},
+ "IE-LK": {}, "IE-LM": {}, "IE-LS": {}, "IE-M": {}, "IE-MH": {},
+ "IE-MN": {}, "IE-MO": {}, "IE-OY": {}, "IE-RN": {}, "IE-SO": {},
+ "IE-TA": {}, "IE-U": {}, "IE-WD": {}, "IE-WH": {}, "IE-WW": {},
+ "IE-WX": {}, "IL-D": {}, "IL-HA": {}, "IL-JM": {}, "IL-M": {},
+ "IL-TA": {}, "IL-Z": {}, "IN-AN": {}, "IN-AP": {}, "IN-AR": {},
+ "IN-AS": {}, "IN-BR": {}, "IN-CH": {}, "IN-CT": {}, "IN-DH": {},
+ "IN-DL": {}, "IN-DN": {}, "IN-GA": {}, "IN-GJ": {}, "IN-HP": {},
+ "IN-HR": {}, "IN-JH": {}, "IN-JK": {}, "IN-KA": {}, "IN-KL": {},
+ "IN-LD": {}, "IN-MH": {}, "IN-ML": {}, "IN-MN": {}, "IN-MP": {},
+ "IN-MZ": {}, "IN-NL": {}, "IN-TG": {}, "IN-OR": {}, "IN-PB": {}, "IN-PY": {},
+ "IN-RJ": {}, "IN-SK": {}, "IN-TN": {}, "IN-TR": {}, "IN-UP": {},
+ "IN-UT": {}, "IN-WB": {}, "IQ-AN": {}, "IQ-AR": {}, "IQ-BA": {},
+ "IQ-BB": {}, "IQ-BG": {}, "IQ-DA": {}, "IQ-DI": {}, "IQ-DQ": {},
+ "IQ-KA": {}, "IQ-KI": {}, "IQ-MA": {}, "IQ-MU": {}, "IQ-NA": {}, "IQ-NI": {},
+ "IQ-QA": {}, "IQ-SD": {}, "IQ-SW": {}, "IQ-SU": {}, "IQ-TS": {}, "IQ-WA": {},
+ "IR-00": {}, "IR-01": {}, "IR-02": {}, "IR-03": {}, "IR-04": {}, "IR-05": {},
+ "IR-06": {}, "IR-07": {}, "IR-08": {}, "IR-09": {}, "IR-10": {}, "IR-11": {},
+ "IR-12": {}, "IR-13": {}, "IR-14": {}, "IR-15": {}, "IR-16": {},
+ "IR-17": {}, "IR-18": {}, "IR-19": {}, "IR-20": {}, "IR-21": {},
+ "IR-22": {}, "IR-23": {}, "IR-24": {}, "IR-25": {}, "IR-26": {},
+ "IR-27": {}, "IR-28": {}, "IR-29": {}, "IR-30": {}, "IR-31": {},
+ "IS-0": {}, "IS-1": {}, "IS-2": {}, "IS-3": {}, "IS-4": {},
+ "IS-5": {}, "IS-6": {}, "IS-7": {}, "IS-8": {}, "IT-21": {},
+ "IT-23": {}, "IT-25": {}, "IT-32": {}, "IT-34": {}, "IT-36": {},
+ "IT-42": {}, "IT-45": {}, "IT-52": {}, "IT-55": {}, "IT-57": {},
+ "IT-62": {}, "IT-65": {}, "IT-67": {}, "IT-72": {}, "IT-75": {},
+ "IT-77": {}, "IT-78": {}, "IT-82": {}, "IT-88": {}, "IT-AG": {},
+ "IT-AL": {}, "IT-AN": {}, "IT-AO": {}, "IT-AP": {}, "IT-AQ": {},
+ "IT-AR": {}, "IT-AT": {}, "IT-AV": {}, "IT-BA": {}, "IT-BG": {},
+ "IT-BI": {}, "IT-BL": {}, "IT-BN": {}, "IT-BO": {}, "IT-BR": {},
+ "IT-BS": {}, "IT-BT": {}, "IT-BZ": {}, "IT-CA": {}, "IT-CB": {},
+ "IT-CE": {}, "IT-CH": {}, "IT-CI": {}, "IT-CL": {}, "IT-CN": {},
+ "IT-CO": {}, "IT-CR": {}, "IT-CS": {}, "IT-CT": {}, "IT-CZ": {},
+ "IT-EN": {}, "IT-FC": {}, "IT-FE": {}, "IT-FG": {}, "IT-FI": {},
+ "IT-FM": {}, "IT-FR": {}, "IT-GE": {}, "IT-GO": {}, "IT-GR": {},
+ "IT-IM": {}, "IT-IS": {}, "IT-KR": {}, "IT-LC": {}, "IT-LE": {},
+ "IT-LI": {}, "IT-LO": {}, "IT-LT": {}, "IT-LU": {}, "IT-MB": {},
+ "IT-MC": {}, "IT-ME": {}, "IT-MI": {}, "IT-MN": {}, "IT-MO": {},
+ "IT-MS": {}, "IT-MT": {}, "IT-NA": {}, "IT-NO": {}, "IT-NU": {},
+ "IT-OG": {}, "IT-OR": {}, "IT-OT": {}, "IT-PA": {}, "IT-PC": {},
+ "IT-PD": {}, "IT-PE": {}, "IT-PG": {}, "IT-PI": {}, "IT-PN": {},
+ "IT-PO": {}, "IT-PR": {}, "IT-PT": {}, "IT-PU": {}, "IT-PV": {},
+ "IT-PZ": {}, "IT-RA": {}, "IT-RC": {}, "IT-RE": {}, "IT-RG": {},
+ "IT-RI": {}, "IT-RM": {}, "IT-RN": {}, "IT-RO": {}, "IT-SA": {},
+ "IT-SI": {}, "IT-SO": {}, "IT-SP": {}, "IT-SR": {}, "IT-SS": {},
+ "IT-SV": {}, "IT-TA": {}, "IT-TE": {}, "IT-TN": {}, "IT-TO": {},
+ "IT-TP": {}, "IT-TR": {}, "IT-TS": {}, "IT-TV": {}, "IT-UD": {},
+ "IT-VA": {}, "IT-VB": {}, "IT-VC": {}, "IT-VE": {}, "IT-VI": {},
+ "IT-VR": {}, "IT-VS": {}, "IT-VT": {}, "IT-VV": {}, "JM-01": {},
+ "JM-02": {}, "JM-03": {}, "JM-04": {}, "JM-05": {}, "JM-06": {},
+ "JM-07": {}, "JM-08": {}, "JM-09": {}, "JM-10": {}, "JM-11": {},
+ "JM-12": {}, "JM-13": {}, "JM-14": {}, "JO-AJ": {}, "JO-AM": {},
+ "JO-AQ": {}, "JO-AT": {}, "JO-AZ": {}, "JO-BA": {}, "JO-IR": {},
+ "JO-JA": {}, "JO-KA": {}, "JO-MA": {}, "JO-MD": {}, "JO-MN": {},
+ "JP-01": {}, "JP-02": {}, "JP-03": {}, "JP-04": {}, "JP-05": {},
+ "JP-06": {}, "JP-07": {}, "JP-08": {}, "JP-09": {}, "JP-10": {},
+ "JP-11": {}, "JP-12": {}, "JP-13": {}, "JP-14": {}, "JP-15": {},
+ "JP-16": {}, "JP-17": {}, "JP-18": {}, "JP-19": {}, "JP-20": {},
+ "JP-21": {}, "JP-22": {}, "JP-23": {}, "JP-24": {}, "JP-25": {},
+ "JP-26": {}, "JP-27": {}, "JP-28": {}, "JP-29": {}, "JP-30": {},
+ "JP-31": {}, "JP-32": {}, "JP-33": {}, "JP-34": {}, "JP-35": {},
+ "JP-36": {}, "JP-37": {}, "JP-38": {}, "JP-39": {}, "JP-40": {},
+ "JP-41": {}, "JP-42": {}, "JP-43": {}, "JP-44": {}, "JP-45": {},
+ "JP-46": {}, "JP-47": {}, "KE-01": {}, "KE-02": {}, "KE-03": {},
+ "KE-04": {}, "KE-05": {}, "KE-06": {}, "KE-07": {}, "KE-08": {},
+ "KE-09": {}, "KE-10": {}, "KE-11": {}, "KE-12": {}, "KE-13": {},
+ "KE-14": {}, "KE-15": {}, "KE-16": {}, "KE-17": {}, "KE-18": {},
+ "KE-19": {}, "KE-20": {}, "KE-21": {}, "KE-22": {}, "KE-23": {},
+ "KE-24": {}, "KE-25": {}, "KE-26": {}, "KE-27": {}, "KE-28": {},
+ "KE-29": {}, "KE-30": {}, "KE-31": {}, "KE-32": {}, "KE-33": {},
+ "KE-34": {}, "KE-35": {}, "KE-36": {}, "KE-37": {}, "KE-38": {},
+ "KE-39": {}, "KE-40": {}, "KE-41": {}, "KE-42": {}, "KE-43": {},
+ "KE-44": {}, "KE-45": {}, "KE-46": {}, "KE-47": {}, "KG-B": {},
+ "KG-C": {}, "KG-GB": {}, "KG-GO": {}, "KG-J": {}, "KG-N": {}, "KG-O": {},
+ "KG-T": {}, "KG-Y": {}, "KH-1": {}, "KH-10": {}, "KH-11": {},
+ "KH-12": {}, "KH-13": {}, "KH-14": {}, "KH-15": {}, "KH-16": {},
+ "KH-17": {}, "KH-18": {}, "KH-19": {}, "KH-2": {}, "KH-20": {},
+ "KH-21": {}, "KH-22": {}, "KH-23": {}, "KH-24": {}, "KH-3": {},
+ "KH-4": {}, "KH-5": {}, "KH-6": {}, "KH-7": {}, "KH-8": {},
+ "KH-9": {}, "KI-G": {}, "KI-L": {}, "KI-P": {}, "KM-A": {},
+ "KM-G": {}, "KM-M": {}, "KN-01": {}, "KN-02": {}, "KN-03": {},
+ "KN-04": {}, "KN-05": {}, "KN-06": {}, "KN-07": {}, "KN-08": {},
+ "KN-09": {}, "KN-10": {}, "KN-11": {}, "KN-12": {}, "KN-13": {},
+ "KN-15": {}, "KN-K": {}, "KN-N": {}, "KP-01": {}, "KP-02": {},
+ "KP-03": {}, "KP-04": {}, "KP-05": {}, "KP-06": {}, "KP-07": {},
+ "KP-08": {}, "KP-09": {}, "KP-10": {}, "KP-13": {}, "KR-11": {},
+ "KR-26": {}, "KR-27": {}, "KR-28": {}, "KR-29": {}, "KR-30": {},
+ "KR-31": {}, "KR-41": {}, "KR-42": {}, "KR-43": {}, "KR-44": {},
+ "KR-45": {}, "KR-46": {}, "KR-47": {}, "KR-48": {}, "KR-49": {},
+ "KW-AH": {}, "KW-FA": {}, "KW-HA": {}, "KW-JA": {}, "KW-KU": {},
+ "KW-MU": {}, "KZ-10": {}, "KZ-75": {}, "KZ-19": {}, "KZ-11": {},
+ "KZ-15": {}, "KZ-71": {}, "KZ-23": {}, "KZ-27": {}, "KZ-47": {},
+ "KZ-55": {}, "KZ-35": {}, "KZ-39": {}, "KZ-43": {}, "KZ-63": {},
+ "KZ-79": {}, "KZ-59": {}, "KZ-61": {}, "KZ-62": {}, "KZ-31": {},
+ "KZ-33": {}, "LA-AT": {}, "LA-BK": {}, "LA-BL": {},
+ "LA-CH": {}, "LA-HO": {}, "LA-KH": {}, "LA-LM": {}, "LA-LP": {},
+ "LA-OU": {}, "LA-PH": {}, "LA-SL": {}, "LA-SV": {}, "LA-VI": {},
+ "LA-VT": {}, "LA-XA": {}, "LA-XE": {}, "LA-XI": {}, "LA-XS": {},
+ "LB-AK": {}, "LB-AS": {}, "LB-BA": {}, "LB-BH": {}, "LB-BI": {},
+ "LB-JA": {}, "LB-JL": {}, "LB-NA": {}, "LC-01": {}, "LC-02": {},
+ "LC-03": {}, "LC-05": {}, "LC-06": {}, "LC-07": {}, "LC-08": {},
+ "LC-10": {}, "LC-11": {}, "LI-01": {}, "LI-02": {},
+ "LI-03": {}, "LI-04": {}, "LI-05": {}, "LI-06": {}, "LI-07": {},
+ "LI-08": {}, "LI-09": {}, "LI-10": {}, "LI-11": {}, "LK-1": {},
+ "LK-11": {}, "LK-12": {}, "LK-13": {}, "LK-2": {}, "LK-21": {},
+ "LK-22": {}, "LK-23": {}, "LK-3": {}, "LK-31": {}, "LK-32": {},
+ "LK-33": {}, "LK-4": {}, "LK-41": {}, "LK-42": {}, "LK-43": {},
+ "LK-44": {}, "LK-45": {}, "LK-5": {}, "LK-51": {}, "LK-52": {},
+ "LK-53": {}, "LK-6": {}, "LK-61": {}, "LK-62": {}, "LK-7": {},
+ "LK-71": {}, "LK-72": {}, "LK-8": {}, "LK-81": {}, "LK-82": {},
+ "LK-9": {}, "LK-91": {}, "LK-92": {}, "LR-BG": {}, "LR-BM": {},
+ "LR-CM": {}, "LR-GB": {}, "LR-GG": {}, "LR-GK": {}, "LR-LO": {},
+ "LR-MG": {}, "LR-MO": {}, "LR-MY": {}, "LR-NI": {}, "LR-RI": {},
+ "LR-SI": {}, "LS-A": {}, "LS-B": {}, "LS-C": {}, "LS-D": {},
+ "LS-E": {}, "LS-F": {}, "LS-G": {}, "LS-H": {}, "LS-J": {},
+ "LS-K": {}, "LT-AL": {}, "LT-KL": {}, "LT-KU": {}, "LT-MR": {},
+ "LT-PN": {}, "LT-SA": {}, "LT-TA": {}, "LT-TE": {}, "LT-UT": {},
+ "LT-VL": {}, "LU-CA": {}, "LU-CL": {}, "LU-DI": {}, "LU-EC": {},
+ "LU-ES": {}, "LU-GR": {}, "LU-LU": {}, "LU-ME": {}, "LU-RD": {},
+ "LU-RM": {}, "LU-VD": {}, "LU-WI": {}, "LU-D": {}, "LU-G": {}, "LU-L": {},
+ "LV-001": {}, "LV-111": {}, "LV-112": {}, "LV-113": {},
+ "LV-002": {}, "LV-003": {}, "LV-004": {}, "LV-005": {}, "LV-006": {},
+ "LV-007": {}, "LV-008": {}, "LV-009": {}, "LV-010": {}, "LV-011": {},
+ "LV-012": {}, "LV-013": {}, "LV-014": {}, "LV-015": {}, "LV-016": {},
+ "LV-017": {}, "LV-018": {}, "LV-019": {}, "LV-020": {}, "LV-021": {},
+ "LV-022": {}, "LV-023": {}, "LV-024": {}, "LV-025": {}, "LV-026": {},
+ "LV-027": {}, "LV-028": {}, "LV-029": {}, "LV-030": {}, "LV-031": {},
+ "LV-032": {}, "LV-033": {}, "LV-034": {}, "LV-035": {}, "LV-036": {},
+ "LV-037": {}, "LV-038": {}, "LV-039": {}, "LV-040": {}, "LV-041": {},
+ "LV-042": {}, "LV-043": {}, "LV-044": {}, "LV-045": {}, "LV-046": {},
+ "LV-047": {}, "LV-048": {}, "LV-049": {}, "LV-050": {}, "LV-051": {},
+ "LV-052": {}, "LV-053": {}, "LV-054": {}, "LV-055": {}, "LV-056": {},
+ "LV-057": {}, "LV-058": {}, "LV-059": {}, "LV-060": {}, "LV-061": {},
+ "LV-062": {}, "LV-063": {}, "LV-064": {}, "LV-065": {}, "LV-066": {},
+ "LV-067": {}, "LV-068": {}, "LV-069": {}, "LV-070": {}, "LV-071": {},
+ "LV-072": {}, "LV-073": {}, "LV-074": {}, "LV-075": {}, "LV-076": {},
+ "LV-077": {}, "LV-078": {}, "LV-079": {}, "LV-080": {}, "LV-081": {},
+ "LV-082": {}, "LV-083": {}, "LV-084": {}, "LV-085": {}, "LV-086": {},
+ "LV-087": {}, "LV-088": {}, "LV-089": {}, "LV-090": {}, "LV-091": {},
+ "LV-092": {}, "LV-093": {}, "LV-094": {}, "LV-095": {}, "LV-096": {},
+ "LV-097": {}, "LV-098": {}, "LV-099": {}, "LV-100": {}, "LV-101": {},
+ "LV-102": {}, "LV-103": {}, "LV-104": {}, "LV-105": {}, "LV-106": {},
+ "LV-107": {}, "LV-108": {}, "LV-109": {}, "LV-110": {}, "LV-DGV": {},
+ "LV-JEL": {}, "LV-JKB": {}, "LV-JUR": {}, "LV-LPX": {}, "LV-REZ": {},
+ "LV-RIX": {}, "LV-VEN": {}, "LV-VMR": {}, "LY-BA": {}, "LY-BU": {},
+ "LY-DR": {}, "LY-GT": {}, "LY-JA": {}, "LY-JB": {}, "LY-JG": {},
+ "LY-JI": {}, "LY-JU": {}, "LY-KF": {}, "LY-MB": {}, "LY-MI": {},
+ "LY-MJ": {}, "LY-MQ": {}, "LY-NL": {}, "LY-NQ": {}, "LY-SB": {},
+ "LY-SR": {}, "LY-TB": {}, "LY-WA": {}, "LY-WD": {}, "LY-WS": {},
+ "LY-ZA": {}, "MA-01": {}, "MA-02": {}, "MA-03": {}, "MA-04": {},
+ "MA-05": {}, "MA-06": {}, "MA-07": {}, "MA-08": {}, "MA-09": {},
+ "MA-10": {}, "MA-11": {}, "MA-12": {}, "MA-13": {}, "MA-14": {},
+ "MA-15": {}, "MA-16": {}, "MA-AGD": {}, "MA-AOU": {}, "MA-ASZ": {},
+ "MA-AZI": {}, "MA-BEM": {}, "MA-BER": {}, "MA-BES": {}, "MA-BOD": {},
+ "MA-BOM": {}, "MA-CAS": {}, "MA-CHE": {}, "MA-CHI": {}, "MA-CHT": {},
+ "MA-ERR": {}, "MA-ESI": {}, "MA-ESM": {}, "MA-FAH": {}, "MA-FES": {},
+ "MA-FIG": {}, "MA-GUE": {}, "MA-HAJ": {}, "MA-HAO": {}, "MA-HOC": {},
+ "MA-IFR": {}, "MA-INE": {}, "MA-JDI": {}, "MA-JRA": {}, "MA-KEN": {},
+ "MA-KES": {}, "MA-KHE": {}, "MA-KHN": {}, "MA-KHO": {}, "MA-LAA": {},
+ "MA-LAR": {}, "MA-MED": {}, "MA-MEK": {}, "MA-MMD": {}, "MA-MMN": {},
+ "MA-MOH": {}, "MA-MOU": {}, "MA-NAD": {}, "MA-NOU": {}, "MA-OUA": {},
+ "MA-OUD": {}, "MA-OUJ": {}, "MA-RAB": {}, "MA-SAF": {}, "MA-SAL": {},
+ "MA-SEF": {}, "MA-SET": {}, "MA-SIK": {}, "MA-SKH": {}, "MA-SYB": {},
+ "MA-TAI": {}, "MA-TAO": {}, "MA-TAR": {}, "MA-TAT": {}, "MA-TAZ": {},
+ "MA-TET": {}, "MA-TIZ": {}, "MA-TNG": {}, "MA-TNT": {}, "MA-ZAG": {},
+ "MC-CL": {}, "MC-CO": {}, "MC-FO": {}, "MC-GA": {}, "MC-JE": {},
+ "MC-LA": {}, "MC-MA": {}, "MC-MC": {}, "MC-MG": {}, "MC-MO": {},
+ "MC-MU": {}, "MC-PH": {}, "MC-SD": {}, "MC-SO": {}, "MC-SP": {},
+ "MC-SR": {}, "MC-VR": {}, "MD-AN": {}, "MD-BA": {}, "MD-BD": {},
+ "MD-BR": {}, "MD-BS": {}, "MD-CA": {}, "MD-CL": {}, "MD-CM": {},
+ "MD-CR": {}, "MD-CS": {}, "MD-CT": {}, "MD-CU": {}, "MD-DO": {},
+ "MD-DR": {}, "MD-DU": {}, "MD-ED": {}, "MD-FA": {}, "MD-FL": {},
+ "MD-GA": {}, "MD-GL": {}, "MD-HI": {}, "MD-IA": {}, "MD-LE": {},
+ "MD-NI": {}, "MD-OC": {}, "MD-OR": {}, "MD-RE": {}, "MD-RI": {},
+ "MD-SD": {}, "MD-SI": {}, "MD-SN": {}, "MD-SO": {}, "MD-ST": {},
+ "MD-SV": {}, "MD-TA": {}, "MD-TE": {}, "MD-UN": {}, "ME-01": {},
+ "ME-02": {}, "ME-03": {}, "ME-04": {}, "ME-05": {}, "ME-06": {},
+ "ME-07": {}, "ME-08": {}, "ME-09": {}, "ME-10": {}, "ME-11": {},
+ "ME-12": {}, "ME-13": {}, "ME-14": {}, "ME-15": {}, "ME-16": {},
+ "ME-17": {}, "ME-18": {}, "ME-19": {}, "ME-20": {}, "ME-21": {}, "ME-24": {},
+ "MG-A": {}, "MG-D": {}, "MG-F": {}, "MG-M": {}, "MG-T": {},
+ "MG-U": {}, "MH-ALK": {}, "MH-ALL": {}, "MH-ARN": {}, "MH-AUR": {},
+ "MH-EBO": {}, "MH-ENI": {}, "MH-JAB": {}, "MH-JAL": {}, "MH-KIL": {},
+ "MH-KWA": {}, "MH-L": {}, "MH-LAE": {}, "MH-LIB": {}, "MH-LIK": {},
+ "MH-MAJ": {}, "MH-MAL": {}, "MH-MEJ": {}, "MH-MIL": {}, "MH-NMK": {},
+ "MH-NMU": {}, "MH-RON": {}, "MH-T": {}, "MH-UJA": {}, "MH-UTI": {},
+ "MH-WTJ": {}, "MH-WTN": {}, "MK-101": {}, "MK-102": {}, "MK-103": {},
+ "MK-104": {}, "MK-105": {},
+ "MK-106": {}, "MK-107": {}, "MK-108": {}, "MK-109": {}, "MK-201": {},
+ "MK-202": {}, "MK-205": {}, "MK-206": {}, "MK-207": {}, "MK-208": {},
+ "MK-209": {}, "MK-210": {}, "MK-211": {}, "MK-301": {}, "MK-303": {},
+ "MK-307": {}, "MK-308": {}, "MK-310": {}, "MK-311": {}, "MK-312": {},
+ "MK-401": {}, "MK-402": {}, "MK-403": {}, "MK-404": {}, "MK-405": {},
+ "MK-406": {}, "MK-408": {}, "MK-409": {}, "MK-410": {}, "MK-501": {},
+ "MK-502": {}, "MK-503": {}, "MK-505": {}, "MK-506": {}, "MK-507": {},
+ "MK-508": {}, "MK-509": {}, "MK-601": {}, "MK-602": {}, "MK-604": {},
+ "MK-605": {}, "MK-606": {}, "MK-607": {}, "MK-608": {}, "MK-609": {},
+ "MK-701": {}, "MK-702": {}, "MK-703": {}, "MK-704": {}, "MK-705": {},
+ "MK-803": {}, "MK-804": {}, "MK-806": {}, "MK-807": {}, "MK-809": {},
+ "MK-810": {}, "MK-811": {}, "MK-812": {}, "MK-813": {}, "MK-814": {},
+ "MK-816": {}, "ML-1": {}, "ML-2": {}, "ML-3": {}, "ML-4": {},
+ "ML-5": {}, "ML-6": {}, "ML-7": {}, "ML-8": {}, "ML-BKO": {},
+ "MM-01": {}, "MM-02": {}, "MM-03": {}, "MM-04": {}, "MM-05": {},
+ "MM-06": {}, "MM-07": {}, "MM-11": {}, "MM-12": {}, "MM-13": {},
+ "MM-14": {}, "MM-15": {}, "MM-16": {}, "MM-17": {}, "MM-18": {}, "MN-035": {},
+ "MN-037": {}, "MN-039": {}, "MN-041": {}, "MN-043": {}, "MN-046": {},
+ "MN-047": {}, "MN-049": {}, "MN-051": {}, "MN-053": {}, "MN-055": {},
+ "MN-057": {}, "MN-059": {}, "MN-061": {}, "MN-063": {}, "MN-064": {},
+ "MN-065": {}, "MN-067": {}, "MN-069": {}, "MN-071": {}, "MN-073": {},
+ "MN-1": {}, "MR-01": {}, "MR-02": {}, "MR-03": {}, "MR-04": {},
+ "MR-05": {}, "MR-06": {}, "MR-07": {}, "MR-08": {}, "MR-09": {},
+ "MR-10": {}, "MR-11": {}, "MR-12": {}, "MR-13": {}, "MR-NKC": {}, "MT-01": {},
+ "MT-02": {}, "MT-03": {}, "MT-04": {}, "MT-05": {}, "MT-06": {},
+ "MT-07": {}, "MT-08": {}, "MT-09": {}, "MT-10": {}, "MT-11": {},
+ "MT-12": {}, "MT-13": {}, "MT-14": {}, "MT-15": {}, "MT-16": {},
+ "MT-17": {}, "MT-18": {}, "MT-19": {}, "MT-20": {}, "MT-21": {},
+ "MT-22": {}, "MT-23": {}, "MT-24": {}, "MT-25": {}, "MT-26": {},
+ "MT-27": {}, "MT-28": {}, "MT-29": {}, "MT-30": {}, "MT-31": {},
+ "MT-32": {}, "MT-33": {}, "MT-34": {}, "MT-35": {}, "MT-36": {},
+ "MT-37": {}, "MT-38": {}, "MT-39": {}, "MT-40": {}, "MT-41": {},
+ "MT-42": {}, "MT-43": {}, "MT-44": {}, "MT-45": {}, "MT-46": {},
+ "MT-47": {}, "MT-48": {}, "MT-49": {}, "MT-50": {}, "MT-51": {},
+ "MT-52": {}, "MT-53": {}, "MT-54": {}, "MT-55": {}, "MT-56": {},
+ "MT-57": {}, "MT-58": {}, "MT-59": {}, "MT-60": {}, "MT-61": {},
+ "MT-62": {}, "MT-63": {}, "MT-64": {}, "MT-65": {}, "MT-66": {},
+ "MT-67": {}, "MT-68": {}, "MU-AG": {}, "MU-BL": {}, "MU-BR": {},
+ "MU-CC": {}, "MU-CU": {}, "MU-FL": {}, "MU-GP": {}, "MU-MO": {},
+ "MU-PA": {}, "MU-PL": {}, "MU-PU": {}, "MU-PW": {}, "MU-QB": {},
+ "MU-RO": {}, "MU-RP": {}, "MU-RR": {}, "MU-SA": {}, "MU-VP": {}, "MV-00": {},
+ "MV-01": {}, "MV-02": {}, "MV-03": {}, "MV-04": {}, "MV-05": {},
+ "MV-07": {}, "MV-08": {}, "MV-12": {}, "MV-13": {}, "MV-14": {},
+ "MV-17": {}, "MV-20": {}, "MV-23": {}, "MV-24": {}, "MV-25": {},
+ "MV-26": {}, "MV-27": {}, "MV-28": {}, "MV-29": {}, "MV-CE": {},
+ "MV-MLE": {}, "MV-NC": {}, "MV-NO": {}, "MV-SC": {}, "MV-SU": {},
+ "MV-UN": {}, "MV-US": {}, "MW-BA": {}, "MW-BL": {}, "MW-C": {},
+ "MW-CK": {}, "MW-CR": {}, "MW-CT": {}, "MW-DE": {}, "MW-DO": {},
+ "MW-KR": {}, "MW-KS": {}, "MW-LI": {}, "MW-LK": {}, "MW-MC": {},
+ "MW-MG": {}, "MW-MH": {}, "MW-MU": {}, "MW-MW": {}, "MW-MZ": {},
+ "MW-N": {}, "MW-NB": {}, "MW-NE": {}, "MW-NI": {}, "MW-NK": {},
+ "MW-NS": {}, "MW-NU": {}, "MW-PH": {}, "MW-RU": {}, "MW-S": {},
+ "MW-SA": {}, "MW-TH": {}, "MW-ZO": {}, "MX-AGU": {}, "MX-BCN": {},
+ "MX-BCS": {}, "MX-CAM": {}, "MX-CHH": {}, "MX-CHP": {}, "MX-COA": {},
+ "MX-COL": {}, "MX-CMX": {}, "MX-DIF": {}, "MX-DUR": {}, "MX-GRO": {}, "MX-GUA": {},
+ "MX-HID": {}, "MX-JAL": {}, "MX-MEX": {}, "MX-MIC": {}, "MX-MOR": {},
+ "MX-NAY": {}, "MX-NLE": {}, "MX-OAX": {}, "MX-PUE": {}, "MX-QUE": {},
+ "MX-ROO": {}, "MX-SIN": {}, "MX-SLP": {}, "MX-SON": {}, "MX-TAB": {},
+ "MX-TAM": {}, "MX-TLA": {}, "MX-VER": {}, "MX-YUC": {}, "MX-ZAC": {},
+ "MY-01": {}, "MY-02": {}, "MY-03": {}, "MY-04": {}, "MY-05": {},
+ "MY-06": {}, "MY-07": {}, "MY-08": {}, "MY-09": {}, "MY-10": {},
+ "MY-11": {}, "MY-12": {}, "MY-13": {}, "MY-14": {}, "MY-15": {},
+ "MY-16": {}, "MZ-A": {}, "MZ-B": {}, "MZ-G": {}, "MZ-I": {},
+ "MZ-L": {}, "MZ-MPM": {}, "MZ-N": {}, "MZ-P": {}, "MZ-Q": {},
+ "MZ-S": {}, "MZ-T": {}, "NA-CA": {}, "NA-ER": {}, "NA-HA": {},
+ "NA-KA": {}, "NA-KE": {}, "NA-KH": {}, "NA-KU": {}, "NA-KW": {}, "NA-OD": {}, "NA-OH": {},
+ "NA-OK": {}, "NA-ON": {}, "NA-OS": {}, "NA-OT": {}, "NA-OW": {},
+ "NE-1": {}, "NE-2": {}, "NE-3": {}, "NE-4": {}, "NE-5": {},
+ "NE-6": {}, "NE-7": {}, "NE-8": {}, "NG-AB": {}, "NG-AD": {},
+ "NG-AK": {}, "NG-AN": {}, "NG-BA": {}, "NG-BE": {}, "NG-BO": {},
+ "NG-BY": {}, "NG-CR": {}, "NG-DE": {}, "NG-EB": {}, "NG-ED": {},
+ "NG-EK": {}, "NG-EN": {}, "NG-FC": {}, "NG-GO": {}, "NG-IM": {},
+ "NG-JI": {}, "NG-KD": {}, "NG-KE": {}, "NG-KN": {}, "NG-KO": {},
+ "NG-KT": {}, "NG-KW": {}, "NG-LA": {}, "NG-NA": {}, "NG-NI": {},
+ "NG-OG": {}, "NG-ON": {}, "NG-OS": {}, "NG-OY": {}, "NG-PL": {},
+ "NG-RI": {}, "NG-SO": {}, "NG-TA": {}, "NG-YO": {}, "NG-ZA": {},
+ "NI-AN": {}, "NI-AS": {}, "NI-BO": {}, "NI-CA": {}, "NI-CI": {},
+ "NI-CO": {}, "NI-ES": {}, "NI-GR": {}, "NI-JI": {}, "NI-LE": {},
+ "NI-MD": {}, "NI-MN": {}, "NI-MS": {}, "NI-MT": {}, "NI-NS": {},
+ "NI-RI": {}, "NI-SJ": {}, "NL-AW": {}, "NL-BQ1": {}, "NL-BQ2": {},
+ "NL-BQ3": {}, "NL-CW": {}, "NL-DR": {}, "NL-FL": {}, "NL-FR": {},
+ "NL-GE": {}, "NL-GR": {}, "NL-LI": {}, "NL-NB": {}, "NL-NH": {},
+ "NL-OV": {}, "NL-SX": {}, "NL-UT": {}, "NL-ZE": {}, "NL-ZH": {},
+ "NO-03": {}, "NO-11": {}, "NO-15": {}, "NO-16": {}, "NO-17": {},
+ "NO-18": {}, "NO-21": {}, "NO-30": {}, "NO-34": {}, "NO-38": {},
+ "NO-42": {}, "NO-46": {}, "NO-50": {}, "NO-54": {},
+ "NO-22": {}, "NP-1": {}, "NP-2": {}, "NP-3": {}, "NP-4": {},
+ "NP-5": {}, "NP-BA": {}, "NP-BH": {}, "NP-DH": {}, "NP-GA": {},
+ "NP-JA": {}, "NP-KA": {}, "NP-KO": {}, "NP-LU": {}, "NP-MA": {},
+ "NP-ME": {}, "NP-NA": {}, "NP-RA": {}, "NP-SA": {}, "NP-SE": {},
+ "NR-01": {}, "NR-02": {}, "NR-03": {}, "NR-04": {}, "NR-05": {},
+ "NR-06": {}, "NR-07": {}, "NR-08": {}, "NR-09": {}, "NR-10": {},
+ "NR-11": {}, "NR-12": {}, "NR-13": {}, "NR-14": {}, "NZ-AUK": {},
+ "NZ-BOP": {}, "NZ-CAN": {}, "NZ-CIT": {}, "NZ-GIS": {}, "NZ-HKB": {},
+ "NZ-MBH": {}, "NZ-MWT": {}, "NZ-N": {}, "NZ-NSN": {}, "NZ-NTL": {},
+ "NZ-OTA": {}, "NZ-S": {}, "NZ-STL": {}, "NZ-TAS": {}, "NZ-TKI": {},
+ "NZ-WGN": {}, "NZ-WKO": {}, "NZ-WTC": {}, "OM-BA": {}, "OM-BS": {}, "OM-BU": {}, "OM-BJ": {},
+ "OM-DA": {}, "OM-MA": {}, "OM-MU": {}, "OM-SH": {}, "OM-SJ": {}, "OM-SS": {}, "OM-WU": {},
+ "OM-ZA": {}, "OM-ZU": {}, "PA-1": {}, "PA-2": {}, "PA-3": {},
+ "PA-4": {}, "PA-5": {}, "PA-6": {}, "PA-7": {}, "PA-8": {},
+ "PA-9": {}, "PA-EM": {}, "PA-KY": {}, "PA-NB": {}, "PE-AMA": {},
+ "PE-ANC": {}, "PE-APU": {}, "PE-ARE": {}, "PE-AYA": {}, "PE-CAJ": {},
+ "PE-CAL": {}, "PE-CUS": {}, "PE-HUC": {}, "PE-HUV": {}, "PE-ICA": {},
+ "PE-JUN": {}, "PE-LAL": {}, "PE-LAM": {}, "PE-LIM": {}, "PE-LMA": {},
+ "PE-LOR": {}, "PE-MDD": {}, "PE-MOQ": {}, "PE-PAS": {}, "PE-PIU": {},
+ "PE-PUN": {}, "PE-SAM": {}, "PE-TAC": {}, "PE-TUM": {}, "PE-UCA": {},
+ "PG-CPK": {}, "PG-CPM": {}, "PG-EBR": {}, "PG-EHG": {}, "PG-EPW": {},
+ "PG-ESW": {}, "PG-GPK": {}, "PG-MBA": {}, "PG-MPL": {}, "PG-MPM": {},
+ "PG-MRL": {}, "PG-NCD": {}, "PG-NIK": {}, "PG-NPP": {}, "PG-NSB": {},
+ "PG-SAN": {}, "PG-SHM": {}, "PG-WBK": {}, "PG-WHM": {}, "PG-WPD": {},
+ "PH-00": {}, "PH-01": {}, "PH-02": {}, "PH-03": {}, "PH-05": {},
+ "PH-06": {}, "PH-07": {}, "PH-08": {}, "PH-09": {}, "PH-10": {},
+ "PH-11": {}, "PH-12": {}, "PH-13": {}, "PH-14": {}, "PH-15": {},
+ "PH-40": {}, "PH-41": {}, "PH-ABR": {}, "PH-AGN": {}, "PH-AGS": {},
+ "PH-AKL": {}, "PH-ALB": {}, "PH-ANT": {}, "PH-APA": {}, "PH-AUR": {},
+ "PH-BAN": {}, "PH-BAS": {}, "PH-BEN": {}, "PH-BIL": {}, "PH-BOH": {},
+ "PH-BTG": {}, "PH-BTN": {}, "PH-BUK": {}, "PH-BUL": {}, "PH-CAG": {},
+ "PH-CAM": {}, "PH-CAN": {}, "PH-CAP": {}, "PH-CAS": {}, "PH-CAT": {},
+ "PH-CAV": {}, "PH-CEB": {}, "PH-COM": {}, "PH-DAO": {}, "PH-DAS": {},
+ "PH-DAV": {}, "PH-DIN": {}, "PH-EAS": {}, "PH-GUI": {}, "PH-IFU": {},
+ "PH-ILI": {}, "PH-ILN": {}, "PH-ILS": {}, "PH-ISA": {}, "PH-KAL": {},
+ "PH-LAG": {}, "PH-LAN": {}, "PH-LAS": {}, "PH-LEY": {}, "PH-LUN": {},
+ "PH-MAD": {}, "PH-MAG": {}, "PH-MAS": {}, "PH-MDC": {}, "PH-MDR": {},
+ "PH-MOU": {}, "PH-MSC": {}, "PH-MSR": {}, "PH-NCO": {}, "PH-NEC": {},
+ "PH-NER": {}, "PH-NSA": {}, "PH-NUE": {}, "PH-NUV": {}, "PH-PAM": {},
+ "PH-PAN": {}, "PH-PLW": {}, "PH-QUE": {}, "PH-QUI": {}, "PH-RIZ": {},
+ "PH-ROM": {}, "PH-SAR": {}, "PH-SCO": {}, "PH-SIG": {}, "PH-SLE": {},
+ "PH-SLU": {}, "PH-SOR": {}, "PH-SUK": {}, "PH-SUN": {}, "PH-SUR": {},
+ "PH-TAR": {}, "PH-TAW": {}, "PH-WSA": {}, "PH-ZAN": {}, "PH-ZAS": {},
+ "PH-ZMB": {}, "PH-ZSI": {}, "PK-BA": {}, "PK-GB": {}, "PK-IS": {},
+ "PK-JK": {}, "PK-KP": {}, "PK-PB": {}, "PK-SD": {}, "PK-TA": {},
+ "PL-02": {}, "PL-04": {}, "PL-06": {}, "PL-08": {}, "PL-10": {},
+ "PL-12": {}, "PL-14": {}, "PL-16": {}, "PL-18": {}, "PL-20": {},
+ "PL-22": {}, "PL-24": {}, "PL-26": {}, "PL-28": {}, "PL-30": {}, "PL-32": {},
+ "PS-BTH": {}, "PS-DEB": {}, "PS-GZA": {}, "PS-HBN": {},
+ "PS-JEM": {}, "PS-JEN": {}, "PS-JRH": {}, "PS-KYS": {}, "PS-NBS": {},
+ "PS-NGZ": {}, "PS-QQA": {}, "PS-RBH": {}, "PS-RFH": {}, "PS-SLT": {},
+ "PS-TBS": {}, "PS-TKM": {}, "PT-01": {}, "PT-02": {}, "PT-03": {},
+ "PT-04": {}, "PT-05": {}, "PT-06": {}, "PT-07": {}, "PT-08": {},
+ "PT-09": {}, "PT-10": {}, "PT-11": {}, "PT-12": {}, "PT-13": {},
+ "PT-14": {}, "PT-15": {}, "PT-16": {}, "PT-17": {}, "PT-18": {},
+ "PT-20": {}, "PT-30": {}, "PW-002": {}, "PW-004": {}, "PW-010": {},
+ "PW-050": {}, "PW-100": {}, "PW-150": {}, "PW-212": {}, "PW-214": {},
+ "PW-218": {}, "PW-222": {}, "PW-224": {}, "PW-226": {}, "PW-227": {},
+ "PW-228": {}, "PW-350": {}, "PW-370": {}, "PY-1": {}, "PY-10": {},
+ "PY-11": {}, "PY-12": {}, "PY-13": {}, "PY-14": {}, "PY-15": {},
+ "PY-16": {}, "PY-19": {}, "PY-2": {}, "PY-3": {}, "PY-4": {},
+ "PY-5": {}, "PY-6": {}, "PY-7": {}, "PY-8": {}, "PY-9": {},
+ "PY-ASU": {}, "QA-DA": {}, "QA-KH": {}, "QA-MS": {}, "QA-RA": {},
+ "QA-US": {}, "QA-WA": {}, "QA-ZA": {}, "RO-AB": {}, "RO-AG": {},
+ "RO-AR": {}, "RO-B": {}, "RO-BC": {}, "RO-BH": {}, "RO-BN": {},
+ "RO-BR": {}, "RO-BT": {}, "RO-BV": {}, "RO-BZ": {}, "RO-CJ": {},
+ "RO-CL": {}, "RO-CS": {}, "RO-CT": {}, "RO-CV": {}, "RO-DB": {},
+ "RO-DJ": {}, "RO-GJ": {}, "RO-GL": {}, "RO-GR": {}, "RO-HD": {},
+ "RO-HR": {}, "RO-IF": {}, "RO-IL": {}, "RO-IS": {}, "RO-MH": {},
+ "RO-MM": {}, "RO-MS": {}, "RO-NT": {}, "RO-OT": {}, "RO-PH": {},
+ "RO-SB": {}, "RO-SJ": {}, "RO-SM": {}, "RO-SV": {}, "RO-TL": {},
+ "RO-TM": {}, "RO-TR": {}, "RO-VL": {}, "RO-VN": {}, "RO-VS": {},
+ "RS-00": {}, "RS-01": {}, "RS-02": {}, "RS-03": {}, "RS-04": {},
+ "RS-05": {}, "RS-06": {}, "RS-07": {}, "RS-08": {}, "RS-09": {},
+ "RS-10": {}, "RS-11": {}, "RS-12": {}, "RS-13": {}, "RS-14": {},
+ "RS-15": {}, "RS-16": {}, "RS-17": {}, "RS-18": {}, "RS-19": {},
+ "RS-20": {}, "RS-21": {}, "RS-22": {}, "RS-23": {}, "RS-24": {},
+ "RS-25": {}, "RS-26": {}, "RS-27": {}, "RS-28": {}, "RS-29": {},
+ "RS-KM": {}, "RS-VO": {}, "RU-AD": {}, "RU-AL": {}, "RU-ALT": {},
+ "RU-AMU": {}, "RU-ARK": {}, "RU-AST": {}, "RU-BA": {}, "RU-BEL": {},
+ "RU-BRY": {}, "RU-BU": {}, "RU-CE": {}, "RU-CHE": {}, "RU-CHU": {},
+ "RU-CU": {}, "RU-DA": {}, "RU-IN": {}, "RU-IRK": {}, "RU-IVA": {},
+ "RU-KAM": {}, "RU-KB": {}, "RU-KC": {}, "RU-KDA": {}, "RU-KEM": {},
+ "RU-KGD": {}, "RU-KGN": {}, "RU-KHA": {}, "RU-KHM": {}, "RU-KIR": {},
+ "RU-KK": {}, "RU-KL": {}, "RU-KLU": {}, "RU-KO": {}, "RU-KOS": {},
+ "RU-KR": {}, "RU-KRS": {}, "RU-KYA": {}, "RU-LEN": {}, "RU-LIP": {},
+ "RU-MAG": {}, "RU-ME": {}, "RU-MO": {}, "RU-MOS": {}, "RU-MOW": {},
+ "RU-MUR": {}, "RU-NEN": {}, "RU-NGR": {}, "RU-NIZ": {}, "RU-NVS": {},
+ "RU-OMS": {}, "RU-ORE": {}, "RU-ORL": {}, "RU-PER": {}, "RU-PNZ": {},
+ "RU-PRI": {}, "RU-PSK": {}, "RU-ROS": {}, "RU-RYA": {}, "RU-SA": {},
+ "RU-SAK": {}, "RU-SAM": {}, "RU-SAR": {}, "RU-SE": {}, "RU-SMO": {},
+ "RU-SPE": {}, "RU-STA": {}, "RU-SVE": {}, "RU-TA": {}, "RU-TAM": {},
+ "RU-TOM": {}, "RU-TUL": {}, "RU-TVE": {}, "RU-TY": {}, "RU-TYU": {},
+ "RU-UD": {}, "RU-ULY": {}, "RU-VGG": {}, "RU-VLA": {}, "RU-VLG": {},
+ "RU-VOR": {}, "RU-YAN": {}, "RU-YAR": {}, "RU-YEV": {}, "RU-ZAB": {},
+ "RW-01": {}, "RW-02": {}, "RW-03": {}, "RW-04": {}, "RW-05": {},
+ "SA-01": {}, "SA-02": {}, "SA-03": {}, "SA-04": {}, "SA-05": {},
+ "SA-06": {}, "SA-07": {}, "SA-08": {}, "SA-09": {}, "SA-10": {},
+ "SA-11": {}, "SA-12": {}, "SA-14": {}, "SB-CE": {}, "SB-CH": {},
+ "SB-CT": {}, "SB-GU": {}, "SB-IS": {}, "SB-MK": {}, "SB-ML": {},
+ "SB-RB": {}, "SB-TE": {}, "SB-WE": {}, "SC-01": {}, "SC-02": {},
+ "SC-03": {}, "SC-04": {}, "SC-05": {}, "SC-06": {}, "SC-07": {},
+ "SC-08": {}, "SC-09": {}, "SC-10": {}, "SC-11": {}, "SC-12": {},
+ "SC-13": {}, "SC-14": {}, "SC-15": {}, "SC-16": {}, "SC-17": {},
+ "SC-18": {}, "SC-19": {}, "SC-20": {}, "SC-21": {}, "SC-22": {},
+ "SC-23": {}, "SC-24": {}, "SC-25": {}, "SD-DC": {}, "SD-DE": {},
+ "SD-DN": {}, "SD-DS": {}, "SD-DW": {}, "SD-GD": {}, "SD-GK": {}, "SD-GZ": {},
+ "SD-KA": {}, "SD-KH": {}, "SD-KN": {}, "SD-KS": {}, "SD-NB": {},
+ "SD-NO": {}, "SD-NR": {}, "SD-NW": {}, "SD-RS": {}, "SD-SI": {},
+ "SE-AB": {}, "SE-AC": {}, "SE-BD": {}, "SE-C": {}, "SE-D": {},
+ "SE-E": {}, "SE-F": {}, "SE-G": {}, "SE-H": {}, "SE-I": {},
+ "SE-K": {}, "SE-M": {}, "SE-N": {}, "SE-O": {}, "SE-S": {},
+ "SE-T": {}, "SE-U": {}, "SE-W": {}, "SE-X": {}, "SE-Y": {},
+ "SE-Z": {}, "SG-01": {}, "SG-02": {}, "SG-03": {}, "SG-04": {},
+ "SG-05": {}, "SH-AC": {}, "SH-HL": {}, "SH-TA": {}, "SI-001": {},
+ "SI-002": {}, "SI-003": {}, "SI-004": {}, "SI-005": {}, "SI-006": {},
+ "SI-007": {}, "SI-008": {}, "SI-009": {}, "SI-010": {}, "SI-011": {},
+ "SI-012": {}, "SI-013": {}, "SI-014": {}, "SI-015": {}, "SI-016": {},
+ "SI-017": {}, "SI-018": {}, "SI-019": {}, "SI-020": {}, "SI-021": {},
+ "SI-022": {}, "SI-023": {}, "SI-024": {}, "SI-025": {}, "SI-026": {},
+ "SI-027": {}, "SI-028": {}, "SI-029": {}, "SI-030": {}, "SI-031": {},
+ "SI-032": {}, "SI-033": {}, "SI-034": {}, "SI-035": {}, "SI-036": {},
+ "SI-037": {}, "SI-038": {}, "SI-039": {}, "SI-040": {}, "SI-041": {},
+ "SI-042": {}, "SI-043": {}, "SI-044": {}, "SI-045": {}, "SI-046": {},
+ "SI-047": {}, "SI-048": {}, "SI-049": {}, "SI-050": {}, "SI-051": {},
+ "SI-052": {}, "SI-053": {}, "SI-054": {}, "SI-055": {}, "SI-056": {},
+ "SI-057": {}, "SI-058": {}, "SI-059": {}, "SI-060": {}, "SI-061": {},
+ "SI-062": {}, "SI-063": {}, "SI-064": {}, "SI-065": {}, "SI-066": {},
+ "SI-067": {}, "SI-068": {}, "SI-069": {}, "SI-070": {}, "SI-071": {},
+ "SI-072": {}, "SI-073": {}, "SI-074": {}, "SI-075": {}, "SI-076": {},
+ "SI-077": {}, "SI-078": {}, "SI-079": {}, "SI-080": {}, "SI-081": {},
+ "SI-082": {}, "SI-083": {}, "SI-084": {}, "SI-085": {}, "SI-086": {},
+ "SI-087": {}, "SI-088": {}, "SI-089": {}, "SI-090": {}, "SI-091": {},
+ "SI-092": {}, "SI-093": {}, "SI-094": {}, "SI-095": {}, "SI-096": {},
+ "SI-097": {}, "SI-098": {}, "SI-099": {}, "SI-100": {}, "SI-101": {},
+ "SI-102": {}, "SI-103": {}, "SI-104": {}, "SI-105": {}, "SI-106": {},
+ "SI-107": {}, "SI-108": {}, "SI-109": {}, "SI-110": {}, "SI-111": {},
+ "SI-112": {}, "SI-113": {}, "SI-114": {}, "SI-115": {}, "SI-116": {},
+ "SI-117": {}, "SI-118": {}, "SI-119": {}, "SI-120": {}, "SI-121": {},
+ "SI-122": {}, "SI-123": {}, "SI-124": {}, "SI-125": {}, "SI-126": {},
+ "SI-127": {}, "SI-128": {}, "SI-129": {}, "SI-130": {}, "SI-131": {},
+ "SI-132": {}, "SI-133": {}, "SI-134": {}, "SI-135": {}, "SI-136": {},
+ "SI-137": {}, "SI-138": {}, "SI-139": {}, "SI-140": {}, "SI-141": {},
+ "SI-142": {}, "SI-143": {}, "SI-144": {}, "SI-146": {}, "SI-147": {},
+ "SI-148": {}, "SI-149": {}, "SI-150": {}, "SI-151": {}, "SI-152": {},
+ "SI-153": {}, "SI-154": {}, "SI-155": {}, "SI-156": {}, "SI-157": {},
+ "SI-158": {}, "SI-159": {}, "SI-160": {}, "SI-161": {}, "SI-162": {},
+ "SI-163": {}, "SI-164": {}, "SI-165": {}, "SI-166": {}, "SI-167": {},
+ "SI-168": {}, "SI-169": {}, "SI-170": {}, "SI-171": {}, "SI-172": {},
+ "SI-173": {}, "SI-174": {}, "SI-175": {}, "SI-176": {}, "SI-177": {},
+ "SI-178": {}, "SI-179": {}, "SI-180": {}, "SI-181": {}, "SI-182": {},
+ "SI-183": {}, "SI-184": {}, "SI-185": {}, "SI-186": {}, "SI-187": {},
+ "SI-188": {}, "SI-189": {}, "SI-190": {}, "SI-191": {}, "SI-192": {},
+ "SI-193": {}, "SI-194": {}, "SI-195": {}, "SI-196": {}, "SI-197": {},
+ "SI-198": {}, "SI-199": {}, "SI-200": {}, "SI-201": {}, "SI-202": {},
+ "SI-203": {}, "SI-204": {}, "SI-205": {}, "SI-206": {}, "SI-207": {},
+ "SI-208": {}, "SI-209": {}, "SI-210": {}, "SI-211": {}, "SI-212": {}, "SI-213": {}, "SK-BC": {},
+ "SK-BL": {}, "SK-KI": {}, "SK-NI": {}, "SK-PV": {}, "SK-TA": {},
+ "SK-TC": {}, "SK-ZI": {}, "SL-E": {}, "SL-N": {}, "SL-S": {},
+ "SL-W": {}, "SM-01": {}, "SM-02": {}, "SM-03": {}, "SM-04": {},
+ "SM-05": {}, "SM-06": {}, "SM-07": {}, "SM-08": {}, "SM-09": {},
+ "SN-DB": {}, "SN-DK": {}, "SN-FK": {}, "SN-KA": {}, "SN-KD": {},
+ "SN-KE": {}, "SN-KL": {}, "SN-LG": {}, "SN-MT": {}, "SN-SE": {},
+ "SN-SL": {}, "SN-TC": {}, "SN-TH": {}, "SN-ZG": {}, "SO-AW": {},
+ "SO-BK": {}, "SO-BN": {}, "SO-BR": {}, "SO-BY": {}, "SO-GA": {},
+ "SO-GE": {}, "SO-HI": {}, "SO-JD": {}, "SO-JH": {}, "SO-MU": {},
+ "SO-NU": {}, "SO-SA": {}, "SO-SD": {}, "SO-SH": {}, "SO-SO": {},
+ "SO-TO": {}, "SO-WO": {}, "SR-BR": {}, "SR-CM": {}, "SR-CR": {},
+ "SR-MA": {}, "SR-NI": {}, "SR-PM": {}, "SR-PR": {}, "SR-SA": {},
+ "SR-SI": {}, "SR-WA": {}, "SS-BN": {}, "SS-BW": {}, "SS-EC": {},
+ "SS-EE8": {}, "SS-EE": {}, "SS-EW": {}, "SS-JG": {}, "SS-LK": {}, "SS-NU": {},
+ "SS-UY": {}, "SS-WR": {}, "ST-01": {}, "ST-P": {}, "ST-S": {}, "SV-AH": {},
+ "SV-CA": {}, "SV-CH": {}, "SV-CU": {}, "SV-LI": {}, "SV-MO": {},
+ "SV-PA": {}, "SV-SA": {}, "SV-SM": {}, "SV-SO": {}, "SV-SS": {},
+ "SV-SV": {}, "SV-UN": {}, "SV-US": {}, "SY-DI": {}, "SY-DR": {},
+ "SY-DY": {}, "SY-HA": {}, "SY-HI": {}, "SY-HL": {}, "SY-HM": {},
+ "SY-ID": {}, "SY-LA": {}, "SY-QU": {}, "SY-RA": {}, "SY-RD": {},
+ "SY-SU": {}, "SY-TA": {}, "SZ-HH": {}, "SZ-LU": {}, "SZ-MA": {},
+ "SZ-SH": {}, "TD-BA": {}, "TD-BG": {}, "TD-BO": {}, "TD-CB": {},
+ "TD-EN": {}, "TD-GR": {}, "TD-HL": {}, "TD-KA": {}, "TD-LC": {},
+ "TD-LO": {}, "TD-LR": {}, "TD-MA": {}, "TD-MC": {}, "TD-ME": {},
+ "TD-MO": {}, "TD-ND": {}, "TD-OD": {}, "TD-SA": {}, "TD-SI": {},
+ "TD-TA": {}, "TD-TI": {}, "TD-WF": {}, "TG-C": {}, "TG-K": {},
+ "TG-M": {}, "TG-P": {}, "TG-S": {}, "TH-10": {}, "TH-11": {},
+ "TH-12": {}, "TH-13": {}, "TH-14": {}, "TH-15": {}, "TH-16": {},
+ "TH-17": {}, "TH-18": {}, "TH-19": {}, "TH-20": {}, "TH-21": {},
+ "TH-22": {}, "TH-23": {}, "TH-24": {}, "TH-25": {}, "TH-26": {},
+ "TH-27": {}, "TH-30": {}, "TH-31": {}, "TH-32": {}, "TH-33": {},
+ "TH-34": {}, "TH-35": {}, "TH-36": {}, "TH-37": {}, "TH-38": {}, "TH-39": {},
+ "TH-40": {}, "TH-41": {}, "TH-42": {}, "TH-43": {}, "TH-44": {},
+ "TH-45": {}, "TH-46": {}, "TH-47": {}, "TH-48": {}, "TH-49": {},
+ "TH-50": {}, "TH-51": {}, "TH-52": {}, "TH-53": {}, "TH-54": {},
+ "TH-55": {}, "TH-56": {}, "TH-57": {}, "TH-58": {}, "TH-60": {},
+ "TH-61": {}, "TH-62": {}, "TH-63": {}, "TH-64": {}, "TH-65": {},
+ "TH-66": {}, "TH-67": {}, "TH-70": {}, "TH-71": {}, "TH-72": {},
+ "TH-73": {}, "TH-74": {}, "TH-75": {}, "TH-76": {}, "TH-77": {},
+ "TH-80": {}, "TH-81": {}, "TH-82": {}, "TH-83": {}, "TH-84": {},
+ "TH-85": {}, "TH-86": {}, "TH-90": {}, "TH-91": {}, "TH-92": {},
+ "TH-93": {}, "TH-94": {}, "TH-95": {}, "TH-96": {}, "TH-S": {},
+ "TJ-GB": {}, "TJ-KT": {}, "TJ-SU": {}, "TJ-DU": {}, "TJ-RA": {}, "TL-AL": {}, "TL-AN": {},
+ "TL-BA": {}, "TL-BO": {}, "TL-CO": {}, "TL-DI": {}, "TL-ER": {},
+ "TL-LA": {}, "TL-LI": {}, "TL-MF": {}, "TL-MT": {}, "TL-OE": {},
+ "TL-VI": {}, "TM-A": {}, "TM-B": {}, "TM-D": {}, "TM-L": {},
+ "TM-M": {}, "TM-S": {}, "TN-11": {}, "TN-12": {}, "TN-13": {},
+ "TN-14": {}, "TN-21": {}, "TN-22": {}, "TN-23": {}, "TN-31": {},
+ "TN-32": {}, "TN-33": {}, "TN-34": {}, "TN-41": {}, "TN-42": {},
+ "TN-43": {}, "TN-51": {}, "TN-52": {}, "TN-53": {}, "TN-61": {},
+ "TN-71": {}, "TN-72": {}, "TN-73": {}, "TN-81": {}, "TN-82": {},
+ "TN-83": {}, "TO-01": {}, "TO-02": {}, "TO-03": {}, "TO-04": {},
+ "TO-05": {}, "TR-01": {}, "TR-02": {}, "TR-03": {}, "TR-04": {},
+ "TR-05": {}, "TR-06": {}, "TR-07": {}, "TR-08": {}, "TR-09": {},
+ "TR-10": {}, "TR-11": {}, "TR-12": {}, "TR-13": {}, "TR-14": {},
+ "TR-15": {}, "TR-16": {}, "TR-17": {}, "TR-18": {}, "TR-19": {},
+ "TR-20": {}, "TR-21": {}, "TR-22": {}, "TR-23": {}, "TR-24": {},
+ "TR-25": {}, "TR-26": {}, "TR-27": {}, "TR-28": {}, "TR-29": {},
+ "TR-30": {}, "TR-31": {}, "TR-32": {}, "TR-33": {}, "TR-34": {},
+ "TR-35": {}, "TR-36": {}, "TR-37": {}, "TR-38": {}, "TR-39": {},
+ "TR-40": {}, "TR-41": {}, "TR-42": {}, "TR-43": {}, "TR-44": {},
+ "TR-45": {}, "TR-46": {}, "TR-47": {}, "TR-48": {}, "TR-49": {},
+ "TR-50": {}, "TR-51": {}, "TR-52": {}, "TR-53": {}, "TR-54": {},
+ "TR-55": {}, "TR-56": {}, "TR-57": {}, "TR-58": {}, "TR-59": {},
+ "TR-60": {}, "TR-61": {}, "TR-62": {}, "TR-63": {}, "TR-64": {},
+ "TR-65": {}, "TR-66": {}, "TR-67": {}, "TR-68": {}, "TR-69": {},
+ "TR-70": {}, "TR-71": {}, "TR-72": {}, "TR-73": {}, "TR-74": {},
+ "TR-75": {}, "TR-76": {}, "TR-77": {}, "TR-78": {}, "TR-79": {},
+ "TR-80": {}, "TR-81": {}, "TT-ARI": {}, "TT-CHA": {}, "TT-CTT": {},
+ "TT-DMN": {}, "TT-ETO": {}, "TT-MRC": {}, "TT-TOB": {}, "TT-PED": {}, "TT-POS": {}, "TT-PRT": {},
+ "TT-PTF": {}, "TT-RCM": {}, "TT-SFO": {}, "TT-SGE": {}, "TT-SIP": {},
+ "TT-SJL": {}, "TT-TUP": {}, "TT-WTO": {}, "TV-FUN": {}, "TV-NIT": {},
+ "TV-NKF": {}, "TV-NKL": {}, "TV-NMA": {}, "TV-NMG": {}, "TV-NUI": {},
+ "TV-VAI": {}, "TW-CHA": {}, "TW-CYI": {}, "TW-CYQ": {}, "TW-KIN": {}, "TW-HSQ": {},
+ "TW-HSZ": {}, "TW-HUA": {}, "TW-LIE": {}, "TW-ILA": {}, "TW-KEE": {}, "TW-KHH": {},
+ "TW-KHQ": {}, "TW-MIA": {}, "TW-NAN": {}, "TW-NWT": {}, "TW-PEN": {}, "TW-PIF": {},
+ "TW-TAO": {}, "TW-TNN": {}, "TW-TNQ": {}, "TW-TPE": {}, "TW-TPQ": {},
+ "TW-TTT": {}, "TW-TXG": {}, "TW-TXQ": {}, "TW-YUN": {}, "TZ-01": {},
+ "TZ-02": {}, "TZ-03": {}, "TZ-04": {}, "TZ-05": {}, "TZ-06": {},
+ "TZ-07": {}, "TZ-08": {}, "TZ-09": {}, "TZ-10": {}, "TZ-11": {},
+ "TZ-12": {}, "TZ-13": {}, "TZ-14": {}, "TZ-15": {}, "TZ-16": {},
+ "TZ-17": {}, "TZ-18": {}, "TZ-19": {}, "TZ-20": {}, "TZ-21": {},
+ "TZ-22": {}, "TZ-23": {}, "TZ-24": {}, "TZ-25": {}, "TZ-26": {}, "TZ-27": {}, "TZ-28": {}, "TZ-29": {}, "TZ-30": {}, "TZ-31": {},
+ "UA-05": {}, "UA-07": {}, "UA-09": {}, "UA-12": {}, "UA-14": {},
+ "UA-18": {}, "UA-21": {}, "UA-23": {}, "UA-26": {}, "UA-30": {},
+ "UA-32": {}, "UA-35": {}, "UA-40": {}, "UA-43": {}, "UA-46": {},
+ "UA-48": {}, "UA-51": {}, "UA-53": {}, "UA-56": {}, "UA-59": {},
+ "UA-61": {}, "UA-63": {}, "UA-65": {}, "UA-68": {}, "UA-71": {},
+ "UA-74": {}, "UA-77": {}, "UG-101": {}, "UG-102": {}, "UG-103": {},
+ "UG-104": {}, "UG-105": {}, "UG-106": {}, "UG-107": {}, "UG-108": {},
+ "UG-109": {}, "UG-110": {}, "UG-111": {}, "UG-112": {}, "UG-113": {},
+ "UG-114": {}, "UG-115": {}, "UG-116": {}, "UG-201": {}, "UG-202": {},
+ "UG-203": {}, "UG-204": {}, "UG-205": {}, "UG-206": {}, "UG-207": {},
+ "UG-208": {}, "UG-209": {}, "UG-210": {}, "UG-211": {}, "UG-212": {},
+ "UG-213": {}, "UG-214": {}, "UG-215": {}, "UG-216": {}, "UG-217": {},
+ "UG-218": {}, "UG-219": {}, "UG-220": {}, "UG-221": {}, "UG-222": {},
+ "UG-223": {}, "UG-224": {}, "UG-301": {}, "UG-302": {}, "UG-303": {},
+ "UG-304": {}, "UG-305": {}, "UG-306": {}, "UG-307": {}, "UG-308": {},
+ "UG-309": {}, "UG-310": {}, "UG-311": {}, "UG-312": {}, "UG-313": {},
+ "UG-314": {}, "UG-315": {}, "UG-316": {}, "UG-317": {}, "UG-318": {},
+ "UG-319": {}, "UG-320": {}, "UG-321": {}, "UG-401": {}, "UG-402": {},
+ "UG-403": {}, "UG-404": {}, "UG-405": {}, "UG-406": {}, "UG-407": {},
+ "UG-408": {}, "UG-409": {}, "UG-410": {}, "UG-411": {}, "UG-412": {},
+ "UG-413": {}, "UG-414": {}, "UG-415": {}, "UG-416": {}, "UG-417": {},
+ "UG-418": {}, "UG-419": {}, "UG-C": {}, "UG-E": {}, "UG-N": {},
+ "UG-W": {}, "UG-322": {}, "UG-323": {}, "UG-420": {}, "UG-117": {},
+ "UG-118": {}, "UG-225": {}, "UG-120": {}, "UG-226": {},
+ "UG-121": {}, "UG-122": {}, "UG-227": {}, "UG-421": {},
+ "UG-325": {}, "UG-228": {}, "UG-123": {}, "UG-422": {},
+ "UG-326": {}, "UG-229": {}, "UG-124": {}, "UG-423": {},
+ "UG-230": {}, "UG-327": {}, "UG-424": {}, "UG-328": {},
+ "UG-425": {}, "UG-426": {}, "UG-330": {},
+ "UM-67": {}, "UM-71": {}, "UM-76": {}, "UM-79": {},
+ "UM-81": {}, "UM-84": {}, "UM-86": {}, "UM-89": {}, "UM-95": {},
+ "US-AK": {}, "US-AL": {}, "US-AR": {}, "US-AS": {}, "US-AZ": {},
+ "US-CA": {}, "US-CO": {}, "US-CT": {}, "US-DC": {}, "US-DE": {},
+ "US-FL": {}, "US-GA": {}, "US-GU": {}, "US-HI": {}, "US-IA": {},
+ "US-ID": {}, "US-IL": {}, "US-IN": {}, "US-KS": {}, "US-KY": {},
+ "US-LA": {}, "US-MA": {}, "US-MD": {}, "US-ME": {}, "US-MI": {},
+ "US-MN": {}, "US-MO": {}, "US-MP": {}, "US-MS": {}, "US-MT": {},
+ "US-NC": {}, "US-ND": {}, "US-NE": {}, "US-NH": {}, "US-NJ": {},
+ "US-NM": {}, "US-NV": {}, "US-NY": {}, "US-OH": {}, "US-OK": {},
+ "US-OR": {}, "US-PA": {}, "US-PR": {}, "US-RI": {}, "US-SC": {},
+ "US-SD": {}, "US-TN": {}, "US-TX": {}, "US-UM": {}, "US-UT": {},
+ "US-VA": {}, "US-VI": {}, "US-VT": {}, "US-WA": {}, "US-WI": {},
+ "US-WV": {}, "US-WY": {}, "UY-AR": {}, "UY-CA": {}, "UY-CL": {},
+ "UY-CO": {}, "UY-DU": {}, "UY-FD": {}, "UY-FS": {}, "UY-LA": {},
+ "UY-MA": {}, "UY-MO": {}, "UY-PA": {}, "UY-RN": {}, "UY-RO": {},
+ "UY-RV": {}, "UY-SA": {}, "UY-SJ": {}, "UY-SO": {}, "UY-TA": {},
+ "UY-TT": {}, "UZ-AN": {}, "UZ-BU": {}, "UZ-FA": {}, "UZ-JI": {},
+ "UZ-NG": {}, "UZ-NW": {}, "UZ-QA": {}, "UZ-QR": {}, "UZ-SA": {},
+ "UZ-SI": {}, "UZ-SU": {}, "UZ-TK": {}, "UZ-TO": {}, "UZ-XO": {},
+ "VC-01": {}, "VC-02": {}, "VC-03": {}, "VC-04": {}, "VC-05": {},
+ "VC-06": {}, "VE-A": {}, "VE-B": {}, "VE-C": {}, "VE-D": {},
+ "VE-E": {}, "VE-F": {}, "VE-G": {}, "VE-H": {}, "VE-I": {},
+ "VE-J": {}, "VE-K": {}, "VE-L": {}, "VE-M": {}, "VE-N": {},
+ "VE-O": {}, "VE-P": {}, "VE-R": {}, "VE-S": {}, "VE-T": {},
+ "VE-U": {}, "VE-V": {}, "VE-W": {}, "VE-X": {}, "VE-Y": {},
+ "VE-Z": {}, "VN-01": {}, "VN-02": {}, "VN-03": {}, "VN-04": {},
+ "VN-05": {}, "VN-06": {}, "VN-07": {}, "VN-09": {}, "VN-13": {},
+ "VN-14": {}, "VN-15": {}, "VN-18": {}, "VN-20": {}, "VN-21": {},
+ "VN-22": {}, "VN-23": {}, "VN-24": {}, "VN-25": {}, "VN-26": {},
+ "VN-27": {}, "VN-28": {}, "VN-29": {}, "VN-30": {}, "VN-31": {},
+ "VN-32": {}, "VN-33": {}, "VN-34": {}, "VN-35": {}, "VN-36": {},
+ "VN-37": {}, "VN-39": {}, "VN-40": {}, "VN-41": {}, "VN-43": {},
+ "VN-44": {}, "VN-45": {}, "VN-46": {}, "VN-47": {}, "VN-49": {},
+ "VN-50": {}, "VN-51": {}, "VN-52": {}, "VN-53": {}, "VN-54": {},
+ "VN-55": {}, "VN-56": {}, "VN-57": {}, "VN-58": {}, "VN-59": {},
+ "VN-61": {}, "VN-63": {}, "VN-66": {}, "VN-67": {}, "VN-68": {},
+ "VN-69": {}, "VN-70": {}, "VN-71": {}, "VN-72": {}, "VN-73": {},
+ "VN-CT": {}, "VN-DN": {}, "VN-HN": {}, "VN-HP": {}, "VN-SG": {},
+ "VU-MAP": {}, "VU-PAM": {}, "VU-SAM": {}, "VU-SEE": {}, "VU-TAE": {},
+ "VU-TOB": {}, "WF-SG": {}, "WF-UV": {}, "WS-AA": {}, "WS-AL": {}, "WS-AT": {}, "WS-FA": {},
+ "WS-GE": {}, "WS-GI": {}, "WS-PA": {}, "WS-SA": {}, "WS-TU": {},
+ "WS-VF": {}, "WS-VS": {}, "YE-AB": {}, "YE-AD": {}, "YE-AM": {},
+ "YE-BA": {}, "YE-DA": {}, "YE-DH": {}, "YE-HD": {}, "YE-HJ": {}, "YE-HU": {},
+ "YE-IB": {}, "YE-JA": {}, "YE-LA": {}, "YE-MA": {}, "YE-MR": {},
+ "YE-MU": {}, "YE-MW": {}, "YE-RA": {}, "YE-SA": {}, "YE-SD": {}, "YE-SH": {},
+ "YE-SN": {}, "YE-TA": {}, "ZA-EC": {}, "ZA-FS": {}, "ZA-GP": {},
+ "ZA-LP": {}, "ZA-MP": {}, "ZA-NC": {}, "ZA-NW": {}, "ZA-WC": {},
+ "ZA-ZN": {}, "ZA-KZN": {}, "ZM-01": {}, "ZM-02": {}, "ZM-03": {}, "ZM-04": {},
+ "ZM-05": {}, "ZM-06": {}, "ZM-07": {}, "ZM-08": {}, "ZM-09": {}, "ZM-10": {},
+ "ZW-BU": {}, "ZW-HA": {}, "ZW-MA": {}, "ZW-MC": {}, "ZW-ME": {},
+ "ZW-MI": {}, "ZW-MN": {}, "ZW-MS": {}, "ZW-MV": {}, "ZW-MW": {},
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/currency_codes.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/currency_codes.go
new file mode 100644
index 00000000000..d0317f89ccb
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/currency_codes.go
@@ -0,0 +1,79 @@
+package validator
+
+var iso4217 = map[string]struct{}{
+ "AFN": {}, "EUR": {}, "ALL": {}, "DZD": {}, "USD": {},
+ "AOA": {}, "XCD": {}, "ARS": {}, "AMD": {}, "AWG": {},
+ "AUD": {}, "AZN": {}, "BSD": {}, "BHD": {}, "BDT": {},
+ "BBD": {}, "BYN": {}, "BZD": {}, "XOF": {}, "BMD": {},
+ "INR": {}, "BTN": {}, "BOB": {}, "BOV": {}, "BAM": {},
+ "BWP": {}, "NOK": {}, "BRL": {}, "BND": {}, "BGN": {},
+ "BIF": {}, "CVE": {}, "KHR": {}, "XAF": {}, "CAD": {},
+ "KYD": {}, "CLP": {}, "CLF": {}, "CNY": {}, "COP": {},
+ "COU": {}, "KMF": {}, "CDF": {}, "NZD": {}, "CRC": {},
+ "HRK": {}, "CUP": {}, "CUC": {}, "ANG": {}, "CZK": {},
+ "DKK": {}, "DJF": {}, "DOP": {}, "EGP": {}, "SVC": {},
+ "ERN": {}, "SZL": {}, "ETB": {}, "FKP": {}, "FJD": {},
+ "XPF": {}, "GMD": {}, "GEL": {}, "GHS": {}, "GIP": {},
+ "GTQ": {}, "GBP": {}, "GNF": {}, "GYD": {}, "HTG": {},
+ "HNL": {}, "HKD": {}, "HUF": {}, "ISK": {}, "IDR": {},
+ "XDR": {}, "IRR": {}, "IQD": {}, "ILS": {}, "JMD": {},
+ "JPY": {}, "JOD": {}, "KZT": {}, "KES": {}, "KPW": {},
+ "KRW": {}, "KWD": {}, "KGS": {}, "LAK": {}, "LBP": {},
+ "LSL": {}, "ZAR": {}, "LRD": {}, "LYD": {}, "CHF": {},
+ "MOP": {}, "MKD": {}, "MGA": {}, "MWK": {}, "MYR": {},
+ "MVR": {}, "MRU": {}, "MUR": {}, "XUA": {}, "MXN": {},
+ "MXV": {}, "MDL": {}, "MNT": {}, "MAD": {}, "MZN": {},
+ "MMK": {}, "NAD": {}, "NPR": {}, "NIO": {}, "NGN": {},
+ "OMR": {}, "PKR": {}, "PAB": {}, "PGK": {}, "PYG": {},
+ "PEN": {}, "PHP": {}, "PLN": {}, "QAR": {}, "RON": {},
+ "RUB": {}, "RWF": {}, "SHP": {}, "WST": {}, "STN": {},
+ "SAR": {}, "RSD": {}, "SCR": {}, "SLL": {}, "SGD": {},
+ "XSU": {}, "SBD": {}, "SOS": {}, "SSP": {}, "LKR": {},
+ "SDG": {}, "SRD": {}, "SEK": {}, "CHE": {}, "CHW": {},
+ "SYP": {}, "TWD": {}, "TJS": {}, "TZS": {}, "THB": {},
+ "TOP": {}, "TTD": {}, "TND": {}, "TRY": {}, "TMT": {},
+ "UGX": {}, "UAH": {}, "AED": {}, "USN": {}, "UYU": {},
+ "UYI": {}, "UYW": {}, "UZS": {}, "VUV": {}, "VES": {},
+ "VND": {}, "YER": {}, "ZMW": {}, "ZWL": {}, "XBA": {},
+ "XBB": {}, "XBC": {}, "XBD": {}, "XTS": {}, "XXX": {},
+ "XAU": {}, "XPD": {}, "XPT": {}, "XAG": {},
+}
+
+var iso4217_numeric = map[int]struct{}{
+ 8: {}, 12: {}, 32: {}, 36: {}, 44: {},
+ 48: {}, 50: {}, 51: {}, 52: {}, 60: {},
+ 64: {}, 68: {}, 72: {}, 84: {}, 90: {},
+ 96: {}, 104: {}, 108: {}, 116: {}, 124: {},
+ 132: {}, 136: {}, 144: {}, 152: {}, 156: {},
+ 170: {}, 174: {}, 188: {}, 191: {}, 192: {},
+ 203: {}, 208: {}, 214: {}, 222: {}, 230: {},
+ 232: {}, 238: {}, 242: {}, 262: {}, 270: {},
+ 292: {}, 320: {}, 324: {}, 328: {}, 332: {},
+ 340: {}, 344: {}, 348: {}, 352: {}, 356: {},
+ 360: {}, 364: {}, 368: {}, 376: {}, 388: {},
+ 392: {}, 398: {}, 400: {}, 404: {}, 408: {},
+ 410: {}, 414: {}, 417: {}, 418: {}, 422: {},
+ 426: {}, 430: {}, 434: {}, 446: {}, 454: {},
+ 458: {}, 462: {}, 480: {}, 484: {}, 496: {},
+ 498: {}, 504: {}, 512: {}, 516: {}, 524: {},
+ 532: {}, 533: {}, 548: {}, 554: {}, 558: {},
+ 566: {}, 578: {}, 586: {}, 590: {}, 598: {},
+ 600: {}, 604: {}, 608: {}, 634: {}, 643: {},
+ 646: {}, 654: {}, 682: {}, 690: {}, 694: {},
+ 702: {}, 704: {}, 706: {}, 710: {}, 728: {},
+ 748: {}, 752: {}, 756: {}, 760: {}, 764: {},
+ 776: {}, 780: {}, 784: {}, 788: {}, 800: {},
+ 807: {}, 818: {}, 826: {}, 834: {}, 840: {},
+ 858: {}, 860: {}, 882: {}, 886: {}, 901: {},
+ 927: {}, 928: {}, 929: {}, 930: {}, 931: {},
+ 932: {}, 933: {}, 934: {}, 936: {}, 938: {},
+ 940: {}, 941: {}, 943: {}, 944: {}, 946: {},
+ 947: {}, 948: {}, 949: {}, 950: {}, 951: {},
+ 952: {}, 953: {}, 955: {}, 956: {}, 957: {},
+ 958: {}, 959: {}, 960: {}, 961: {}, 962: {},
+ 963: {}, 964: {}, 965: {}, 967: {}, 968: {},
+ 969: {}, 970: {}, 971: {}, 972: {}, 973: {},
+ 975: {}, 976: {}, 977: {}, 978: {}, 979: {},
+ 980: {}, 981: {}, 984: {}, 985: {}, 986: {},
+ 990: {}, 994: {}, 997: {}, 999: {},
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/doc.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/doc.go
index 291c629d443..90a8ade653c 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/doc.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/doc.go
@@ -7,7 +7,15 @@ and has the ability to dive into arrays and maps of any type.
see more examples https://github.com/go-playground/validator/tree/master/_examples
-Validation Functions Return Type error
+# Singleton
+
+Validator is designed to be thread-safe and used as a singleton instance.
+It caches information about your struct and validations,
+in essence only parsing your validation tags once per struct type.
+Using multiple instances neglects the benefit of caching.
+The not thread-safe functions are explicitly marked as such in the documentation.
+
+# Validation Functions Return Type error
Doing things this way is actually the way the standard library does, see the
file.Open method here:
@@ -26,7 +34,7 @@ if the error returned is not nil, and if it's not check if error is
InvalidValidationError ( if necessary, most of the time it isn't ) type cast
it to type ValidationErrors like so err.(validator.ValidationErrors).
-Custom Validation Functions
+# Custom Validation Functions
Custom Validation functions can be added. Example:
@@ -44,21 +52,21 @@ Custom Validation functions can be added. Example:
// NOTES: using the same tag name as an existing function
// will overwrite the existing one
-Cross-Field Validation
+# Cross-Field Validation
Cross-Field Validation can be done via the following tags:
- - eqfield
- - nefield
- - gtfield
- - gtefield
- - ltfield
- - ltefield
- - eqcsfield
- - necsfield
- - gtcsfield
- - gtecsfield
- - ltcsfield
- - ltecsfield
+ - eqfield
+ - nefield
+ - gtfield
+ - gtefield
+ - ltfield
+ - ltefield
+ - eqcsfield
+ - necsfield
+ - gtcsfield
+ - gtecsfield
+ - ltcsfield
+ - ltecsfield
If, however, some custom cross-field validation is required, it can be done
using a custom validation.
@@ -98,7 +106,7 @@ used "eqcsfield" it could be multiple levels down. Example:
// whatever you pass, struct, field...
// when calling validate.Field(field, tag) val will be nil
-Multiple Validators
+# Multiple Validators
Multiple validators on a field will process in the order defined. Example:
@@ -116,7 +124,7 @@ Bad Validator definitions are not handled by the library. Example:
// this definition of min max will never succeed
-Using Validator Tags
+# Using Validator Tags
Baked In Cross-Field validation only compares fields on the same struct.
If Cross-Field + Cross-Struct validation is needed you should implement your
@@ -138,24 +146,22 @@ use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
so the above will become excludesall=0x7C
type Test struct {
- Field `validate:"excludesall=|"` // BAD! Do not include a a pipe!
+ Field `validate:"excludesall=|"` // BAD! Do not include a pipe!
Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
}
-
-Baked In Validators and Tags
+# Baked In Validators and Tags
Here is a list of the current built in validators:
-
-Skip Field
+# Skip Field
Tells the validation to skip this struct field; this is particularly
handy in ignoring embedded structs from being validated. (Usage: -)
- Usage: -
+ Usage: -
-Or Operator
+# Or Operator
This is the 'or' operator allowing multiple validators to be used and
accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba
@@ -164,7 +170,7 @@ colors to be accepted. This can also be combined with 'and' for example
Usage: |
-StructOnly
+# StructOnly
When a field that is a nested struct is encountered, and contains this flag
any validation on the nested struct will be run, but none of the nested
@@ -174,13 +180,13 @@ NOTE: only "required" and "omitempty" can be used on a struct itself.
Usage: structonly
-NoStructLevel
+# NoStructLevel
Same as structonly tag except that any struct level validations will not run.
Usage: nostructlevel
-Omit Empty
+# Omit Empty
Allows conditional validation, for example if a field is not set with
a value (Determined by the "required" validator) then other validation
@@ -188,7 +194,14 @@ such as min or max won't run, but if a value is set validation will run.
Usage: omitempty
-Dive
+# Omit Nil
+
+Allows skipping the validation if the value is nil (same as omitempty, but
+only for the nil-values).
+
+ Usage: omitnil
+
+# Dive
This tells the validator to dive into a slice, array or map and validate that
level of the slice, array or map with the validation tags that follow.
@@ -224,33 +237,67 @@ require another 'keys' and 'endkeys' tag. These tags are only valid for maps.
Example #1
- map[string]string with validation tag "gt=0,dive,keys,eg=1|eq=2,endkeys,required"
+ map[string]string with validation tag "gt=0,dive,keys,eq=1|eq=2,endkeys,required"
// gt=0 will be applied to the map itself
- // eg=1|eq=2 will be applied to the map keys
+ // eq=1|eq=2 will be applied to the map keys
// required will be applied to map values
Example #2
map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required"
// gt=0 will be applied to the map itself
- // eg=1|eq=2 will be applied to each array element in the the map keys
+ // eq=1|eq=2 will be applied to each array element in the map keys
// required will be applied to map values
-Required
+# Required
This validates that the value is not the data types default zero value.
For numbers ensures value is not zero. For strings ensures value is
-not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+not "". For booleans ensures value is not false. For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil. For structs ensures value is not the zero value when using WithRequiredStructEnabled.
Usage: required
-Required With
+# Required If
+
+The field under validation must be present and not empty only if all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
+
+ Usage: required_if
+
+Examples:
+
+ // require the field if the Field1 is equal to the parameter given:
+ Usage: required_if=Field1 foobar
+
+ // require the field if the Field1 and Field2 is equal to the value respectively:
+ Usage: required_if=Field1 foo Field2 bar
+
+# Required Unless
+
+The field under validation must be present and not empty unless all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
+
+ Usage: required_unless
+
+Examples:
+
+ // require the field unless the Field1 is equal to the parameter given:
+ Usage: required_unless=Field1 foobar
+
+ // require the field unless the Field1 and Field2 is equal to the value respectively:
+ Usage: required_unless=Field1 foo Field2 bar
+
+# Required With
The field under validation must be present and not empty only if any
of the other specified fields are present. For strings ensures value is
not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
Usage: required_with
@@ -262,12 +309,12 @@ Examples:
// require the field if the Field1 or Field2 is present:
Usage: required_with=Field1 Field2
-Required With All
+# Required With All
The field under validation must be present and not empty only if all
of the other specified fields are present. For strings ensures value is
not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
Usage: required_with_all
@@ -276,12 +323,12 @@ Example:
// require the field if the Field1 and Field2 is present:
Usage: required_with_all=Field1 Field2
-Required Without
+# Required Without
The field under validation must be present and not empty only when any
of the other specified fields are not present. For strings ensures value is
not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
Usage: required_without
@@ -293,12 +340,12 @@ Examples:
// require the field if the Field1 or Field2 is not present:
Usage: required_without=Field1 Field2
-Required Without All
+# Required Without All
The field under validation must be present and not empty only when all
of the other specified fields are not present. For strings ensures value is
not "". For slices, maps, pointers, interfaces, channels and functions
-ensures the value is not nil.
+ensures the value is not nil. For structs ensures value is not the zero value.
Usage: required_without_all
@@ -307,57 +354,136 @@ Example:
// require the field if the Field1 and Field2 is not present:
Usage: required_without_all=Field1 Field2
-Is Default
+# Excluded If
+
+The field under validation must not be present or not empty only if all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
+
+ Usage: excluded_if
+
+Examples:
+
+ // exclude the field if the Field1 is equal to the parameter given:
+ Usage: excluded_if=Field1 foobar
+
+ // exclude the field if the Field1 and Field2 is equal to the value respectively:
+ Usage: excluded_if=Field1 foo Field2 bar
+
+# Excluded Unless
+
+The field under validation must not be present or empty unless all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil. For structs ensures value is not the zero value.
+
+ Usage: excluded_unless
+
+Examples:
+
+ // exclude the field unless the Field1 is equal to the parameter given:
+ Usage: excluded_unless=Field1 foobar
+
+ // exclude the field unless the Field1 and Field2 is equal to the value respectively:
+ Usage: excluded_unless=Field1 foo Field2 bar
+
+# Is Default
This validates that the value is the default value and is almost the
opposite of required.
Usage: isdefault
-Length
+# Length
For numbers, length will ensure that the value is
equal to the parameter given. For strings, it checks that
the string length is exactly that number of characters. For slices,
arrays, and maps, validates the number of items.
+Example #1
+
Usage: len=10
-Maximum
+Example #2 (time.Duration)
+
+For time.Duration, len will ensure that the value is equal to the duration given
+in the parameter.
+
+ Usage: len=1h30m
+
+# Maximum
For numbers, max will ensure that the value is
less than or equal to the parameter given. For strings, it checks
that the string length is at most that number of characters. For
slices, arrays, and maps, validates the number of items.
+Example #1
+
Usage: max=10
-Minimum
+Example #2 (time.Duration)
+
+For time.Duration, max will ensure that the value is less than or equal to the
+duration given in the parameter.
+
+ Usage: max=1h30m
+
+# Minimum
For numbers, min will ensure that the value is
greater or equal to the parameter given. For strings, it checks that
the string length is at least that number of characters. For slices,
arrays, and maps, validates the number of items.
+Example #1
+
Usage: min=10
-Equals
+Example #2 (time.Duration)
+
+For time.Duration, min will ensure that the value is greater than or equal to
+the duration given in the parameter.
+
+ Usage: min=1h30m
+
+# Equals
For strings & numbers, eq will ensure that the value is
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
+Example #1
+
Usage: eq=10
-Not Equal
+Example #2 (time.Duration)
+
+For time.Duration, eq will ensure that the value is equal to the duration given
+in the parameter.
+
+ Usage: eq=1h30m
+
+# Not Equal
For strings & numbers, ne will ensure that the value is not
equal to the parameter given. For slices, arrays, and maps,
validates the number of items.
+Example #1
+
Usage: ne=10
-One Of
+Example #2 (time.Duration)
+
+For time.Duration, ne will ensure that the value is not equal to the duration
+given in the parameter.
+
+ Usage: ne=1h30m
+
+# One Of
For strings, ints, and uints, oneof will ensure that the value
is one of the values in the parameter. The parameter should be
@@ -365,11 +491,11 @@ a list of values separated by whitespace. Values may be
strings or numbers. To match strings with spaces in them, include
the target string between single quotes.
- Usage: oneof=red green
- oneof='red green' 'blue yellow'
- oneof=5 7 9
+ Usage: oneof=red green
+ oneof='red green' 'blue yellow'
+ oneof=5 7 9
-Greater Than
+# Greater Than
For numbers, this will ensure that the value is greater than the
parameter given. For strings, it checks that the string length
@@ -386,10 +512,16 @@ For time.Time ensures the time value is greater than time.Now.UTC().
Usage: gt
-Greater Than or Equal
+Example #3 (time.Duration)
-Same as 'min' above. Kept both to make terminology with 'len' easier.
+For time.Duration, gt will ensure that the value is greater than the duration
+given in the parameter.
+
+ Usage: gt=1h30m
+# Greater Than or Equal
+
+Same as 'min' above. Kept both to make terminology with 'len' easier.
Example #1
@@ -401,7 +533,14 @@ For time.Time ensures the time value is greater than or equal to time.Now.UTC().
Usage: gte
-Less Than
+Example #3 (time.Duration)
+
+For time.Duration, gte will ensure that the value is greater than or equal to
+the duration given in the parameter.
+
+ Usage: gte=1h30m
+
+# Less Than
For numbers, this will ensure that the value is less than the parameter given.
For strings, it checks that the string length is less than that number of
@@ -412,11 +551,19 @@ Example #1
Usage: lt=10
Example #2 (time.Time)
+
For time.Time ensures the time value is less than time.Now.UTC().
Usage: lt
-Less Than or Equal
+Example #3 (time.Duration)
+
+For time.Duration, lt will ensure that the value is less than the duration given
+in the parameter.
+
+ Usage: lt=1h30m
+
+# Less Than or Equal
Same as 'max' above. Kept both to make terminology with 'len' easier.
@@ -430,7 +577,14 @@ For time.Time ensures the time value is less than or equal to time.Now.UTC().
Usage: lte
-Field Equals Another Field
+Example #3 (time.Duration)
+
+For time.Duration, lte will ensure that the value is less than or equal to the
+duration given in the parameter.
+
+ Usage: lte=1h30m
+
+# Field Equals Another Field
This will validate the field value against another fields value either within
a struct or passed in field.
@@ -452,7 +606,7 @@ to the top level struct.
Usage: eqcsfield=InnerStructField.Field)
-Field Does Not Equal Another Field
+# Field Does Not Equal Another Field
This will validate the field value against another fields value either within
a struct or passed in field.
@@ -474,11 +628,11 @@ relative to the top level struct.
Usage: necsfield=InnerStructField.Field
-Field Greater Than Another Field
+# Field Greater Than Another Field
-Only valid for Numbers and time.Time types, this will validate the field value
-against another fields value either within a struct or passed in field.
-usage examples are for validation of a Start and End date:
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another fields value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -490,19 +644,18 @@ Example #2:
// Validating by field:
validate.VarWithValue(start, end, "gtfield")
-
-Field Greater Than Another Relative Field
+# Field Greater Than Another Relative Field
This does the same as gtfield except that it validates the field provided
relative to the top level struct.
Usage: gtcsfield=InnerStructField.Field
-Field Greater Than or Equal To Another Field
+# Field Greater Than or Equal To Another Field
-Only valid for Numbers and time.Time types, this will validate the field value
-against another fields value either within a struct or passed in field.
-usage examples are for validation of a Start and End date:
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another fields value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -514,18 +667,18 @@ Example #2:
// Validating by field:
validate.VarWithValue(start, end, "gtefield")
-Field Greater Than or Equal To Another Relative Field
+# Field Greater Than or Equal To Another Relative Field
This does the same as gtefield except that it validates the field provided relative
to the top level struct.
Usage: gtecsfield=InnerStructField.Field
-Less Than Another Field
+# Less Than Another Field
-Only valid for Numbers and time.Time types, this will validate the field value
-against another fields value either within a struct or passed in field.
-usage examples are for validation of a Start and End date:
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another fields value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -537,18 +690,18 @@ Example #2:
// Validating by field:
validate.VarWithValue(start, end, "ltfield")
-Less Than Another Relative Field
+# Less Than Another Relative Field
This does the same as ltfield except that it validates the field provided relative
to the top level struct.
Usage: ltcsfield=InnerStructField.Field
-Less Than or Equal To Another Field
+# Less Than or Equal To Another Field
-Only valid for Numbers and time.Time types, this will validate the field value
-against another fields value either within a struct or passed in field.
-usage examples are for validation of a Start and End date:
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another fields value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
Example #1:
@@ -560,14 +713,14 @@ Example #2:
// Validating by field:
validate.VarWithValue(start, end, "ltefield")
-Less Than or Equal To Another Relative Field
+# Less Than or Equal To Another Relative Field
This does the same as ltefield except that it validates the field provided relative
to the top level struct.
Usage: ltecsfield=InnerStructField.Field
-Field Contains Another Field
+# Field Contains Another Field
This does the same as contains except for struct fields. It should only be used
with string types. See the behavior of reflect.Value.String() for behavior on
@@ -575,7 +728,7 @@ other types.
Usage: containsfield=InnerStructField.Field
-Field Excludes Another Field
+# Field Excludes Another Field
This does the same as excludes except for struct fields. It should only be used
with string types. See the behavior of reflect.Value.String() for behavior on
@@ -583,7 +736,7 @@ other types.
Usage: excludesfield=InnerStructField.Field
-Unique
+# Unique
For arrays & slices, unique will ensure that there are no duplicates.
For maps, unique will ensure that there are no duplicate values.
@@ -596,31 +749,44 @@ in a field of the struct specified via a parameter.
// For slices of struct:
Usage: unique=field
-Alpha Only
+# Alpha Only
This validates that a string value contains ASCII alpha characters only
Usage: alpha
-Alphanumeric
+# Alphanumeric
This validates that a string value contains ASCII alphanumeric characters only
Usage: alphanum
-Alpha Unicode
+# Alpha Unicode
This validates that a string value contains unicode alpha characters only
Usage: alphaunicode
-Alphanumeric Unicode
+# Alphanumeric Unicode
This validates that a string value contains unicode alphanumeric characters only
Usage: alphanumunicode
-Numeric
+# Boolean
+
+This validates that a string value can successfully be parsed into a boolean with strconv.ParseBool
+
+ Usage: boolean
+
+# Number
+
+This validates that a string value contains number values only.
+For integers or float it returns true.
+
+ Usage: number
+
+# Numeric
This validates that a string value contains a basic numeric value.
basic excludes exponents etc...
@@ -628,56 +794,63 @@ for integers or float it returns true.
Usage: numeric
-Hexadecimal String
+# Hexadecimal String
This validates that a string value contains a valid hexadecimal.
Usage: hexadecimal
-Hexcolor String
+# Hexcolor String
This validates that a string value contains a valid hex color including
hashtag (#)
- Usage: hexcolor
+ Usage: hexcolor
-Lowercase String
+# Lowercase String
This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string.
Usage: lowercase
-Uppercase String
+# Uppercase String
This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string.
Usage: uppercase
-RGB String
+# RGB String
This validates that a string value contains a valid rgb color
Usage: rgb
-RGBA String
+# RGBA String
This validates that a string value contains a valid rgba color
Usage: rgba
-HSL String
+# HSL String
This validates that a string value contains a valid hsl color
Usage: hsl
-HSLA String
+# HSLA String
This validates that a string value contains a valid hsla color
Usage: hsla
-E-mail String
+# E.164 Phone Number String
+
+This validates that a string value contains a valid E.164 Phone number
+https://en.wikipedia.org/wiki/E.164 (ex. +1123456789)
+
+ Usage: e164
+
+# E-mail String
This validates that a string value contains a valid email
This may not conform to all possibilities of any rfc standard, but neither
@@ -685,13 +858,19 @@ does any email provider accept all possibilities.
Usage: email
-JSON String
+# JSON String
This validates that a string value is valid JSON
Usage: json
-File path
+# JWT String
+
+This validates that a string value is a valid JWT
+
+ Usage: jwt
+
+# File
This validates that a string value contains a valid file path and that
the file exists on the machine.
@@ -699,7 +878,23 @@ This is done using os.Stat, which is a platform independent function.
Usage: file
-URL String
+# Image path
+
+This validates that a string value contains a valid file path and that
+the file exists on the machine and is an image.
+This is done using os.Stat and github.com/gabriel-vasile/mimetype
+
+ Usage: image
+
+# File Path
+
+This validates that a string value contains a valid file path but does not
+validate the existence of that file.
+This is done using os.Stat, which is a platform independent function.
+
+ Usage: filepath
+
+# URL String
This validates that a string value contains a valid url
This will accept any url the golang request uri accepts but must contain
@@ -707,21 +902,30 @@ a schema for example http:// or rtmp://
Usage: url
-URI String
+# URI String
This validates that a string value contains a valid uri
This will accept any uri the golang request uri accepts
Usage: uri
-Urn RFC 2141 String
+# Urn RFC 2141 String
-This validataes that a string value contains a valid URN
+This validates that a string value contains a valid URN
according to the RFC 2141 spec.
Usage: urn_rfc2141
-Base64 String
+# Base32 String
+
+This validates that a string value contains a valid base32 value.
+Although an empty string is valid base32 this will report an empty string
+as an error, if you wish to accept an empty string as valid you can use
+this with the omitempty tag.
+
+ Usage: base32
+
+# Base64 String
This validates that a string value contains a valid base64 value.
Although an empty string is valid base64 this will report an empty string
@@ -730,17 +934,27 @@ this with the omitempty tag.
Usage: base64
-Base64URL String
+# Base64URL String
This validates that a string value contains a valid base64 URL safe value
-according the the RFC4648 spec.
+according to the RFC4648 spec.
Although an empty string is a valid base64 URL safe value, this will report
an empty string as an error, if you wish to accept an empty string as valid
you can use this with the omitempty tag.
Usage: base64url
-Bitcoin Address
+# Base64RawURL String
+
+This validates that a string value contains a valid base64 URL safe value,
+but without = padding, according to the RFC4648 spec, section 3.2.
+Although an empty string is a valid base64 URL safe value, this will report
+an empty string as an error, if you wish to accept an empty string as valid
+you can use this with the omitempty tag.
+
+	Usage: base64rawurl
+
+# Bitcoin Address
This validates that a string value contains a valid bitcoin address.
The format of the string is checked to ensure it matches one of the three formats
@@ -752,253 +966,270 @@ Bitcoin Bech32 Address (segwit)
This validates that a string value contains a valid bitcoin Bech32 address as defined
by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki)
-Special thanks to Pieter Wuille for providng reference implementations.
+Special thanks to Pieter Wuille for providing reference implementations.
Usage: btc_addr_bech32
-Ethereum Address
+# Ethereum Address
This validates that a string value contains a valid ethereum address.
-The format of the string is checked to ensure it matches the standard Ethereum address format
-Full validation is blocked by https://github.com/golang/crypto/pull/28
+The format of the string is checked to ensure it matches the standard Ethereum address format.
Usage: eth_addr
-Contains
+# Contains
This validates that a string value contains the substring value.
Usage: contains=@
-Contains Any
+# Contains Any
This validates that a string value contains any Unicode code points
in the substring value.
Usage: containsany=!@#?
-Contains Rune
+# Contains Rune
This validates that a string value contains the supplied rune value.
Usage: containsrune=@
-Excludes
+# Excludes
This validates that a string value does not contain the substring value.
Usage: excludes=@
-Excludes All
+# Excludes All
This validates that a string value does not contain any Unicode code
points in the substring value.
Usage: excludesall=!@#?
-Excludes Rune
+# Excludes Rune
This validates that a string value does not contain the supplied rune value.
Usage: excludesrune=@
-Starts With
+# Starts With
This validates that a string value starts with the supplied string value
Usage: startswith=hello
-Ends With
+# Ends With
This validates that a string value ends with the supplied string value
Usage: endswith=goodbye
-International Standard Book Number
+# Does Not Start With
+
+This validates that a string value does not start with the supplied string value
+
+ Usage: startsnotwith=hello
+
+# Does Not End With
+
+This validates that a string value does not end with the supplied string value
+
+ Usage: endsnotwith=goodbye
+
+# International Standard Book Number
This validates that a string value contains a valid isbn10 or isbn13 value.
Usage: isbn
-International Standard Book Number 10
+# International Standard Book Number 10
This validates that a string value contains a valid isbn10 value.
Usage: isbn10
-International Standard Book Number 13
+# International Standard Book Number 13
This validates that a string value contains a valid isbn13 value.
Usage: isbn13
-Universally Unique Identifier UUID
+# Universally Unique Identifier UUID
This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead.
Usage: uuid
-Universally Unique Identifier UUID v3
+# Universally Unique Identifier UUID v3
This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead.
Usage: uuid3
-Universally Unique Identifier UUID v4
+# Universally Unique Identifier UUID v4
This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead.
Usage: uuid4
-Universally Unique Identifier UUID v5
+# Universally Unique Identifier UUID v5
This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead.
Usage: uuid5
-ASCII
+# Universally Unique Lexicographically Sortable Identifier ULID
+
+This validates that a string value contains a valid ULID value.
+
+ Usage: ulid
+
+# ASCII
This validates that a string value contains only ASCII characters.
NOTE: if the string is blank, this validates as true.
Usage: ascii
-Printable ASCII
+# Printable ASCII
This validates that a string value contains only printable ASCII characters.
NOTE: if the string is blank, this validates as true.
Usage: printascii
-Multi-Byte Characters
+# Multi-Byte Characters
This validates that a string value contains one or more multibyte characters.
NOTE: if the string is blank, this validates as true.
Usage: multibyte
-Data URL
+# Data URL
This validates that a string value contains a valid DataURI.
NOTE: this will also validate that the data portion is valid base64
Usage: datauri
-Latitude
+# Latitude
This validates that a string value contains a valid latitude.
Usage: latitude
-Longitude
+# Longitude
This validates that a string value contains a valid longitude.
Usage: longitude
-Social Security Number SSN
+# Social Security Number SSN
This validates that a string value contains a valid U.S. Social Security Number.
Usage: ssn
-Internet Protocol Address IP
+# Internet Protocol Address IP
This validates that a string value contains a valid IP Address.
Usage: ip
-Internet Protocol Address IPv4
+# Internet Protocol Address IPv4
This validates that a string value contains a valid v4 IP Address.
Usage: ipv4
-Internet Protocol Address IPv6
+# Internet Protocol Address IPv6
This validates that a string value contains a valid v6 IP Address.
Usage: ipv6
-Classless Inter-Domain Routing CIDR
+# Classless Inter-Domain Routing CIDR
This validates that a string value contains a valid CIDR Address.
Usage: cidr
-Classless Inter-Domain Routing CIDRv4
+# Classless Inter-Domain Routing CIDRv4
This validates that a string value contains a valid v4 CIDR Address.
Usage: cidrv4
-Classless Inter-Domain Routing CIDRv6
+# Classless Inter-Domain Routing CIDRv6
This validates that a string value contains a valid v6 CIDR Address.
Usage: cidrv6
-Transmission Control Protocol Address TCP
+# Transmission Control Protocol Address TCP
This validates that a string value contains a valid resolvable TCP Address.
Usage: tcp_addr
-Transmission Control Protocol Address TCPv4
+# Transmission Control Protocol Address TCPv4
This validates that a string value contains a valid resolvable v4 TCP Address.
Usage: tcp4_addr
-Transmission Control Protocol Address TCPv6
+# Transmission Control Protocol Address TCPv6
This validates that a string value contains a valid resolvable v6 TCP Address.
Usage: tcp6_addr
-User Datagram Protocol Address UDP
+# User Datagram Protocol Address UDP
This validates that a string value contains a valid resolvable UDP Address.
Usage: udp_addr
-User Datagram Protocol Address UDPv4
+# User Datagram Protocol Address UDPv4
This validates that a string value contains a valid resolvable v4 UDP Address.
Usage: udp4_addr
-User Datagram Protocol Address UDPv6
+# User Datagram Protocol Address UDPv6
This validates that a string value contains a valid resolvable v6 UDP Address.
Usage: udp6_addr
-Internet Protocol Address IP
+# Internet Protocol Address IP
This validates that a string value contains a valid resolvable IP Address.
Usage: ip_addr
-Internet Protocol Address IPv4
+# Internet Protocol Address IPv4
This validates that a string value contains a valid resolvable v4 IP Address.
Usage: ip4_addr
-Internet Protocol Address IPv6
+# Internet Protocol Address IPv6
This validates that a string value contains a valid resolvable v6 IP Address.
Usage: ip6_addr
-Unix domain socket end point Address
+# Unix domain socket end point Address
This validates that a string value contains a valid Unix Address.
Usage: unix_addr
-Media Access Control Address MAC
+# Media Access Control Address MAC
This validates that a string value contains a valid MAC Address.
@@ -1008,13 +1239,13 @@ Note: See Go's ParseMAC for accepted formats and types:
http://golang.org/src/net/mac.go?s=866:918#L29
-Hostname RFC 952
+# Hostname RFC 952
This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952
Usage: hostname
-Hostname RFC 1123
+# Hostname RFC 1123
This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123
@@ -1026,28 +1257,28 @@ This validates that a string value contains a valid FQDN.
Usage: fqdn
-HTML Tags
+# HTML Tags
This validates that a string value appears to be an HTML element tag
including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element
Usage: html
-HTML Encoded
+# HTML Encoded
This validates that a string value is a proper character reference in decimal
or hexadecimal format
Usage: html_encoded
-URL Encoded
+# URL Encoded
This validates that a string value is percent-encoded (URL encoded) according
to https://tools.ietf.org/html/rfc3986#section-2.1
Usage: url_encoded
-Directory
+# Directory
This validates that a string value contains a valid directory and that
it exists on the machine.
@@ -1055,22 +1286,135 @@ This is done using os.Stat, which is a platform independent function.
Usage: dir
-HostPort
+# Directory Path
+
+This validates that a string value contains a valid directory but does
+not validate the existence of that directory.
+This is done using os.Stat, which is a platform independent function.
+It is safest to suffix the string with os.PathSeparator if the directory
+may not exist at the time of validation.
+
+ Usage: dirpath
+
+# HostPort
This validates that a string value contains a valid DNS hostname and port that
-can be used to valiate fields typically passed to sockets and connections.
+can be used to validate fields typically passed to sockets and connections.
Usage: hostname_port
-Datetime
+# Datetime
This validates that a string value is a valid datetime based on the supplied datetime format.
Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/
Usage: datetime=2006-01-02
-Alias Validators and Tags
+# Iso3166-1 alpha-2
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha2
+
+# Iso3166-1 alpha-3
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha3
+# Iso3166-1 alpha-numeric
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha_numeric
+
+# BCP 47 Language Tag
+
+This validates that a string value is a valid BCP 47 language tag, as parsed by language.Parse.
+More information on https://pkg.go.dev/golang.org/x/text/language
+
+ Usage: bcp47_language_tag
+
+# BIC (SWIFT code)
+
+This validates that a string value is a valid Business Identifier Code (SWIFT code), defined in ISO 9362.
+More information on https://www.iso.org/standard/60390.html
+
+ Usage: bic
+
+# RFC 1035 label
+
+This validates that a string value is a valid dns RFC 1035 label, defined in RFC 1035.
+More information on https://datatracker.ietf.org/doc/html/rfc1035
+
+ Usage: dns_rfc1035_label
+
+# TimeZone
+
+This validates that a string value is a valid time zone based on the time zone database present on the system.
+Although empty value and Local value are allowed by time.LoadLocation golang function, they are not allowed by this validator.
+More information on https://golang.org/pkg/time/#LoadLocation
+
+ Usage: timezone
+
+# Semantic Version
+
+This validates that a string value is a valid semver version, defined in Semantic Versioning 2.0.0.
+More information on https://semver.org/
+
+ Usage: semver
+
+# CVE Identifier
+
+This validates that a string value is a valid cve id, defined in cve mitre.
+More information on https://cve.mitre.org/
+
+ Usage: cve
+
+# Credit Card
+
+This validates that a string value contains a valid credit card number using Luhn algorithm.
+
+ Usage: credit_card
+
+# Luhn Checksum
+
+ Usage: luhn_checksum
+
+This validates that a string or (u)int value contains a valid checksum using the Luhn algorithm.
+
+# MongoDB
+
+This validates that a string is a valid 24 character hexadecimal string or valid connection string.
+
+ Usage: mongodb
+ mongodb_connection_string
+
+Example:
+
+ type Test struct {
+ ObjectIdField string `validate:"mongodb"`
+ ConnectionStringField string `validate:"mongodb_connection_string"`
+ }
+
+# Cron
+
+This validates that a string value contains a valid cron expression.
+
+ Usage: cron
+
+# SpiceDb ObjectID/Permission/Object Type
+
+This validates that a string is valid for use with SpiceDb for the indicated purpose. If no purpose is given, a purpose of 'id' is assumed.
+
+ Usage: spicedb=id|permission|type
+
+# Alias Validators and Tags
+
+
NOTE: When returning an error, the tag returned in "FieldError" will be
the alias tag unless the dive tag is part of the alias. Everything after the
dive tag is not reported as the alias tag. Also, the "ActualTag" in the before
@@ -1080,6 +1424,8 @@ Here is a list of the current built in alias tags:
"iscolor"
alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
+ "country_code"
+ alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code)
Validator notes:
@@ -1098,7 +1444,7 @@ Validator notes:
And the best reason, you can submit a pull request and we can keep on
adding to the validation library of this package!
-Non standard validators
+# Non standard validators
A collection of validation rules that are frequently needed but are more
complex than the ones found in the baked in validators.
@@ -1127,7 +1473,7 @@ Here is a list of the current non standard validators:
Usage: notblank
-Panics
+# Panics
This package panics when bad input is provided, this is by design, bad code like
that should not make it to production.
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/errors.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/errors.go
index 46c24c9b283..be2676e9e15 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/errors.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/errors.go
@@ -44,12 +44,9 @@ func (ve ValidationErrors) Error() string {
buff := bytes.NewBufferString("")
- var fe *fieldError
-
for i := 0; i < len(ve); i++ {
- fe = ve[i].(*fieldError)
- buff.WriteString(fe.Error())
+ buff.WriteString(ve[i].Error())
buff.WriteString("\n")
}
@@ -82,7 +79,7 @@ func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslati
// FieldError contains all functions to get error details
type FieldError interface {
- // returns the validation tag that failed. if the
+ // Tag returns the validation tag that failed. if the
// validation was an alias, this will return the
// alias name and not the underlying tag that failed.
//
@@ -90,7 +87,7 @@ type FieldError interface {
// will return "iscolor"
Tag() string
- // returns the validation tag that failed, even if an
+ // ActualTag returns the validation tag that failed, even if an
// alias the actual tag within the alias will be returned.
// If an 'or' validation fails the entire or will be returned.
//
@@ -98,8 +95,8 @@ type FieldError interface {
// will return "hexcolor|rgb|rgba|hsl|hsla"
ActualTag() string
- // returns the namespace for the field error, with the tag
- // name taking precedence over the fields actual name.
+ // Namespace returns the namespace for the field error, with the tag
+ // name taking precedence over the field's actual name.
//
// eg. JSON name "User.fname"
//
@@ -109,33 +106,33 @@ type FieldError interface {
// using validate.Field(...) as there is no way to extract it's name
Namespace() string
- // returns the namespace for the field error, with the fields
+ // StructNamespace returns the namespace for the field error, with the field's
// actual name.
//
// eq. "User.FirstName" see Namespace for comparison
//
// NOTE: this field can be blank when validating a single primitive field
- // using validate.Field(...) as there is no way to extract it's name
+ // using validate.Field(...) as there is no way to extract its name
StructNamespace() string
- // returns the fields name with the tag name taking precedence over the
- // fields actual name.
+ // Field returns the fields name with the tag name taking precedence over the
+ // field's actual name.
//
// eq. JSON name "fname"
// see StructField for comparison
Field() string
- // returns the fields actual name from the struct, when able to determine.
+ // StructField returns the field's actual name from the struct, when able to determine.
//
// eq. "FirstName"
// see Field for comparison
StructField() string
- // returns the actual fields value in case needed for creating the error
+ // Value returns the actual field's value in case needed for creating the error
// message
Value() interface{}
- // returns the param value, in string form for comparison; this will also
+ // Param returns the param value, in string form for comparison; this will also
// help with generating an error message
Param() string
@@ -146,15 +143,18 @@ type FieldError interface {
// Type returns the Field's reflect Type
//
- // // eg. time.Time's type is time.Time
+ // eg. time.Time's type is time.Time
Type() reflect.Type
- // returns the FieldError's translated error
+ // Translate returns the FieldError's translated error
// from the provided 'ut.Translator' and registered 'TranslationFunc'
//
// NOTE: if no registered translator can be found it returns the same as
// calling fe.Error()
Translate(ut ut.Translator) string
+
+ // Error returns the FieldError's message
+ Error() string
}
// compile time interface checks
@@ -190,19 +190,19 @@ func (fe *fieldError) ActualTag() string {
}
// Namespace returns the namespace for the field error, with the tag
-// name taking precedence over the fields actual name.
+// name taking precedence over the field's actual name.
func (fe *fieldError) Namespace() string {
return fe.ns
}
-// StructNamespace returns the namespace for the field error, with the fields
+// StructNamespace returns the namespace for the field error, with the field's
// actual name.
func (fe *fieldError) StructNamespace() string {
return fe.structNs
}
-// Field returns the fields name with the tag name taking precedence over the
-// fields actual name.
+// Field returns the field's name with the tag name taking precedence over the
+// field's actual name.
func (fe *fieldError) Field() string {
return fe.ns[len(fe.ns)-int(fe.fieldLen):]
@@ -218,13 +218,13 @@ func (fe *fieldError) Field() string {
// return fld
}
-// returns the fields actual name from the struct, when able to determine.
+// StructField returns the field's actual name from the struct, when able to determine.
func (fe *fieldError) StructField() string {
// return fe.structField
return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
}
-// Value returns the actual fields value in case needed for creating the error
+// Value returns the actual field's value in case needed for creating the error
// message
func (fe *fieldError) Value() interface{} {
return fe.value
@@ -254,18 +254,22 @@ func (fe *fieldError) Error() string {
// Translate returns the FieldError's translated error
// from the provided 'ut.Translator' and registered 'TranslationFunc'
//
-// NOTE: is not registered translation can be found it returns the same
-// as calling fe.Error()
+// NOTE: if no registered translation can be found, it returns the original
+// untranslated error message.
func (fe *fieldError) Translate(ut ut.Translator) string {
+ var fn TranslationFunc
m, ok := fe.v.transTagFunc[ut]
if !ok {
return fe.Error()
}
- fn, ok := m[fe.tag]
+ fn, ok = m[fe.tag]
if !ok {
- return fe.Error()
+ fn, ok = m[fe.actualTag]
+ if !ok {
+ return fe.Error()
+ }
}
return fn(ut, fe)
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/field_level.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/field_level.go
index f0e2a9a855e..ef35826ee6f 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/field_level.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/field_level.go
@@ -5,24 +5,25 @@ import "reflect"
// FieldLevel contains all the information and helper functions
// to validate a field
type FieldLevel interface {
- // returns the top level struct, if any
+
+ // Top returns the top level struct, if any
Top() reflect.Value
- // returns the current fields parent struct, if any or
+ // Parent returns the current fields parent struct, if any or
// the comparison value if called 'VarWithValue'
Parent() reflect.Value
- // returns current field for validation
+ // Field returns current field for validation
Field() reflect.Value
- // returns the field's name with the tag
+ // FieldName returns the field's name with the tag
// name taking precedence over the fields actual name.
FieldName() string
- // returns the struct field's name
+ // StructFieldName returns the struct field's name
StructFieldName() string
- // returns param for validation against current field
+ // Param returns param for validation against current field
Param() string
// GetTag returns the current validations tag name
@@ -33,7 +34,7 @@ type FieldLevel interface {
// underlying value and it's kind.
ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
- // traverses the parent struct to retrieve a specific field denoted by the provided namespace
+ // GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the provided namespace
// in the param and returns the field, field kind and whether is was successful in retrieving
// the field at all.
//
@@ -49,7 +50,7 @@ type FieldLevel interface {
// Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable.
GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool)
- // traverses the parent struct to retrieve a specific field denoted by the provided namespace
+ // GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided namespace
// in the param and returns the field, field kind, if it's a nullable type and whether is was successful in retrieving
// the field at all.
//
@@ -57,7 +58,7 @@ type FieldLevel interface {
// could not be retrieved because it didn't exist.
GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool)
- // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+ // GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool)
}
@@ -107,12 +108,12 @@ func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string)
return current, kind, found
}
-// GetStructFieldOK returns Param returns param for validation against current field
+// GetStructFieldOK2 returns Param returns param for validation against current field
func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) {
return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
}
-// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) {
return v.getStructFieldOKInternal(val, namespace)
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.mod b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.mod
index 21316b3abc7..c77643fc598 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.mod
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.mod
@@ -1,10 +1,18 @@
module github.com/go-playground/validator/v10
-go 1.13
+go 1.18
require (
- github.com/go-playground/assert/v2 v2.0.1
- github.com/go-playground/locales v0.13.0
- github.com/go-playground/universal-translator v0.17.0
- github.com/leodido/go-urn v1.2.0
+ github.com/gabriel-vasile/mimetype v1.4.3
+ github.com/go-playground/assert/v2 v2.2.0
+ github.com/go-playground/locales v0.14.1
+ github.com/go-playground/universal-translator v0.18.1
+ github.com/leodido/go-urn v1.4.0
+ golang.org/x/crypto v0.19.0
+ golang.org/x/text v0.14.0
+)
+
+require (
+ golang.org/x/net v0.21.0 // indirect
+ golang.org/x/sys v0.17.0 // indirect
)
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.sum b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.sum
index dc7dec4dee6..3bf8a6e6372 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.sum
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/go.sum
@@ -1,21 +1,22 @@
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
-github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
-github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
-github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
-github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
-github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
-github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
-github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
+github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
+github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/options.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/options.go
new file mode 100644
index 00000000000..86a0db218e1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/options.go
@@ -0,0 +1,26 @@
+package validator
+
+// Option represents a configurations option to be applied to validator during initialization.
+type Option func(*Validate)
+
+// WithRequiredStructEnabled enables required tag on non-pointer structs to be applied instead of ignored.
+//
+// This was made opt-in behaviour in order to maintain backward compatibility with the behaviour previous
+// to being able to apply struct level validations on struct fields directly.
+//
+// It is recommended you enable this as it will be the default behaviour in v11+
+func WithRequiredStructEnabled() Option {
+ return func(v *Validate) {
+ v.requiredStructEnabled = true
+ }
+}
+
+// WithPrivateFieldValidation activates validation for unexported fields via the use of the `unsafe` package.
+//
+// By opting into this feature you are acknowledging that you are aware of the risks and accept any current or future
+// consequences of using this feature.
+func WithPrivateFieldValidation() Option {
+ return func(v *Validate) {
+ v.privateFieldValidation = true
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/postcode_regexes.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/postcode_regexes.go
new file mode 100644
index 00000000000..326b8f7538f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/postcode_regexes.go
@@ -0,0 +1,179 @@
+package validator
+
+import (
+ "regexp"
+ "sync"
+)
+
+var postCodePatternDict = map[string]string{
+ "GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`,
+ "JE": `^JE\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
+ "GG": `^GY\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
+ "IM": `^IM\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
+ "US": `^\d{5}([ \-]\d{4})?$`,
+ "CA": `^[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJ-NPRSTV-Z][ ]?\d[ABCEGHJ-NPRSTV-Z]\d$`,
+ "DE": `^\d{5}$`,
+ "JP": `^\d{3}-\d{4}$`,
+ "FR": `^\d{2}[ ]?\d{3}$`,
+ "AU": `^\d{4}$`,
+ "IT": `^\d{5}$`,
+ "CH": `^\d{4}$`,
+ "AT": `^\d{4}$`,
+ "ES": `^\d{5}$`,
+ "NL": `^\d{4}[ ]?[A-Z]{2}$`,
+ "BE": `^\d{4}$`,
+ "DK": `^\d{4}$`,
+ "SE": `^\d{3}[ ]?\d{2}$`,
+ "NO": `^\d{4}$`,
+ "BR": `^\d{5}[\-]?\d{3}$`,
+ "PT": `^\d{4}([\-]\d{3})?$`,
+ "FI": `^\d{5}$`,
+ "AX": `^22\d{3}$`,
+ "KR": `^\d{3}[\-]\d{3}$`,
+ "CN": `^\d{6}$`,
+ "TW": `^\d{3}(\d{2})?$`,
+ "SG": `^\d{6}$`,
+ "DZ": `^\d{5}$`,
+ "AD": `^AD\d{3}$`,
+ "AR": `^([A-HJ-NP-Z])?\d{4}([A-Z]{3})?$`,
+ "AM": `^(37)?\d{4}$`,
+ "AZ": `^\d{4}$`,
+ "BH": `^((1[0-2]|[2-9])\d{2})?$`,
+ "BD": `^\d{4}$`,
+ "BB": `^(BB\d{5})?$`,
+ "BY": `^\d{6}$`,
+ "BM": `^[A-Z]{2}[ ]?[A-Z0-9]{2}$`,
+ "BA": `^\d{5}$`,
+ "IO": `^BBND 1ZZ$`,
+ "BN": `^[A-Z]{2}[ ]?\d{4}$`,
+ "BG": `^\d{4}$`,
+ "KH": `^\d{5}$`,
+ "CV": `^\d{4}$`,
+ "CL": `^\d{7}$`,
+ "CR": `^\d{4,5}|\d{3}-\d{4}$`,
+ "HR": `^\d{5}$`,
+ "CY": `^\d{4}$`,
+ "CZ": `^\d{3}[ ]?\d{2}$`,
+ "DO": `^\d{5}$`,
+ "EC": `^([A-Z]\d{4}[A-Z]|(?:[A-Z]{2})?\d{6})?$`,
+ "EG": `^\d{5}$`,
+ "EE": `^\d{5}$`,
+ "FO": `^\d{3}$`,
+ "GE": `^\d{4}$`,
+ "GR": `^\d{3}[ ]?\d{2}$`,
+ "GL": `^39\d{2}$`,
+ "GT": `^\d{5}$`,
+ "HT": `^\d{4}$`,
+ "HN": `^(?:\d{5})?$`,
+ "HU": `^\d{4}$`,
+ "IS": `^\d{3}$`,
+ "IN": `^\d{6}$`,
+ "ID": `^\d{5}$`,
+ "IL": `^\d{5}$`,
+ "JO": `^\d{5}$`,
+ "KZ": `^\d{6}$`,
+ "KE": `^\d{5}$`,
+ "KW": `^\d{5}$`,
+ "LA": `^\d{5}$`,
+ "LV": `^\d{4}$`,
+ "LB": `^(\d{4}([ ]?\d{4})?)?$`,
+ "LI": `^(948[5-9])|(949[0-7])$`,
+ "LT": `^\d{5}$`,
+ "LU": `^\d{4}$`,
+ "MK": `^\d{4}$`,
+ "MY": `^\d{5}$`,
+ "MV": `^\d{5}$`,
+ "MT": `^[A-Z]{3}[ ]?\d{2,4}$`,
+ "MU": `^(\d{3}[A-Z]{2}\d{3})?$`,
+ "MX": `^\d{5}$`,
+ "MD": `^\d{4}$`,
+ "MC": `^980\d{2}$`,
+ "MA": `^\d{5}$`,
+ "NP": `^\d{5}$`,
+ "NZ": `^\d{4}$`,
+ "NI": `^((\d{4}-)?\d{3}-\d{3}(-\d{1})?)?$`,
+ "NG": `^(\d{6})?$`,
+ "OM": `^(PC )?\d{3}$`,
+ "PK": `^\d{5}$`,
+ "PY": `^\d{4}$`,
+ "PH": `^\d{4}$`,
+ "PL": `^\d{2}-\d{3}$`,
+ "PR": `^00[679]\d{2}([ \-]\d{4})?$`,
+ "RO": `^\d{6}$`,
+ "RU": `^\d{6}$`,
+ "SM": `^4789\d$`,
+ "SA": `^\d{5}$`,
+ "SN": `^\d{5}$`,
+ "SK": `^\d{3}[ ]?\d{2}$`,
+ "SI": `^\d{4}$`,
+ "ZA": `^\d{4}$`,
+ "LK": `^\d{5}$`,
+ "TJ": `^\d{6}$`,
+ "TH": `^\d{5}$`,
+ "TN": `^\d{4}$`,
+ "TR": `^\d{5}$`,
+ "TM": `^\d{6}$`,
+ "UA": `^\d{5}$`,
+ "UY": `^\d{5}$`,
+ "UZ": `^\d{6}$`,
+ "VA": `^00120$`,
+ "VE": `^\d{4}$`,
+ "ZM": `^\d{5}$`,
+ "AS": `^96799$`,
+ "CC": `^6799$`,
+ "CK": `^\d{4}$`,
+ "RS": `^\d{6}$`,
+ "ME": `^8\d{4}$`,
+ "CS": `^\d{5}$`,
+ "YU": `^\d{5}$`,
+ "CX": `^6798$`,
+ "ET": `^\d{4}$`,
+ "FK": `^FIQQ 1ZZ$`,
+ "NF": `^2899$`,
+ "FM": `^(9694[1-4])([ \-]\d{4})?$`,
+ "GF": `^9[78]3\d{2}$`,
+ "GN": `^\d{3}$`,
+ "GP": `^9[78][01]\d{2}$`,
+ "GS": `^SIQQ 1ZZ$`,
+ "GU": `^969[123]\d([ \-]\d{4})?$`,
+ "GW": `^\d{4}$`,
+ "HM": `^\d{4}$`,
+ "IQ": `^\d{5}$`,
+ "KG": `^\d{6}$`,
+ "LR": `^\d{4}$`,
+ "LS": `^\d{3}$`,
+ "MG": `^\d{3}$`,
+ "MH": `^969[67]\d([ \-]\d{4})?$`,
+ "MN": `^\d{6}$`,
+ "MP": `^9695[012]([ \-]\d{4})?$`,
+ "MQ": `^9[78]2\d{2}$`,
+ "NC": `^988\d{2}$`,
+ "NE": `^\d{4}$`,
+ "VI": `^008(([0-4]\d)|(5[01]))([ \-]\d{4})?$`,
+ "VN": `^[0-9]{1,6}$`,
+ "PF": `^987\d{2}$`,
+ "PG": `^\d{3}$`,
+ "PM": `^9[78]5\d{2}$`,
+ "PN": `^PCRN 1ZZ$`,
+ "PW": `^96940$`,
+ "RE": `^9[78]4\d{2}$`,
+ "SH": `^(ASCN|STHL) 1ZZ$`,
+ "SJ": `^\d{4}$`,
+ "SO": `^\d{5}$`,
+ "SZ": `^[HLMS]\d{3}$`,
+ "TC": `^TKCA 1ZZ$`,
+ "WF": `^986\d{2}$`,
+ "XK": `^\d{5}$`,
+ "YT": `^976\d{2}$`,
+}
+
+var (
+ postcodeRegexInit sync.Once
+ postCodeRegexDict = map[string]*regexp.Regexp{}
+)
+
+func initPostcodes() {
+ for countryCode, pattern := range postCodePatternDict {
+ postCodeRegexDict[countryCode] = regexp.MustCompile(pattern)
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/regexes.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/regexes.go
index b8bf25316ad..7e1dd5a03e5 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/regexes.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/regexes.go
@@ -1,6 +1,9 @@
package validator
-import "regexp"
+import (
+ "regexp"
+ "sync"
+)
const (
alphaRegexString = "^[a-zA-Z]+$"
@@ -9,18 +12,21 @@ const (
alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
numberRegexString = "^[0-9]+$"
- hexadecimalRegexString = "^[0-9a-fA-F]+$"
- hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$"
+ hexColorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$"
rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
e164RegexString = "^\\+[1-9]?[0-9]{7,14}$"
+ base32RegexString = "^(?:[A-Z2-7]{8})*(?:[A-Z2-7]{2}={6}|[A-Z2-7]{4}={4}|[A-Z2-7]{5}={3}|[A-Z2-7]{7}=|[A-Z2-7]{8})$"
base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$"
+ base64RawURLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2,4})$"
iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
+ iSSNRegexString = "^(?:[0-9]{4}-[0-9]{3}[0-9X])$"
uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
@@ -29,6 +35,17 @@ const (
uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
+ uLIDRegexString = "^(?i)[A-HJKMNP-TV-Z0-9]{26}$"
+ md4RegexString = "^[0-9a-f]{32}$"
+ md5RegexString = "^[0-9a-f]{32}$"
+ sha256RegexString = "^[0-9a-f]{64}$"
+ sha384RegexString = "^[0-9a-f]{96}$"
+ sha512RegexString = "^[0-9a-f]{128}$"
+ ripemd128RegexString = "^[0-9a-f]{32}$"
+ ripemd160RegexString = "^[0-9a-f]{40}$"
+ tiger128RegexString = "^[0-9a-f]{32}$"
+ tiger160RegexString = "^[0-9a-f]{40}$"
+ tiger192RegexString = "^[0-9a-f]{48}$"
aSCIIRegexString = "^[\x00-\x7F]*$"
printableASCIIRegexString = "^[\x20-\x7E]*$"
multibyteRegexString = "[^\x00-\x7F]"
@@ -36,64 +53,111 @@ const (
latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
- hostnameRegexStringRFC952 = `^[a-zA-Z][a-zA-Z0-9\-\.]+[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
- hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
- btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
- btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
- btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
+ hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
+ hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
+ fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
+ btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
+ btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
+ btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
ethAddressRegexString = `^0x[0-9a-fA-F]{40}$`
ethAddressUpperRegexString = `^0x[0-9A-F]{40}$`
ethAddressLowerRegexString = `^0x[0-9a-f]{40}$`
- uRLEncodedRegexString = `(%[A-Fa-f0-9]{2})`
+ uRLEncodedRegexString = `^(?:[^%]|%[0-9A-Fa-f]{2})*$`
hTMLEncodedRegexString = `[x]?([0-9a-fA-F]{2})|(>)|(<)|(")|(&)+[;]?`
hTMLRegexString = `<[/]?([a-zA-Z]+).*?>`
+ jWTRegexString = "^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$"
splitParamsRegexString = `'[^']*'|\S+`
+ bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$`
+ semverRegexString = `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` // numbered capture groups https://semver.org/
+ dnsRegexStringRFC1035Label = "^[a-z]([-a-z0-9]*[a-z0-9]){0,62}$"
+ cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html
+ mongodbIdRegexString = "^[a-f\\d]{24}$"
+ mongodbConnStringRegexString = "^mongodb(\\+srv)?:\\/\\/(([a-zA-Z\\d]+):([a-zA-Z\\d$:\\/?#\\[\\]@]+)@)?(([a-z\\d.-]+)(:[\\d]+)?)((,(([a-z\\d.-]+)(:(\\d+))?))*)?(\\/[a-zA-Z-_]{1,64})?(\\?(([a-zA-Z]+)=([a-zA-Z\\d]+))(&(([a-zA-Z\\d]+)=([a-zA-Z\\d]+))?)*)?$"
+ cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|\*) ?){5,7})`
+ spicedbIDRegexString = `^(([a-zA-Z0-9/_|\-=+]{1,})|\*)$`
+ spicedbPermissionRegexString = "^([a-z][a-z0-9_]{1,62}[a-z0-9])?$"
+ spicedbTypeRegexString = "^([a-z][a-z0-9_]{1,61}[a-z0-9]/)?[a-z][a-z0-9_]{1,62}[a-z0-9]$"
)
+func lazyRegexCompile(str string) func() *regexp.Regexp {
+ var regex *regexp.Regexp
+ var once sync.Once
+ return func() *regexp.Regexp {
+ once.Do(func() {
+ regex = regexp.MustCompile(str)
+ })
+ return regex
+ }
+}
+
var (
- alphaRegex = regexp.MustCompile(alphaRegexString)
- alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
- alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString)
- alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString)
- numericRegex = regexp.MustCompile(numericRegexString)
- numberRegex = regexp.MustCompile(numberRegexString)
- hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
- hexcolorRegex = regexp.MustCompile(hexcolorRegexString)
- rgbRegex = regexp.MustCompile(rgbRegexString)
- rgbaRegex = regexp.MustCompile(rgbaRegexString)
- hslRegex = regexp.MustCompile(hslRegexString)
- hslaRegex = regexp.MustCompile(hslaRegexString)
- e164Regex = regexp.MustCompile(e164RegexString)
- emailRegex = regexp.MustCompile(emailRegexString)
- base64Regex = regexp.MustCompile(base64RegexString)
- base64URLRegex = regexp.MustCompile(base64URLRegexString)
- iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
- iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
- uUID3Regex = regexp.MustCompile(uUID3RegexString)
- uUID4Regex = regexp.MustCompile(uUID4RegexString)
- uUID5Regex = regexp.MustCompile(uUID5RegexString)
- uUIDRegex = regexp.MustCompile(uUIDRegexString)
- uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString)
- uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString)
- uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString)
- uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString)
- aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
- printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
- multibyteRegex = regexp.MustCompile(multibyteRegexString)
- dataURIRegex = regexp.MustCompile(dataURIRegexString)
- latitudeRegex = regexp.MustCompile(latitudeRegexString)
- longitudeRegex = regexp.MustCompile(longitudeRegexString)
- sSNRegex = regexp.MustCompile(sSNRegexString)
- hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952)
- hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123)
- btcAddressRegex = regexp.MustCompile(btcAddressRegexString)
- btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
- btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
- ethAddressRegex = regexp.MustCompile(ethAddressRegexString)
- ethaddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString)
- ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString)
- uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString)
- hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString)
- hTMLRegex = regexp.MustCompile(hTMLRegexString)
- splitParamsRegex = regexp.MustCompile(splitParamsRegexString)
+ alphaRegex = lazyRegexCompile(alphaRegexString)
+ alphaNumericRegex = lazyRegexCompile(alphaNumericRegexString)
+ alphaUnicodeRegex = lazyRegexCompile(alphaUnicodeRegexString)
+ alphaUnicodeNumericRegex = lazyRegexCompile(alphaUnicodeNumericRegexString)
+ numericRegex = lazyRegexCompile(numericRegexString)
+ numberRegex = lazyRegexCompile(numberRegexString)
+ hexadecimalRegex = lazyRegexCompile(hexadecimalRegexString)
+ hexColorRegex = lazyRegexCompile(hexColorRegexString)
+ rgbRegex = lazyRegexCompile(rgbRegexString)
+ rgbaRegex = lazyRegexCompile(rgbaRegexString)
+ hslRegex = lazyRegexCompile(hslRegexString)
+ hslaRegex = lazyRegexCompile(hslaRegexString)
+ e164Regex = lazyRegexCompile(e164RegexString)
+ emailRegex = lazyRegexCompile(emailRegexString)
+ base32Regex = lazyRegexCompile(base32RegexString)
+ base64Regex = lazyRegexCompile(base64RegexString)
+ base64URLRegex = lazyRegexCompile(base64URLRegexString)
+ base64RawURLRegex = lazyRegexCompile(base64RawURLRegexString)
+ iSBN10Regex = lazyRegexCompile(iSBN10RegexString)
+ iSBN13Regex = lazyRegexCompile(iSBN13RegexString)
+ iSSNRegex = lazyRegexCompile(iSSNRegexString)
+ uUID3Regex = lazyRegexCompile(uUID3RegexString)
+ uUID4Regex = lazyRegexCompile(uUID4RegexString)
+ uUID5Regex = lazyRegexCompile(uUID5RegexString)
+ uUIDRegex = lazyRegexCompile(uUIDRegexString)
+ uUID3RFC4122Regex = lazyRegexCompile(uUID3RFC4122RegexString)
+ uUID4RFC4122Regex = lazyRegexCompile(uUID4RFC4122RegexString)
+ uUID5RFC4122Regex = lazyRegexCompile(uUID5RFC4122RegexString)
+ uUIDRFC4122Regex = lazyRegexCompile(uUIDRFC4122RegexString)
+ uLIDRegex = lazyRegexCompile(uLIDRegexString)
+ md4Regex = lazyRegexCompile(md4RegexString)
+ md5Regex = lazyRegexCompile(md5RegexString)
+ sha256Regex = lazyRegexCompile(sha256RegexString)
+ sha384Regex = lazyRegexCompile(sha384RegexString)
+ sha512Regex = lazyRegexCompile(sha512RegexString)
+ ripemd128Regex = lazyRegexCompile(ripemd128RegexString)
+ ripemd160Regex = lazyRegexCompile(ripemd160RegexString)
+ tiger128Regex = lazyRegexCompile(tiger128RegexString)
+ tiger160Regex = lazyRegexCompile(tiger160RegexString)
+ tiger192Regex = lazyRegexCompile(tiger192RegexString)
+ aSCIIRegex = lazyRegexCompile(aSCIIRegexString)
+ printableASCIIRegex = lazyRegexCompile(printableASCIIRegexString)
+ multibyteRegex = lazyRegexCompile(multibyteRegexString)
+ dataURIRegex = lazyRegexCompile(dataURIRegexString)
+ latitudeRegex = lazyRegexCompile(latitudeRegexString)
+ longitudeRegex = lazyRegexCompile(longitudeRegexString)
+ sSNRegex = lazyRegexCompile(sSNRegexString)
+ hostnameRegexRFC952 = lazyRegexCompile(hostnameRegexStringRFC952)
+ hostnameRegexRFC1123 = lazyRegexCompile(hostnameRegexStringRFC1123)
+ fqdnRegexRFC1123 = lazyRegexCompile(fqdnRegexStringRFC1123)
+ btcAddressRegex = lazyRegexCompile(btcAddressRegexString)
+ btcUpperAddressRegexBech32 = lazyRegexCompile(btcAddressUpperRegexStringBech32)
+ btcLowerAddressRegexBech32 = lazyRegexCompile(btcAddressLowerRegexStringBech32)
+ ethAddressRegex = lazyRegexCompile(ethAddressRegexString)
+ uRLEncodedRegex = lazyRegexCompile(uRLEncodedRegexString)
+ hTMLEncodedRegex = lazyRegexCompile(hTMLEncodedRegexString)
+ hTMLRegex = lazyRegexCompile(hTMLRegexString)
+ jWTRegex = lazyRegexCompile(jWTRegexString)
+ splitParamsRegex = lazyRegexCompile(splitParamsRegexString)
+ bicRegex = lazyRegexCompile(bicRegexString)
+ semverRegex = lazyRegexCompile(semverRegexString)
+ dnsRegexRFC1035Label = lazyRegexCompile(dnsRegexStringRFC1035Label)
+ cveRegex = lazyRegexCompile(cveRegexString)
+ mongodbIdRegex = lazyRegexCompile(mongodbIdRegexString)
+ mongodbConnectionRegex = lazyRegexCompile(mongodbConnStringRegexString)
+ cronRegex = lazyRegexCompile(cronRegexString)
+ spicedbIDRegex = lazyRegexCompile(spicedbIDRegexString)
+ spicedbPermissionRegex = lazyRegexCompile(spicedbPermissionRegexString)
+ spicedbTypeRegex = lazyRegexCompile(spicedbTypeRegexString)
)
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/struct_level.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/struct_level.go
index 57691ee380c..271328f7107 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/struct_level.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/struct_level.go
@@ -23,18 +23,18 @@ func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx {
// to validate a struct
type StructLevel interface {
- // returns the main validation object, in case one wants to call validations internally.
+ // Validator returns the main validation object, in case one wants to call validations internally.
// this is so you don't have to use anonymous functions to get access to the validate
// instance.
Validator() *Validate
- // returns the top level struct, if any
+ // Top returns the top level struct, if any
Top() reflect.Value
- // returns the current fields parent struct, if any
+ // Parent returns the current fields parent struct, if any
Parent() reflect.Value
- // returns the current struct.
+ // Current returns the current struct.
Current() reflect.Value
// ExtractType gets the actual underlying type of field value.
@@ -42,7 +42,7 @@ type StructLevel interface {
// underlying value and its kind.
ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
- // reports an error just by passing the field and tag information
+ // ReportError reports an error just by passing the field and tag information
//
// NOTES:
//
@@ -54,7 +54,7 @@ type StructLevel interface {
// and process on the flip side it's up to you.
ReportError(field interface{}, fieldName, structFieldName string, tag, param string)
- // reports an error just by passing ValidationErrors
+ // ReportValidationErrors reports an error just by passing ValidationErrors
//
// NOTES:
//
@@ -62,7 +62,7 @@ type StructLevel interface {
// existing namespace that validator is on.
// e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
// on the nesting. most of the time they will be blank, unless you validate
- // at a level lower the the current field depth
+ // at a level lower the current field depth
ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
}
@@ -74,7 +74,7 @@ var _ StructLevel = new(validate)
// if not is a nested struct.
//
// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an acurate value otherwise.
+// should not be relied upon for an accurate value otherwise.
func (v *validate) Top() reflect.Value {
return v.top
}
@@ -85,7 +85,7 @@ func (v *validate) Top() reflect.Value {
// if not is a nested struct.
//
// this is only called when within Struct and Field Level validation and
-// should not be relied upon for an acurate value otherwise.
+// should not be relied upon for an accurate value otherwise.
func (v *validate) Parent() reflect.Value {
return v.slflParent
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/util.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/util.go
index 0d3a1810329..9285223a2fe 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/util.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/util.go
@@ -1,9 +1,12 @@
package validator
import (
+ "fmt"
"reflect"
+ "regexp"
"strconv"
"strings"
+ "time"
)
// extractTypeInternal gets the actual underlying type of field value.
@@ -81,7 +84,7 @@ BEGIN:
fld := namespace
var ns string
- if typ != timeType {
+ if !typ.ConvertibleTo(timeType) {
idx := strings.Index(namespace, namespaceSeparator)
@@ -222,13 +225,34 @@ BEGIN:
// asInt returns the parameter as a int64
// or panics if it can't convert
func asInt(param string) int64 {
-
i, err := strconv.ParseInt(param, 0, 64)
panicIf(err)
return i
}
+// asIntFromTimeDuration parses param as time.Duration and returns it as int64
+// or panics on error.
+func asIntFromTimeDuration(param string) int64 {
+ d, err := time.ParseDuration(param)
+ if err != nil {
+ // attempt parsing as an integer assuming nanosecond precision
+ return asInt(param)
+ }
+ return int64(d)
+}
+
+// asIntFromType calls the proper function to parse param as int64,
+// given a field's Type t.
+func asIntFromType(t reflect.Type, param string) int64 {
+ switch t {
+ case timeDurationType:
+ return asIntFromTimeDuration(param)
+ default:
+ return asInt(param)
+ }
+}
+
// asUint returns the parameter as a uint64
// or panics if it can't convert
func asUint(param string) uint64 {
@@ -239,13 +263,19 @@ func asUint(param string) uint64 {
return i
}
-// asFloat returns the parameter as a float64
+// asFloat64 returns the parameter as a float64
// or panics if it can't convert
-func asFloat(param string) float64 {
-
+func asFloat64(param string) float64 {
i, err := strconv.ParseFloat(param, 64)
panicIf(err)
+ return i
+}
+// asFloat32 returns the parameter as a float32
+// or panics if it can't convert
+func asFloat32(param string) float64 {
+ i, err := strconv.ParseFloat(param, 32)
+ panicIf(err)
return i
}
@@ -264,3 +294,19 @@ func panicIf(err error) {
panic(err.Error())
}
}
+
+// Checks if field value matches regex. If fl.Field can be cast to Stringer, it uses the Stringer
+// interface's String() return value. Otherwise, it uses fl.Field's String() value.
+func fieldMatchesRegexByStringerValOrString(regexFn func() *regexp.Regexp, fl FieldLevel) bool {
+ regex := regexFn()
+ switch fl.Field().Kind() {
+ case reflect.String:
+ return regex.MatchString(fl.Field().String())
+ default:
+ if stringer, ok := fl.Field().Interface().(fmt.Stringer); ok {
+ return regex.MatchString(stringer.String())
+ } else {
+ return regex.MatchString(fl.Field().String())
+ }
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator.go
index 342e72e315c..901e7b50a4b 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator.go
@@ -5,6 +5,7 @@ import (
"fmt"
"reflect"
"strconv"
+ "unsafe"
)
// per validate construct
@@ -74,7 +75,7 @@ func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, cur
}
}
- v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags)
+ v.traverseField(ctx, current, current.Field(f.idx), ns, structNs, f, f.cTags)
}
}
@@ -99,6 +100,8 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr
current, kind, v.fldIsPointer = v.extractTypeInternal(current, false)
+ var isNestedStruct bool
+
switch kind {
case reflect.Ptr, reflect.Interface, reflect.Invalid:
@@ -110,6 +113,10 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr
return
}
+ if ct.typeof == typeOmitNil && (kind != reflect.Invalid && current.IsNil()) {
+ return
+ }
+
if ct.hasTag {
if kind == reflect.Invalid {
v.str1 = string(append(ns, cf.altName...))
@@ -150,7 +157,7 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
+ value: getValue(current),
param: ct.param,
kind: kind,
typ: current.Type(),
@@ -160,86 +167,61 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr
}
}
- case reflect.Struct:
-
- typ = current.Type()
-
- if typ != timeType {
-
- if ct != nil {
-
- if ct.typeof == typeStructOnly {
- goto CONTINUE
- } else if ct.typeof == typeIsDefault {
- // set Field Level fields
- v.slflParent = parent
- v.flField = current
- v.cf = cf
- v.ct = ct
-
- if !ct.fn(ctx, v) {
- v.str1 = string(append(ns, cf.altName...))
-
- if v.v.hasTagNameFunc {
- v.str2 = string(append(structNs, cf.name...))
- } else {
- v.str2 = v.str1
- }
-
- v.errs = append(v.errs,
- &fieldError{
- v: v.v,
- tag: ct.aliasTag,
- actualTag: ct.tag,
- ns: v.str1,
- structNs: v.str2,
- fieldLen: uint8(len(cf.altName)),
- structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
- param: ct.param,
- kind: kind,
- typ: typ,
- },
- )
- return
- }
- }
-
- ct = ct.next
- }
-
- if ct != nil && ct.typeof == typeNoStructLevel {
- return
- }
-
- CONTINUE:
- // if len == 0 then validating using 'Var' or 'VarWithValue'
- // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
- // VarWithField - this allows for validating against each field within the struct against a specific value
- // pretty handy in certain situations
- if len(cf.name) > 0 {
- ns = append(append(ns, cf.altName...), '.')
- structNs = append(append(structNs, cf.name...), '.')
- }
-
- v.validateStruct(ctx, current, current, typ, ns, structNs, ct)
+ if kind == reflect.Invalid {
return
}
- }
- if !ct.hasTag {
- return
+ case reflect.Struct:
+ isNestedStruct = !current.Type().ConvertibleTo(timeType)
+ // For backward compatibility before struct level validation tags were supported
+ // as there were a number of projects relying on `required` not failing on non-pointer
+ // structs. Since it's basically nonsensical to use `required` with a non-pointer struct
+ // we are explicitly skipping the required validation for it. This WILL be removed in the
+ // next major version.
+ if isNestedStruct && !v.v.requiredStructEnabled && ct != nil && ct.tag == requiredTag {
+ ct = ct.next
+ }
}
typ = current.Type()
OUTER:
for {
- if ct == nil {
+ if ct == nil || !ct.hasTag || (isNestedStruct && len(cf.name) == 0) {
+ // isNestedStruct check here
+ if isNestedStruct {
+ // if len == 0 then validating using 'Var' or 'VarWithValue'
+ // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
+ // VarWithField - this allows for validating against each field within the struct against a specific value
+ // pretty handy in certain situations
+ if len(cf.name) > 0 {
+ ns = append(append(ns, cf.altName...), '.')
+ structNs = append(append(structNs, cf.name...), '.')
+ }
+
+ v.validateStruct(ctx, parent, current, typ, ns, structNs, ct)
+ }
return
}
switch ct.typeof {
+ case typeNoStructLevel:
+ return
+
+ case typeStructOnly:
+ if isNestedStruct {
+ // if len == 0 then validating using 'Var' or 'VarWithValue'
+ // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
+ // VarWithField - this allows for validating against each field within the struct against a specific value
+ // pretty handy in certain situations
+ if len(cf.name) > 0 {
+ ns = append(append(ns, cf.altName...), '.')
+ structNs = append(append(structNs, cf.name...), '.')
+ }
+
+ v.validateStruct(ctx, parent, current, typ, ns, structNs, ct)
+ }
+ return
case typeOmitEmpty:
@@ -249,13 +231,33 @@ OUTER:
v.cf = cf
v.ct = ct
- if !v.fldIsPointer && !hasValue(v) {
+ if !hasValue(v) {
return
}
ct = ct.next
continue
+ case typeOmitNil:
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ switch field := v.Field(); field.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ if field.IsNil() {
+ return
+ }
+ default:
+ if v.fldIsPointer && field.Interface() == nil {
+ return
+ }
+ }
+
+ ct = ct.next
+ continue
+
case typeEndKeys:
return
@@ -355,6 +357,10 @@ OUTER:
v.ct = ct
if ct.fn(ctx, v) {
+ if ct.isBlockEnd {
+ ct = ct.next
+ continue OUTER
+ }
// drain rest of the 'or' values, then continue or leave
for {
@@ -362,12 +368,17 @@ OUTER:
ct = ct.next
if ct == nil {
- return
+ continue OUTER
}
if ct.typeof != typeOr {
continue OUTER
}
+
+ if ct.isBlockEnd {
+ ct = ct.next
+ continue OUTER
+ }
}
}
@@ -400,7 +411,7 @@ OUTER:
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
+ value: getValue(current),
param: ct.param,
kind: kind,
typ: typ,
@@ -420,7 +431,7 @@ OUTER:
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
+ value: getValue(current),
param: ct.param,
kind: kind,
typ: typ,
@@ -443,7 +454,6 @@ OUTER:
v.ct = ct
if !ct.fn(ctx, v) {
-
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
@@ -461,7 +471,7 @@ OUTER:
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
- value: current.Interface(),
+ value: getValue(current),
param: ct.param,
kind: kind,
typ: typ,
@@ -475,3 +485,26 @@ OUTER:
}
}
+
+func getValue(val reflect.Value) interface{} {
+ if val.CanInterface() {
+ return val.Interface()
+ }
+
+ if val.CanAddr() {
+ return reflect.NewAt(val.Type(), unsafe.Pointer(val.UnsafeAddr())).Elem().Interface()
+ }
+
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return val.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return val.Uint()
+ case reflect.Complex64, reflect.Complex128:
+ return val.Complex()
+ case reflect.Float32, reflect.Float64:
+ return val.Float()
+ default:
+ return val.String()
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator_instance.go b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator_instance.go
index 4a89d406106..d9f148dbaec 100644
--- a/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator_instance.go
+++ b/integration/assets/hydrabroker/vendor/github.com/go-playground/validator/v10/validator_instance.go
@@ -22,11 +22,21 @@ const (
structOnlyTag = "structonly"
noStructLevelTag = "nostructlevel"
omitempty = "omitempty"
+ omitnil = "omitnil"
isdefault = "isdefault"
requiredWithoutAllTag = "required_without_all"
requiredWithoutTag = "required_without"
requiredWithTag = "required_with"
requiredWithAllTag = "required_with_all"
+ requiredIfTag = "required_if"
+ requiredUnlessTag = "required_unless"
+ skipUnlessTag = "skip_unless"
+ excludedWithoutAllTag = "excluded_without_all"
+ excludedWithoutTag = "excluded_without"
+ excludedWithTag = "excluded_with"
+ excludedWithAllTag = "excluded_with_all"
+ excludedIfTag = "excluded_if"
+ excludedUnlessTag = "excluded_unless"
skipValidationTag = "-"
diveTag = "dive"
keysTag = "keys"
@@ -41,13 +51,17 @@ const (
)
var (
- timeType = reflect.TypeOf(time.Time{})
+ timeDurationType = reflect.TypeOf(time.Duration(0))
+ timeType = reflect.TypeOf(time.Time{})
+
+ byteSliceType = reflect.TypeOf([]byte{})
+
defaultCField = &cField{namesEqual: true}
)
// FilterFunc is the type used to filter fields using
// StructFiltered(...) function.
-// returning true results in the field being filtered/skiped from
+// returning true results in the field being filtered/skipped from
// validation
type FilterFunc func(ns []byte) bool
@@ -60,28 +74,35 @@ type CustomTypeFunc func(field reflect.Value) interface{}
type TagNameFunc func(field reflect.StructField) string
type internalValidationFuncWrapper struct {
- fn FuncCtx
- runValidatinOnNil bool
+ fn FuncCtx
+ runValidationOnNil bool
}
// Validate contains the validator settings and cache
type Validate struct {
- tagName string
- pool *sync.Pool
- hasCustomFuncs bool
- hasTagNameFunc bool
- tagNameFunc TagNameFunc
- structLevelFuncs map[reflect.Type]StructLevelFuncCtx
- customFuncs map[reflect.Type]CustomTypeFunc
- aliases map[string]string
- validations map[string]internalValidationFuncWrapper
- transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc
- tagCache *tagCache
- structCache *structCache
+ tagName string
+ pool *sync.Pool
+ tagNameFunc TagNameFunc
+ structLevelFuncs map[reflect.Type]StructLevelFuncCtx
+ customFuncs map[reflect.Type]CustomTypeFunc
+ aliases map[string]string
+ validations map[string]internalValidationFuncWrapper
+ transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc
+ rules map[reflect.Type]map[string]string
+ tagCache *tagCache
+ structCache *structCache
+ hasCustomFuncs bool
+ hasTagNameFunc bool
+ requiredStructEnabled bool
+ privateFieldValidation bool
}
// New returns a new instance of 'validate' with sane defaults.
-func New() *Validate {
+// Validate is designed to be thread-safe and used as a singleton instance.
+// It caches information about your struct and validations,
+// in essence only parsing your validation tags once per struct type.
+// Using multiple instances neglects the benefit of caching.
+func New(options ...Option) *Validate {
tc := new(tagCache)
tc.m.Store(make(map[string]*cTag))
@@ -107,7 +128,9 @@ func New() *Validate {
switch k {
// these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour
- case requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag:
+ case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag,
+ excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag,
+ skipUnlessTag:
_ = v.registerValidation(k, wrapFunc(val), true, true)
default:
// no need to error check here, baked in will always be valid
@@ -126,6 +149,9 @@ func New() *Validate {
},
}
+ for _, o := range options {
+ o(v)
+ }
return v
}
@@ -134,17 +160,54 @@ func (v *Validate) SetTagName(name string) {
v.tagName = name
}
+// ValidateMapCtx validates a map using a map of validation rules and allows passing of contextual
+// validation information via context.Context.
+func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
+ errs := make(map[string]interface{})
+ for field, rule := range rules {
+ if ruleObj, ok := rule.(map[string]interface{}); ok {
+ if dataObj, ok := data[field].(map[string]interface{}); ok {
+ err := v.ValidateMapCtx(ctx, dataObj, ruleObj)
+ if len(err) > 0 {
+ errs[field] = err
+ }
+ } else if dataObjs, ok := data[field].([]map[string]interface{}); ok {
+ for _, obj := range dataObjs {
+ err := v.ValidateMapCtx(ctx, obj, ruleObj)
+ if len(err) > 0 {
+ errs[field] = err
+ }
+ }
+ } else {
+ errs[field] = errors.New("The field: '" + field + "' is not a map to dive")
+ }
+ } else if ruleStr, ok := rule.(string); ok {
+ err := v.VarCtx(ctx, data[field], ruleStr)
+ if err != nil {
+ errs[field] = err
+ }
+ }
+ }
+ return errs
+}
+
+// ValidateMap validates map data from a map of tags
+func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
+ return v.ValidateMapCtx(context.Background(), data, rules)
+}
+
// RegisterTagNameFunc registers a function to get alternate names for StructFields.
//
// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names:
//
-// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
-// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
-// if name == "-" {
-// return ""
-// }
-// return name
-// })
+// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
+// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
+// // skip if tag key says it should be ignored
+// if name == "-" {
+// return ""
+// }
+// return name
+// })
func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
v.tagNameFunc = fn
v.hasTagNameFunc = true
@@ -171,18 +234,18 @@ func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationE
func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error {
if len(tag) == 0 {
- return errors.New("Function Key cannot be empty")
+ return errors.New("function Key cannot be empty")
}
if fn == nil {
- return errors.New("Function cannot be empty")
+ return errors.New("function cannot be empty")
}
_, ok := restrictedTags[tag]
if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) {
panic(fmt.Sprintf(restrictedTagErr, tag))
}
- v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable}
+ v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidationOnNil: nilCheckable}
return nil
}
@@ -231,6 +294,34 @@ func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...i
}
}
+// RegisterStructValidationMapRules registers validate map rules.
+// Be aware that map validation rules supersede those defined on a/the struct if present.
+//
+// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterStructValidationMapRules(rules map[string]string, types ...interface{}) {
+ if v.rules == nil {
+ v.rules = make(map[reflect.Type]map[string]string)
+ }
+
+ deepCopyRules := make(map[string]string)
+ for i, rule := range rules {
+ deepCopyRules[i] = rule
+ }
+
+ for _, t := range types {
+ typ := reflect.TypeOf(t)
+
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ }
+
+ if typ.Kind() != reflect.Struct {
+ continue
+ }
+ v.rules[typ] = deepCopyRules
+ }
+}
+
// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
//
// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation
@@ -291,7 +382,7 @@ func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) {
val = val.Elem()
}
- if val.Kind() != reflect.Struct || val.Type() == timeType {
+ if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
@@ -336,7 +427,7 @@ func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn Filt
val = val.Elem()
}
- if val.Kind() != reflect.Struct || val.Type() == timeType {
+ if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
@@ -370,7 +461,7 @@ func (v *Validate) StructPartial(s interface{}, fields ...string) error {
}
// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
-// validation validation information via context.Context
+// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
@@ -384,7 +475,7 @@ func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields .
val = val.Elem()
}
- if val.Kind() != reflect.Struct || val.Type() == timeType {
+ if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
@@ -405,7 +496,10 @@ func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields .
if len(flds) > 0 {
vd.misc = append(vd.misc[0:0], name...)
- vd.misc = append(vd.misc, '.')
+ // Don't append empty name for unnamed structs
+ if len(vd.misc) != 0 {
+ vd.misc = append(vd.misc, '.')
+ }
for _, s := range flds {
@@ -457,7 +551,7 @@ func (v *Validate) StructExcept(s interface{}, fields ...string) error {
}
// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
-// validation validation information via context.Context
+// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
@@ -471,7 +565,7 @@ func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ..
val = val.Elem()
}
- if val.Kind() != reflect.Struct || val.Type() == timeType {
+ if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
@@ -529,7 +623,7 @@ func (v *Validate) Var(field interface{}, tag string) error {
}
// VarCtx validates a single variable using tag style validation and allows passing of contextual
-// validation validation information via context.Context.
+// validation information via context.Context.
// eg.
// var i int
// validate.Var(i, "gt=1,lt=10")
@@ -548,6 +642,7 @@ func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (e
}
ctag := v.fetchCacheTag(tag)
+
val := reflect.ValueOf(field)
vd := v.pool.Get().(*validate)
vd.top = val
@@ -581,7 +676,7 @@ func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string
}
// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and
-// allows passing of contextual validation validation information via context.Context.
+// allows passing of contextual validation information via context.Context.
// eg.
// s1 := "abcd"
// s2 := "abcd"
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.editorconfig b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
new file mode 100644
index 00000000000..b0c95367e75
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.editorconfig
@@ -0,0 +1,14 @@
+# editorconfig.org
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = tab
+indent_size = 8
+
+[*.{md,yml,yaml,json}]
+indent_style = space
+indent_size = 2
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.gitattributes b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
new file mode 100644
index 00000000000..176a458f94e
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.gitignore b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.gitignore
new file mode 100644
index 00000000000..5e3002f88f5
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+/.glide
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
new file mode 100644
index 00000000000..2ce45dd4eca
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md
@@ -0,0 +1,383 @@
+# Changelog
+
+## Release 3.2.3 (2022-11-29)
+
+### Changed
+
+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
+- #353: Updated masterminds/semver which included bug fixes
+- #354: Updated golang.org/x/crypto which included bug fixes
+
+## Release 3.2.2 (2021-02-04)
+
+This is a re-release of 3.2.1 to satisfy something with the Go module system.
+
+## Release 3.2.1 (2021-02-04)
+
+### Changed
+
+- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
+
+## Release 3.2.0 (2020-12-14)
+
+### Added
+
+- #211: Added randInt function (thanks @kochurovro)
+- #223: Added fromJson and mustFromJson functions (thanks @mholt)
+- #242: Added a bcrypt function (thanks @robbiet480)
+- #253: Added randBytes function (thanks @MikaelSmith)
+- #254: Added dig function for dicts (thanks @nyarly)
+- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
+- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
+- #268: Added and and all functions for testing conditions (thanks @phuslu)
+- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
+ (thanks @andrewmostello)
+- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
+- #270: Extend certificate functions to handle non-RSA keys + add support for
+ ed25519 keys (thanks @misberner)
+
+### Changed
+
+- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
+- Using semver 3.1.1 and mergo 0.3.11
+
+### Fixed
+
+- #249: Fix htmlDateInZone example (thanks @spawnia)
+
+NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
+0.3.9 via 0.3.10 release.
+
+## Release 3.1.0 (2020-04-16)
+
+NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
+that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.
+
+### Added
+
+- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
+- #224: Added duration filter (thanks @frebib)
+- #205: Added `seq` function (thanks @thadc23)
+
+### Changed
+
+- #203: Unlambda functions with correct signature (thanks @muesli)
+- #236: Updated the license formatting for GitHub display purposes
+- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
+ as it causes a breaking change for sprig. That issue is tracked at
+ https://github.com/imdario/mergo/issues/139
+
+### Fixed
+
+- #229: Fix `seq` example in docs (thanks @kalmant)
+
+## Release 3.0.2 (2019-12-13)
+
+### Fixed
+
+- #220: Updating to semver v3.0.3 to fix issue with <= ranges
+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
+
+## Release 3.0.1 (2019-12-08)
+
+### Fixed
+
+- #212: Updated semver fixing broken constraint checking with ^0.0
+
+## Release 3.0.0 (2019-10-02)
+
+### Added
+
+- #187: Added durationRound function (thanks @yjp20)
+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
+- #193: Added toRawJson support (thanks @Dean-Coakley)
+- #197: Added get support to dicts (thanks @Dean-Coakley)
+
+### Changed
+
+- #186: Moving dependency management to Go modules
+- #186: Updated semver to v3. This has changes in the way ^ is handled
+- #194: Updated documentation on merging and how it copies. Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+ use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list contcat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds alder32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages genertion
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+ switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/LICENSE.txt b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
similarity index 82%
rename from integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/LICENSE.txt
rename to integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
index 818d802a59a..f311b1eaaaa 100644
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/LICENSE.txt
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt
@@ -1,7 +1,4 @@
-# The MIT License (MIT)
-
-# © Copyright 2015 Hewlett Packard Enterprise Development LP
-Copyright (c) 2014 ActiveState
+Copyright (C) 2013-2020 Masterminds
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +6,14 @@ in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/README.md b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/README.md
new file mode 100644
index 00000000000..b5ab564254f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/README.md
@@ -0,0 +1,73 @@
+# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3)
+
+Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
+all functions that depend on external (non standard library) or crypto packages
+removed.
+The reason for this is to make this library more lightweight. Most of these
+functions (specially crypto ones) are not needed on most apps, but costs a lot
+in terms of binary size and compilation time.
+
+## Usage
+
+**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig).
+
+For standard usage, read on.
+
+### Load the Slim-Sprig library
+
+To load the Slim-Sprig `FuncMap`:
+
+```go
+
+import (
+ "html/template"
+
+ "github.com/go-task/slim-sprig"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+ template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+ types of operations are within the domain of template functions:
+ - Formatting
+ - Layout
+ - Simple type conversions
+ - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+ a sensible value. For example, converting a string to an integer should not
+ produce an error if conversion fails. Instead, it should display a default
+ value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+ (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+ data from a source.
+- Finally, do not override core Go template functions.
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
new file mode 100644
index 00000000000..8e6346bb19e
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml
@@ -0,0 +1,12 @@
+# https://taskfile.dev
+
+version: '3'
+
+tasks:
+ default:
+ cmds:
+ - task: test
+
+ test:
+ cmds:
+ - go test -v .
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/crypto.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/crypto.go
new file mode 100644
index 00000000000..d06e516d49e
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/crypto.go
@@ -0,0 +1,24 @@
+package sprig
+
+import (
+ "crypto/sha1"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "hash/adler32"
+)
+
+func sha256sum(input string) string {
+ hash := sha256.Sum256([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func sha1sum(input string) string {
+ hash := sha1.Sum([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func adler32sum(input string) string {
+ hash := adler32.Checksum([]byte(input))
+ return fmt.Sprintf("%d", hash)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/date.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/date.go
new file mode 100644
index 00000000000..ed022ddacac
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/date.go
@@ -0,0 +1,152 @@
+package sprig
+
+import (
+ "strconv"
+ "time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the later case, it is treated as seconds since UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+ return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+ return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+ return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+ var t time.Time
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case *time.Time:
+ t = *date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ case int32:
+ t = time.Unix(int64(date), 0)
+ }
+
+ loc, err := time.LoadLocation(zone)
+ if err != nil {
+ loc, _ = time.LoadLocation("UTC")
+ }
+
+ return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return date
+ }
+ return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+ var t time.Time
+
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ }
+ // Drop resolution to seconds
+ duration := time.Since(t).Round(time.Second)
+ return duration.String()
+}
+
+func duration(sec interface{}) string {
+ var n int64
+ switch value := sec.(type) {
+ default:
+ n = 0
+ case string:
+ n, _ = strconv.ParseInt(value, 10, 64)
+ case int64:
+ n = value
+ }
+ return (time.Duration(n) * time.Second).String()
+}
+
+func durationRound(duration interface{}) string {
+ var d time.Duration
+ switch duration := duration.(type) {
+ default:
+ d = 0
+ case string:
+ d, _ = time.ParseDuration(duration)
+ case int64:
+ d = time.Duration(duration)
+ case time.Time:
+ d = time.Since(duration)
+ }
+
+ u := uint64(d)
+ neg := d < 0
+ if neg {
+ u = -u
+ }
+
+ var (
+ year = uint64(time.Hour) * 24 * 365
+ month = uint64(time.Hour) * 24 * 30
+ day = uint64(time.Hour) * 24
+ hour = uint64(time.Hour)
+ minute = uint64(time.Minute)
+ second = uint64(time.Second)
+ )
+ switch {
+ case u > year:
+ return strconv.FormatUint(u/year, 10) + "y"
+ case u > month:
+ return strconv.FormatUint(u/month, 10) + "mo"
+ case u > day:
+ return strconv.FormatUint(u/day, 10) + "d"
+ case u > hour:
+ return strconv.FormatUint(u/hour, 10) + "h"
+ case u > minute:
+ return strconv.FormatUint(u/minute, 10) + "m"
+ case u > second:
+ return strconv.FormatUint(u/second, 10) + "s"
+ }
+ return "0s"
+}
+
+func toDate(fmt, str string) time.Time {
+ t, _ := time.ParseInLocation(fmt, str, time.Local)
+ return t
+}
+
+func mustToDate(fmt, str string) (time.Time, error) {
+ return time.ParseInLocation(fmt, str, time.Local)
+}
+
+func unixEpoch(date time.Time) string {
+ return strconv.FormatInt(date.Unix(), 10)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/defaults.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/defaults.go
new file mode 100644
index 00000000000..b9f979666dd
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/defaults.go
@@ -0,0 +1,163 @@
+package sprig
+
+import (
+ "bytes"
+ "encoding/json"
+ "math/rand"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// dfault checks whether `given` is set, and returns default if not set.
+//
+// This returns `d` if `given` appears not to be set, and `given` otherwise.
+//
+// For numeric types 0 is unset.
+// For strings, maps, arrays, and slices, len() = 0 is considered unset.
+// For bool, false is unset.
+// Structs are never considered unset.
+//
+// For everything else, including pointers, a nil value is unset.
+func dfault(d interface{}, given ...interface{}) interface{} {
+
+ if empty(given) || empty(given[0]) {
+ return d
+ }
+ return given[0]
+}
+
+// empty returns true if the given value has the zero value for its type.
+func empty(given interface{}) bool {
+ g := reflect.ValueOf(given)
+ if !g.IsValid() {
+ return true
+ }
+
+ // Basically adapted from text/template.isTrue
+ switch g.Kind() {
+ default:
+ return g.IsNil()
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return g.Len() == 0
+ case reflect.Bool:
+ return !g.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ return g.Complex() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return g.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return g.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return g.Float() == 0
+ case reflect.Struct:
+ return false
+ }
+}
+
+// coalesce returns the first non-empty value.
+func coalesce(v ...interface{}) interface{} {
+ for _, val := range v {
+ if !empty(val) {
+ return val
+ }
+ }
+ return nil
+}
+
+// all returns true if empty(x) is false for all values x in the list.
+// If the list is empty, return true.
+func all(v ...interface{}) bool {
+ for _, val := range v {
+ if empty(val) {
+ return false
+ }
+ }
+ return true
+}
+
+// any returns true if empty(x) is false for any x in the list.
+// If the list is empty, return false.
+func any(v ...interface{}) bool {
+ for _, val := range v {
+ if !empty(val) {
+ return true
+ }
+ }
+ return false
+}
+
+// fromJson decodes JSON into a structured value, ignoring errors.
+func fromJson(v string) interface{} {
+ output, _ := mustFromJson(v)
+ return output
+}
+
+// mustFromJson decodes JSON into a structured value, returning errors.
+func mustFromJson(v string) (interface{}, error) {
+ var output interface{}
+ err := json.Unmarshal([]byte(v), &output)
+ return output, err
+}
+
+// toJson encodes an item into a JSON string
+func toJson(v interface{}) string {
+ output, _ := json.Marshal(v)
+ return string(output)
+}
+
+func mustToJson(v interface{}) (string, error) {
+ output, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toPrettyJson encodes an item into a pretty (indented) JSON string
+func toPrettyJson(v interface{}) string {
+ output, _ := json.MarshalIndent(v, "", " ")
+ return string(output)
+}
+
+func mustToPrettyJson(v interface{}) (string, error) {
+ output, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func toRawJson(v interface{}) string {
+ output, err := mustToRawJson(v)
+ if err != nil {
+ panic(err)
+ }
+ return string(output)
+}
+
+// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func mustToRawJson(v interface{}) (string, error) {
+ buf := new(bytes.Buffer)
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ err := enc.Encode(&v)
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSuffix(buf.String(), "\n"), nil
+}
+
+// ternary returns the first value if the last value is true, otherwise returns the second value.
+func ternary(vt interface{}, vf interface{}, v bool) interface{} {
+ if v {
+ return vt
+ }
+
+ return vf
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/dict.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/dict.go
new file mode 100644
index 00000000000..77ebc61b189
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/dict.go
@@ -0,0 +1,118 @@
+package sprig
+
// get returns d[key], or the empty string when the key is absent.
func get(d map[string]interface{}, key string) interface{} {
	val, ok := d[key]
	if !ok {
		return ""
	}
	return val
}

// set stores value under key and returns the (mutated) dict.
func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
	d[key] = value
	return d
}

// unset removes key and returns the (mutated) dict.
func unset(d map[string]interface{}, key string) map[string]interface{} {
	delete(d, key)
	return d
}

// hasKey reports whether key is present in d.
func hasKey(d map[string]interface{}, key string) bool {
	_, found := d[key]
	return found
}
+
// pluck collects the values stored under key in each dict, skipping
// dicts that do not contain the key.
func pluck(key string, d ...map[string]interface{}) []interface{} {
	found := []interface{}{}
	for _, m := range d {
		if v, ok := m[key]; ok {
			found = append(found, v)
		}
	}
	return found
}

// keys returns every key of every dict, concatenated in map-iteration
// (i.e. unspecified) order; duplicates across dicts are kept.
func keys(dicts ...map[string]interface{}) []string {
	all := []string{}
	for _, m := range dicts {
		for name := range m {
			all = append(all, name)
		}
	}
	return all
}

// pick builds a new dict holding only the requested keys that exist in dict.
func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
	sub := map[string]interface{}{}
	for _, name := range keys {
		if v, ok := dict[name]; ok {
			sub[name] = v
		}
	}
	return sub
}
+
// omit builds a new dict with every entry of dict except the listed keys.
func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
	exclude := make(map[string]bool, len(keys))
	for _, k := range keys {
		exclude[k] = true
	}

	kept := map[string]interface{}{}
	for k, v := range dict {
		if !exclude[k] {
			kept[k] = v
		}
	}
	return kept
}
+
+func dict(v ...interface{}) map[string]interface{} {
+ dict := map[string]interface{}{}
+ lenv := len(v)
+ for i := 0; i < lenv; i += 2 {
+ key := strval(v[i])
+ if i+1 >= lenv {
+ dict[key] = ""
+ continue
+ }
+ dict[key] = v[i+1]
+ }
+ return dict
+}
+
// values returns the dict's values in map-iteration (unspecified) order.
func values(dict map[string]interface{}) []interface{} {
	vs := []interface{}{}
	for _, v := range dict {
		vs = append(vs, v)
	}
	return vs
}
+
// dig traverses nested dicts: dig "k1" "k2" ... default dict. The final
// two arguments are the default value and the dict; everything before
// them is a key path. Returns the default when any key is missing.
func dig(ps ...interface{}) (interface{}, error) {
	if len(ps) < 3 {
		panic("dig needs at least three arguments")
	}
	nKeys := len(ps) - 2
	path := make([]string, nKeys)
	for i := range path {
		path[i] = ps[i].(string)
	}
	return digFromDict(ps[len(ps)-1].(map[string]interface{}), ps[nKeys], path)
}

// digFromDict recursively walks ks through nested dicts, returning d
// when a key is absent.
func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
	k, rest := ks[0], ks[1:]
	step, ok := dict[k]
	if !ok {
		return d, nil
	}
	if len(rest) == 0 {
		return step, nil
	}
	return digFromDict(step.(map[string]interface{}), d, rest)
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/doc.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/doc.go
new file mode 100644
index 00000000000..aabb9d4489f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/doc.go
@@ -0,0 +1,19 @@
+/*
+Package sprig provides template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+ t := templates.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+ In several cases, Sprig reverses the order of arguments from the way they
+ appear in the standard library. This is to make it easier to pipe
+ arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/functions.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/functions.go
new file mode 100644
index 00000000000..5ea74f89930
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/functions.go
@@ -0,0 +1,317 @@
+package sprig
+
+import (
+ "errors"
+ "html/template"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ ttemplate "text/template"
+ "time"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+// tpl := template.New("foo").Funcs(sprig.FuncMap()))
+//
+func FuncMap() template.FuncMap {
+ return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+ r := TxtFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions.
+func HermeticHtmlFuncMap() template.FuncMap {
+ r := HtmlFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// TxtFuncMap returns a 'text/template'.FuncMap
+func TxtFuncMap() ttemplate.FuncMap {
+ return ttemplate.FuncMap(GenericFuncMap())
+}
+
+// HtmlFuncMap returns an 'html/template'.Funcmap
+func HtmlFuncMap() template.FuncMap {
+ return template.FuncMap(GenericFuncMap())
+}
+
+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
+func GenericFuncMap() map[string]interface{} {
+ gfm := make(map[string]interface{}, len(genericMap))
+ for k, v := range genericMap {
+ gfm[k] = v
+ }
+ return gfm
+}
+
// These functions are not guaranteed to evaluate to the same result for given input, because they
// refer to the environment or global state.
// They are stripped from the maps returned by HermeticTxtFuncMap and
// HermeticHtmlFuncMap, which promise repeatable template output.
var nonhermeticFunctions = []string{
	// Date functions
	"date",
	"date_in_zone",
	"date_modify",
	"now",
	"htmlDate",
	"htmlDateInZone",
	"dateInZone",
	"dateModify",

	// Strings
	"randAlphaNum",
	"randAlpha",
	"randAscii",
	"randNumeric",
	"randBytes",
	"uuidv4",

	// OS
	"env",
	"expandenv",

	// Network
	"getHostByName",
}
+
+var genericMap = map[string]interface{}{
+ "hello": func() string { return "Hello!" },
+
+ // Date functions
+ "ago": dateAgo,
+ "date": date,
+ "date_in_zone": dateInZone,
+ "date_modify": dateModify,
+ "dateInZone": dateInZone,
+ "dateModify": dateModify,
+ "duration": duration,
+ "durationRound": durationRound,
+ "htmlDate": htmlDate,
+ "htmlDateInZone": htmlDateInZone,
+ "must_date_modify": mustDateModify,
+ "mustDateModify": mustDateModify,
+ "mustToDate": mustToDate,
+ "now": time.Now,
+ "toDate": toDate,
+ "unixEpoch": unixEpoch,
+
+ // Strings
+ "trunc": trunc,
+ "trim": strings.TrimSpace,
+ "upper": strings.ToUpper,
+ "lower": strings.ToLower,
+ "title": strings.Title,
+ "substr": substring,
+ // Switch order so that "foo" | repeat 5
+ "repeat": func(count int, str string) string { return strings.Repeat(str, count) },
+ // Deprecated: Use trimAll.
+ "trimall": func(a, b string) string { return strings.Trim(b, a) },
+ // Switch order so that "$foo" | trimall "$"
+ "trimAll": func(a, b string) string { return strings.Trim(b, a) },
+ "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
+ "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
+ // Switch order so that "foobar" | contains "foo"
+ "contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
+ "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
+ "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
+ "quote": quote,
+ "squote": squote,
+ "cat": cat,
+ "indent": indent,
+ "nindent": nindent,
+ "replace": replace,
+ "plural": plural,
+ "sha1sum": sha1sum,
+ "sha256sum": sha256sum,
+ "adler32sum": adler32sum,
+ "toString": strval,
+
+ // Wrap Atoi to stop errors.
+ "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
+ "int64": toInt64,
+ "int": toInt,
+ "float64": toFloat64,
+ "seq": seq,
+ "toDecimal": toDecimal,
+
+ //"gt": func(a, b int) bool {return a > b},
+ //"gte": func(a, b int) bool {return a >= b},
+ //"lt": func(a, b int) bool {return a < b},
+ //"lte": func(a, b int) bool {return a <= b},
+
+ // split "/" foo/bar returns map[int]string{0: foo, 1: bar}
+ "split": split,
+ "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
+ // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu}
+ "splitn": splitn,
+ "toStrings": strslice,
+
+ "until": until,
+ "untilStep": untilStep,
+
+ // VERY basic arithmetic.
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 },
+ "add": func(i ...interface{}) int64 {
+ var a int64 = 0
+ for _, b := range i {
+ a += toInt64(b)
+ }
+ return a
+ },
+ "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
+ "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
+ "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
+ "mul": func(a interface{}, v ...interface{}) int64 {
+ val := toInt64(a)
+ for _, b := range v {
+ val = val * toInt64(b)
+ }
+ return val
+ },
+ "randInt": func(min, max int) int { return rand.Intn(max-min) + min },
+ "biggest": max,
+ "max": max,
+ "min": min,
+ "maxf": maxf,
+ "minf": minf,
+ "ceil": ceil,
+ "floor": floor,
+ "round": round,
+
+ // string slices. Note that we reverse the order b/c that's better
+ // for template processing.
+ "join": join,
+ "sortAlpha": sortAlpha,
+
+ // Defaults
+ "default": dfault,
+ "empty": empty,
+ "coalesce": coalesce,
+ "all": all,
+ "any": any,
+ "compact": compact,
+ "mustCompact": mustCompact,
+ "fromJson": fromJson,
+ "toJson": toJson,
+ "toPrettyJson": toPrettyJson,
+ "toRawJson": toRawJson,
+ "mustFromJson": mustFromJson,
+ "mustToJson": mustToJson,
+ "mustToPrettyJson": mustToPrettyJson,
+ "mustToRawJson": mustToRawJson,
+ "ternary": ternary,
+
+ // Reflection
+ "typeOf": typeOf,
+ "typeIs": typeIs,
+ "typeIsLike": typeIsLike,
+ "kindOf": kindOf,
+ "kindIs": kindIs,
+ "deepEqual": reflect.DeepEqual,
+
+ // OS:
+ "env": os.Getenv,
+ "expandenv": os.ExpandEnv,
+
+ // Network:
+ "getHostByName": getHostByName,
+
+ // Paths:
+ "base": path.Base,
+ "dir": path.Dir,
+ "clean": path.Clean,
+ "ext": path.Ext,
+ "isAbs": path.IsAbs,
+
+ // Filepaths:
+ "osBase": filepath.Base,
+ "osClean": filepath.Clean,
+ "osDir": filepath.Dir,
+ "osExt": filepath.Ext,
+ "osIsAbs": filepath.IsAbs,
+
+ // Encoding:
+ "b64enc": base64encode,
+ "b64dec": base64decode,
+ "b32enc": base32encode,
+ "b32dec": base32decode,
+
+ // Data Structures:
+ "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
+ "list": list,
+ "dict": dict,
+ "get": get,
+ "set": set,
+ "unset": unset,
+ "hasKey": hasKey,
+ "pluck": pluck,
+ "keys": keys,
+ "pick": pick,
+ "omit": omit,
+ "values": values,
+
+ "append": push, "push": push,
+ "mustAppend": mustPush, "mustPush": mustPush,
+ "prepend": prepend,
+ "mustPrepend": mustPrepend,
+ "first": first,
+ "mustFirst": mustFirst,
+ "rest": rest,
+ "mustRest": mustRest,
+ "last": last,
+ "mustLast": mustLast,
+ "initial": initial,
+ "mustInitial": mustInitial,
+ "reverse": reverse,
+ "mustReverse": mustReverse,
+ "uniq": uniq,
+ "mustUniq": mustUniq,
+ "without": without,
+ "mustWithout": mustWithout,
+ "has": has,
+ "mustHas": mustHas,
+ "slice": slice,
+ "mustSlice": mustSlice,
+ "concat": concat,
+ "dig": dig,
+ "chunk": chunk,
+ "mustChunk": mustChunk,
+
+ // Flow Control:
+ "fail": func(msg string) (string, error) { return "", errors.New(msg) },
+
+ // Regex
+ "regexMatch": regexMatch,
+ "mustRegexMatch": mustRegexMatch,
+ "regexFindAll": regexFindAll,
+ "mustRegexFindAll": mustRegexFindAll,
+ "regexFind": regexFind,
+ "mustRegexFind": mustRegexFind,
+ "regexReplaceAll": regexReplaceAll,
+ "mustRegexReplaceAll": mustRegexReplaceAll,
+ "regexReplaceAllLiteral": regexReplaceAllLiteral,
+ "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
+ "regexSplit": regexSplit,
+ "mustRegexSplit": mustRegexSplit,
+ "regexQuoteMeta": regexQuoteMeta,
+
+ // URLs:
+ "urlParse": urlParse,
+ "urlJoin": urlJoin,
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/go.mod b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/go.mod
new file mode 100644
index 00000000000..fcc726800b1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/go.mod
@@ -0,0 +1,11 @@
+module github.com/go-task/slim-sprig/v3
+
+go 1.20
+
+require github.com/stretchr/testify v1.8.4
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/go.sum b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/go.sum
new file mode 100644
index 00000000000..fa4b6e6825c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/go.sum
@@ -0,0 +1,10 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/list.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/list.go
new file mode 100644
index 00000000000..ca0fbb78932
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/list.go
@@ -0,0 +1,464 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
// Reflection is used in these functions so that slices and arrays of strings,
// ints, and other types not implementing []interface{} can be worked with.
// For example, this is useful if you need to work on the output of regexs.

// list packs its arguments into a []interface{}.
func list(v ...interface{}) []interface{} {
	return v
}

// push appends v to a copy of list, panicking on non-list input.
func push(list interface{}, v interface{}) []interface{} {
	out, err := mustPush(list, v)
	if err != nil {
		panic(err)
	}
	return out
}

// mustPush appends v to a copy of list. The input may be any slice or
// array; it is never mutated. Returns an error for non-list input.
func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot push on type %s", kind)
	}
	src := reflect.ValueOf(list)
	n := src.Len()
	out := make([]interface{}, n, n+1)
	for i := 0; i < n; i++ {
		out[i] = src.Index(i).Interface()
	}
	return append(out, v), nil
}

// prepend inserts v at the front of a copy of list, panicking on
// non-list input.
func prepend(list interface{}, v interface{}) []interface{} {
	out, err := mustPrepend(list, v)
	if err != nil {
		panic(err)
	}
	return out
}

// mustPrepend inserts v at the front of a copy of list. The input may be
// any slice or array; it is never mutated. Returns an error for
// non-list input.
func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot prepend on type %s", kind)
	}
	src := reflect.ValueOf(list)
	n := src.Len()
	out := make([]interface{}, 0, n+1)
	out = append(out, v)
	for i := 0; i < n; i++ {
		out = append(out, src.Index(i).Interface())
	}
	return out, nil
}
+
// chunk splits list into sub-lists of the given size; the final chunk
// may be shorter. Panics on non-list input.
func chunk(size int, list interface{}) [][]interface{} {
	out, err := mustChunk(size, list)
	if err != nil {
		panic(err)
	}
	return out
}

// mustChunk splits list into size-length sub-lists, returning an error
// for non-list input. The last chunk holds whatever remains.
func mustChunk(size int, list interface{}) ([][]interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot chunk type %s", kind)
	}
	src := reflect.ValueOf(list)
	total := src.Len()

	// ceil(total/size), computed in float to match historical behavior.
	nChunks := int(math.Floor(float64(total-1)/float64(size)) + 1)
	chunks := make([][]interface{}, nChunks)

	for c := 0; c < nChunks; c++ {
		width := size
		if c == nChunks-1 {
			// The final chunk takes the remainder (or a full size chunk
			// when the list divides evenly).
			if rem := int(math.Floor(math.Mod(float64(total), float64(size)))); rem != 0 {
				width = rem
			}
		}
		chunks[c] = make([]interface{}, width)
		for j := 0; j < width; j++ {
			chunks[c][j] = src.Index(c*size + j).Interface()
		}
	}
	return chunks, nil
}
+
// last returns the final element of list, panicking on non-list input.
func last(list interface{}) interface{} {
	v, err := mustLast(list)
	if err != nil {
		panic(err)
	}
	return v
}

// mustLast returns the final element of list (nil for an empty list),
// or an error for non-list input.
func mustLast(list interface{}) (interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find last on type %s", kind)
	}
	src := reflect.ValueOf(list)
	n := src.Len()
	if n == 0 {
		return nil, nil
	}
	return src.Index(n - 1).Interface(), nil
}

// first returns the leading element of list, panicking on non-list input.
func first(list interface{}) interface{} {
	v, err := mustFirst(list)
	if err != nil {
		panic(err)
	}
	return v
}

// mustFirst returns the leading element of list (nil for an empty list),
// or an error for non-list input.
func mustFirst(list interface{}) (interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find first on type %s", kind)
	}
	src := reflect.ValueOf(list)
	if src.Len() == 0 {
		return nil, nil
	}
	return src.Index(0).Interface(), nil
}
+
// rest returns every element of list after the first, panicking on
// non-list input.
func rest(list interface{}) []interface{} {
	out, err := mustRest(list)
	if err != nil {
		panic(err)
	}
	return out
}

// mustRest returns list without its first element (nil for an empty
// list), or an error for non-list input.
func mustRest(list interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find rest on type %s", kind)
	}
	src := reflect.ValueOf(list)
	n := src.Len()
	if n == 0 {
		return nil, nil
	}
	tail := make([]interface{}, 0, n-1)
	for i := 1; i < n; i++ {
		tail = append(tail, src.Index(i).Interface())
	}
	return tail, nil
}

// initial returns every element of list except the last, panicking on
// non-list input.
func initial(list interface{}) []interface{} {
	out, err := mustInitial(list)
	if err != nil {
		panic(err)
	}
	return out
}

// mustInitial returns list without its final element (nil for an empty
// list), or an error for non-list input.
func mustInitial(list interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find initial on type %s", kind)
	}
	src := reflect.ValueOf(list)
	n := src.Len()
	if n == 0 {
		return nil, nil
	}
	head := make([]interface{}, 0, n-1)
	for i := 0; i < n-1; i++ {
		head = append(head, src.Index(i).Interface())
	}
	return head, nil
}
+
+func sortAlpha(list interface{}) []string {
+ k := reflect.Indirect(reflect.ValueOf(list)).Kind()
+ switch k {
+ case reflect.Slice, reflect.Array:
+ a := strslice(list)
+ s := sort.StringSlice(a)
+ s.Sort()
+ return s
+ }
+ return []string{strval(list)}
+}
+
// reverse returns list in reverse order, panicking on non-list input.
func reverse(v interface{}) []interface{} {
	out, err := mustReverse(v)
	if err != nil {
		panic(err)
	}
	return out
}

// mustReverse returns a reversed copy of v, or an error for non-list
// input. The incoming list is never mutated.
func mustReverse(v interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(v).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find reverse on type %s", kind)
	}
	src := reflect.ValueOf(v)
	n := src.Len()
	// Copy into a fresh slice so the caller's data is untouched.
	out := make([]interface{}, n)
	for i := 0; i < n; i++ {
		out[n-1-i] = src.Index(i).Interface()
	}
	return out, nil
}
+
+func compact(list interface{}) []interface{} {
+ l, err := mustCompact(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustCompact(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !empty(item) {
+ nl = append(nl, item)
+ }
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot compact on type %s", tp)
+ }
+}
+
// uniq removes duplicate elements from list (deep equality), panicking
// on non-list input.
func uniq(list interface{}) []interface{} {
	out, err := mustUniq(list)
	if err != nil {
		panic(err)
	}
	return out
}

// mustUniq removes duplicates from list, keeping first occurrences in
// order; returns an error for non-list input.
func mustUniq(list interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find uniq on type %s", kind)
	}
	src := reflect.ValueOf(list)
	dest := []interface{}{}
	for i := 0; i < src.Len(); i++ {
		if item := src.Index(i).Interface(); !inList(dest, item) {
			dest = append(dest, item)
		}
	}
	return dest, nil
}

// inList reports whether needle deep-equals any element of haystack.
func inList(haystack []interface{}, needle interface{}) bool {
	for _, candidate := range haystack {
		if reflect.DeepEqual(needle, candidate) {
			return true
		}
	}
	return false
}

// without filters the listed values out of list, panicking on non-list
// input.
func without(list interface{}, omit ...interface{}) []interface{} {
	out, err := mustWithout(list, omit...)
	if err != nil {
		panic(err)
	}
	return out
}

// mustWithout filters the omit values (deep equality) out of list,
// returning an error for non-list input.
func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
	kind := reflect.TypeOf(list).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return nil, fmt.Errorf("Cannot find without on type %s", kind)
	}
	src := reflect.ValueOf(list)
	kept := []interface{}{}
	for i := 0; i < src.Len(); i++ {
		if item := src.Index(i).Interface(); !inList(omit, item) {
			kept = append(kept, item)
		}
	}
	return kept, nil
}
+
// has reports whether haystack contains needle, panicking on non-list
// haystacks.
func has(needle interface{}, haystack interface{}) bool {
	found, err := mustHas(needle, haystack)
	if err != nil {
		panic(err)
	}
	return found
}

// mustHas reports whether haystack contains needle (deep equality).
// A nil haystack is simply "not found"; non-list input is an error.
func mustHas(needle interface{}, haystack interface{}) (bool, error) {
	if haystack == nil {
		return false, nil
	}
	kind := reflect.TypeOf(haystack).Kind()
	if kind != reflect.Slice && kind != reflect.Array {
		return false, fmt.Errorf("Cannot find has on type %s", kind)
	}
	src := reflect.ValueOf(haystack)
	for i := 0; i < src.Len(); i++ {
		if reflect.DeepEqual(needle, src.Index(i).Interface()) {
			return true, nil
		}
	}
	return false, nil
}
+
+// $list := [1, 2, 3, 4, 5]
+// slice $list -> list[0:5] = list[:]
+// slice $list 0 3 -> list[0:3] = list[:3]
+// slice $list 3 5 -> list[3:5]
+// slice $list 3 -> list[3:5] = list[3:]
+func slice(list interface{}, indices ...interface{}) interface{} {
+ l, err := mustSlice(list, indices...)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ var start, end int
+ if len(indices) > 0 {
+ start = toInt(indices[0])
+ }
+ if len(indices) < 2 {
+ end = l
+ } else {
+ end = toInt(indices[1])
+ }
+
+ return l2.Slice(start, end).Interface(), nil
+ default:
+ return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
+ }
+}
+
// concat flattens any number of lists into one []interface{}; panics
// when an argument is not a slice or array.
func concat(lists ...interface{}) interface{} {
	var merged []interface{}
	for _, item := range lists {
		kind := reflect.TypeOf(item).Kind()
		if kind != reflect.Slice && kind != reflect.Array {
			panic(fmt.Sprintf("Cannot concat type %s as list", kind))
		}
		src := reflect.ValueOf(item)
		for i := 0; i < src.Len(); i++ {
			merged = append(merged, src.Index(i).Interface())
		}
	}
	return merged
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/network.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/network.go
new file mode 100644
index 00000000000..108d78a9462
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+ "math/rand"
+ "net"
+)
+
// getHostByName resolves name via DNS and returns one of its addresses
// at random. Returns the empty string when resolution fails or yields
// no addresses, so templates degrade gracefully instead of panicking.
//
// Bug fix: the previous code ignored the LookupHost error; on any
// resolution failure addrs was empty and rand.Intn(0) panicked
// (rand.Intn is documented to panic for n <= 0).
func getHostByName(name string) string {
	addrs, err := net.LookupHost(name)
	//TODO: add error handing when release v3 comes out
	if err != nil || len(addrs) == 0 {
		return ""
	}
	return addrs[rand.Intn(len(addrs))]
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/numeric.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/numeric.go
new file mode 100644
index 00000000000..98cbb37a193
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/numeric.go
@@ -0,0 +1,228 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// toFloat64 converts 64-bit floats
+func toFloat64(v interface{}) float64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return float64(val.Int())
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return float64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ return float64(val.Uint())
+ case reflect.Float32, reflect.Float64:
+ return val.Float()
+ case reflect.Bool:
+ if val.Bool() {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func toInt(v interface{}) int {
+ //It's not optimal. Bud I don't want duplicate toInt64 code.
+ return int(toInt64(v))
+}
+
+// toInt64 converts integer types to 64-bit integers
+func toInt64(v interface{}) int64 {
+ if str, ok := v.(string); ok {
+ iv, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return 0
+ }
+ return iv
+ }
+
+ val := reflect.Indirect(reflect.ValueOf(v))
+ switch val.Kind() {
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return val.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+ return int64(val.Uint())
+ case reflect.Uint, reflect.Uint64:
+ tv := val.Uint()
+ if tv <= math.MaxInt64 {
+ return int64(tv)
+ }
+ // TODO: What is the sensible thing to do here?
+ return math.MaxInt64
+ case reflect.Float32, reflect.Float64:
+ return int64(val.Float())
+ case reflect.Bool:
+ if val.Bool() {
+ return 1
+ }
+ return 0
+ default:
+ return 0
+ }
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb > aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Max(aa, bb)
+ }
+ return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb < aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Min(aa, bb)
+ }
+ return aa
+}
+
// until builds [0, count) stepping +1, or counts down toward a
// negative count stepping -1.
func until(count int) []int {
	if count < 0 {
		return untilStep(0, count, -1)
	}
	return untilStep(0, count, 1)
}

// untilStep builds the integers from start toward stop by step,
// excluding stop. A step whose sign cannot reach stop yields an empty
// list.
func untilStep(start, stop, step int) []int {
	out := []int{}
	if stop < start {
		if step >= 0 {
			return out
		}
		for i := start; i > stop; i += step {
			out = append(out, i)
		}
		return out
	}

	if step <= 0 {
		return out
	}
	for i := start; i < stop; i += step {
		out = append(out, i)
	}
	return out
}
+
+func floor(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, rOpt ...float64) float64 {
+ roundOn := .5
+ if len(rOpt) > 0 {
+ roundOn = rOpt[0]
+ }
+ val := toFloat64(a)
+ places := toFloat64(p)
+
+ var round float64
+ pow := math.Pow(10, places)
+ digit := pow * val
+ _, div := math.Modf(digit)
+ if div >= roundOn {
+ round = math.Ceil(digit)
+ } else {
+ round = math.Floor(digit)
+ }
+ return round / pow
+}
+
// toDecimal parses v as an octal (Unix permission style) string and
// returns its decimal value, or 0 when it is not valid octal.
func toDecimal(v interface{}) int64 {
	n, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
	if err != nil {
		return 0
	}
	return n
}
+
+func seq(params ...int) string {
+ increment := 1
+ switch len(params) {
+ case 0:
+ return ""
+ case 1:
+ start := 1
+ end := params[0]
+ if end < start {
+ increment = -1
+ }
+ return intArrayToString(untilStep(start, end+increment, increment), " ")
+ case 3:
+ start := params[0]
+ end := params[2]
+ step := params[1]
+ if end < start {
+ increment = -1
+ if step > 0 {
+ return ""
+ }
+ }
+ return intArrayToString(untilStep(start, end+increment, step), " ")
+ case 2:
+ start := params[0]
+ end := params[1]
+ step := 1
+ if end < start {
+ step = -1
+ }
+ return intArrayToString(untilStep(start, end+step, step), " ")
+ default:
+ return ""
+ }
+}
+
+func intArrayToString(slice []int, delimeter string) string {
+ return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]")
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/reflect.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/reflect.go
new file mode 100644
index 00000000000..8a65c132f08
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/reflect.go
@@ -0,0 +1,28 @@
+package sprig
+
+import (
+ "fmt"
+ "reflect"
+)
+
// typeIs returns true if the src is the type named in target.
func typeIs(target string, src interface{}) bool {
	return typeOf(src) == target
}

// typeIsLike is typeIs, but also matches a pointer to the target type.
func typeIsLike(target string, src interface{}) bool {
	actual := typeOf(src)
	return actual == target || actual == "*"+target
}

// typeOf renders src's dynamic type as a string (fmt %T notation).
func typeOf(src interface{}) string {
	return fmt.Sprintf("%T", src)
}

// kindIs returns true if src's reflect.Kind matches target.
func kindIs(target string, src interface{}) bool {
	return kindOf(src) == target
}

// kindOf renders src's reflect.Kind as a string.
func kindOf(src interface{}) string {
	return reflect.ValueOf(src).Kind().String()
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/regex.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/regex.go
new file mode 100644
index 00000000000..fab55101897
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/regex.go
@@ -0,0 +1,83 @@
+package sprig
+
+import (
+ "regexp"
+)
+
// regexMatch reports whether s contains a match for regex; a bad
// pattern is treated as no match.
func regexMatch(regex string, s string) bool {
	ok, _ := regexp.MatchString(regex, s)
	return ok
}

// mustRegexMatch reports whether s matches regex, surfacing pattern errors.
func mustRegexMatch(regex string, s string) (bool, error) {
	return regexp.MatchString(regex, s)
}

// regexFindAll returns up to n matches of regex in s; panics on a bad pattern.
func regexFindAll(regex string, s string, n int) []string {
	return regexp.MustCompile(regex).FindAllString(s, n)
}

// mustRegexFindAll returns up to n matches of regex in s, surfacing
// pattern errors.
func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	return re.FindAllString(s, n), nil
}

// regexFind returns the first match of regex in s; panics on a bad pattern.
func regexFind(regex string, s string) string {
	return regexp.MustCompile(regex).FindString(s)
}

// mustRegexFind returns the first match of regex in s, surfacing
// pattern errors.
func mustRegexFind(regex string, s string) (string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return re.FindString(s), nil
}

// regexReplaceAll substitutes repl (with $-expansion) for every match;
// panics on a bad pattern.
func regexReplaceAll(regex string, s string, repl string) string {
	return regexp.MustCompile(regex).ReplaceAllString(s, repl)
}

// mustRegexReplaceAll substitutes repl for every match, surfacing
// pattern errors.
func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return re.ReplaceAllString(s, repl), nil
}

// regexReplaceAllLiteral substitutes repl verbatim (no $-expansion) for
// every match; panics on a bad pattern.
func regexReplaceAllLiteral(regex string, s string, repl string) string {
	return regexp.MustCompile(regex).ReplaceAllLiteralString(s, repl)
}

// mustRegexReplaceAllLiteral substitutes repl verbatim for every match,
// surfacing pattern errors.
func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return re.ReplaceAllLiteralString(s, repl), nil
}

// regexSplit splits s around matches of regex into at most n pieces;
// panics on a bad pattern.
func regexSplit(regex string, s string, n int) []string {
	return regexp.MustCompile(regex).Split(s, n)
}

// mustRegexSplit splits s around matches of regex, surfacing pattern errors.
func mustRegexSplit(regex string, s string, n int) ([]string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	return re.Split(s, n), nil
}

// regexQuoteMeta escapes all regex metacharacters in s.
func regexQuoteMeta(s string) string {
	return regexp.QuoteMeta(s)
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/strings.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/strings.go
new file mode 100644
index 00000000000..3c62d6b6f21
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/strings.go
@@ -0,0 +1,189 @@
+package sprig
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+func base64encode(v string) string { // standard base64 encoding of v
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base64decode(v string) string { // NOTE: a decode failure is returned as the error text, not raised
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func base32encode(v string) string { // standard base32 encoding of v
+ return base32.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base32decode(v string) string { // NOTE: a decode failure is returned as the error text, not raised
+ data, err := base32.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func quote(str ...interface{}) string { // double-quotes each non-nil arg with Go %q escaping, space-joined
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("%q", strval(s)))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func squote(str ...interface{}) string { // single-quotes each non-nil arg without escaping, space-joined
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("'%v'", s))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func cat(v ...interface{}) string { // space-joins the %v rendering of all non-nil args
+ v = removeNilElements(v)
+ r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
+ return fmt.Sprintf(r, v...)
+}
+
+func indent(spaces int, v string) string { // prefixes every line of v with the given number of spaces
+ pad := strings.Repeat(" ", spaces)
+ return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string { // indent preceded by a leading newline
+ return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string { // replaces all occurrences of old with new in src
+ return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string { // count == 1 selects one; any other count selects many
+ if count == 1 {
+ return one
+ }
+ return many
+}
+
+func strslice(v interface{}) []string { // coerces v into a []string, dropping nil elements along the way
+ switch v := v.(type) {
+ case []string:
+ return v
+ case []interface{}:
+ b := make([]string, 0, len(v))
+ for _, s := range v {
+ if s != nil {
+ b = append(b, strval(s))
+ }
+ }
+ return b
+ default:
+ val := reflect.ValueOf(v) // fall back to reflection for other slice/array types
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ l := val.Len()
+ b := make([]string, 0, l)
+ for i := 0; i < l; i++ {
+ value := val.Index(i).Interface()
+ if value != nil {
+ b = append(b, strval(value))
+ }
+ }
+ return b
+ default:
+ if v == nil {
+ return []string{}
+ }
+
+ return []string{strval(v)} // non-slice value becomes a one-element slice
+ }
+ }
+}
+
+func removeNilElements(v []interface{}) []interface{} { // copy of v with all nil entries removed
+ newSlice := make([]interface{}, 0, len(v))
+ for _, i := range v {
+ if i != nil {
+ newSlice = append(newSlice, i)
+ }
+ }
+ return newSlice
+}
+
+func strval(v interface{}) string { // best-effort string rendering of v (string, []byte, error, Stringer, then %v)
+ switch v := v.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ case error:
+ return v.Error()
+ case fmt.Stringer:
+ return v.String()
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
+func trunc(c int, s string) string { // negative c keeps the last -c bytes, positive keeps the first c (byte-, not rune-, based)
+ if c < 0 && len(s)+c > 0 {
+ return s[len(s)+c:]
+ }
+ if c >= 0 && len(s) > c {
+ return s[:c]
+ }
+ return s
+}
+
+func join(sep string, v interface{}) string { // joins any slice-ish value with sep (see strslice for the coercion rules)
+ return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string { // keys are "_0", "_1", ... so templates can index the parts
+ parts := strings.Split(orig, sep)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string { // like split but at most n parts (strings.SplitN semantics)
+ parts := strings.SplitN(orig, sep, n)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:]
+//
+// Otherwise, this calls string[start, end].
+func substring(start, end int, s string) string {
+ if start < 0 {
+ return s[:end]
+ }
+ if end < 0 || end > len(s) {
+ return s[start:]
+ }
+ return s[start:end] // byte offsets, not rune offsets
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/url.go b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/url.go
new file mode 100644
index 00000000000..b8e120e19ba
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/go-task/slim-sprig/v3/url.go
@@ -0,0 +1,66 @@
+package sprig
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+)
+
+func dictGetOrEmpty(dict map[string]interface{}, key string) string { // "" when key is absent; panics when present but not a string
+ value, ok := dict[key]
+ if !ok {
+ return ""
+ }
+ tp := reflect.TypeOf(value).Kind()
+ if tp != reflect.String {
+ panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
+ }
+ return reflect.ValueOf(value).String()
+}
+
+// parses given URL to return dict object
+func urlParse(v string) map[string]interface{} { // keys mirror url.URL fields; panics on an unparsable URL
+ dict := map[string]interface{}{}
+ parsedURL, err := url.Parse(v)
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse url: %s", err))
+ }
+ dict["scheme"] = parsedURL.Scheme
+ dict["host"] = parsedURL.Host
+ dict["hostname"] = parsedURL.Hostname()
+ dict["path"] = parsedURL.Path
+ dict["query"] = parsedURL.RawQuery
+ dict["opaque"] = parsedURL.Opaque
+ dict["fragment"] = parsedURL.Fragment
+ if parsedURL.User != nil {
+ dict["userinfo"] = parsedURL.User.String()
+ } else {
+ dict["userinfo"] = ""
+ }
+
+ return dict
+}
+
+// join given dict to URL string
+func urlJoin(d map[string]interface{}) string { // inverse of urlParse; rebuilds a URL from the same keys
+ resURL := url.URL{
+ Scheme: dictGetOrEmpty(d, "scheme"),
+ Host: dictGetOrEmpty(d, "host"),
+ Path: dictGetOrEmpty(d, "path"),
+ RawQuery: dictGetOrEmpty(d, "query"),
+ Opaque: dictGetOrEmpty(d, "opaque"),
+ Fragment: dictGetOrEmpty(d, "fragment"),
+ }
+ userinfo := dictGetOrEmpty(d, "userinfo")
+ var user *url.Userinfo
+ if userinfo != "" {
+ tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) // reuse url.Parse to split user[:password]
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
+ }
+ user = tempURL.User
+ }
+
+ resURL.User = user
+ return resURL.String()
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/xerrors/LICENSE b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/LICENSE
similarity index 96%
rename from integration/assets/hydrabroker/vendor/golang.org/x/xerrors/LICENSE
rename to integration/assets/hydrabroker/vendor/github.com/google/go-cmp/LICENSE
index e4a47e17f14..32017f8fa1d 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/xerrors/LICENSE
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2019 The Go Authors. All rights reserved.
+Copyright (c) 2017 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/compare.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 00000000000..0f5b8a48c6b
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,671 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// [reflect.DeepEqual] for comparing whether two values are semantically equal.
+// It is intended to only be used in tests, as performance is not a goal and
+// it may panic if it cannot compare the values. Its propensity towards
+// panicking means that its unsuitable for production environments where a
+// spurious panic may be fatal.
+//
+// The primary features of cmp are:
+//
+// - When the default behavior of equality does not suit the test's needs,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as
+// they are within some tolerance of each other.
+//
+// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
+// to determine equality. This allows package authors to determine
+// the equality operation for the types that they define.
+//
+// - If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on
+// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
+// unexported fields are not compared by default; they result in panics
+// unless suppressed by using an [Ignore] option
+// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
+// or explicitly compared using the [Exporter] option.
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+ "github.com/google/go-cmp/cmp/internal/function"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO(≥go1.18): Use any instead of interface{}.
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one [Ignore] exists in S, then the comparison is ignored.
+// If the number of [Transformer] and [Comparer] options in S is non-zero,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single [Transformer], then use that to transform
+// the current values and recursively call Equal on the output values.
+// If S contains a single [Comparer], then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
+//
+// - Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings,
+// and channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+//
+// Structs are equal if recursively calling Equal on all fields report equal.
+// If a struct contains unexported fields, Equal panics unless an [Ignore] option
+// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
+// or the [Exporter] option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements report equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries report equal.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using
+// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
+//
+// Before recursing into a pointer, slice element, or map, the current path
+// is checked to detect whether the address has already been visited.
+// If there is a cycle, then the pointed at values are considered equal
+// only if both addresses were previously visited in the same path step.
+func Equal(x, y interface{}, opts ...Option) bool {
+ s := newState(opts) // fresh comparison state per call
+ s.compareAny(rootStep(x, y))
+ return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added from y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more humanly
+// readable outputs. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+ s := newState(opts)
+
+ // Optimization: If there are no other reporters, we can optimize for the
+ // common case where the result is equal (and thus no reported difference).
+ // This avoids the expensive construction of a difference tree.
+ if len(s.reporters) == 0 {
+ s.compareAny(rootStep(x, y))
+ if s.result.Equal() {
+ return ""
+ }
+ s.result = diff.Result{} // Reset results
+ }
+
+ r := new(defaultReporter)
+ s.reporters = append(s.reporters, reporter{r})
+ s.compareAny(rootStep(x, y)) // second pass (or first, if reporters existed) builds the report
+ d := r.String()
+ if (d == "") != s.result.Equal() {
+ panic("inconsistent difference and equality results")
+ }
+ return d
+}
+
+// rootStep constructs the first path step. If x and y have differing types,
+// then they are stored within an empty interface type.
+func rootStep(x, y interface{}) PathStep {
+ vx := reflect.ValueOf(x)
+ vy := reflect.ValueOf(y)
+
+ // If the inputs are different types, auto-wrap them in an empty interface
+ // so that they have the same parent type.
+ var t reflect.Type
+ if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+ t = anyType // differing (or untyped-nil) inputs are compared as interface{} values
+ if vx.IsValid() {
+ vvx := reflect.New(t).Elem()
+ vvx.Set(vx)
+ vx = vvx
+ }
+ if vy.IsValid() {
+ vvy := reflect.New(t).Elem()
+ vvy.Set(vy)
+ vy = vvy
+ }
+ } else {
+ t = vx.Type()
+ }
+
+ return &pathStep{t, vx, vy}
+}
+
+type state struct {
+ // These fields represent the "comparison state".
+ // Calling statelessCompare must not result in observable changes to these.
+ result diff.Result // The current result of comparison
+ curPath Path // The current path in the value tree
+ curPtrs pointerPath // The current set of visited pointers
+ reporters []reporter // Optional reporters
+
+ // recChecker checks for infinite cycles applying the same set of
+ // transformers upon the output of itself.
+ recChecker recChecker
+
+ // dynChecker triggers pseudo-random checks for option correctness.
+ // It is safe for statelessCompare to mutate this value.
+ dynChecker dynChecker
+
+ // These fields, once set by processOption, will not change.
+ exporters []exporter // List of exporters for structs with unexported fields
+ opts Options // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+ // Always ensure a validator option exists to validate the inputs.
+ s := &state{opts: Options{validator{}}}
+ s.curPtrs.Init()
+ s.processOption(Options(opts))
+ return s
+}
+
+func (s *state) processOption(opt Option) { // recursively flattens Options and files each option by kind
+ switch opt := opt.(type) {
+ case nil:
+ case Options:
+ for _, o := range opt {
+ s.processOption(o)
+ }
+ case coreOption:
+ type filtered interface {
+ isFiltered() bool
+ }
+ if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+ panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+ }
+ s.opts = append(s.opts, opt)
+ case exporter:
+ s.exporters = append(s.exporters, opt)
+ case reporter:
+ s.reporters = append(s.reporters, opt)
+ default:
+ panic(fmt.Sprintf("unknown option %T", opt))
+ }
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(step PathStep) diff.Result {
+ // We do not save and restore curPath and curPtrs because all of the
+ // compareX methods should properly push and pop from them.
+ // It is an implementation bug if the contents of the paths differ from
+ // when calling this function to when returning from it.
+
+ oldResult, oldReporters := s.result, s.reporters
+ s.result = diff.Result{} // Reset result
+ s.reporters = nil // Remove reporters to avoid spurious printouts
+ s.compareAny(step)
+ res := s.result
+ s.result, s.reporters = oldResult, oldReporters // restore the saved comparison state
+ return res
+}
+
+func (s *state) compareAny(step PathStep) { // core dispatcher: applies rules 1-3 from the Equal doc to one node
+ // Update the path stack.
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for _, r := range s.reporters {
+ r.PushStep(step)
+ defer r.PopStep()
+ }
+ s.recChecker.Check(s.curPath)
+
+ // Cycle-detection for slice elements (see NOTE in compareSlice).
+ t := step.Type()
+ vx, vy := step.Values()
+ if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
+ px, py := vx.Addr(), vy.Addr()
+ if eq, visited := s.curPtrs.Push(px, py); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(px, py)
+ }
+
+ // Rule 1: Check whether an option applies on this node in the value tree.
+ if s.tryOptions(t, vx, vy) {
+ return
+ }
+
+ // Rule 2: Check whether the type has a valid Equal method.
+ if s.tryMethod(t, vx, vy) {
+ return
+ }
+
+ // Rule 3: Compare based on the underlying kind.
+ switch t.Kind() {
+ case reflect.Bool:
+ s.report(vx.Bool() == vy.Bool(), 0)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s.report(vx.Int() == vy.Int(), 0)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ s.report(vx.Uint() == vy.Uint(), 0)
+ case reflect.Float32, reflect.Float64:
+ s.report(vx.Float() == vy.Float(), 0)
+ case reflect.Complex64, reflect.Complex128:
+ s.report(vx.Complex() == vy.Complex(), 0)
+ case reflect.String:
+ s.report(vx.String() == vy.String(), 0)
+ case reflect.Chan, reflect.UnsafePointer:
+ s.report(vx.Pointer() == vy.Pointer(), 0)
+ case reflect.Func:
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ case reflect.Struct:
+ s.compareStruct(t, vx, vy)
+ case reflect.Slice, reflect.Array:
+ s.compareSlice(t, vx, vy)
+ case reflect.Map:
+ s.compareMap(t, vx, vy)
+ case reflect.Ptr:
+ s.comparePtr(t, vx, vy)
+ case reflect.Interface:
+ s.compareInterface(t, vx, vy)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+ }
+}
+
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { // rule 1: reports whether a user option handled this node
+ // Evaluate all filters and apply the remaining options.
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil {
+ opt.apply(s, vx, vy)
+ return true
+ }
+ return false
+}
+
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { // rule 2: reports whether a type-defined Equal method handled this node
+ // Check if this type even has an Equal method.
+ m, ok := t.MethodByName("Equal")
+ if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+ return false
+ }
+
+ eq := s.callTTBFunc(m.Func, vx, vy)
+ s.report(eq, reportByMethod)
+ return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { // invokes a transformer, occasionally checking it is deterministic
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{v})[0]
+ }
+
+ // Run the function twice and ensure that we get the same results back.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, v)
+ got := <-c
+ want := f.Call([]reflect.Value{v})[0]
+ if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
+ // To avoid false-positives with non-reflexive equality operations,
+ // we sanity check whether a value is equal to itself.
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+ return want
+ }
+ panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool { // invokes a "(T, T) bool" func, occasionally checking symmetry/determinism
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{x, y})[0].Bool()
+ }
+
+ // Swapping the input arguments is sufficient to check that
+ // f is symmetric and deterministic.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, y, x)
+ got := <-c
+ want := f.Call([]reflect.Value{x, y})[0].Bool()
+ if !got.IsValid() || got.Bool() != want {
+ panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // calls f(vs...) in this goroutine and sends the result on c
+ var ret reflect.Value
+ defer func() {
+ recover() // Ignore panics, let the other call to f panic instead
+ c <- ret
+ }()
+ ret = f.Call(vs)[0]
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { // compares structs field-by-field; unexported fields need an exporter
+ var addr bool
+ var vax, vay reflect.Value // Addressable versions of vx and vy
+
+ var mayForce, mayForceInit bool
+ step := StructField{&structField{}}
+ for i := 0; i < t.NumField(); i++ {
+ step.typ = t.Field(i).Type
+ step.vx = vx.Field(i)
+ step.vy = vy.Field(i)
+ step.name = t.Field(i).Name
+ step.idx = i
+ step.unexported = !isExported(step.name)
+ if step.unexported {
+ if step.name == "_" { // blank fields carry no data; skip entirely
+ continue
+ }
+ // Defer checking of unexported fields until later to give an
+ // Ignore a chance to ignore the field.
+ if !vax.IsValid() || !vay.IsValid() {
+ // For retrieveUnexportedField to work, the parent struct must
+ // be addressable. Create a new copy of the values if
+ // necessary to make them addressable.
+ addr = vx.CanAddr() || vy.CanAddr()
+ vax = makeAddressable(vx)
+ vay = makeAddressable(vy)
+ }
+ if !mayForceInit {
+ for _, xf := range s.exporters {
+ mayForce = mayForce || xf(t) // any matching Exporter permits forced access
+ }
+ mayForceInit = true
+ }
+ step.mayForce = mayForce
+ step.paddr = addr
+ step.pvx = vax
+ step.pvy = vay
+ step.field = t.Field(i)
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { // compares slices/arrays via a minimal edit-script diff
+ isSlice := t.Kind() == reflect.Slice
+ if isSlice && (vx.IsNil() || vy.IsNil()) {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
+ // since slices represents a list of pointers, rather than a single pointer.
+ // The pointer checking logic must be handled on a per-element basis
+ // in compareAny.
+ //
+ // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
+ // pointer P, a length N, and a capacity C. Supposing each slice element has
+ // a memory size of M, then the slice is equivalent to the list of pointers:
+ // [P+i*M for i in range(N)]
+ //
+ // For example, v[:0] and v[:1] are slices with the same starting pointer,
+ // but they are clearly different values. Using the slice pointer alone
+ // violates the assumption that equal pointers implies equal values.
+
+ step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
+ withIndexes := func(ix, iy int) SliceIndex { // builds a step for element ix of x and iy of y; -1 marks "absent"
+ if ix >= 0 {
+ step.vx, step.xkey = vx.Index(ix), ix
+ } else {
+ step.vx, step.xkey = reflect.Value{}, -1
+ }
+ if iy >= 0 {
+ step.vy, step.ykey = vy.Index(iy), iy
+ } else {
+ step.vy, step.ykey = reflect.Value{}, -1
+ }
+ return step
+ }
+
+ // Ignore options are able to ignore missing elements in a slice.
+ // However, detecting these reliably requires an optimal differencing
+ // algorithm, for which diff.Difference is not.
+ //
+ // Instead, we first iterate through both slices to detect which elements
+ // would be ignored if standing alone. The index of non-discarded elements
+ // are stored in a separate slice, which diffing is then performed on.
+ var indexesX, indexesY []int
+ var ignoredX, ignoredY []bool
+ for ix := 0; ix < vx.Len(); ix++ {
+ ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+ if !ignored {
+ indexesX = append(indexesX, ix)
+ }
+ ignoredX = append(ignoredX, ignored)
+ }
+ for iy := 0; iy < vy.Len(); iy++ {
+ ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+ if !ignored {
+ indexesY = append(indexesY, iy)
+ }
+ ignoredY = append(ignoredY, ignored)
+ }
+
+ // Compute an edit-script for slices vx and vy (excluding ignored elements).
+ edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+ return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+ })
+
+ // Replay the ignore-scripts and the edit-script.
+ var ix, iy int
+ for ix < vx.Len() || iy < vy.Len() {
+ var e diff.EditType
+ switch {
+ case ix < len(ignoredX) && ignoredX[ix]:
+ e = diff.UniqueX
+ case iy < len(ignoredY) && ignoredY[iy]:
+ e = diff.UniqueY
+ default:
+ e, edits = edits[0], edits[1:] // consume the next edit operation
+ }
+ switch e {
+ case diff.UniqueX:
+ s.compareAny(withIndexes(ix, -1))
+ ix++
+ case diff.UniqueY:
+ s.compareAny(withIndexes(-1, iy))
+ iy++
+ default:
+ s.compareAny(withIndexes(ix, iy))
+ ix++
+ iy++
+ }
+ }
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { // compares maps entry-by-entry over the union of keys
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for maps.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ // We combine and sort the two map keys so that we can perform the
+ // comparisons in a deterministic order.
+ step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+ for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+ step.vx = vx.MapIndex(k) // invalid Value when k is missing from that map
+ step.vy = vy.MapIndex(k)
+ step.key = k
+ if !step.vx.IsValid() && !step.vy.IsValid() {
+ // It is possible for both vx and vy to be invalid if the
+ // key contained a NaN value in it.
+ //
+ // Even with the ability to retrieve NaN keys in Go 1.12,
+ // there still isn't a sensible way to compare the values since
+ // a NaN key may map to multiple unordered values.
+ // The most reasonable way to compare NaNs would be to compare the
+ // set of values. However, this is impossible to do efficiently
+ // since set equality is provably an O(n^2) operation given only
+ // an Equal function. If we had a Less function or Hash function,
+ // this could be done in O(n*log(n)) or O(n), respectively.
+ //
+ // Rather than adding complex logic to deal with NaNs, make it
+ // the user's responsibility to compare such obscure maps.
+ const help = "consider providing a Comparer to compare the map"
+ panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { // compares pointers by dereferencing, with cycle detection
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for pointers.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ vx, vy = vx.Elem(), vy.Elem()
+ s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { // unwraps interfaces; unequal dynamic types are unequal values
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+ vx, vy = vx.Elem(), vy.Elem()
+ if vx.Type() != vy.Type() {
+ s.report(false, 0)
+ return
+ }
+ s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) { // records one leaf outcome in the tally and forwards it to reporters
+ if rf&reportByIgnore == 0 { // ignored nodes do not count toward NumSame/NumDiff
+ if eq {
+ s.result.NumSame++
+ rf |= reportEqual
+ } else {
+ s.result.NumDiff++
+ rf |= reportUnequal
+ }
+ }
+ for _, r := range s.reporters {
+ r.Report(Result{flags: rf})
+ }
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+ const minLen = 1 << 16
+ if rc.next == 0 {
+ rc.next = minLen
+ }
+ if len(p) < rc.next {
+ return
+ }
+ rc.next <<= 1 // doubling keeps the check cost amortized as paths grow
+
+ // Check whether the same transformer has appeared at least twice.
+ var ss []string
+ m := map[Option]int{}
+ for _, ps := range p {
+ if t, ok := ps.(Transform); ok {
+ t := t.Option()
+ if m[t] == 1 { // Transformer was used exactly once before
+ tf := t.(*transformer).fnc.Type()
+ ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+ }
+ m[t]++
+ }
+ }
+ if len(ss) > 0 {
+ const warning = "recursive set of Transformers detected"
+ const help = "consider using cmpopts.AcyclicTransformer"
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+ }
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//
+// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of functions calls grows larger.
+func (dc *dynChecker) Next() bool {
+ ok := dc.curr == dc.next
+ if ok {
+ dc.curr = 0
+ dc.next++
+ }
+ dc.curr++
+ return ok
+}
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+ if v.CanAddr() {
+ return v
+ }
+ vc := reflect.New(v.Type()).Elem()
+ vc.Set(v)
+ return vc
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/export.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/export.go
new file mode 100644
index 00000000000..29f82fe6b2f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/export.go
@@ -0,0 +1,31 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallowed copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+ ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() // read-write view of the field via pointer arithmetic
+ if !addr {
+ // A field is addressable if and only if the struct is addressable.
+ // If the original parent value was not addressable, shallow copy the
+ // value to make it non-addressable to avoid leaking an implementation
+ // detail of how forcibly exporting a field works.
+ if ve.Kind() == reflect.Interface && ve.IsNil() {
+ return reflect.Zero(f.Type) // Interface() on a nil interface would lose the static type
+ }
+ return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+ }
+ return ve
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 00000000000..36062a604ca
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,18 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmp_debug
+// +build !cmp_debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+ return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 00000000000..a3b97a1ad57
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,123 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmp_debug
+// +build cmp_debug
+
+package diff
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+// go test -tags=cmp_debug -v
+//
+// Example output:
+// === RUN TestDifference/#34
+// ┌───────────────────────────────┐
+// │ \ · · · · · · · · · · · · · · │
+// │ · # · · · · · · · · · · · · · │
+// │ · \ · · · · · · · · · · · · · │
+// │ · · \ · · · · · · · · · · · · │
+// │ · · · X # · · · · · · · · · · │
+// │ · · · # \ · · · · · · · · · · │
+// │ · · · · · # # · · · · · · · · │
+// │ · · · · · # \ · · · · · · · · │
+// │ · · · · · · · \ · · · · · · · │
+// │ · · · · · · · · \ · · · · · · │
+// │ · · · · · · · · · \ · · · · · │
+// │ · · · · · · · · · · \ · · # · │
+// │ · · · · · · · · · · · \ # # · │
+// │ · · · · · · · · · · · # # # · │
+// │ · · · · · · · · · · # # # # · │
+// │ · · · · · · · · · # # # # # · │
+// │ · · · · · · · · · · · · · · \ │
+// └───────────────────────────────┘
+// [.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+ updateDelay = 100 * time.Millisecond
+ finishDelay = 500 * time.Millisecond
+ ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+ sync.Mutex
+ p1, p2 EditScript
+ fwdPath, revPath *EditScript
+ grid []byte
+ lines int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+ dbg.Lock()
+ dbg.fwdPath, dbg.revPath = p1, p2
+ top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+ row := "│ " + strings.Repeat("· ", nx) + "│\n"
+ btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+ dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+ dbg.lines = strings.Count(dbg.String(), "\n")
+ fmt.Print(dbg)
+
+ // Wrap the EqualFunc so that we can intercept each result.
+ return func(ix, iy int) (r Result) {
+ cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+ for i := range cell {
+ cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+ }
+ switch r = f(ix, iy); {
+ case r.Equal():
+ cell[0] = '\\'
+ case r.Similar():
+ cell[0] = 'X'
+ default:
+ cell[0] = '#'
+ }
+ return
+ }
+}
+
+func (dbg *debugger) Update() {
+ dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+ dbg.print(finishDelay)
+ dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+ dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+ for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+ dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+ }
+ return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+ if ansiTerminal {
+ fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+ }
+ fmt.Print(dbg)
+ time.Sleep(d)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 00000000000..a248e5436d9
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,402 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The summation of all edits is called the
+// Levenshtein distance as this problem is well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+ // Identity indicates that a symbol pair is identical in both list X and Y.
+ Identity EditType = iota
+ // UniqueX indicates that a symbol only exists in X and not Y.
+ UniqueX
+ // UniqueY indicates that a symbol only exists in Y and not X.
+ UniqueY
+ // Modified indicates that a symbol pair is a modification of each other.
+ Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+ b := make([]byte, len(es))
+ for i, e := range es {
+ switch e {
+ case Identity:
+ b[i] = '.'
+ case UniqueX:
+ b[i] = 'X'
+ case UniqueY:
+ b[i] = 'Y'
+ case Modified:
+ b[i] = 'M'
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+ for _, e := range es {
+ switch e {
+ case Identity:
+ s.NI++
+ case UniqueX:
+ s.NX++
+ case UniqueY:
+ s.NY++
+ case Modified:
+ s.NM++
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+ if b {
+ return Result{NumSame: 1} // Equal, Similar
+ } else {
+ return Result{NumDiff: 2} // Not Equal, not Similar
+ }
+}
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
+func (r Result) Similar() bool {
+ // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+ return r.NumSame+1 >= r.NumDiff
+}
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+// - eq == (es.Dist()==0)
+// - nx == es.LenX()
+// - ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+ // This algorithm is based on traversing what is known as an "edit-graph".
+ // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+ // by Eugene W. Myers. Since D can be as large as N itself, this is
+ // effectively O(N^2). Unlike the algorithm from that paper, we are not
+ // interested in the optimal path, but at least some "decent" path.
+ //
+ // For example, let X and Y be lists of symbols:
+ // X = [A B C A B B A]
+ // Y = [C B A B A C]
+ //
+ // The edit-graph can be drawn as the following:
+ // A B C A B B A
+ // ┌─────────────┐
+ // C │_|_|\|_|_|_|_│ 0
+ // B │_|\|_|_|\|\|_│ 1
+ // A │\|_|_|\|_|_|\│ 2
+ // B │_|\|_|_|\|\|_│ 3
+ // A │\|_|_|\|_|_|\│ 4
+ // C │ | |\| | | | │ 5
+ // └─────────────┘ 6
+ // 0 1 2 3 4 5 6 7
+ //
+ // List X is written along the horizontal axis, while list Y is written
+ // along the vertical axis. At any point on this grid, if the symbol in
+ // list X matches the corresponding symbol in list Y, then a '\' is drawn.
+ // The goal of any minimal edit-script algorithm is to find a path from the
+ // top-left corner to the bottom-right corner, while traveling through the
+ // fewest horizontal or vertical edges.
+ // A horizontal edge is equivalent to inserting a symbol from list X.
+ // A vertical edge is equivalent to inserting a symbol from list Y.
+ // A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+ // Invariants:
+ // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ //
+ // In general:
+ // - fwdFrontier.X < revFrontier.X
+ // - fwdFrontier.Y < revFrontier.Y
+ //
+ // Unless, it is time for the algorithm to terminate.
+ fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+ revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+ fwdFrontier := fwdPath.point // Forward search frontier
+ revFrontier := revPath.point // Reverse search frontier
+
+ // Search budget bounds the cost of searching for better paths.
+ // The longest sequence of non-matching symbols that can be tolerated is
+ // approximately the square-root of the search budget.
+ searchBudget := 4 * (nx + ny) // O(n)
+
+ // Running the tests with the "cmp_debug" build tag prints a visualization
+ // of the algorithm running in real-time. This is educational for
+ // understanding how the algorithm works. See debug_enable.go.
+ f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+ // The algorithm below is a greedy, meet-in-the-middle algorithm for
+ // computing sub-optimal edit-scripts between two lists.
+ //
+ // The algorithm is approximately as follows:
+ // - Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner).
+// The goal of the search is to connect with the search
+ // from the opposite corner.
+ // - As we search, we build a path in a greedy manner,
+ // where the first match seen is added to the path (this is sub-optimal,
+ // but provides a decent result in practice). When matches are found,
+ // we try the next pair of symbols in the lists and follow all matches
+ // as far as possible.
+// - When searching for matches, we search along a diagonal going
+ // through the "frontier" point. If no matches are found,
+ // we advance the frontier towards the opposite corner.
+ // - This algorithm terminates when either the X coordinates or the
+ // Y coordinates of the forward and reverse frontier points ever intersect.
+
+ // This algorithm is correct even if searching only in the forward direction
+ // or in the reverse direction. We do both because it is commonly observed
+ // that two lists commonly differ because elements were added to the front
+ // or end of the other list.
+ //
+ // Non-deterministically start with either the forward or reverse direction
+ // to introduce some deliberate instability so that we have the flexibility
+ // to change this algorithm in the future.
+ if flags.Deterministic || randBool {
+ goto forwardSearch
+ } else {
+ goto reverseSearch
+ }
+
+forwardSearch:
+ {
+ // Forward search from the beginning.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+ switch {
+ case p.X >= revPath.X || p.Y < fwdPath.Y:
+ stop1 = true // Hit top-right corner
+ case p.Y >= revPath.Y || p.X < fwdPath.X:
+ stop2 = true // Hit bottom-left corner
+ case f(p.X, p.Y).Equal():
+ // Match found, so connect the path to this point.
+ fwdPath.connect(p, f)
+ fwdPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(fwdPath.X, fwdPath.Y).Equal() {
+ break
+ }
+ fwdPath.append(Identity)
+ }
+ fwdFrontier = fwdPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards reverse point.
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+ fwdFrontier.X++
+ } else {
+ fwdFrontier.Y++
+ }
+ goto reverseSearch
+ }
+
+reverseSearch:
+ {
+ // Reverse search from the end.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{revFrontier.X - z, revFrontier.Y + z}
+ switch {
+ case fwdPath.X >= p.X || revPath.Y < p.Y:
+ stop1 = true // Hit bottom-left corner
+ case fwdPath.Y >= p.Y || revPath.X < p.X:
+ stop2 = true // Hit top-right corner
+ case f(p.X-1, p.Y-1).Equal():
+ // Match found, so connect the path to this point.
+ revPath.connect(p, f)
+ revPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(revPath.X-1, revPath.Y-1).Equal() {
+ break
+ }
+ revPath.append(Identity)
+ }
+ revFrontier = revPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards forward point.
+ if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+ revFrontier.X--
+ } else {
+ revFrontier.Y--
+ }
+ goto forwardSearch
+ }
+
+finishSearch:
+ // Join the forward and reverse paths and then append the reverse path.
+ fwdPath.connect(revPath.point, f)
+ for i := len(revPath.es) - 1; i >= 0; i-- {
+ t := revPath.es[i]
+ revPath.es = revPath.es[:i]
+ fwdPath.append(t)
+ }
+ debug.Finish()
+ return fwdPath.es
+}
+
+type path struct {
+ dir int // +1 if forward, -1 if reverse
+ point // Leading point of the EditScript path
+ es EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+ if p.dir > 0 {
+ // Connect in forward direction.
+ for dst.X > p.X && dst.Y > p.Y {
+ switch r := f(p.X, p.Y); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case dst.X-p.X >= dst.Y-p.Y:
+ p.append(UniqueX)
+ default:
+ p.append(UniqueY)
+ }
+ }
+ for dst.X > p.X {
+ p.append(UniqueX)
+ }
+ for dst.Y > p.Y {
+ p.append(UniqueY)
+ }
+ } else {
+ // Connect in reverse direction.
+ for p.X > dst.X && p.Y > dst.Y {
+ switch r := f(p.X-1, p.Y-1); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case p.Y-dst.Y >= p.X-dst.X:
+ p.append(UniqueY)
+ default:
+ p.append(UniqueX)
+ }
+ }
+ for p.X > dst.X {
+ p.append(UniqueX)
+ }
+ for p.Y > dst.Y {
+ p.append(UniqueY)
+ }
+ }
+}
+
+func (p *path) append(t EditType) {
+ p.es = append(p.es, t)
+ switch t {
+ case Identity, Modified:
+ p.add(p.dir, p.dir)
+ case UniqueX:
+ p.add(p.dir, 0)
+ case UniqueY:
+ p.add(0, p.dir)
+ }
+ debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
+// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+ if x&1 != 0 {
+ x = ^x
+ }
+ return x >> 1
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 00000000000..d8e459c9b93
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 00000000000..d127d436230
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,99 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package function provides functionality for identifying function types.
+package function
+
+import (
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+type funcType int
+
+const (
+ _ funcType = iota
+
+ tbFunc // func(T) bool
+ ttbFunc // func(T, T) bool
+ trbFunc // func(T, R) bool
+ tibFunc // func(T, I) bool
+ trFunc // func(T) R
+
+ Equal = ttbFunc // func(T, T) bool
+ EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+ Transformer = trFunc // func(T) R
+ ValueFilter = ttbFunc // func(T, T) bool
+ Less = ttbFunc // func(T, T) bool
+ ValuePredicate = tbFunc // func(T) bool
+ KeyValuePredicate = trbFunc // func(T, R) bool
+)
+
+var boolType = reflect.TypeOf(true)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+ if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+ return false
+ }
+ ni, no := t.NumIn(), t.NumOut()
+ switch ft {
+ case tbFunc: // func(T) bool
+ if ni == 1 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case ttbFunc: // func(T, T) bool
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+ return true
+ }
+ case trbFunc: // func(T, R) bool
+ if ni == 2 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case tibFunc: // func(T, I) bool
+ if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+ return true
+ }
+ case trFunc: // func(T) R
+ if ni == 1 && no == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+ fnc := runtime.FuncForPC(v.Pointer())
+ if fnc == nil {
+ return ""
+ }
+ fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+ // Method closures have a "-fm" suffix.
+ fullName = strings.TrimSuffix(fullName, "-fm")
+
+ var name string
+ for len(fullName) > 0 {
+ inParen := strings.HasSuffix(fullName, ")")
+ fullName = strings.TrimSuffix(fullName, ")")
+
+ s := lastIdentRx.FindString(fullName)
+ if s == "" {
+ break
+ }
+ name = s + "." + name
+ fullName = strings.TrimSuffix(fullName, s)
+
+ if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+ fullName = fullName[:i]
+ }
+ fullName = strings.TrimSuffix(fullName, ".")
+ }
+ return strings.TrimSuffix(name, ".")
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
new file mode 100644
index 00000000000..7b498bb2cb9
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
@@ -0,0 +1,164 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "strconv"
+)
+
+var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// TypeString is nearly identical to reflect.Type.String,
+// but has an additional option to specify that full type names be used.
+func TypeString(t reflect.Type, qualified bool) string {
+ return string(appendTypeName(nil, t, qualified, false))
+}
+
+func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
+ // BUG: Go reflection provides no way to disambiguate two named types
+ // of the same name and within the same package,
+ // but declared within the namespace of different functions.
+
+ // Use the "any" alias instead of "interface{}" for better readability.
+ if t == anyType {
+ return append(b, "any"...)
+ }
+
+ // Named type.
+ if t.Name() != "" {
+ if qualified && t.PkgPath() != "" {
+ b = append(b, '"')
+ b = append(b, t.PkgPath()...)
+ b = append(b, '"')
+ b = append(b, '.')
+ b = append(b, t.Name()...)
+ } else {
+ b = append(b, t.String()...)
+ }
+ return b
+ }
+
+ // Unnamed type.
+ switch k := t.Kind(); k {
+ case reflect.Bool, reflect.String, reflect.UnsafePointer,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ b = append(b, k.String()...)
+ case reflect.Chan:
+ if t.ChanDir() == reflect.RecvDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, "chan"...)
+ if t.ChanDir() == reflect.SendDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Func:
+ if !elideFunc {
+ b = append(b, "func"...)
+ }
+ b = append(b, '(')
+ for i := 0; i < t.NumIn(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ if i == t.NumIn()-1 && t.IsVariadic() {
+ b = append(b, "..."...)
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false)
+ } else {
+ b = appendTypeName(b, t.In(i), qualified, false)
+ }
+ }
+ b = append(b, ')')
+ switch t.NumOut() {
+ case 0:
+ // Do nothing
+ case 1:
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Out(0), qualified, false)
+ default:
+ b = append(b, " ("...)
+ for i := 0; i < t.NumOut(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ b = appendTypeName(b, t.Out(i), qualified, false)
+ }
+ b = append(b, ')')
+ }
+ case reflect.Struct:
+ b = append(b, "struct{ "...)
+ for i := 0; i < t.NumField(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ sf := t.Field(i)
+ if !sf.Anonymous {
+ if qualified && sf.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, sf.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, sf.Name...)
+ b = append(b, ' ')
+ }
+ b = appendTypeName(b, sf.Type, qualified, false)
+ if sf.Tag != "" {
+ b = append(b, ' ')
+ b = strconv.AppendQuote(b, string(sf.Tag))
+ }
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ case reflect.Slice, reflect.Array:
+ b = append(b, '[')
+ if k == reflect.Array {
+ b = strconv.AppendUint(b, uint64(t.Len()), 10)
+ }
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Map:
+ b = append(b, "map["...)
+ b = appendTypeName(b, t.Key(), qualified, false)
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Ptr:
+ b = append(b, '*')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Interface:
+ b = append(b, "interface{ "...)
+ for i := 0; i < t.NumMethod(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ m := t.Method(i)
+ if qualified && m.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, m.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, m.Name...)
+ b = appendTypeName(b, m.Type, qualified, true)
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ default:
+ panic("invalid kind: " + k.String())
+ }
+ return b
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
new file mode 100644
index 00000000000..e5dfff69afa
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
@@ -0,0 +1,34 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+ p unsafe.Pointer
+ t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+ // The proper representation of a pointer is unsafe.Pointer,
+ // which is necessary if the GC ever uses a moving collector.
+ return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+ return uintptr(p.p)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 00000000000..98533b036cc
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+ if len(vs) == 0 {
+ return vs
+ }
+
+ // Sort the map keys.
+ sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
+
+ // Deduplicate keys (fails for NaNs).
+ vs2 := vs[:1]
+ for _, v := range vs[1:] {
+ if isLess(vs2[len(vs2)-1], v) {
+ vs2 = append(vs2, v)
+ }
+ }
+ return vs2
+}
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+ switch x.Type().Kind() {
+ case reflect.Bool:
+ return !x.Bool() && y.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() < y.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() < y.Uint()
+ case reflect.Float32, reflect.Float64:
+ // NOTE: This does not sort -0 as less than +0
+ // since Go maps treat -0 and +0 as equal keys.
+ fx, fy := x.Float(), y.Float()
+ return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+ case reflect.Complex64, reflect.Complex128:
+ cx, cy := x.Complex(), y.Complex()
+ rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+ if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+ return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+ }
+ return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+ case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+ return x.Pointer() < y.Pointer()
+ case reflect.String:
+ return x.String() < y.String()
+ case reflect.Array:
+ for i := 0; i < x.Len(); i++ {
+ if isLess(x.Index(i), y.Index(i)) {
+ return true
+ }
+ if isLess(y.Index(i), x.Index(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Struct:
+ for i := 0; i < x.NumField(); i++ {
+ if isLess(x.Field(i), y.Field(i)) {
+ return true
+ }
+ if isLess(y.Field(i), x.Field(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Interface:
+ vx, vy := x.Elem(), y.Elem()
+ if !vx.IsValid() || !vy.IsValid() {
+ return !vx.IsValid() && vy.IsValid()
+ }
+ tx, ty := vx.Type(), vy.Type()
+ if tx == ty {
+ return isLess(x.Elem(), y.Elem())
+ }
+ if tx.Kind() != ty.Kind() {
+ return vx.Kind() < vy.Kind()
+ }
+ if tx.String() != ty.String() {
+ return tx.String() < ty.String()
+ }
+ if tx.PkgPath() != ty.PkgPath() {
+ return tx.PkgPath() < ty.PkgPath()
+ }
+ // This can happen in rare situations, so we fallback to just comparing
+ // the unique pointer for a reflect.Type. This guarantees deterministic
+ // ordering within a program, but it is obviously not stable.
+ return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+ default:
+ // Must be Func, Map, or Slice; which are not comparable.
+ panic(fmt.Sprintf("%T is not comparable", x.Type()))
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/options.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 00000000000..754496f3b3f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,554 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of [Equal] and [Diff]. In particular,
+// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters ([FilterPath] and
+// [FilterValues]) to control the scope over which they are applied.
+//
+// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
+// for creating options that may be used with [Equal] and [Diff].
+type Option interface {
+ // filter applies all filters and returns the option that remains.
+ // Each option may only read s.curPath and call s.callTTBFunc.
+ //
+ // An Options is returned only if multiple comparers or transformers
+ // can apply simultaneously and will only contain values of those types
+ // or sub-Options containing values of those types.
+ filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
+}
+
+// applicableOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Grouping: Options
+type applicableOption interface {
+ Option
+
+ // apply executes the option, which may mutate s or panic.
+ apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Filters: *pathFilter | *valuesFilter
+type coreOption interface {
+ Option
+ isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of [Option] values that also satisfies the [Option] interface.
+// Helper comparison packages may return an Options value when packing multiple
+// [Option] values into a single [Option]. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+// filter evaluates every option in the list against the current node and
+// resolves precedence: an ignore short-circuits everything, a validator
+// takes precedence over comparers and transformers, and multiple applicable
+// comparers/transformers are collected into an Options group so that
+// Options.apply can later panic with an ambiguity report.
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
+ for _, opt := range opts {
+ switch opt := opt.filter(s, t, vx, vy); opt.(type) {
+ case ignore:
+ return ignore{} // Only ignore can short-circuit evaluation
+ case validator:
+ out = validator{} // Takes precedence over comparer or transformer
+ case *comparer, *transformer, Options:
+ switch out.(type) {
+ case nil:
+ out = opt
+ case validator:
+ // Keep validator
+ case *comparer, *transformer, Options:
+ out = Options{out, opt} // Conflicting comparers or transformers
+ }
+ }
+ }
+ return out
+}
+
+// apply is only reached when filter returned an Options group containing
+// conflicting comparers or transformers for the same node; it always panics,
+// listing the ambiguous options and hinting at the use of filters.
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+ const warning = "ambiguous set of applicable options"
+ const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+ var ss []string
+ for _, opt := range flattenOptions(nil, opts) {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+ var ss []string
+ for _, opt := range opts {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new [Option] where opt is only evaluated if filter f
+// returns true for the current [Path] in the value tree.
+//
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterPath(f func(Path) bool, opt Option) Option {
+ if f == nil {
+ panic("invalid path filter function")
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ return &pathFilter{fnc: f, opt: opt}
+ }
+ // opt contained no core options (e.g., an empty Options group),
+ // so the filtered option reduces to a no-op.
+ return nil
+}
+
+type pathFilter struct {
+ core
+ fnc func(Path) bool
+ opt Option
+}
+
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if f.fnc(s.curPath) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f pathFilter) String() string {
+ return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
+}
+
+// FilterValues returns a new [Option] where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterValues(f interface{}, opt Option) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+ panic(fmt.Sprintf("invalid values filter function: %T", f))
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ vf := &valuesFilter{fnc: v, opt: opt}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ vf.typ = ti
+ }
+ return vf
+ }
+ return nil
+}
+
+type valuesFilter struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+ opt Option
+}
+
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ // Values that are invalid or cannot be interfaced (e.g., missing slice
+ // elements, missing map entries, or inaccessible unexported fields)
+ // cannot be passed to the user's filter function, so the filter
+ // implicitly does not match.
+ if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+ return nil
+ }
+ if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f valuesFilter) String() string {
+ return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
+}
+
+// Ignore is an [Option] that causes all comparisons to be ignored.
+// This value is intended to be combined with [FilterPath] or [FilterValues].
+// It is an error to pass an unfiltered Ignore option to [Equal].
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool { return false }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
+func (ignore) String() string { return "Ignore()" }
+
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Both values are validator only for unexported fields.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vy.IsValid() {
+ return validator{}
+ }
+ if !vx.CanInterface() || !vy.CanInterface() {
+ return validator{}
+ }
+ return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+ // Implies missing slice element or map entry.
+ if !vx.IsValid() || !vy.IsValid() {
+ s.report(vx.IsValid() == vy.IsValid(), 0)
+ return
+ }
+
+ // Unable to Interface implies unexported field without visibility access.
+ if !vx.CanInterface() || !vy.CanInterface() {
+ help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+ var name string
+ if t := s.curPath.Index(-2).Type(); t.Name() != "" {
+ // Named type with unexported fields.
+ name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+ if _, ok := reflect.New(t).Interface().(error); ok {
+ help = "consider using cmpopts.EquateErrors to compare error values"
+ } else if t.Comparable() {
+ help = "consider using cmpopts.EquateComparable to compare comparable Go types"
+ }
+ } else {
+ // Unnamed type with unexported fields. Derive PkgPath from field.
+ var pkgPath string
+ for i := 0; i < t.NumField() && pkgPath == ""; i++ {
+ pkgPath = t.Field(i).PkgPath
+ }
+ name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
+ }
+ panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
+ }
+
+ panic("not reachable")
+}
+
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
+// Transformer returns an [Option] that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the [Path] since the last non-[Transform] step.
+// For situations where the implicit filter is still insufficient,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
+// which adds a filter to prevent the transformer from
+// being recursively applied upon itself.
+//
+// The name is a user provided label that is used as the [Transform.Name] in the
+// transformation [PathStep] (and eventually shown in the [Diff] output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+ panic(fmt.Sprintf("invalid transformer function: %T", f))
+ }
+ if name == "" {
+ name = function.NameOf(v)
+ if !identsRx.MatchString(name) {
+ name = "λ" // Lambda-symbol as placeholder name
+ }
+ } else if !identsRx.MatchString(name) {
+ panic(fmt.Sprintf("invalid name: %q", name))
+ }
+ tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ tr.typ = ti
+ }
+ return tr
+}
+
+type transformer struct {
+ core
+ name string
+ typ reflect.Type // T
+ fnc reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+// filter rejects this transformer if it already appears in the trailing run
+// of Transform steps on the current path (preventing the transformer from
+// being directly applied to its own output), or if the current type is not
+// assignable to the transformer's input type T (a nil typ matches any type).
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ for i := len(s.curPath) - 1; i >= 0; i-- {
+ if t, ok := s.curPath[i].(Transform); !ok {
+ break // Hit most recent non-Transform step
+ } else if tr == t.trans {
+ return nil // Cannot directly use same Transform
+ }
+ }
+ if tr.typ == nil || t.AssignableTo(tr.typ) {
+ return tr
+ }
+ return nil
+}
+
+// apply runs the transformer function on both x and y, records the results
+// on a new Transform path step, and recursively compares the transformed
+// outputs.
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+ step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+ vvx := s.callTRFunc(tr.fnc, vx, step)
+ vvy := s.callTRFunc(tr.fnc, vy, step)
+ step.vx, step.vy = vvx, vvy
+ s.compareAny(step)
+}
+
+func (tr transformer) String() string {
+ return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
+}
+
+// Comparer returns an [Option] that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+// - Symmetric: equal(x, y) == equal(y, x)
+// - Deterministic: equal(x, y) == equal(x, y)
+// - Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+ panic(fmt.Sprintf("invalid comparer function: %T", f))
+ }
+ cm := &comparer{fnc: v}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ cm.typ = ti
+ }
+ return cm
+}
+
+type comparer struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ if cm.typ == nil || t.AssignableTo(cm.typ) {
+ return cm
+ }
+ return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+ eq := s.callTTBFunc(cm.fnc, vx, vy)
+ s.report(eq, reportByFunc)
+}
+
+func (cm comparer) String() string {
+ return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
+}
+
+// Exporter returns an [Option] that specifies whether [Equal] is allowed to
+// introspect into the unexported fields of certain struct types.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of [Equal]
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom [Comparer] should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the [reflect.Type] documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *[regexp.Regexp] types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using [Comparer] options:
+//
+// Comparer(func(x, y reflect.Type) bool { return x == y })
+// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
+// option can be used to ignore all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+ return exporter(f)
+}
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See [Exporter] for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+ m := make(map[reflect.Type]bool)
+ for _, typ := range types {
+ t := reflect.TypeOf(typ)
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("invalid struct type: %T", typ))
+ }
+ m[t] = true
+ }
+ return exporter(func(t reflect.Type) bool { return m[t] })
+}
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see [Reporter]).
+type Result struct {
+ _ [0]func() // Make Result incomparable
+ flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+ return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if [Result.Equal] reports false.
+func (r Result) ByIgnore() bool {
+ return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+ return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a [Comparer] function determined equality.
+func (r Result) ByFunc() bool {
+ return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+ return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+ reportByCycle
+)
+
+// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascend out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+ // PushStep is called when a tree-traversal operation is performed.
+ // The PathStep itself is only valid until the step is popped.
+ // The PathStep.Values are valid for the duration of the entire traversal
+ // and must not be mutated.
+ //
+ // Equal always calls PushStep at the start to provide an operation-less
+ // PathStep used to report the root values.
+ //
+ // Within a slice, the exact set of inserted, removed, or modified elements
+ // is unspecified and may change in future implementations.
+ // The entries of a map are iterated through in an unspecified order.
+ PushStep(PathStep)
+
+ // Report is called exactly once on leaf nodes to report whether the
+ // comparison identified the node as equal, unequal, or ignored.
+ // A leaf node is one that is immediately preceded by and followed by
+ // a pair of PushStep and PopStep calls.
+ Report(Result)
+
+ // PopStep ascends back up the value tree.
+ // There is always a matching pop call for every push call.
+ PopStep()
+}) Option {
+ return reporter{r}
+}
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+ PushStep(PathStep)
+ Report(Result)
+ PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+ switch opts := flattenOptions(nil, Options{src}); len(opts) {
+ case 0:
+ return nil
+ case 1:
+ return opts[0]
+ default:
+ return opts
+ }
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+ for _, opt := range src {
+ switch opt := opt.(type) {
+ case nil:
+ continue
+ case Options:
+ dst = flattenOptions(dst, opt)
+ case coreOption:
+ dst = append(dst, opt)
+ default:
+ panic(fmt.Sprintf("invalid option type: %T", opt))
+ }
+ }
+ return dst
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/path.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 00000000000..c3c1456423c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,390 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// Path is a list of [PathStep] describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less [PathStep] that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
+
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// these types as values of this type will be returned by this package.
+//
+// Implementations of this interface:
+// - [StructField]
+// - [SliceIndex]
+// - [MapIndex]
+// - [Indirect]
+// - [TypeAssertion]
+// - [Transform]
+type PathStep interface {
+ String() string
+
+ // Type is the resulting type after performing the path step.
+ Type() reflect.Type
+
+ // Values is the resulting values after performing the path step.
+ // The type of each valid value is guaranteed to be identical to Type.
+ //
+ // In some cases, one or both may be invalid or have restrictions:
+ // - For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // an Exporter to traverse unexported fields.
+ // - For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // - For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
+ //
+ // The provided values must not be mutated.
+ Values() (vx, vy reflect.Value)
+}
+
+var (
+ _ PathStep = StructField{}
+ _ PathStep = SliceIndex{}
+ _ PathStep = MapIndex{}
+ _ PathStep = Indirect{}
+ _ PathStep = TypeAssertion{}
+ _ PathStep = Transform{}
+)
+
+func (pa *Path) push(s PathStep) {
+ *pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+ *pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last [PathStep] in the Path.
+// If the path is empty, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Last() PathStep {
+ return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Index(i int) PathStep {
+ if i < 0 {
+ i = len(pa) + i
+ }
+ if i < 0 || i >= len(pa) {
+ return pathStep{}
+ }
+ return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//
+// MyMap.MySlices.MyField
+func (pa Path) String() string {
+ var ss []string
+ for _, s := range pa {
+ if _, ok := s.(StructField); ok {
+ ss = append(ss, s.String())
+ }
+ }
+ return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//
+// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+ var ssPre, ssPost []string
+ var numIndirect int
+ for i, s := range pa {
+ var nextStep PathStep
+ if i+1 < len(pa) {
+ nextStep = pa[i+1]
+ }
+ switch s := s.(type) {
+ case Indirect:
+ numIndirect++
+ pPre, pPost := "(", ")"
+ switch nextStep.(type) {
+ case Indirect:
+ continue // Next step is indirection, so let them batch up
+ case StructField:
+ numIndirect-- // Automatic indirection on struct fields
+ case nil:
+ pPre, pPost = "", "" // Last step; no need for parenthesis
+ }
+ if numIndirect > 0 {
+ ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+ ssPost = append(ssPost, pPost)
+ }
+ numIndirect = 0
+ continue
+ case Transform:
+ ssPre = append(ssPre, s.trans.name+"(")
+ ssPost = append(ssPost, ")")
+ continue
+ }
+ ssPost = append(ssPost, s.String())
+ }
+ // Prefixes encountered later in the Path wrap the expression built from
+ // the earlier steps, so ssPre must be reversed before joining to place
+ // the outermost prefix first (cf. the example in the doc comment).
+ for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+ ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+ }
+ return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type pathStep struct {
+ typ reflect.Type
+ vx, vy reflect.Value
+}
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
+func (ps pathStep) String() string {
+ if ps.typ == nil {
+ return ""
+ }
+ s := value.TypeString(ps.typ, false)
+ if s == "" || strings.ContainsAny(s, "{}\n") {
+ return "root" // Type too simple or complex to print
+ }
+ return fmt.Sprintf("{%s}", s)
+}
+
+// StructField is a [PathStep] that represents a struct field access
+// on a field called [StructField.Name].
+type StructField struct{ *structField }
+type structField struct {
+ pathStep
+ name string
+ idx int
+
+ // These fields are used for forcibly accessing an unexported field.
+ // pvx, pvy, and field are only valid if unexported is true.
+ unexported bool
+ mayForce bool // Forcibly allow visibility
+ paddr bool // Was parent addressable?
+ pvx, pvy reflect.Value // Parent values (always addressable)
+ field reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+ if !sf.unexported {
+ return sf.vx, sf.vy // CanInterface reports true
+ }
+
+ // Forcibly obtain read-write access to an unexported struct field.
+ if sf.mayForce {
+ vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
+ vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
+ return vx, vy // CanInterface reports true
+ }
+ return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See [reflect.Type.Field].
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is a [PathStep] that represents an index operation on
+// a slice or array at some index [SliceIndex.Key].
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+ pathStep
+ xkey, ykey int
+ isSlice bool // False for reflect.Array
+}
+
+func (si SliceIndex) Type() reflect.Type { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
+ switch {
+ case si.xkey == si.ykey:
+ return fmt.Sprintf("[%d]", si.xkey)
+ case si.ykey == -1:
+ // [5->?] means "I don't know where X[5] went"
+ return fmt.Sprintf("[%d->?]", si.xkey)
+ case si.xkey == -1:
+ // [?->3] means "I don't know where Y[3] came from"
+ return fmt.Sprintf("[?->%d]", si.ykey)
+ default:
+ // [5->3] means "X[5] moved to Y[3]"
+ return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+ }
+}
+
+// Key is the index key; it may return -1 if in a split state
+func (si SliceIndex) Key() int {
+ if si.xkey != si.ykey {
+ return -1
+ }
+ return si.xkey
+}
+
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
+// returned by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+ pathStep
+ key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect is a [PathStep] that represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+ pathStep
+}
+
+func (in Indirect) Type() reflect.Type { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string { return "*" }
+
+// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+ pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
+
+// Transform is a [PathStep] that represents a transformation
+// from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+ pathStep
+ trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the [Transformer].
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed [Transformer] option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
+
+// pointerPath represents a dual-stack of pointers encountered when
+// recursively traversing the x and y values. This data structure supports
+// detection of cycles and determining whether the cycles are equal.
+// In Go, cycles can occur via pointers, slices, and maps.
+//
+// The pointerPath uses a map to represent a stack; where descension into a
+// pointer pushes the address onto the stack, and ascension from a pointer
+// pops the address from the stack. Thus, when traversing into a pointer from
+// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
+// by checking whether the pointer has already been visited. The cycle detection
+// uses a separate stack for the x and y values.
+//
+// If a cycle is detected we need to determine whether the two pointers
+// should be considered equal. The definition of equality chosen by Equal
+// requires two graphs to have the same structure. To determine this, both the
+// x and y values must have a cycle where the previous pointers were also
+// encountered together as a pair.
+//
+// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
+// MapIndex with pointer information for the x and y values.
+// Suppose px and py are two pointers to compare, we then search the
+// Path for whether px was ever encountered in the Path history of x, and
+// similarly so with py. If either side has a cycle, the comparison is only
+// equal if both px and py have a cycle resulting from the same PathStep.
+//
+// Using a map as a stack is more performant as we can perform cycle detection
+// in O(1) instead of O(N) where N is len(Path).
+type pointerPath struct {
+ // mx is keyed by x pointers, where the value is the associated y pointer.
+ mx map[value.Pointer]value.Pointer
+ // my is keyed by y pointers, where the value is the associated x pointer.
+ my map[value.Pointer]value.Pointer
+}
+
+func (p *pointerPath) Init() {
+ p.mx = make(map[value.Pointer]value.Pointer)
+ p.my = make(map[value.Pointer]value.Pointer)
+}
+
+// Push indicates intent to descend into pointers vx and vy where
+// visited reports whether either has been seen before. If visited before,
+// equal reports whether both pointers were encountered together.
+// Pop must be called if and only if the pointers were never visited.
+//
+// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
+// and be non-nil.
+func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
+ px := value.PointerOf(vx)
+ py := value.PointerOf(vy)
+ _, ok1 := p.mx[px]
+ _, ok2 := p.my[py]
+ if ok1 || ok2 {
+ // A cycle was detected on at least one side; the cycles are only
+ // considered equal if px and py were previously pushed together
+ // as the same pair.
+ equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
+ return equal, true
+ }
+ p.mx[px] = py
+ p.my[py] = px
+ return false, false
+}
+
+// Pop ascends from pointers vx and vy.
+func (p pointerPath) Pop(vx, vy reflect.Value) {
+ delete(p.mx, value.PointerOf(vx))
+ delete(p.my, value.PointerOf(vy))
+}
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 00000000000..f43cd12eb5f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,54 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+ root *valueNode
+ curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+ r.curr = r.curr.PushStep(ps)
+ if r.root == nil {
+ r.root = r.curr
+ }
+}
+func (r *defaultReporter) Report(rs Result) {
+ r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+ r.curr = r.curr.PopStep()
+}
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+ assert(r.root != nil && r.curr == nil)
+ if r.root.NumDiff == 0 {
+ return ""
+ }
+ ptrs := new(pointerReferences)
+ text := formatOptions{}.FormatDiff(r.root, ptrs)
+ resolveReferences(text)
+ return text.String()
+}
+
+func assert(ok bool) {
+ if !ok {
+ panic("assertion failure")
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_compare.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 00000000000..2050bf6b46b
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,433 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+ diffUnknown diffMode = 0
+ diffIdentical diffMode = ' '
+ diffRemoved diffMode = '-'
+ diffInserted diffMode = '+'
+)
+
+type typeMode int
+
+const (
+ // emitType always prints the type.
+ emitType typeMode = iota
+ // elideType never prints the type.
+ elideType
+ // autoType prints the type only for composite kinds
+ // (i.e., structs, slices, arrays, and maps).
+ autoType
+)
+
+type formatOptions struct {
+ // DiffMode controls the output mode of FormatDiff.
+ //
+ // If diffUnknown, then produce a diff of the x and y values.
+ // If diffIdentical, then emit values as if they were equal.
+ // If diffRemoved, then only emit x values (ignoring y values).
+ // If diffInserted, then only emit y values (ignoring x values).
+ DiffMode diffMode
+
+ // TypeMode controls whether to print the type for the current node.
+ //
+ // As a general rule of thumb, we always print the type of the next node
+ // after an interface, and always elide the type of the next node after
+ // a slice or map node.
+ TypeMode typeMode
+
+ // formatValueOptions are options specific to printing reflect.Values.
+ formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+ opts.DiffMode = d
+ return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+ opts.TypeMode = t
+ return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+ opts.VerbosityLevel = level
+ opts.LimitVerbosity = true
+ return opts
+}
+func (opts formatOptions) verbosity() uint {
+ switch {
+ case opts.VerbosityLevel < 0:
+ return 0
+ case opts.VerbosityLevel > 16:
+ return 16 // some reasonable maximum to avoid shift overflow
+ default:
+ return uint(opts.VerbosityLevel)
+ }
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+ opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+ if i > 0 {
+ opts.AvoidStringer = true
+ }
+ if i >= maxVerbosityPreset {
+ opts.PrintAddresses = true
+ opts.QualifiedNames = true
+ }
+ return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+ if opts.DiffMode == diffIdentical {
+ opts = opts.WithVerbosity(1)
+ } else if opts.verbosity() < 3 {
+ opts = opts.WithVerbosity(3)
+ }
+
+ // Check whether we have specialized formatting for this node.
+ // This is not necessary, but helpful for producing more readable outputs.
+ if opts.CanFormatDiffSlice(v) {
+ return opts.FormatDiffSlice(v)
+ }
+
+ var parentKind reflect.Kind
+ if v.parent != nil && v.parent.TransformerName == "" {
+ parentKind = v.parent.Type.Kind()
+ }
+
+ // For leaf nodes, format the value based on the reflect.Values alone.
+ // As a special case, treat equal []byte as leaf nodes.
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+ isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+ if v.MaxDepth == 0 || isEqualBytes {
+ switch opts.DiffMode {
+ case diffUnknown, diffIdentical:
+ // Format Equal.
+ if v.NumDiff == 0 {
+ outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+ if v.NumIgnored > 0 && v.NumSame == 0 {
+ return textEllipsis
+ } else if outx.Len() < outy.Len() {
+ return outx
+ } else {
+ return outy
+ }
+ }
+
+ // Format unequal.
+ assert(opts.DiffMode == diffUnknown)
+ var list textList
+ outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
+ outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
+ outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: '-', Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: '+', Value: outy})
+ }
+ return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+ case diffRemoved:
+ return opts.FormatValue(v.ValueX, parentKind, ptrs)
+ case diffInserted:
+ return opts.FormatValue(v.ValueY, parentKind, ptrs)
+ default:
+ panic("invalid diff mode")
+ }
+ }
+
+ // Register slice element to support cycle detection.
+ if parentKind == reflect.Slice {
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
+ }
+
+ // Descend into the child value node.
+ if v.TransformerName != "" {
+ out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
+ return opts.FormatType(v.Type, out)
+ } else {
+ switch k := v.Type.Kind(); k {
+ case reflect.Struct, reflect.Array, reflect.Slice:
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Map:
+ // Register map to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Ptr:
+ // Register pointer to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.FormatDiff(v.Value, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ case reflect.Interface:
+ out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ default:
+ panic(fmt.Sprintf("%v cannot have children", k))
+ }
+ return out
+ }
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
+ // Derive record name based on the data structure kind.
+ var name string
+ var formatKey func(reflect.Value) string
+ switch k {
+ case reflect.Struct:
+ name = "field"
+ opts = opts.WithTypeMode(autoType)
+ formatKey = func(v reflect.Value) string { return v.String() }
+ case reflect.Slice, reflect.Array:
+ name = "element"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(reflect.Value) string { return "" }
+ case reflect.Map:
+ name = "entry"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
+ }
+
+ maxLen := -1
+ if opts.LimitVerbosity {
+ if opts.DiffMode == diffIdentical {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ } else {
+ maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
+ }
+ opts.VerbosityLevel--
+ }
+
+ // Handle unification.
+ switch opts.DiffMode {
+ case diffIdentical, diffRemoved, diffInserted:
+ var list textList
+ var deferredEllipsis bool // Add final "..." to indicate records were dropped
+ for _, r := range recs {
+ if len(list) == maxLen {
+ deferredEllipsis = true
+ break
+ }
+
+ // Elide struct fields that are zero value.
+ if k == reflect.Struct {
+ var isZero bool
+ switch opts.DiffMode {
+ case diffIdentical:
+ isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
+ case diffRemoved:
+ isZero = r.Value.ValueX.IsZero()
+ case diffInserted:
+ isZero = r.Value.ValueY.IsZero()
+ }
+ if isZero {
+ continue
+ }
+ }
+ // Elide ignored nodes.
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+ deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+ if !deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ continue
+ }
+ if out := opts.FormatDiff(r.Value, ptrs); out != nil {
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ }
+ if deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case diffUnknown:
+ default:
+ panic("invalid diff mode")
+ }
+
+ // Handle differencing.
+ var numDiffs int
+ var list textList
+ var keys []reflect.Value // invariant: len(list) == len(keys)
+ groups := coalesceAdjacentRecords(name, recs)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Handle equal records.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing records to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+ if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numLo++
+ }
+ for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numHi++
+ }
+ if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+ numHi++ // Avoid pointless coalescing of a single equal record
+ }
+
+ // Format the equal values.
+ for _, r := range recs[:numLo] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ for _, r := range recs[numEqual-numHi : numEqual] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ recs = recs[numEqual:]
+ continue
+ }
+
+ // Handle unequal records.
+ for _, r := range recs[:ds.NumDiff()] {
+ switch {
+ case opts.CanFormatDiffSlice(r.Value):
+ out := opts.FormatDiffSlice(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ case r.Value.NumChildren == r.Value.MaxDepth:
+ outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i)
+ outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+ keys = append(keys, r.Key)
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+ keys = append(keys, r.Key)
+ }
+ default:
+ out := opts.FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ }
+ recs = recs[ds.NumDiff():]
+ numDiffs += ds.NumDiff()
+ }
+ if maxGroup.IsZero() {
+ assert(len(recs) == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ assert(len(list) == len(keys))
+
+ // For maps, the default formatting logic uses fmt.Stringer which may
+ // produce ambiguous output. Avoid calling String to disambiguate.
+ if k == reflect.Map {
+ var ambiguous bool
+ seenKeys := map[string]reflect.Value{}
+ for i, currKey := range keys {
+ if currKey.IsValid() {
+ strKey := list[i].Key
+ prevKey, seen := seenKeys[strKey]
+ if seen && prevKey.CanInterface() && currKey.CanInterface() {
+ ambiguous = prevKey.Interface() != currKey.Interface()
+ if ambiguous {
+ break
+ }
+ }
+ seenKeys[strKey] = currKey
+ }
+ }
+ if ambiguous {
+ for i, k := range keys {
+ if k.IsValid() {
+ list[i].Key = formatMapKey(k, true, ptrs)
+ }
+ }
+ }
+ }
+
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal, or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+ var prevCase int // Arbitrary index into which case last occurred
+ lastStats := func(i int) *diffStats {
+ if prevCase != i {
+ groups = append(groups, diffStats{Name: name})
+ prevCase = i
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, r := range recs {
+ switch rv := r.Value; {
+ case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+ lastStats(1).NumIgnored++
+ case rv.NumDiff == 0:
+ lastStats(1).NumIdentical++
+ case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+ lastStats(2).NumRemoved++
+ case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+ lastStats(2).NumInserted++
+ default:
+ lastStats(2).NumModified++
+ }
+ }
+ return groups
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_references.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_references.go
new file mode 100644
index 00000000000..be31b33a9e1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_references.go
@@ -0,0 +1,264 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+const (
+ pointerDelimPrefix = "⟪"
+ pointerDelimSuffix = "⟫"
+)
+
+// formatPointer prints the address of the pointer.
+func formatPointer(p value.Pointer, withDelims bool) string {
+ v := p.Uintptr()
+ if flags.Deterministic {
+ v = 0xdeadf00f // Only used for stable testing purposes
+ }
+ if withDelims {
+ return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
+ }
+ return formatHex(uint64(v))
+}
+
+// pointerReferences is a stack of pointers visited so far.
+type pointerReferences [][2]value.Pointer
+
+func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
+ if deref && vx.IsValid() {
+ vx = vx.Addr()
+ }
+ if deref && vy.IsValid() {
+ vy = vy.Addr()
+ }
+ switch d {
+ case diffUnknown, diffIdentical:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
+ case diffRemoved:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
+ case diffInserted:
+ pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
+ }
+ *ps = append(*ps, pp)
+ return pp
+}
+
+func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
+ p = value.PointerOf(v)
+ for _, pp := range *ps {
+ if p == pp[0] || p == pp[1] {
+ return p, true
+ }
+ }
+ *ps = append(*ps, [2]value.Pointer{p, p})
+ return p, false
+}
+
+func (ps *pointerReferences) Pop() {
+ *ps = (*ps)[:len(*ps)-1]
+}
+
+// trunkReferences is metadata for a textNode indicating that the sub-tree
+// represents the value for either pointer in a pair of references.
+type trunkReferences struct{ pp [2]value.Pointer }
+
+// trunkReference is metadata for a textNode indicating that the sub-tree
+// represents the value for the given pointer reference.
+type trunkReference struct{ p value.Pointer }
+
+// leafReference is metadata for a textNode indicating that the value is
+// truncated as it refers to another part of the tree (i.e., a trunk).
+type leafReference struct{ p value.Pointer }
+
+func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
+ switch {
+ case pp[0].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
+ case pp[1].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ case pp[0] == pp[1]:
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ default:
+ return &textWrap{Value: s, Metadata: trunkReferences{pp}}
+ }
+}
+func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
+}
+func makeLeafReference(p value.Pointer, printAddress bool) textNode {
+ out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
+}
+
+// resolveReferences walks the textNode tree searching for any leaf reference
+// metadata and resolves each against the corresponding trunk references.
+// Since pointer addresses in memory are not particularly readable to the user,
+// it replaces each pointer value with an arbitrary and unique reference ID.
+func resolveReferences(s textNode) {
+ var walkNodes func(textNode, func(textNode))
+ walkNodes = func(s textNode, f func(textNode)) {
+ f(s)
+ switch s := s.(type) {
+ case *textWrap:
+ walkNodes(s.Value, f)
+ case textList:
+ for _, r := range s {
+ walkNodes(r.Value, f)
+ }
+ }
+ }
+
+ // Collect all trunks and leaves with reference metadata.
+ var trunks, leaves []*textWrap
+ walkNodes(s, func(s textNode) {
+ if s, ok := s.(*textWrap); ok {
+ switch s.Metadata.(type) {
+ case leafReference:
+ leaves = append(leaves, s)
+ case trunkReference, trunkReferences:
+ trunks = append(trunks, s)
+ }
+ }
+ })
+
+ // No leaf references to resolve.
+ if len(leaves) == 0 {
+ return
+ }
+
+ // Collect the set of all leaf references to resolve.
+ leafPtrs := make(map[value.Pointer]bool)
+ for _, leaf := range leaves {
+ leafPtrs[leaf.Metadata.(leafReference).p] = true
+ }
+
+ // Collect the set of trunk pointers that are always paired together.
+ // This allows us to assign a single ID to both pointers for brevity.
+ // If a pointer in a pair ever occurs by itself or as a different pair,
+ // then the pair is broken.
+ pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
+ unpair := func(p value.Pointer) {
+ if !pairedTrunkPtrs[p].IsNil() {
+ pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
+ }
+ pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ unpair(p.p) // standalone pointer cannot be part of a pair
+ case trunkReferences:
+ p0, ok0 := pairedTrunkPtrs[p.pp[0]]
+ p1, ok1 := pairedTrunkPtrs[p.pp[1]]
+ switch {
+ case !ok0 && !ok1:
+ // Register the newly seen pair.
+ pairedTrunkPtrs[p.pp[0]] = p.pp[1]
+ pairedTrunkPtrs[p.pp[1]] = p.pp[0]
+ case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
+ // Exact pair already seen; do nothing.
+ default:
+ // Pair conflicts with some other pair; break all pairs.
+ unpair(p.pp[0])
+ unpair(p.pp[1])
+ }
+ }
+ }
+
+ // Correlate each pointer referenced by leaves to a unique identifier,
+ // and print the IDs for each trunk that matches those pointers.
+ var nextID uint
+ ptrIDs := make(map[value.Pointer]uint)
+ newID := func() uint {
+ id := nextID
+ nextID++
+ return id
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ if print := leafPtrs[p.p]; print {
+ id, ok := ptrIDs[p.p]
+ if !ok {
+ id = newID()
+ ptrIDs[p.p] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ }
+ case trunkReferences:
+ print0 := leafPtrs[p.pp[0]]
+ print1 := leafPtrs[p.pp[1]]
+ if print0 || print1 {
+ id0, ok0 := ptrIDs[p.pp[0]]
+ id1, ok1 := ptrIDs[p.pp[1]]
+ isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
+ if isPair {
+ var id uint
+ assert(ok0 == ok1) // must be seen together or not at all
+ if ok0 {
+ assert(id0 == id1) // must have the same ID
+ id = id0
+ } else {
+ id = newID()
+ ptrIDs[p.pp[0]] = id
+ ptrIDs[p.pp[1]] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ } else {
+ if print0 && !ok0 {
+ id0 = newID()
+ ptrIDs[p.pp[0]] = id0
+ }
+ if print1 && !ok1 {
+ id1 = newID()
+ ptrIDs[p.pp[1]] = id1
+ }
+ switch {
+ case print0 && print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
+ case print0:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
+ case print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
+ }
+ }
+ }
+ }
+ }
+
+ // Update all leaf references with the unique identifier.
+ for _, leaf := range leaves {
+ if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
+ leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
+ }
+ }
+}
+
+func formatReference(id uint) string {
+ return fmt.Sprintf("ref#%d", id)
+}
+
+func updateReferencePrefix(prefix, ref string) string {
+ if prefix == "" {
+ return pointerDelimPrefix + ref + pointerDelimSuffix
+ }
+ suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
+ return pointerDelimPrefix + ref + ": " + suffix
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 00000000000..e39f42284ee
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,414 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+var (
+ anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+ stringType = reflect.TypeOf((*string)(nil)).Elem()
+ bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
+ byteType = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
+type formatValueOptions struct {
+ // AvoidStringer controls whether to avoid calling custom stringer
+ // methods like error.Error or fmt.Stringer.String.
+ AvoidStringer bool
+
+ // PrintAddresses controls whether to print the address of all pointers,
+ // slice elements, and maps.
+ PrintAddresses bool
+
+ // QualifiedNames controls whether FormatType uses the fully qualified name
+ // (including the full package path as opposed to just the package name).
+ QualifiedNames bool
+
+ // VerbosityLevel controls the amount of output to produce.
+ // A higher value produces more output. A value of zero or lower produces
+ // no output (represented using an ellipsis).
+ // If LimitVerbosity is false, then the level is treated as infinite.
+ VerbosityLevel int
+
+ // LimitVerbosity specifies that formatting should respect VerbosityLevel.
+ LimitVerbosity bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode mode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+ // Check whether to emit the type or not.
+ switch opts.TypeMode {
+ case autoType:
+ switch t.Kind() {
+ case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+ if s.Equal(textNil) {
+ return s
+ }
+ default:
+ return s
+ }
+ if opts.DiffMode == diffIdentical {
+ return s // elide type for identical nodes
+ }
+ case elideType:
+ return s
+ }
+
+ // Determine the type label, applying special handling for unnamed types.
+ typeName := value.TypeString(t, opts.QualifiedNames)
+ if t.Name() == "" {
+ // According to Go grammar, certain type literals contain symbols that
+ // do not strongly bind to the next lexicographical token (e.g., *T).
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ typeName = "(" + typeName + ")"
+ }
+ }
+ return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
+
+// wrapParens wraps s with a set of parentheses, but avoids it if the
+// wrapped node itself is already surrounded by a pair of parentheses or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+ var refNode *textWrap
+ if s2, ok := s.(*textWrap); ok {
+ // Unwrap a single pointer reference node.
+ switch s2.Metadata.(type) {
+ case leafReference, trunkReference, trunkReferences:
+ refNode = s2
+ if s3, ok := refNode.Value.(*textWrap); ok {
+ s2 = s3
+ }
+ }
+
+ // Already has delimiters that make parentheses unnecessary.
+ hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+ hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
+ if hasParens || hasBraces {
+ return s
+ }
+ }
+ if refNode != nil {
+ refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+ return s
+ }
+ return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
+ if !v.IsValid() {
+ return nil
+ }
+ t := v.Type()
+
+ // Check slice element for cycles.
+ if parentKind == reflect.Slice {
+ ptrRef, visited := ptrs.Push(v.Addr())
+ if visited {
+ return makeLeafReference(ptrRef, false)
+ }
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+ }
+
+ // Check whether there is an Error or String method to call.
+ if !opts.AvoidStringer && v.CanInterface() {
+ // Avoid calling Error or String methods on nil receivers since many
+ // implementations crash when doing so.
+ if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+ var prefix, strVal string
+ func() {
+ // Swallow and ignore any panics from String or Error.
+ defer func() { recover() }()
+ switch v := v.Interface().(type) {
+ case error:
+ strVal = v.Error()
+ prefix = "e"
+ case fmt.Stringer:
+ strVal = v.String()
+ prefix = "s"
+ }
+ }()
+ if prefix != "" {
+ return opts.formatString(prefix, strVal)
+ }
+ }
+ }
+
+ // Check whether to explicitly wrap the result with the type.
+ var skipType bool
+ defer func() {
+ if !skipType {
+ out = opts.FormatType(t, out)
+ }
+ }()
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return textLine(fmt.Sprint(v.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return textLine(fmt.Sprint(v.Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uint8:
+ if parentKind == reflect.Slice || parentKind == reflect.Array {
+ return textLine(formatHex(v.Uint()))
+ }
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uintptr:
+ return textLine(formatHex(v.Uint()))
+ case reflect.Float32, reflect.Float64:
+ return textLine(fmt.Sprint(v.Float()))
+ case reflect.Complex64, reflect.Complex128:
+ return textLine(fmt.Sprint(v.Complex()))
+ case reflect.String:
+ return opts.formatString("", v.String())
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ return textLine(formatPointer(value.PointerOf(v), true))
+ case reflect.Struct:
+ var list textList
+ v := makeAddressable(v) // needed for retrieveUnexportedField
+ maxLen := v.NumField()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ for i := 0; i < v.NumField(); i++ {
+ vv := v.Field(i)
+ if vv.IsZero() {
+ continue // Elide fields with zero values
+ }
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sf := t.Field(i)
+ if !isExported(sf.Name) {
+ vv = retrieveUnexportedField(v, sf, true)
+ }
+ s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sf.Name, Value: s})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case reflect.Slice:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check whether this is a []byte of text data.
+ if t.Elem() == byteType {
+ b := v.Bytes()
+ isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
+ if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
+ out = opts.formatString("", string(b))
+ skipType = true
+ return opts.FormatType(t, out)
+ }
+ }
+
+ fallthrough
+ case reflect.Array:
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for i := 0; i < v.Len(); i++ {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
+ list = append(list, textRecord{Value: s})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if t.Kind() == reflect.Slice && opts.PrintAddresses {
+ header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
+ out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
+ }
+ return out
+ case reflect.Map:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ return makeLeafReference(ptrRef, opts.PrintAddresses)
+ }
+ defer ptrs.Pop()
+
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for _, k := range value.SortKeys(v.MapKeys()) {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sk := formatMapKey(k, false, ptrs)
+ sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sk, Value: sv})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ return out
+ case reflect.Ptr:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ out = makeLeafReference(ptrRef, opts.PrintAddresses)
+ return &textWrap{Prefix: "&", Value: out}
+ }
+ defer ptrs.Pop()
+
+ // Skip the name only if this is an unnamed pointer type.
+ // Otherwise taking the address of a value does not reproduce
+ // the named pointer type.
+ if v.Type().Name() == "" {
+ skipType = true // Let the underlying value print the type instead
+ }
+ out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ return out
+ case reflect.Interface:
+ if v.IsNil() {
+ return textNil
+ }
+ // Interfaces accept different concrete types,
+ // so configure the underlying value to explicitly print the type.
+ return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+ }
+}
+
+func (opts formatOptions) formatString(prefix, s string) textNode {
+ maxLen := len(s)
+ maxLines := strings.Count(s, "\n") + 1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+ maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ }
+
+ // For multiline strings, use the triple-quote syntax,
+ // but only use it when printing removed or inserted nodes since
+ // we only want the extra verbosity for those cases.
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+ isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+ for i := 0; i < len(lines) && isTripleQuoted; i++ {
+ lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ line := lines[i]
+ isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+ }
+ if isTripleQuoted {
+ var list textList
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ for i, line := range lines {
+ if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+ comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+ break
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+ }
+
+ // Format the string as a single-line quoted string.
+ if len(s) > maxLen+len(textEllipsis) {
+ return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+ }
+ return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+ var opts formatOptions
+ opts.DiffMode = diffIdentical
+ opts.TypeMode = elideType
+ opts.PrintAddresses = disambiguate
+ opts.AvoidStringer = disambiguate
+ opts.QualifiedNames = disambiguate
+ opts.VerbosityLevel = maxVerbosityPreset
+ opts.LimitVerbosity = true
+ s := opts.FormatValue(v, reflect.Map, ptrs).String()
+ return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+ // Use quoted string if it the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+ }
+ if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string {
+ var f string
+ switch {
+ case u <= 0xff:
+ f = "0x%02x"
+ case u <= 0xffff:
+ f = "0x%04x"
+ case u <= 0xffffff:
+ f = "0x%06x"
+ case u <= 0xffffffff:
+ f = "0x%08x"
+ case u <= 0xffffffffff:
+ f = "0x%010x"
+ case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_slices.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 00000000000..23e444f62f3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,614 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+ switch {
+ case opts.DiffMode != diffUnknown:
+ return false // Must be formatting in diff mode
+ case v.NumDiff == 0:
+ return false // No differences detected
+ case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+ return false // Both values must be valid
+ case v.NumIgnored > 0:
+ return false // Some ignore option was used
+ case v.NumTransformed > 0:
+ return false // Some transform option was used
+ case v.NumCompared > 1:
+ return false // More than one comparison was used
+ case v.NumCompared == 1 && v.Type.Name() != "":
+ // The need for cmp to check applicability of options on every element
+ // in a slice is a significant performance detriment for large []byte.
+ // The workaround is to specify Comparer(bytes.Equal),
+ // which enables cmp to compare []byte more efficiently.
+ // If they differ, we still want to provide batched diffing.
+ // The logic disallows named types since they tend to have their own
+ // String method, with nicer formatting than what this provides.
+ return false
+ }
+
+ // Check whether this is an interface with the same concrete types.
+ t := v.Type
+ vx, vy := v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ }
+
+ // Check whether we provide specialized diffing for this type.
+ switch t.Kind() {
+ case reflect.String:
+ case reflect.Array, reflect.Slice:
+ // Only slices of primitive types have specialized handling.
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ default:
+ return false
+ }
+
+ // Both slice values have to be non-empty.
+ if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
+ return false
+ }
+
+ // If a sufficient number of elements already differ,
+ // use specialized formatting even if length requirement is not met.
+ if v.NumDiff > v.NumSame {
+ return true
+ }
+ default:
+ return false
+ }
+
+ // Use specialized string diffing for longer slices or strings.
+ const minLength = 32
+ return vx.Len() >= minLength && vy.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+ assert(opts.DiffMode == diffUnknown)
+ t, vx, vy := v.Type, v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ opts = opts.WithTypeMode(emitType)
+ }
+
+ // Auto-detect the type of the data.
+ var sx, sy string
+ var ssx, ssy []string
+ var isString, isMostlyText, isPureLinedText, isBinary bool
+ switch {
+ case t.Kind() == reflect.String:
+ sx, sy = vx.String(), vy.String()
+ isString = true
+ case t.Kind() == reflect.Slice && t.Elem() == byteType:
+ sx, sy = string(vx.Bytes()), string(vy.Bytes())
+ isString = true
+ case t.Kind() == reflect.Array:
+ // Arrays need to be addressable for slice operations to work.
+ vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+ vx2.Set(vx)
+ vy2.Set(vy)
+ vx, vy = vx2, vy2
+ }
+ if isString {
+ var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
+ for i, r := range sx + sy {
+ numTotalRunes++
+ if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
+ numValidRunes++
+ }
+ if r == '\n' {
+ if maxLineLen < i-lastLineIdx {
+ maxLineLen = i - lastLineIdx
+ }
+ lastLineIdx = i + 1
+ numLines++
+ }
+ }
+ isPureText := numValidRunes == numTotalRunes
+ isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
+ isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
+ isBinary = !isMostlyText
+
+ // Avoid diffing by lines if it produces a significantly more complex
+ // edit script than diffing by bytes.
+ if isPureLinedText {
+ ssx = strings.Split(sx, "\n")
+ ssy = strings.Split(sy, "\n")
+ esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(ssx[ix] == ssy[iy])
+ })
+ esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(sx[ix] == sy[iy])
+ })
+ efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
+ efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
+ quotedLength := len(strconv.Quote(sx + sy))
+ unquotedLength := len(sx) + len(sy)
+ escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
+ }
+ }
+
+ // Format the string into printable records.
+ var list textList
+ var delim string
+ switch {
+ // If the text appears to be multi-lined text,
+ // then perform differencing across individual lines.
+ case isPureLinedText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.Index(0).String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ delim = "\n"
+
+ // If possible, use a custom triple-quote (""") syntax for printing
+ // differences in a string literal. This format is more readable,
+ // but has edge-cases where differences are visually indistinguishable.
+ // This format is avoided under the following conditions:
+ // - A line starts with `"""`
+ // - A line starts with "..."
+ // - A line contains non-printable characters
+ // - Adjacent different lines differ only by whitespace
+ //
+ // For example:
+ //
+ // """
+ // ... // 3 identical lines
+ // foo
+ // bar
+ // - baz
+ // + BAZ
+ // """
+ isTripleQuoted := true
+ prevRemoveLines := map[string]bool{}
+ prevInsertLines := map[string]bool{}
+ var list2 textList
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ for _, r := range list {
+ if !r.Value.Equal(textEllipsis) {
+ line, _ := strconv.Unquote(string(r.Value.(textLine)))
+ line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ normLine := strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return -1 // drop whitespace to avoid visually indistinguishable output
+ }
+ return r
+ }, line)
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
+ switch r.Diff {
+ case diffRemoved:
+ isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
+ prevRemoveLines[normLine] = true
+ case diffInserted:
+ isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
+ prevInsertLines[normLine] = true
+ }
+ if !isTripleQuoted {
+ break
+ }
+ r.Value = textLine(line)
+ r.ElideComma = true
+ }
+ if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
+ prevRemoveLines = map[string]bool{}
+ prevInsertLines = map[string]bool{}
+ }
+ list2 = append(list2, r)
+ }
+ if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
+ list2 = list2[:len(list2)-1] // elide single empty line at the end
+ }
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ if isTripleQuoted {
+ var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
+ switch t.Kind() {
+ case reflect.String:
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ // Always emit type for slices since the triple-quote syntax
+ // looks like a string (not a slice).
+ opts = opts.WithTypeMode(emitType)
+ out = opts.FormatType(t, out)
+ }
+ return out
+ }
+
+ // If the text appears to be single-lined text,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is printed as quoted strings.
+ case isMostlyText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+
+ // If the text appears to be binary data,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is inspired by hexdump.
+ case isBinary:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ }
+ s := strings.Join(ss, ", ")
+ comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+ return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+ },
+ )
+
+ // For all other slices of primitive types,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The size of each chunk depends on the width of the element kind.
+ default:
+ var chunkSize int
+ if t.Elem().Kind() == reflect.Bool {
+ chunkSize = 16
+ } else {
+ switch t.Elem().Bits() {
+ case 8:
+ chunkSize = 16
+ case 16:
+ chunkSize = 12
+ case 32:
+ chunkSize = 8
+ default:
+ chunkSize = 8
+ }
+ }
+ list = opts.formatDiffSlice(
+ vx, vy, chunkSize, t.Elem().Kind().String(),
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
+ case reflect.Uint8, reflect.Uintptr:
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+ }
+ }
+ s := strings.Join(ss, ", ")
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ }
+
+ // Wrap the output with appropriate type information.
+ var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if !isMostlyText {
+ // The "{...}" byte-sequence literal is not valid Go syntax for strings.
+ // Emit the type for extra clarity (e.g. "string{...}").
+ if t.Kind() == reflect.String {
+ opts = opts.WithTypeMode(emitType)
+ }
+ return opts.FormatType(t, out)
+ }
+ switch t.Kind() {
+ case reflect.String:
+ out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != bytesType {
+ out = opts.FormatType(t, out)
+ }
+ }
+ return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
+func formatASCII(s string) string {
+ b := bytes.Repeat([]byte{'.'}, len(s))
+ for i := 0; i < len(s); i++ {
+ if ' ' <= s[i] && s[i] <= '~' {
+ b[i] = s[i]
+ }
+ }
+ return string(b)
+}
+
+func (opts formatOptions) formatDiffSlice(
+ vx, vy reflect.Value, chunkSize int, name string,
+ makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+ eq := func(ix, iy int) bool {
+ return vx.Index(ix).Interface() == vy.Index(iy).Interface()
+ }
+ es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+ return diff.BoolResult(eq(ix, iy))
+ })
+
+ appendChunks := func(v reflect.Value, d diffMode) int {
+ n0 := v.Len()
+ for v.Len() > 0 {
+ n := chunkSize
+ if n > v.Len() {
+ n = v.Len()
+ }
+ list = append(list, makeRec(v.Slice(0, n), d))
+ v = v.Slice(n, v.Len())
+ }
+ return n0 - v.Len()
+ }
+
+ var numDiffs int
+ maxLen := -1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ opts.VerbosityLevel--
+ }
+
+ groups := coalesceAdjacentEdits(name, es)
+ groups = coalesceInterveningIdentical(groups, chunkSize/4)
+ groups = cleanupSurroundingIdentical(groups, eq)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Print equal.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing equal bytes to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+ numLo++
+ }
+ for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ numHi++
+ }
+ if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+ numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
+ }
+
+ // Print the equal bytes.
+ appendChunks(vx.Slice(0, numLo), diffIdentical)
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ }
+ appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+ vx = vx.Slice(numEqual, vx.Len())
+ vy = vy.Slice(numEqual, vy.Len())
+ continue
+ }
+
+ // Print unequal.
+ len0 := len(list)
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+ vx = vx.Slice(nx, vx.Len())
+ ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+ vy = vy.Slice(ny, vy.Len())
+ numDiffs += len(list) - len0
+ }
+ if maxGroup.IsZero() {
+ assert(vx.Len() == 0 && vy.Len() == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ }
+ return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+//
+// Example:
+//
+// Input: "..XXY...Y"
+// Output: [
+// {NumIdentical: 2},
+// {NumRemoved: 2, NumInserted 1},
+// {NumIdentical: 3},
+// {NumInserted: 1},
+// ]
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+ var prevMode byte
+ lastStats := func(mode byte) *diffStats {
+ if prevMode != mode {
+ groups = append(groups, diffStats{Name: name})
+ prevMode = mode
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, e := range es {
+ switch e {
+ case diff.Identity:
+ lastStats('=').NumIdentical++
+ case diff.UniqueX:
+ lastStats('!').NumRemoved++
+ case diff.UniqueY:
+ lastStats('!').NumInserted++
+ case diff.Modified:
+ lastStats('!').NumModified++
+ }
+ }
+ return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+//
+// Example:
+//
+// WindowSize: 16,
+// Input: [
+// {NumIdentical: 61}, // group 0
+// {NumRemoved: 3, NumInserted: 1}, // group 1
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 9}, // └── coalesce
+// {NumIdentical: 64}, // group 2
+// {NumRemoved: 3, NumInserted: 1}, // group 3
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 7}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 2}, // └── coalesce
+// {NumIdentical: 63}, // group 4
+// ]
+// Output: [
+// {NumIdentical: 61},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 64},
+// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 63},
+// ]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+ groups, groupsOrig := groups[:0], groups
+ for i, ds := range groupsOrig {
+ if len(groups) >= 2 && ds.NumDiff() > 0 {
+ prev := &groups[len(groups)-2] // Unequal group
+ curr := &groups[len(groups)-1] // Equal group
+ next := &groupsOrig[i] // Unequal group
+ hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+ hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+ if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+ *prev = prev.Append(*curr).Append(*next)
+ groups = groups[:len(groups)-1] // Truncate off equal group
+ continue
+ }
+ }
+ groups = append(groups, ds)
+ }
+ return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves and trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements becomes possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+// Input: [
+// {NumIdentical: 61},
+// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+// {NumIdentical: 67},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+// {NumIdentical: 54},
+// ]
+// Output: [
+// {NumIdentical: 64}, // incremented by 3
+// {NumRemoved: 9},
+// {NumIdentical: 67},
+// {NumRemoved: 9},
+// {NumIdentical: 64}, // incremented by 10
+// ]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+ var ix, iy int // indexes into sequence x and y
+ for i, ds := range groups {
+ // Handle equal group.
+ if ds.NumDiff() == 0 {
+ ix += ds.NumIdentical
+ iy += ds.NumIdentical
+ continue
+ }
+
+ // Handle unequal group.
+ nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+ ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+ var numLeadingIdentical, numTrailingIdentical int
+ for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+ numLeadingIdentical++
+ }
+ for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+ numTrailingIdentical++
+ }
+ if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+ if numLeadingIdentical > 0 {
+ // Remove leading identical span from this group and
+ // insert it into the preceding group.
+ if i-1 >= 0 {
+ groups[i-1].NumIdentical += numLeadingIdentical
+ } else {
+ // No preceding group exists, so prepend a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
+ }()
+ }
+ // Increment indexes since the preceding group would have handled this.
+ ix += numLeadingIdentical
+ iy += numLeadingIdentical
+ }
+ if numTrailingIdentical > 0 {
+ // Remove trailing identical span from this group and
+ // insert it into the succeeding group.
+ if i+1 < len(groups) {
+ groups[i+1].NumIdentical += numTrailingIdentical
+ } else {
+ // No succeeding group exists, so append a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
+ }()
+ }
+ // Do not increment indexes since the succeeding group will handle this.
+ }
+
+ // Update this group since some identical elements were removed.
+ nx -= numIdentical
+ ny -= numIdentical
+ groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
+ }
+ ix += nx
+ iy += ny
+ }
+ return groups
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_text.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 00000000000..388fcf57120
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,432 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+const maxColumnLength = 80
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+ // The output of Diff is documented as being unstable to provide future
+ // flexibility in changing the output for more humanly readable reports.
+ // This logic intentionally introduces instability to the exact output
+ // so that users can detect accidental reliance on stability early on,
+ // rather than much later when an actual change to the format occurs.
+ if flags.Deterministic || randBool {
+ // Use regular spaces (U+0020).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ } else {
+ // Use non-breaking spaces (U+00a0).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ }
+ return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+ for ; n > 0; n-- {
+ b = append(b, c)
+ }
+ return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
+type textNode interface {
+ // Len reports the length in bytes of a single-line version of the tree.
+ // Nested textRecord.Diff and textRecord.Comment fields are ignored.
+ Len() int
+ // Equal reports whether the two trees are structurally identical.
+ // Nested textRecord.Diff and textRecord.Comment fields are compared.
+ Equal(textNode) bool
+ // String returns the string representation of the text tree.
+ // It is not guaranteed that len(x.String()) == x.Len(),
+ // nor that x.String() == y.String() implies that x.Equal(y).
+ String() string
+
+ // formatCompactTo formats the contents of the tree as a single-line string
+ // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+ // fields are ignored.
+ //
+ // However, not all nodes in the tree should be collapsed as a single-line.
+ // If a node can be collapsed as a single-line, it is replaced by a textLine
+ // node. Since the top-level node cannot replace itself, this also returns
+ // the current node itself.
+ //
+ // This does not mutate the receiver.
+ formatCompactTo([]byte, diffMode) ([]byte, textNode)
+ // formatExpandedTo formats the contents of the tree as a multi-line string
+ // to the provided buffer. In order for column alignment to operate well,
+ // formatCompactTo must be called before calling formatExpandedTo.
+ formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+ Prefix string // e.g., "bytes.Buffer{"
+ Value textNode // textWrap | textList | textLine
+ Suffix string // e.g., "}"
+ Metadata interface{} // arbitrary metadata; has no effect on formatting
+}
+
+func (s *textWrap) Len() int {
+ return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 *textWrap) Equal(s2 textNode) bool {
+ if s2, ok := s2.(*textWrap); ok {
+ return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+ }
+ return false
+}
+func (s *textWrap) String() string {
+ var d diffMode
+ var n indentMode
+ _, s2 := s.formatCompactTo(nil, d)
+ b := n.appendIndent(nil, d) // Leading indent
+ b = s2.formatExpandedTo(b, d, n) // Main body
+ b = append(b, '\n') // Trailing newline
+ return string(b)
+}
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ n0 := len(b) // Original buffer length
+ b = append(b, s.Prefix...)
+ b, s.Value = s.Value.formatCompactTo(b, d)
+ b = append(b, s.Suffix...)
+ if _, ok := s.Value.(textLine); ok {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ b = append(b, s.Prefix...)
+ b = s.Value.formatExpandedTo(b, d, n)
+ b = append(b, s.Suffix...)
+ return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+ Diff diffMode // e.g., 0 or '-' or '+'
+ Key string // e.g., "MyField"
+ Value textNode // textWrap | textLine
+ ElideComma bool // avoid trailing comma
+ Comment fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If cs is non-zero it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+ hasStats := !ds.IsZero()
+ if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+ if hasStats {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
+ } else {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
+ }
+ return
+ }
+ if hasStats {
+ (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+ }
+}
+
+func (s textList) Len() (n int) {
+ for i, r := range s {
+ n += len(r.Key)
+ if r.Key != "" {
+ n += len(": ")
+ }
+ n += r.Value.Len()
+ if i < len(s)-1 {
+ n += len(", ")
+ }
+ }
+ return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textList); ok {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ r1, r2 := s1[i], s2[i]
+ if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (s textList) String() string {
+ return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ s = append(textList(nil), s...) // Avoid mutating original
+
+ // Determine whether we can collapse this list as a single line.
+ n0 := len(b) // Original buffer length
+ var multiLine bool
+ for i, r := range s {
+ if r.Diff == diffInserted || r.Diff == diffRemoved {
+ multiLine = true
+ }
+ b = append(b, r.Key...)
+ if r.Key != "" {
+ b = append(b, ": "...)
+ }
+ b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+ if _, ok := s[i].Value.(textLine); !ok {
+ multiLine = true
+ }
+ if r.Comment != nil {
+ multiLine = true
+ }
+ if i < len(s)-1 {
+ b = append(b, ", "...)
+ }
+ }
+ // Force multi-lined output when printing a removed/inserted node that
+ // is sufficiently long.
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
+ multiLine = true
+ }
+ if !multiLine {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ alignKeyLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return r.Key == "" || !isLine
+ },
+ func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
+ )
+ alignValueLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+ },
+ func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
+ )
+
+ // Format lists of simple lists in a batched form.
+ // If the list is sequence of only textLine values,
+ // then batch multiple values on a single line.
+ var isSimple bool
+ for _, r := range s {
+ _, isLine := r.Value.(textLine)
+ isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
+ if !isSimple {
+ break
+ }
+ }
+ if isSimple {
+ n++
+ var batch []byte
+ emitBatch := func() {
+ if len(batch) > 0 {
+ b = n.appendIndent(append(b, '\n'), d)
+ b = append(b, bytes.TrimRight(batch, " ")...)
+ batch = batch[:0]
+ }
+ }
+ for _, r := range s {
+ line := r.Value.(textLine)
+ if len(batch)+len(line)+len(", ") > maxColumnLength {
+ emitBatch()
+ }
+ batch = append(batch, line...)
+ batch = append(batch, ", "...)
+ }
+ emitBatch()
+ n--
+ return n.appendIndent(append(b, '\n'), d)
+ }
+
+ // Format the list as a multi-lined output.
+ n++
+ for i, r := range s {
+ b = n.appendIndent(append(b, '\n'), d|r.Diff)
+ if r.Key != "" {
+ b = append(b, r.Key+": "...)
+ }
+ b = alignKeyLens[i].appendChar(b, ' ')
+
+ b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+ if !r.ElideComma {
+ b = append(b, ',')
+ }
+ b = alignValueLens[i].appendChar(b, ' ')
+
+ if r.Comment != nil {
+ b = append(b, " // "+r.Comment.String()...)
+ }
+ }
+ n--
+
+ return n.appendIndent(append(b, '\n'), d)
+}
+
+func (s textList) alignLens(
+ skipFunc func(textRecord) bool,
+ lenFunc func(textRecord) int,
+) []repeatCount {
+ var startIdx, endIdx, maxLen int
+ lens := make([]repeatCount, len(s))
+ for i, r := range s {
+ if skipFunc(r) {
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ startIdx, endIdx, maxLen = i+1, i+1, 0
+ } else {
+ if maxLen < lenFunc(r) {
+ maxLen = lenFunc(r)
+ }
+ endIdx = i + 1
+ }
+ }
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+ textNil = textLine("nil")
+ textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+ return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textLine); ok {
+ return bytes.Equal([]byte(s1), []byte(s2))
+ }
+ return false
+}
+func (s textLine) String() string {
+ return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+ return append(b, s...)
+}
+
+type diffStats struct {
+ Name string
+ NumIgnored int
+ NumIdentical int
+ NumRemoved int
+ NumInserted int
+ NumModified int
+}
+
+func (s diffStats) IsZero() bool {
+ s.Name = ""
+ return s == diffStats{}
+}
+
+func (s diffStats) NumDiff() int {
+ return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+ assert(s.Name == ds.Name)
+ s.NumIgnored += ds.NumIgnored
+ s.NumIdentical += ds.NumIdentical
+ s.NumRemoved += ds.NumRemoved
+ s.NumInserted += ds.NumInserted
+ s.NumModified += ds.NumModified
+ return s
+}
+
+// String prints a humanly-readable summary of coalesced records.
+//
+// Example:
+//
+// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+ var ss []string
+ var sum int
+ labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+ counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+ for i, n := range counts {
+ if n > 0 {
+ ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+ }
+ sum += n
+ }
+
+ // Pluralize the name (adjusting for some obscure English grammar rules).
+ name := s.Name
+ if sum > 1 {
+ name += "s"
+ if strings.HasSuffix(name, "ys") {
+ name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+ }
+ }
+
+ // Format the list according to English grammar (with Oxford comma).
+ switch n := len(ss); n {
+ case 0:
+ return ""
+ case 1, 2:
+ return strings.Join(ss, " and ") + " " + name
+ default:
+ return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+ }
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_value.go b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 00000000000..668d470fd83
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+ parent *valueNode
+
+ Type reflect.Type
+ ValueX reflect.Value
+ ValueY reflect.Value
+
+ // NumSame is the number of leaf nodes that are equal.
+ // All descendants are equal only if NumDiff is 0.
+ NumSame int
+ // NumDiff is the number of leaf nodes that are not equal.
+ NumDiff int
+ // NumIgnored is the number of leaf nodes that are ignored.
+ NumIgnored int
+ // NumCompared is the number of leaf nodes that were compared
+ // using an Equal method or Comparer function.
+ NumCompared int
+ // NumTransformed is the number of non-leaf nodes that were transformed.
+ NumTransformed int
+ // NumChildren is the number of transitive descendants of this node.
+ // This counts from zero; thus, leaf nodes have no descendants.
+ NumChildren int
+ // MaxDepth is the maximum depth of the tree. This counts from zero;
+ // thus, leaf nodes have a depth of zero.
+ MaxDepth int
+
+ // Records is a list of struct fields, slice elements, or map entries.
+ Records []reportRecord // If populated, implies Value is not populated
+
+ // Value is the result of a transformation, pointer indirect, of
+ // type assertion.
+ Value *valueNode // If populated, implies Records is not populated
+
+ // TransformerName is the name of the transformer.
+ TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+ Key reflect.Value // Invalid for slice element
+ Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+ vx, vy := ps.Values()
+ child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+ switch s := ps.(type) {
+ case StructField:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+ case SliceIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Value: child})
+ case MapIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+ case Indirect:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case TypeAssertion:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case Transform:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ parent.TransformerName = s.Name()
+ parent.NumTransformed++
+ default:
+ assert(parent == nil) // Must be the root step
+ }
+ return child
+}
+
+func (r *valueNode) Report(rs Result) {
+ assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+ if rs.ByIgnore() {
+ r.NumIgnored++
+ } else {
+ if rs.Equal() {
+ r.NumSame++
+ } else {
+ r.NumDiff++
+ }
+ }
+ assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+ if rs.ByMethod() {
+ r.NumCompared++
+ }
+ if rs.ByFunc() {
+ r.NumCompared++
+ }
+ assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+ if child.parent == nil {
+ return nil
+ }
+ parent = child.parent
+ parent.NumSame += child.NumSame
+ parent.NumDiff += child.NumDiff
+ parent.NumIgnored += child.NumIgnored
+ parent.NumCompared += child.NumCompared
+ parent.NumTransformed += child.NumTransformed
+ parent.NumChildren += child.NumChildren + 1
+ if parent.MaxDepth < child.MaxDepth+1 {
+ parent.MaxDepth = child.MaxDepth + 1
+ }
+ return parent
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/AUTHORS b/integration/assets/hydrabroker/vendor/github.com/google/pprof/AUTHORS
new file mode 100644
index 00000000000..fd736cb1cfb
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/AUTHORS
@@ -0,0 +1,7 @@
+# This is the official list of pprof authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+# Names should be added to this file as:
+# Name or Organization
+# The email address is not required for organizations.
+Google Inc.
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/CONTRIBUTORS b/integration/assets/hydrabroker/vendor/github.com/google/pprof/CONTRIBUTORS
new file mode 100644
index 00000000000..8c8c37d2c8f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/CONTRIBUTORS
@@ -0,0 +1,16 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name
+Raul Silvera
+Tipp Moseley
+Hyoun Kyu Cho
+Martin Spier
+Taco de Wolff
+Andrew Hunter
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/LICENSE b/integration/assets/hydrabroker/vendor/github.com/google/pprof/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/encode.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/encode.go
new file mode 100644
index 00000000000..860bb304c34
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/encode.go
@@ -0,0 +1,591 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "errors"
+ "sort"
+ "strings"
+)
+
+func (p *Profile) decoder() []decoder {
+ return profileDecoder
+}
+
+// preEncode populates the unexported fields to be used by encode
+// (with suffix X) from the corresponding exported fields. The
+// exported fields are cleared up to facilitate testing.
+func (p *Profile) preEncode() {
+ strings := make(map[string]int)
+ addString(strings, "")
+
+ for _, st := range p.SampleType {
+ st.typeX = addString(strings, st.Type)
+ st.unitX = addString(strings, st.Unit)
+ }
+
+ for _, s := range p.Sample {
+ s.labelX = nil
+ var keys []string
+ for k := range s.Label {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ vs := s.Label[k]
+ for _, v := range vs {
+ s.labelX = append(s.labelX,
+ label{
+ keyX: addString(strings, k),
+ strX: addString(strings, v),
+ },
+ )
+ }
+ }
+ var numKeys []string
+ for k := range s.NumLabel {
+ numKeys = append(numKeys, k)
+ }
+ sort.Strings(numKeys)
+ for _, k := range numKeys {
+ keyX := addString(strings, k)
+ vs := s.NumLabel[k]
+ units := s.NumUnit[k]
+ for i, v := range vs {
+ var unitX int64
+ if len(units) != 0 {
+ unitX = addString(strings, units[i])
+ }
+ s.labelX = append(s.labelX,
+ label{
+ keyX: keyX,
+ numX: v,
+ unitX: unitX,
+ },
+ )
+ }
+ }
+ s.locationIDX = make([]uint64, len(s.Location))
+ for i, loc := range s.Location {
+ s.locationIDX[i] = loc.ID
+ }
+ }
+
+ for _, m := range p.Mapping {
+ m.fileX = addString(strings, m.File)
+ m.buildIDX = addString(strings, m.BuildID)
+ }
+
+ for _, l := range p.Location {
+ for i, ln := range l.Line {
+ if ln.Function != nil {
+ l.Line[i].functionIDX = ln.Function.ID
+ } else {
+ l.Line[i].functionIDX = 0
+ }
+ }
+ if l.Mapping != nil {
+ l.mappingIDX = l.Mapping.ID
+ } else {
+ l.mappingIDX = 0
+ }
+ }
+ for _, f := range p.Function {
+ f.nameX = addString(strings, f.Name)
+ f.systemNameX = addString(strings, f.SystemName)
+ f.filenameX = addString(strings, f.Filename)
+ }
+
+ p.dropFramesX = addString(strings, p.DropFrames)
+ p.keepFramesX = addString(strings, p.KeepFrames)
+
+ if pt := p.PeriodType; pt != nil {
+ pt.typeX = addString(strings, pt.Type)
+ pt.unitX = addString(strings, pt.Unit)
+ }
+
+ p.commentX = nil
+ for _, c := range p.Comments {
+ p.commentX = append(p.commentX, addString(strings, c))
+ }
+
+ p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
+
+ p.stringTable = make([]string, len(strings))
+ for s, i := range strings {
+ p.stringTable[i] = s
+ }
+}
+
+func (p *Profile) encode(b *buffer) {
+ for _, x := range p.SampleType {
+ encodeMessage(b, 1, x)
+ }
+ for _, x := range p.Sample {
+ encodeMessage(b, 2, x)
+ }
+ for _, x := range p.Mapping {
+ encodeMessage(b, 3, x)
+ }
+ for _, x := range p.Location {
+ encodeMessage(b, 4, x)
+ }
+ for _, x := range p.Function {
+ encodeMessage(b, 5, x)
+ }
+ encodeStrings(b, 6, p.stringTable)
+ encodeInt64Opt(b, 7, p.dropFramesX)
+ encodeInt64Opt(b, 8, p.keepFramesX)
+ encodeInt64Opt(b, 9, p.TimeNanos)
+ encodeInt64Opt(b, 10, p.DurationNanos)
+ if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
+ encodeMessage(b, 11, p.PeriodType)
+ }
+ encodeInt64Opt(b, 12, p.Period)
+ encodeInt64s(b, 13, p.commentX)
+ encodeInt64(b, 14, p.defaultSampleTypeX)
+}
+
+var profileDecoder = []decoder{
+ nil, // 0
+ // repeated ValueType sample_type = 1
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.SampleType = append(pp.SampleType, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Sample sample = 2
+ func(b *buffer, m message) error {
+ x := new(Sample)
+ pp := m.(*Profile)
+ pp.Sample = append(pp.Sample, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Mapping mapping = 3
+ func(b *buffer, m message) error {
+ x := new(Mapping)
+ pp := m.(*Profile)
+ pp.Mapping = append(pp.Mapping, x)
+ return decodeMessage(b, x)
+ },
+ // repeated Location location = 4
+ func(b *buffer, m message) error {
+ x := new(Location)
+ x.Line = b.tmpLines[:0] // Use shared space temporarily
+ pp := m.(*Profile)
+ pp.Location = append(pp.Location, x)
+ err := decodeMessage(b, x)
+ b.tmpLines = x.Line[:0]
+ // Copy to shrink size and detach from shared space.
+ x.Line = append([]Line(nil), x.Line...)
+ return err
+ },
+ // repeated Function function = 5
+ func(b *buffer, m message) error {
+ x := new(Function)
+ pp := m.(*Profile)
+ pp.Function = append(pp.Function, x)
+ return decodeMessage(b, x)
+ },
+ // repeated string string_table = 6
+ func(b *buffer, m message) error {
+ err := decodeStrings(b, &m.(*Profile).stringTable)
+ if err != nil {
+ return err
+ }
+ if m.(*Profile).stringTable[0] != "" {
+ return errors.New("string_table[0] must be ''")
+ }
+ return nil
+ },
+ // int64 drop_frames = 7
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
+ // int64 keep_frames = 8
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
+ // int64 time_nanos = 9
+ func(b *buffer, m message) error {
+ if m.(*Profile).TimeNanos != 0 {
+ return errConcatProfile
+ }
+ return decodeInt64(b, &m.(*Profile).TimeNanos)
+ },
+ // int64 duration_nanos = 10
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
+ // ValueType period_type = 11
+ func(b *buffer, m message) error {
+ x := new(ValueType)
+ pp := m.(*Profile)
+ pp.PeriodType = x
+ return decodeMessage(b, x)
+ },
+ // int64 period = 12
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
+ // repeated int64 comment = 13
+ func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
+ // int64 defaultSampleType = 14
+ func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
+}
+
+// postDecode takes the unexported fields populated by decode (with
+// suffix X) and populates the corresponding exported fields.
+// The unexported fields are cleared up to facilitate testing.
+func (p *Profile) postDecode() error {
+ var err error
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ mappingIds := make([]*Mapping, len(p.Mapping)+1)
+ for _, m := range p.Mapping {
+ m.File, err = getString(p.stringTable, &m.fileX, err)
+ m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
+ if m.ID < uint64(len(mappingIds)) {
+ mappingIds[m.ID] = m
+ } else {
+ mappings[m.ID] = m
+ }
+
+ // If this a main linux kernel mapping with a relocation symbol suffix
+ // ("[kernel.kallsyms]_text"), extract said suffix.
+ // It is fairly hacky to handle at this level, but the alternatives appear even worse.
+ const prefix = "[kernel.kallsyms]"
+ if strings.HasPrefix(m.File, prefix) {
+ m.KernelRelocationSymbol = m.File[len(prefix):]
+ }
+ }
+
+ functions := make(map[uint64]*Function, len(p.Function))
+ functionIds := make([]*Function, len(p.Function)+1)
+ for _, f := range p.Function {
+ f.Name, err = getString(p.stringTable, &f.nameX, err)
+ f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
+ f.Filename, err = getString(p.stringTable, &f.filenameX, err)
+ if f.ID < uint64(len(functionIds)) {
+ functionIds[f.ID] = f
+ } else {
+ functions[f.ID] = f
+ }
+ }
+
+ locations := make(map[uint64]*Location, len(p.Location))
+ locationIds := make([]*Location, len(p.Location)+1)
+ for _, l := range p.Location {
+ if id := l.mappingIDX; id < uint64(len(mappingIds)) {
+ l.Mapping = mappingIds[id]
+ } else {
+ l.Mapping = mappings[id]
+ }
+ l.mappingIDX = 0
+ for i, ln := range l.Line {
+ if id := ln.functionIDX; id != 0 {
+ l.Line[i].functionIDX = 0
+ if id < uint64(len(functionIds)) {
+ l.Line[i].Function = functionIds[id]
+ } else {
+ l.Line[i].Function = functions[id]
+ }
+ }
+ }
+ if l.ID < uint64(len(locationIds)) {
+ locationIds[l.ID] = l
+ } else {
+ locations[l.ID] = l
+ }
+ }
+
+ for _, st := range p.SampleType {
+ st.Type, err = getString(p.stringTable, &st.typeX, err)
+ st.Unit, err = getString(p.stringTable, &st.unitX, err)
+ }
+
+ // Pre-allocate space for all locations.
+ numLocations := 0
+ for _, s := range p.Sample {
+ numLocations += len(s.locationIDX)
+ }
+ locBuffer := make([]*Location, numLocations)
+
+ for _, s := range p.Sample {
+ if len(s.labelX) > 0 {
+ labels := make(map[string][]string, len(s.labelX))
+ numLabels := make(map[string][]int64, len(s.labelX))
+ numUnits := make(map[string][]string, len(s.labelX))
+ for _, l := range s.labelX {
+ var key, value string
+ key, err = getString(p.stringTable, &l.keyX, err)
+ if l.strX != 0 {
+ value, err = getString(p.stringTable, &l.strX, err)
+ labels[key] = append(labels[key], value)
+ } else if l.numX != 0 || l.unitX != 0 {
+ numValues := numLabels[key]
+ units := numUnits[key]
+ if l.unitX != 0 {
+ var unit string
+ unit, err = getString(p.stringTable, &l.unitX, err)
+ units = padStringArray(units, len(numValues))
+ numUnits[key] = append(units, unit)
+ }
+ numLabels[key] = append(numLabels[key], l.numX)
+ }
+ }
+ if len(labels) > 0 {
+ s.Label = labels
+ }
+ if len(numLabels) > 0 {
+ s.NumLabel = numLabels
+ for key, units := range numUnits {
+ if len(units) > 0 {
+ numUnits[key] = padStringArray(units, len(numLabels[key]))
+ }
+ }
+ s.NumUnit = numUnits
+ }
+ }
+
+ s.Location = locBuffer[:len(s.locationIDX)]
+ locBuffer = locBuffer[len(s.locationIDX):]
+ for i, lid := range s.locationIDX {
+ if lid < uint64(len(locationIds)) {
+ s.Location[i] = locationIds[lid]
+ } else {
+ s.Location[i] = locations[lid]
+ }
+ }
+ s.locationIDX = nil
+ }
+
+ p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
+ p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
+
+ if pt := p.PeriodType; pt == nil {
+ p.PeriodType = &ValueType{}
+ }
+
+ if pt := p.PeriodType; pt != nil {
+ pt.Type, err = getString(p.stringTable, &pt.typeX, err)
+ pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
+ }
+
+ for _, i := range p.commentX {
+ var c string
+ c, err = getString(p.stringTable, &i, err)
+ p.Comments = append(p.Comments, c)
+ }
+
+ p.commentX = nil
+ p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
+ p.stringTable = nil
+ return err
+}
+
+// padStringArray pads arr with enough empty strings to make arr
+// length l when arr's length is less than l. When arr is already
+// at least l long, it is returned unchanged.
+func padStringArray(arr []string, l int) []string {
+	if l <= len(arr) {
+		return arr
+	}
+	return append(arr, make([]string, l-len(arr))...)
+}
+
+// decoder returns the decoder table used to parse a ValueType message.
+func (p *ValueType) decoder() []decoder {
+	return valueTypeDecoder
+}
+
+// encode writes p to b in profile.proto wire format, omitting zero-valued fields.
+func (p *ValueType) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.typeX)
+	encodeInt64Opt(b, 2, p.unitX)
+}
+
+// valueTypeDecoder maps profile.proto ValueType field numbers (the
+// slice index) to the functions that decode them.
+var valueTypeDecoder = []decoder{
+	nil, // 0
+	// optional int64 type = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
+	// optional int64 unit = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
+}
+
+// decoder returns the decoder table used to parse a Sample message.
+func (p *Sample) decoder() []decoder {
+	return sampleDecoder
+}
+
+// encode writes p to b in profile.proto wire format.
+func (p *Sample) encode(b *buffer) {
+	encodeUint64s(b, 1, p.locationIDX)
+	encodeInt64s(b, 2, p.Value)
+	for _, x := range p.labelX {
+		encodeMessage(b, 3, x)
+	}
+}
+
+// sampleDecoder maps profile.proto Sample field numbers (the slice
+// index) to the functions that decode them.
+var sampleDecoder = []decoder{
+	nil, // 0
+	// repeated uint64 location = 1
+	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+	// repeated int64 value = 2
+	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+	// repeated Label label = 3
+	func(b *buffer, m message) error {
+		s := m.(*Sample)
+		n := len(s.labelX)
+		// Grow the slice first, then decode directly into the new
+		// element to avoid an extra copy of the decoded label.
+		s.labelX = append(s.labelX, label{})
+		return decodeMessage(b, &s.labelX[n])
+	},
+}
+
+// decoder returns the decoder table used to parse a label message.
+func (p label) decoder() []decoder {
+	return labelDecoder
+}
+
+// encode writes p to b in profile.proto wire format, omitting zero-valued fields.
+func (p label) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.keyX)
+	encodeInt64Opt(b, 2, p.strX)
+	encodeInt64Opt(b, 3, p.numX)
+	encodeInt64Opt(b, 4, p.unitX)
+}
+
+// labelDecoder maps profile.proto Label field numbers (the slice
+// index) to the functions that decode them.
+var labelDecoder = []decoder{
+	nil, // 0
+	// optional int64 key = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+	// optional int64 str = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+	// optional int64 num = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+	// optional int64 num_unit = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+// decoder returns the decoder table used to parse a Mapping message.
+func (p *Mapping) decoder() []decoder {
+	return mappingDecoder
+}
+
+// encode writes p to b in profile.proto wire format, omitting zero-valued fields.
+func (p *Mapping) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.Start)
+	encodeUint64Opt(b, 3, p.Limit)
+	encodeUint64Opt(b, 4, p.Offset)
+	encodeInt64Opt(b, 5, p.fileX)
+	encodeInt64Opt(b, 6, p.buildIDX)
+	encodeBoolOpt(b, 7, p.HasFunctions)
+	encodeBoolOpt(b, 8, p.HasFilenames)
+	encodeBoolOpt(b, 9, p.HasLineNumbers)
+	encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+// mappingDecoder maps profile.proto Mapping field numbers (the slice
+// index) to the functions that decode them.
+var mappingDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_offset = 2
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+// decoder returns the decoder table used to parse a Location message.
+func (p *Location) decoder() []decoder {
+	return locationDecoder
+}
+
+// encode writes p to b in profile.proto wire format, omitting zero-valued fields.
+func (p *Location) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.mappingIDX)
+	encodeUint64Opt(b, 3, p.Address)
+	for i := range p.Line {
+		encodeMessage(b, 4, &p.Line[i])
+	}
+	encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+// locationDecoder maps profile.proto Location field numbers (the
+// slice index) to the functions that decode them.
+var locationDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
+	func(b *buffer, m message) error { // repeated Line line = 4
+		pp := m.(*Location)
+		n := len(pp.Line)
+		// Grow the slice first, then decode directly into the new
+		// element to avoid an extra copy of the decoded line.
+		pp.Line = append(pp.Line, Line{})
+		return decodeMessage(b, &pp.Line[n])
+	},
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+// decoder returns the decoder table used to parse a Line message.
+func (p *Line) decoder() []decoder {
+	return lineDecoder
+}
+
+// encode writes p to b in profile.proto wire format, omitting zero-valued fields.
+func (p *Line) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.functionIDX)
+	encodeInt64Opt(b, 2, p.Line)
+	encodeInt64Opt(b, 3, p.Column)
+}
+
+// lineDecoder maps profile.proto Line field numbers (the slice index)
+// to the functions that decode them.
+var lineDecoder = []decoder{
+	nil, // 0
+	// optional uint64 function_id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+	// optional int64 line = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+	// optional int64 column = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) },
+}
+
+// decoder returns the decoder table used to parse a Function message.
+func (p *Function) decoder() []decoder {
+	return functionDecoder
+}
+
+// encode writes p to b in profile.proto wire format, omitting zero-valued fields.
+func (p *Function) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeInt64Opt(b, 2, p.nameX)
+	encodeInt64Opt(b, 3, p.systemNameX)
+	encodeInt64Opt(b, 4, p.filenameX)
+	encodeInt64Opt(b, 5, p.StartLine)
+}
+
+// functionDecoder maps profile.proto Function field numbers (the
+// slice index) to the functions that decode them.
+var functionDecoder = []decoder{
+	nil, // 0
+	// optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+	// optional int64 function_name = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+	// optional int64 function_system_name = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+	// optional int64 filename = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+	// optional int64 start_line = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+// addString interns s in the string table represented by strings,
+// assigning it the next free index on first use, and returns the
+// index of s as an int64 suitable for proto encoding.
+func addString(strings map[string]int, s string) int64 {
+	i, ok := strings[s]
+	if !ok {
+		// len(strings) is the next unused index in insertion order.
+		i = len(strings)
+		strings[s] = i
+	}
+	return int64(i)
+}
+
+// getString resolves the string-table index *strng against strings and
+// zeroes the index afterwards so it is not resolved twice. The err
+// argument allows call chaining: if a previous call already failed,
+// err is returned unchanged and nothing is done. An out-of-range
+// index yields errMalformed.
+func getString(strings []string, strng *int64, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	s := int(*strng)
+	if s < 0 || s >= len(strings) {
+		return "", errMalformed
+	}
+	*strng = 0
+	return strings[s], nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/filter.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/filter.go
new file mode 100644
index 00000000000..c794b939067
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/filter.go
@@ -0,0 +1,274 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+	if focus == nil && ignore == nil && hide == nil && show == nil {
+		fm = true // Missing focus implies a match
+		return
+	}
+	// focusOrIgnore records, per location ID, whether the location was
+	// focused (true) or ignored (false); locations matching neither
+	// are absent from the map.
+	focusOrIgnore := make(map[uint64]bool)
+	// hidden records locations whose lines were all removed by hide/show.
+	hidden := make(map[uint64]bool)
+	for _, l := range p.Location {
+		if ignore != nil && l.matchesName(ignore) {
+			im = true
+			focusOrIgnore[l.ID] = false
+		} else if focus == nil || l.matchesName(focus) {
+			fm = true
+			focusOrIgnore[l.ID] = true
+		}
+
+		if hide != nil && l.matchesName(hide) {
+			hm = true
+			l.Line = l.unmatchedLines(hide)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			}
+		}
+		if show != nil {
+			l.Line = l.matchedLines(show)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			} else {
+				hnm = true
+			}
+		}
+	}
+
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+			if len(hidden) > 0 {
+				// Strip hidden locations from the sample's stack.
+				var locs []*Location
+				for _, loc := range sample.Location {
+					if !hidden[loc.ID] {
+						locs = append(locs, loc)
+					}
+				}
+				if len(locs) == 0 {
+					// Remove sample with no locations (by not adding it to s).
+					continue
+				}
+				sample.Location = locs
+			}
+			s = append(s, sample)
+		}
+	}
+	p.Sample = s
+
+	return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+	if showFrom == nil {
+		return false
+	}
+	// showFromLocs stores location IDs that matched ShowFrom.
+	showFromLocs := make(map[uint64]bool)
+	// Apply to locations.
+	for _, loc := range p.Location {
+		if filterShowFromLocation(loc, showFrom) {
+			showFromLocs[loc.ID] = true
+			matched = true
+		}
+	}
+	// For all samples, strip locations after the highest matching one.
+	// sample.Location is leaf-first, so scanning from the end finds the
+	// match closest to the root.
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		for i := len(sample.Location) - 1; i >= 0; i-- {
+			if showFromLocs[sample.Location[i].ID] {
+				sample.Location = sample.Location[:i+1]
+				// Samples with no matching location are dropped entirely.
+				s = append(s, sample)
+				break
+			}
+		}
+	}
+	p.Sample = s
+	return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept.
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
+	// A match on the mapping's object file keeps every line.
+	if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
+		return true
+	}
+	// Otherwise truncate the inlined-call chain after the last match.
+	if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
+		loc.Line = loc.Line[:i+1]
+		return true
+	}
+	return false
+}
+
+// lastMatchedLineIndex returns the index of the last line that matches a regex,
+// or -1 if no match is found. Both the function name and its filename are
+// tested; lines without function information are skipped.
+func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
+	for i := len(loc.Line) - 1; i >= 0; i-- {
+		if fn := loc.Line[i].Function; fn != nil {
+			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+// FilterTagsByName filters the tags in a profile and only keeps
+// tags that match show and not hide. sm/hm report whether show/hide
+// matched at least one tag name.
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
+	// matchRemove reports whether the tag named name should be deleted,
+	// recording in sm and hm whether show and hide matched anything.
+	matchRemove := func(name string) bool {
+		matchShow := show == nil || show.MatchString(name)
+		matchHide := hide != nil && hide.MatchString(name)
+
+		if matchShow {
+			sm = true
+		}
+		if matchHide {
+			hm = true
+		}
+		// Remove when not shown, or when explicitly hidden (hide wins).
+		return !matchShow || matchHide
+	}
+	for _, s := range p.Sample {
+		for lab := range s.Label {
+			if matchRemove(lab) {
+				delete(s.Label, lab)
+			}
+		}
+		for lab := range s.NumLabel {
+			if matchRemove(lab) {
+				delete(s.NumLabel, lab)
+			}
+		}
+	}
+	return
+}
+
+// matchesName returns whether the location matches the regular
+// expression. It checks any available function names, file names, and
+// mapping object filename.
+func (loc *Location) matchesName(re *regexp.Regexp) bool {
+	for _, ln := range loc.Line {
+		if fn := ln.Function; fn != nil {
+			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+				return true
+			}
+		}
+	}
+	// Fall back to the mapping's object file name, if any.
+	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+		return true
+	}
+	return false
+}
+
+// unmatchedLines returns the lines in the location that do not match
+// the regular expression. If the location's mapping object file
+// matches, every line is considered matched and nil is returned.
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
+	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+		return nil
+	}
+	var lines []Line
+	for _, ln := range loc.Line {
+		if fn := ln.Function; fn != nil {
+			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+				continue
+			}
+		}
+		lines = append(lines, ln)
+	}
+	return lines
+}
+
+// matchedLines returns the lines in the location that match
+// the regular expression. If the location's mapping object file
+// matches, all lines are returned; lines without function
+// information are kept unconditionally.
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
+	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+		return loc.Line
+	}
+	var lines []Line
+	for _, ln := range loc.Line {
+		if fn := ln.Function; fn != nil {
+			if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
+				continue
+			}
+		}
+		lines = append(lines, ln)
+	}
+	return lines
+}
+
+// focusedAndNotIgnored looks up a slice of ids against a map of
+// focused/ignored locations. The map only contains locations that are
+// explicitly focused (true) or ignored (false). Returns whether there is at
+// least one focused location but no ignored locations.
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
+	var f bool
+	for _, loc := range locs {
+		// The comma-ok form distinguishes "ignored" (false) from
+		// "not present in the map at all".
+		if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
+			if focus {
+				// Found focused location. Must keep searching in case there
+				// is an ignored one as well.
+				f = true
+			} else {
+				// Found ignored location. Can return false right away.
+				return false
+			}
+		}
+	}
+	return f
+}
+
+// TagMatch selects tags for filtering. It reports whether a sample's
+// tags satisfy the caller's criteria.
+type TagMatch func(s *Sample) bool
+
+// FilterSamplesByTag removes all samples from the profile, except
+// those that match focus and do not match the ignore regular
+// expression. A nil focus matches everything; a nil ignore matches
+// nothing. fm/im report whether focus/ignore matched any sample.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+	samples := make([]*Sample, 0, len(p.Sample))
+	for _, s := range p.Sample {
+		focused, ignored := true, false
+		if focus != nil {
+			focused = focus(s)
+		}
+		if ignore != nil {
+			ignored = ignore(s)
+		}
+		fm = fm || focused
+		im = im || ignored
+		if focused && !ignored {
+			samples = append(samples, s)
+		}
+	}
+	p.Sample = samples
+	return
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/index.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/index.go
new file mode 100644
index 00000000000..bef1d60467c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/index.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// SampleIndexByName returns the appropriate index for a value of sample index.
+// If numeric, it returns the number, otherwise it looks up the text in the
+// profile sample types. An empty sampleIndex selects the profile's
+// DefaultSampleType when present, falling back to the last sample type.
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
+	if sampleIndex == "" {
+		if dst := p.DefaultSampleType; dst != "" {
+			for i, t := range sampleTypes(p) {
+				if t == dst {
+					return i, nil
+				}
+			}
+		}
+		// By default select the last sample value
+		return len(p.SampleType) - 1, nil
+	}
+	if i, err := strconv.Atoi(sampleIndex); err == nil {
+		if i < 0 || i >= len(p.SampleType) {
+			return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
+		}
+		return i, nil
+	}
+
+	// Remove the inuse_ prefix to support legacy pprof options
+	// "inuse_space" and "inuse_objects" for profiles containing types
+	// "space" and "objects".
+	noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
+	for i, t := range p.SampleType {
+		if t.Type == sampleIndex || t.Type == noInuse {
+			return i, nil
+		}
+	}
+
+	return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
+}
+
+// sampleTypes returns the type names of p's sample types, in order,
+// for use in index lookups and error messages.
+func sampleTypes(p *Profile) []string {
+	types := make([]string, len(p.SampleType))
+	for i, t := range p.SampleType {
+		types[i] = t.Type
+	}
+	return types
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/legacy_java_profile.go
new file mode 100644
index 00000000000..4580bab1839
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/legacy_java_profile.go
@@ -0,0 +1,315 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert java legacy profiles into
+// the profile.proto format.
+
+package profile
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+	// attributeRx matches "name=value" attribute lines in a profile header.
+	attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
+	// javaSampleRx matches a sample line: two counts, then "@" and hex addresses.
+	javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
+	// javaLocationRx matches a location line: a hex address followed by its description.
+	javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
+	// javaLocationFileLineRx matches a "function (file:line)" description.
+	javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
+	// javaLocationPathRx matches a "function (path)" description, e.g. a shared library.
+	javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
+)
+
+// javaCPUProfile returns a new Profile from profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+	p := &Profile{
+		// period is in microseconds; Period is stored in nanoseconds.
+		Period:     period * 1000,
+		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+		SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
+	}
+	var err error
+	// locs maps addresses to Locations so the location section that
+	// follows the samples can be attached to them.
+	var locs map[uint64]*Location
+	if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
+		return nil, err
+	}
+
+	if err = parseJavaLocations(b, locs, p); err != nil {
+		return nil, err
+	}
+
+	// Strip out addresses for better merge.
+	if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// parseJavaProfile returns a new profile from heapz or contentionz
+// data. b is the profile bytes after the header. The profile kind is
+// determined from the first line of the input.
+func parseJavaProfile(b []byte) (*Profile, error) {
+	// Split off the first line (the profile banner) from the body.
+	h := bytes.SplitAfterN(b, []byte("\n"), 2)
+	if len(h) < 2 {
+		return nil, errUnrecognized
+	}
+
+	p := &Profile{
+		PeriodType: &ValueType{},
+	}
+	header := string(bytes.TrimSpace(h[0]))
+
+	var err error
+	var pType string
+	switch header {
+	case "--- heapz 1 ---":
+		pType = "heap"
+	case "--- contentionz 1 ---":
+		pType = "contention"
+	default:
+		return nil, errUnrecognized
+	}
+
+	if b, err = parseJavaHeader(pType, h[1], p); err != nil {
+		return nil, err
+	}
+	var locs map[uint64]*Location
+	if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
+		return nil, err
+	}
+	if err = parseJavaLocations(b, locs, p); err != nil {
+		return nil, err
+	}
+
+	// Strip out addresses for better merge.
+	if err = p.Aggregate(true, true, true, true, false, false); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// parseJavaHeader parses the attribute section on a java profile and
+// populates a profile. Returns the remainder of the buffer after all
+// attributes. Parsing stops at the first line that is not a valid
+// "name=value" attribute.
+func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
+	nextNewLine := bytes.IndexByte(b, byte('\n'))
+	for nextNewLine != -1 {
+		line := string(bytes.TrimSpace(b[0:nextNewLine]))
+		if line != "" {
+			h := attributeRx.FindStringSubmatch(line)
+			if h == nil {
+				// Not a valid attribute, exit.
+				return b, nil
+			}
+
+			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+			var err error
+			// Attributes are interpreted per profile type.
+			switch pType + "/" + attribute {
+			case "heap/format", "cpu/format", "contention/format":
+				if value != "java" {
+					return nil, errUnrecognized
+				}
+			case "heap/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "inuse_objects", Unit: "count"},
+					{Type: "inuse_space", Unit: value},
+				}
+			case "contention/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "contentions", Unit: "count"},
+					{Type: "delay", Unit: value},
+				}
+			case "contention/sampling period":
+				p.PeriodType = &ValueType{
+					Type: "contentions", Unit: "count",
+				}
+				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+			case "contention/ms since reset":
+				millis, err := strconv.ParseInt(value, 0, 64)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+				// Convert milliseconds to nanoseconds.
+				p.DurationNanos = millis * 1000 * 1000
+			default:
+				return nil, errUnrecognized
+			}
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples, plus a map from address to the Location
+// created for it (shared with the location section that follows).
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+	nextNewLine := bytes.IndexByte(b, byte('\n'))
+	locs := make(map[uint64]*Location)
+	for nextNewLine != -1 {
+		line := string(bytes.TrimSpace(b[0:nextNewLine]))
+		if line != "" {
+			sample := javaSampleRx.FindStringSubmatch(line)
+			if sample == nil {
+				// Not a valid sample, exit.
+				return b, locs, nil
+			}
+
+			// Java profiles have data/fields inverted compared to other
+			// profile types.
+			var err error
+			value1, value2, value3 := sample[2], sample[1], sample[3]
+			addrs, err := parseHexAddresses(value3)
+			if err != nil {
+				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+			}
+
+			// Build the sample's stack, reusing Locations already seen
+			// at the same address.
+			var sloc []*Location
+			for _, addr := range addrs {
+				loc := locs[addr]
+				if locs[addr] == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			s := &Sample{
+				Value:    make([]int64, 2),
+				Location: sloc,
+			}
+
+			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+
+			switch pType {
+			case "heap":
+				const javaHeapzSamplingRate = 524288 // 512K
+				// s.Value[0] (the line's second field) is the divisor
+				// below, so reject zero to avoid dividing by zero.
+				if s.Value[0] == 0 {
+					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+				}
+				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+			case "contention":
+				// Scale contention samples by the sampling period.
+				if period := p.Period; period != 0 {
+					s.Value[0] = s.Value[0] * p.Period
+					s.Value[1] = s.Value[1] * p.Period
+				}
+			}
+			p.Sample = append(p.Sample, s)
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the IDs of the
+// corresponding locations.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
+	r := bytes.NewBuffer(b)
+	// fns deduplicates Functions by name across locations.
+	fns := make(map[string]*Function)
+	for {
+		line, err := r.ReadString('\n')
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+			// io.EOF with a non-empty line: process the final,
+			// unterminated line before stopping.
+			if line == "" {
+				break
+			}
+		}
+
+		if line = strings.TrimSpace(line); line == "" {
+			continue
+		}
+
+		jloc := javaLocationRx.FindStringSubmatch(line)
+		if len(jloc) != 3 {
+			continue
+		}
+		addr, err := strconv.ParseUint(jloc[1], 16, 64)
+		if err != nil {
+			return fmt.Errorf("parsing sample %s: %v", line, err)
+		}
+		loc := locs[addr]
+		if loc == nil {
+			// Unused/unseen
+			continue
+		}
+		var lineFunc, lineFile string
+		var lineNo int64
+
+		if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
+			// Found a line of the form: "function (file:line)"
+			lineFunc, lineFile = fileLine[1], fileLine[2]
+			if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
+				lineNo = n
+			}
+		} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
+			// If there's not a file:line, it's a shared library path.
+			// The path isn't interesting, so just give the .so.
+			lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
+		} else if strings.Contains(jloc[2], "generated stub/JIT") {
+			lineFunc = "STUB"
+		} else {
+			// Treat whole line as the function name. This is used by the
+			// java agent for internal states such as "GC" or "VM".
+			lineFunc = jloc[2]
+		}
+		fn := fns[lineFunc]
+
+		if fn == nil {
+			fn = &Function{
+				Name:       lineFunc,
+				SystemName: lineFunc,
+				Filename:   lineFile,
+			}
+			fns[lineFunc] = fn
+			p.Function = append(p.Function, fn)
+		}
+		loc.Line = []Line{
+			{
+				Function: fn,
+				Line:     lineNo,
+			},
+		}
+		// Addresses were only needed to tie locations to samples;
+		// clear them now that symbol information is attached.
+		loc.Address = 0
+	}
+
+	// Renumber IDs into compact, consecutive ranges.
+	p.remapLocationIDs()
+	p.remapFunctionIDs()
+	p.remapMappingIDs()
+
+	return nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/legacy_profile.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/legacy_profile.go
new file mode 100644
index 00000000000..8d07fd6c27c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/legacy_profile.go
@@ -0,0 +1,1228 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert legacy profiles into the
+// profile.proto format.
+
+package profile
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Regular expressions matching the headers and sample lines of the
+// legacy (pre-protobuf) text profile formats handled by this file.
+var (
+	// Go count profiles (e.g. goroutine, threadcreate): header line and
+	// per-sample "count @ addr addr ..." lines.
+	countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`)
+	countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`)
+
+	// heapz header ("inuse_count: inuse_bytes [alloc_count: alloc_bytes] @ variant/rate")
+	// and sample lines (counts/bytes followed by a hex stack trace).
+	heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
+	heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
+
+	// contentionz sample lines: "delay count @ addr addr ...".
+	contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`)
+
+	hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`)
+
+	growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`)
+
+	fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`)
+
+	threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`)
+	threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
+
+	// Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools.
+	// Recommended format:
+	// Start End object file name offset(optional) linker build id
+	// 0x40000-0x80000 /path/to/binary (@FF00) abc123456
+	spaceDigits = `\s+[[:digit:]]+`
+	hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+`
+	oSpace = `\s*`
+	// Capturing expressions.
+	cHex = `(?:0x)?([[:xdigit:]]+)`
+	cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?`
+	cSpaceString = `(?:\s+(\S+))?`
+	cSpaceHex = `(?:\s+([[:xdigit:]]+))?`
+	cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?`
+	cPerm = `(?:\s+([-rwxp]+))?`
+
+	procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString)
+	briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex)
+
+	// Regular expression to parse log data, of the form:
+	// ... file:line] msg...
+	logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`)
+)
+
+// isSpaceOrComment reports whether line is blank (after trimming) or a
+// '#'-prefixed comment line, both of which legacy parsers skip.
+func isSpaceOrComment(line string) bool {
+	trimmed := strings.TrimSpace(line)
+	return len(trimmed) == 0 || trimmed[0] == '#'
+}
+
+// parseGoCount parses a Go count profile (e.g., threadcreate or
+// goroutine) and returns a new Profile.
+func parseGoCount(b []byte) (*Profile, error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	// Skip comments at the beginning of the file.
+	for s.Scan() && isSpaceOrComment(s.Text()) {
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	// The first real line must be the "<type> profile: total N" header;
+	// the captured <type> names both the period and sample type.
+	m := countStartRE.FindStringSubmatch(s.Text())
+	if m == nil {
+		return nil, errUnrecognized
+	}
+	profileType := m[1]
+	p := &Profile{
+		PeriodType: &ValueType{Type: profileType, Unit: "count"},
+		Period: 1,
+		SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
+	}
+	// Deduplicate locations by address across samples.
+	locations := make(map[uint64]*Location)
+	for s.Scan() {
+		line := s.Text()
+		if isSpaceOrComment(line) {
+			continue
+		}
+		// "---" starts the trailing sections (e.g. memory map); stop sampling.
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		m := countRE.FindStringSubmatch(line)
+		if m == nil {
+			return nil, errMalformed
+		}
+		n, err := strconv.ParseInt(m[1], 0, 64)
+		if err != nil {
+			return nil, errMalformed
+		}
+		fields := strings.Fields(m[2])
+		locs := make([]*Location, 0, len(fields))
+		for _, stk := range fields {
+			addr, err := strconv.ParseUint(stk, 0, 64)
+			if err != nil {
+				return nil, errMalformed
+			}
+			// Adjust all frames by -1 to land on top of the call instruction.
+			addr--
+			loc := locations[addr]
+			if loc == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				locations[addr] = loc
+				p.Location = append(p.Location, loc)
+			}
+			locs = append(locs, loc)
+		}
+		p.Sample = append(p.Sample, &Sample{
+			Location: locs,
+			Value: []int64{n},
+		})
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	// Consume any trailing sections (memory map etc.).
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids. Locations not referenced by any sample are dropped
+// from p.Location.
+func (p *Profile) remapLocationIDs() {
+	seen := make(map[*Location]bool, len(p.Location))
+	var locs []*Location
+
+	for _, s := range p.Sample {
+		for _, l := range s.Location {
+			if seen[l] {
+				continue
+			}
+			// IDs are assigned in first-use order, starting at 1.
+			l.ID = uint64(len(locs) + 1)
+			locs = append(locs, l)
+			seen[l] = true
+		}
+	}
+	p.Location = locs
+}
+
+// remapFunctionIDs assigns sequential IDs (starting at 1) to the
+// functions referenced by location lines, and drops unreferenced
+// functions from p.Function.
+func (p *Profile) remapFunctionIDs() {
+	seen := make(map[*Function]bool, len(p.Function))
+	var fns []*Function
+
+	for _, l := range p.Location {
+		for _, ln := range l.Line {
+			fn := ln.Function
+			if fn == nil || seen[fn] {
+				continue
+			}
+			fn.ID = uint64(len(fns) + 1)
+			fns = append(fns, fn)
+			seen[fn] = true
+		}
+	}
+	p.Function = fns
+}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M), if this ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+	// Some profile handlers will incorrectly set regions for the main
+	// executable if its section is remapped. Fix them through heuristics.
+
+	if len(p.Mapping) > 0 {
+		// Remove the initial mapping if named '/anon_hugepage' and has a
+		// consecutive adjacent mapping.
+		if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+			if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+				p.Mapping = p.Mapping[1:]
+			}
+		}
+	}
+
+	// Subtract the offset from the start of the main mapping if it
+	// ends up at a recognizable start address.
+	if len(p.Mapping) > 0 {
+		// 0x400000 is the conventional load address of the main
+		// executable on x86-64 Linux.
+		const expectedStart = 0x400000
+		if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
+			m.Start = expectedStart
+			m.Offset = 0
+		}
+	}
+
+	// Associate each location with an address to the corresponding
+	// mapping. Create fake mapping if a suitable one isn't found.
+	var fake *Mapping
+nextLocation:
+	for _, l := range p.Location {
+		a := l.Address
+		if l.Mapping != nil || a == 0 {
+			continue
+		}
+		for _, m := range p.Mapping {
+			if m.Start <= a && a < m.Limit {
+				l.Mapping = m
+				continue nextLocation
+			}
+		}
+		// Work around legacy handlers failing to encode the first
+		// part of mappings split into adjacent ranges.
+		for _, m := range p.Mapping {
+			if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start {
+				m.Start -= m.Offset
+				m.Offset = 0
+				l.Mapping = m
+				continue nextLocation
+			}
+		}
+		// If there is still no mapping, create a fake one.
+		// This is important for the Go legacy handler, which produced
+		// no mappings.
+		if fake == nil {
+			fake = &Mapping{
+				ID: 1,
+				Limit: ^uint64(0),
+			}
+			p.Mapping = append(p.Mapping, fake)
+		}
+		l.Mapping = fake
+	}
+
+	// Reset all mapping IDs.
+	for i, m := range p.Mapping {
+		m.ID = uint64(i + 1)
+	}
+}
+
+// cpuInts lists the candidate word decoders (32/64-bit, little/big
+// endian) tried by parseCPU to guess a binary CPU profile's native
+// word size and byte order from its header.
+var cpuInts = []func([]byte) (uint64, []byte){
+	get32l,
+	get32b,
+	get64l,
+	get64b,
+}
+
+// get32l decodes a little-endian 32-bit word from b, returning the
+// value and the remaining bytes, or (0, nil) if b is too short.
+func get32l(b []byte) (uint64, []byte) {
+	if len(b) < 4 {
+		return 0, nil
+	}
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:]
+}
+
+// get32b decodes a big-endian 32-bit word from b, returning the value
+// and the remaining bytes, or (0, nil) if b is too short.
+func get32b(b []byte) (uint64, []byte) {
+	if len(b) < 4 {
+		return 0, nil
+	}
+	return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:]
+}
+
+// get64l decodes a little-endian 64-bit word from b, returning the
+// value and the remaining bytes, or (0, nil) if b is too short.
+func get64l(b []byte) (uint64, []byte) {
+	if len(b) < 8 {
+		return 0, nil
+	}
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:]
+}
+
+// get64b decodes a big-endian 64-bit word from b, returning the value
+// and the remaining bytes, or (0, nil) if b is too short.
+func get64b(b []byte) (uint64, []byte) {
+	if len(b) < 8 {
+		return 0, nil
+	}
+	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:]
+}
+
+// parseCPU parses a profilez legacy profile and returns a newly
+// populated Profile.
+//
+// The general format for profilez samples is a sequence of words in
+// binary format. The first words are a header with the following data:
+//
+//	1st word -- 0
+//	2nd word -- 3
+//	3rd word -- 0 if a c++ application, 1 if a java application.
+//	4th word -- Sampling period (in microseconds).
+//	5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) {
+	var parse func([]byte) (uint64, []byte)
+	var n1, n2, n3, n4, n5 uint64
+	// Try each decoder until the five header words match the expected
+	// pattern; the matching decoder determines word size and endianness
+	// for the rest of the profile.
+	for _, parse = range cpuInts {
+		var tmp []byte
+		n1, tmp = parse(b)
+		n2, tmp = parse(tmp)
+		n3, tmp = parse(tmp)
+		n4, tmp = parse(tmp)
+		n5, tmp = parse(tmp)
+
+		if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
+			// C++ application profile.
+			b = tmp
+			return cpuProfile(b, int64(n4), parse)
+		}
+		if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 {
+			// Java application profile.
+			b = tmp
+			return javaCPUProfile(b, int64(n4), parse)
+		}
+	}
+	return nil, errUnrecognized
+}
+
+// cpuProfile returns a new Profile from C++ profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+	p := &Profile{
+		// period is in microseconds; the profile stores nanoseconds.
+		Period: period * 1000,
+		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+		SampleType: []*ValueType{
+			{Type: "samples", Unit: "count"},
+			{Type: "cpu", Unit: "nanoseconds"},
+		},
+	}
+	var err error
+	if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
+		return nil, err
+	}
+
+	// If *most* samples have the same second-to-the-bottom frame, it
+	// strongly suggests that it is an uninteresting artifact of
+	// measurement -- a stack frame pushed by the signal handler. The
+	// bottom frame is always correct as it is picked up from the signal
+	// structure, not the stack. Check if this is the case and if so,
+	// remove.
+
+	// Remove up to two frames.
+	maxiter := 2
+	// Allow one different sample for this many samples with the same
+	// second-to-last frame.
+	similarSamples := 32
+	margin := len(p.Sample) / similarSamples
+
+	for iter := 0; iter < maxiter; iter++ {
+		// Count occurrences of each second-to-bottom frame address.
+		addr1 := make(map[uint64]int)
+		for _, s := range p.Sample {
+			if len(s.Location) > 1 {
+				a := s.Location[1].Address
+				addr1[a] = addr1[a] + 1
+			}
+		}
+
+		for id1, count := range addr1 {
+			if count >= len(p.Sample)-margin {
+				// Found uninteresting frame, strip it out from all samples
+				for _, s := range p.Sample {
+					if len(s.Location) > 1 && s.Location[1].Address == id1 {
+						s.Location = append(s.Location[:1], s.Location[2:]...)
+					}
+				}
+				break
+			}
+		}
+	}
+
+	// The remaining bytes are the process memory map.
+	if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
+		return nil, err
+	}
+
+	cleanupDuplicateLocations(p)
+	return p, nil
+}
+
+// cleanupDuplicateLocations removes a duplicated leaf frame from each
+// sample's stack, if present.
+func cleanupDuplicateLocations(p *Profile) {
+	// The profile handler may duplicate the leaf frame, because it gets
+	// its address both from stack unwinding and from the signal
+	// context. Detect this and delete the duplicate, which has been
+	// adjusted by -1. The leaf address should not be adjusted as it is
+	// not a call.
+	for _, s := range p.Sample {
+		if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 {
+			s.Location = append(s.Location[:1], s.Location[2:]...)
+		}
+	}
+}
+
+// parseCPUSamples parses a collection of profilez samples from a
+// profile.
+//
+// profilez samples are a repeated sequence of stack frames of the
+// form:
+//
+//	1st word -- The number of times this stack was encountered.
+//	2nd word -- The size of the stack (StackSize).
+//	3rd word -- The first address on the stack.
+//	...
+//	StackSize + 2 -- The last address on the stack
+//
+// The last stack trace is of the form:
+//
+//	1st word -- 0
+//	2nd word -- 1
+//	3rd word -- 0
+//
+// Addresses from stack traces may point to the next instruction after
+// each call. Optionally adjust by -1 to land somewhere on the actual
+// call (except for the leaf, which is not a call).
+//
+// It returns the unconsumed bytes (typically the memory map section),
+// the address-to-Location table it built, and any parse error.
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
+	locs := make(map[uint64]*Location)
+	for len(b) > 0 {
+		var count, nstk uint64
+		count, b = parse(b)
+		nstk, b = parse(b)
+		// Sanity-check the stack size against the remaining bytes to
+		// avoid huge allocations on corrupt input.
+		if b == nil || nstk > uint64(len(b)/4) {
+			return nil, nil, errUnrecognized
+		}
+		var sloc []*Location
+		addrs := make([]uint64, nstk)
+		for i := 0; i < int(nstk); i++ {
+			addrs[i], b = parse(b)
+		}
+
+		if count == 0 && nstk == 1 && addrs[0] == 0 {
+			// End of data marker
+			break
+		}
+		for i, addr := range addrs {
+			if adjust && i > 0 {
+				addr--
+			}
+			loc := locs[addr]
+			if loc == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				locs[addr] = loc
+				p.Location = append(p.Location, loc)
+			}
+			sloc = append(sloc, loc)
+		}
+		p.Sample = append(p.Sample,
+			&Sample{
+				Value: []int64{int64(count), int64(count) * p.Period},
+				Location: sloc,
+			})
+	}
+	// Reached the end without finding the EOD marker.
+	return b, locs, nil
+}
+
+// parseHeap parses a heapz legacy or a growthz profile and
+// returns a newly populated Profile.
+func parseHeap(b []byte) (p *Profile, err error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	if !s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+		return nil, errUnrecognized
+	}
+	p = &Profile{}
+
+	sampling := ""
+	hasAlloc := false
+
+	// The first line identifies the profile flavor: heapz (with
+	// sampling variant and rate), growthz, or fragmentationz.
+	line := s.Text()
+	p.PeriodType = &ValueType{Type: "space", Unit: "bytes"}
+	if header := heapHeaderRE.FindStringSubmatch(line); header != nil {
+		sampling, p.Period, hasAlloc, err = parseHeapHeader(line)
+		if err != nil {
+			return nil, err
+		}
+	} else if header = growthHeaderRE.FindStringSubmatch(line); header != nil {
+		p.Period = 1
+	} else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil {
+		p.Period = 1
+	} else {
+		return nil, errUnrecognized
+	}
+
+	if hasAlloc {
+		// Put alloc before inuse so that default pprof selection
+		// will prefer inuse_space.
+		p.SampleType = []*ValueType{
+			{Type: "alloc_objects", Unit: "count"},
+			{Type: "alloc_space", Unit: "bytes"},
+			{Type: "inuse_objects", Unit: "count"},
+			{Type: "inuse_space", Unit: "bytes"},
+		}
+	} else {
+		p.SampleType = []*ValueType{
+			{Type: "objects", Unit: "count"},
+			{Type: "space", Unit: "bytes"},
+		}
+	}
+
+	locs := make(map[uint64]*Location)
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+
+		if isSpaceOrComment(line) {
+			continue
+		}
+
+		// The memory map sentinel marks the end of the samples.
+		if isMemoryMapSentinel(line) {
+			break
+		}
+
+		value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc)
+		if err != nil {
+			return nil, err
+		}
+
+		var sloc []*Location
+		for _, addr := range addrs {
+			// Addresses from stack traces point to the next instruction after
+			// each call. Adjust by -1 to land somewhere on the actual call.
+			addr--
+			loc := locs[addr]
+			if locs[addr] == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				p.Location = append(p.Location, loc)
+				locs[addr] = loc
+			}
+			sloc = append(sloc, loc)
+		}
+
+		p.Sample = append(p.Sample, &Sample{
+			Value: value,
+			Location: sloc,
+			NumLabel: map[string][]int64{"bytes": {blocksize}},
+		})
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// parseHeapHeader extracts the sampling variant, sampling period, and
+// whether the profile carries separate allocation counters from a
+// heapz header line previously matched by heapHeaderRE.
+func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) {
+	header := heapHeaderRE.FindStringSubmatch(line)
+	if header == nil {
+		return "", 0, false, errUnrecognized
+	}
+
+	if len(header[6]) > 0 {
+		if period, err = strconv.ParseInt(header[6], 10, 64); err != nil {
+			return "", 0, false, errUnrecognized
+		}
+	}
+
+	// Alloc counters are present when they differ from the inuse
+	// counters (and are not simply zero).
+	if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") {
+		hasAlloc = true
+	}
+
+	switch header[5] {
+	case "heapz_v2", "heap_v2":
+		return "v2", period, hasAlloc, nil
+	case "heapprofile":
+		return "", 1, hasAlloc, nil
+	case "heap":
+		// "heap" profiles report the period at twice the sampling rate.
+		return "v2", period / 2, hasAlloc, nil
+	default:
+		return "", 0, false, errUnrecognized
+	}
+}
+
+// parseHeapSample parses a single row from a heap profile into a new Sample.
+// It returns the sample values (alloc then inuse pairs when
+// includeAlloc is set), the average block size, and the stack addresses.
+func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) {
+	sampleData := heapSampleRE.FindStringSubmatch(line)
+	if len(sampleData) != 6 {
+		return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
+	}
+
+	// This is a local-scoped helper function to avoid needing to pass
+	// around rate, sampling and many return parameters.
+	addValues := func(countString, sizeString string, label string) error {
+		count, err := strconv.ParseInt(countString, 10, 64)
+		if err != nil {
+			return fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		size, err := strconv.ParseInt(sizeString, 10, 64)
+		if err != nil {
+			return fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		if count == 0 && size != 0 {
+			return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size)
+		}
+		if count != 0 {
+			blocksize = size / count
+			// v2 profiles are statistically sampled; scale back up to
+			// estimated totals.
+			if sampling == "v2" {
+				count, size = scaleHeapSample(count, size, rate)
+			}
+		}
+		value = append(value, count, size)
+		return nil
+	}
+
+	if includeAlloc {
+		if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil {
+			return nil, 0, nil, err
+		}
+	}
+
+	if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil {
+		return nil, 0, nil, err
+	}
+
+	addrs, err = parseHexAddresses(sampleData[5])
+	if err != nil {
+		return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+
+	return value, blocksize, addrs, nil
+}
+
+// parseHexAddresses extracts hex numbers from a string, attempts to convert
+// each to an unsigned 64-bit number and returns the resulting numbers as a
+// slice, or an error if the string contains hex numbers which are too large to
+// handle (which means a malformed profile).
+func parseHexAddresses(s string) ([]uint64, error) {
+	hexStrings := hexNumberRE.FindAllString(s, -1)
+	var addrs []uint64
+	for _, s := range hexStrings {
+		// Base 0 lets ParseUint honor the "0x" prefix in each match.
+		if addr, err := strconv.ParseUint(s, 0, 64); err == nil {
+			addrs = append(addrs, addr)
+		} else {
+			return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s)
+		}
+	}
+	return addrs, nil
+}
+
+// scaleHeapSample adjusts the data from a heapz Sample to
+// account for its probability of appearing in the collected
+// data. heapz profiles are a sampling of the memory allocations
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. heapz v2 profiles rely on a poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability of a sample of size S to appear in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+	if count == 0 || size == 0 {
+		return 0, 0
+	}
+
+	if rate <= 1 {
+		// if rate==1 all samples were collected so no adjustment is needed.
+		// if rate<1 treat as unknown and skip scaling.
+		return count, size
+	}
+
+	// Scale by the inverse of the per-sample collection probability.
+	avgSize := float64(size) / float64(count)
+	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+	return int64(float64(count) * scale), int64(float64(size) * scale)
+}
+
+// parseContention parses a mutex or contention profile. There are 2 cases:
+// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
+// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime.
+func parseContention(b []byte) (*Profile, error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	if !s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+		return nil, errUnrecognized
+	}
+
+	// Accept any of the known header markers; reject everything else.
+	switch l := s.Text(); {
+	case strings.HasPrefix(l, "--- contentionz "):
+	case strings.HasPrefix(l, "--- mutex:"):
+	case strings.HasPrefix(l, "--- contention:"):
+	default:
+		return nil, errUnrecognized
+	}
+
+	p := &Profile{
+		PeriodType: &ValueType{Type: "contentions", Unit: "count"},
+		Period: 1,
+		SampleType: []*ValueType{
+			{Type: "contentions", Unit: "count"},
+			{Type: "delay", Unit: "nanoseconds"},
+		},
+	}
+
+	var cpuHz int64
+	// Parse text of the form "attribute = value" before the samples.
+	const delimiter = "="
+	for s.Scan() {
+		line := s.Text()
+		if line = strings.TrimSpace(line); isSpaceOrComment(line) {
+			continue
+		}
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		attr := strings.SplitN(line, delimiter, 2)
+		if len(attr) != 2 {
+			break
+		}
+		key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
+		var err error
+		switch key {
+		case "cycles/second":
+			if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
+				return nil, errUnrecognized
+			}
+		case "sampling period":
+			if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
+				return nil, errUnrecognized
+			}
+		case "ms since reset":
+			ms, err := strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return nil, errUnrecognized
+			}
+			p.DurationNanos = ms * 1000 * 1000
+		case "format":
+			// CPP contentionz profiles don't have format.
+			return nil, errUnrecognized
+		case "resolution":
+			// CPP contentionz profiles don't have resolution.
+			return nil, errUnrecognized
+		case "discarded samples":
+		default:
+			return nil, errUnrecognized
+		}
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	locs := make(map[uint64]*Location)
+	// Note: the attribute loop above already consumed the first sample
+	// line (or the "---" terminator), so process s.Text() before scanning.
+	for {
+		line := strings.TrimSpace(s.Text())
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		if !isSpaceOrComment(line) {
+			value, addrs, err := parseContentionSample(line, p.Period, cpuHz)
+			if err != nil {
+				return nil, err
+			}
+			var sloc []*Location
+			for _, addr := range addrs {
+				// Addresses from stack traces point to the next instruction after
+				// each call. Adjust by -1 to land somewhere on the actual call.
+				addr--
+				loc := locs[addr]
+				if locs[addr] == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			p.Sample = append(p.Sample, &Sample{
+				Value: value,
+				Location: sloc,
+			})
+		}
+		if !s.Scan() {
+			break
+		}
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// parseContentionSample parses a single row from a contention profile
+// into a new Sample. It returns the sample values as
+// {contentions, delay} and the stack addresses.
+func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
+	sampleData := contentionSampleRE.FindStringSubmatch(line)
+	if sampleData == nil {
+		return nil, nil, errUnrecognized
+	}
+
+	v1, err := strconv.ParseInt(sampleData[1], 10, 64)
+	if err != nil {
+		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+	v2, err := strconv.ParseInt(sampleData[2], 10, 64)
+	if err != nil {
+		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+
+	// Unsample values if period and cpuHz are available.
+	// - Delays are scaled to cycles and then to nanoseconds.
+	// - Contentions are scaled to cycles.
+	if period > 0 {
+		if cpuHz > 0 {
+			cpuGHz := float64(cpuHz) / 1e9
+			v1 = int64(float64(v1) * float64(period) / cpuGHz)
+		}
+		v2 = v2 * period
+	}
+
+	value = []int64{v2, v1}
+	addrs, err = parseHexAddresses(sampleData[3])
+	if err != nil {
+		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+
+	return value, addrs, nil
+}
+
+// parseThread parses a Threadz profile and returns a new Profile.
+func parseThread(b []byte) (*Profile, error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	// Skip past comments and empty lines seeking a real header.
+	for s.Scan() && isSpaceOrComment(s.Text()) {
+	}
+
+	line := s.Text()
+	if m := threadzStartRE.FindStringSubmatch(line); m != nil {
+		// Advance over initial comments until first stack trace.
+		for s.Scan() {
+			if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") {
+				break
+			}
+		}
+	} else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+		return nil, errUnrecognized
+	}
+
+	p := &Profile{
+		SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
+		PeriodType: &ValueType{Type: "thread", Unit: "count"},
+		Period: 1,
+	}
+
+	locs := make(map[uint64]*Location)
+	// Recognize each thread and populate profile samples.
+	for !isMemoryMapSentinel(line) {
+		if strings.HasPrefix(line, "---- no stack trace for") {
+			break
+		}
+		if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+			return nil, errUnrecognized
+		}
+
+		var addrs []uint64
+		var err error
+		line, addrs, err = parseThreadSample(s)
+		if err != nil {
+			return nil, err
+		}
+		if len(addrs) == 0 {
+			// We got a --same as previous threads--. Bump counters.
+			if len(p.Sample) > 0 {
+				s := p.Sample[len(p.Sample)-1]
+				s.Value[0]++
+			}
+			continue
+		}
+
+		var sloc []*Location
+		for i, addr := range addrs {
+			// Addresses from stack traces point to the next instruction after
+			// each call. Adjust by -1 to land somewhere on the actual call
+			// (except for the leaf, which is not a call).
+			if i > 0 {
+				addr--
+			}
+			loc := locs[addr]
+			if locs[addr] == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				p.Location = append(p.Location, loc)
+				locs[addr] = loc
+			}
+			sloc = append(sloc, loc)
+		}
+
+		// Each stack is one thread.
+		p.Sample = append(p.Sample, &Sample{
+			Value: []int64{1},
+			Location: sloc,
+		})
+	}
+
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+
+	cleanupDuplicateLocations(p)
+	return p, nil
+}
+
+// parseThreadSample parses a symbolized or unsymbolized stack trace.
+// Returns the first line after the traceback, the sample (or nil if
+// it hits a 'same-as-previous' marker) and an error.
+func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) {
+	var line string
+	sameAsPrevious := false
+	for s.Scan() {
+		line = strings.TrimSpace(s.Text())
+		if line == "" {
+			continue
+		}
+
+		// "---" starts the next thread header; the traceback is done.
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		if strings.Contains(line, "same as previous thread") {
+			sameAsPrevious = true
+			continue
+		}
+
+		curAddrs, err := parseHexAddresses(line)
+		if err != nil {
+			return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		addrs = append(addrs, curAddrs...)
+	}
+	if err := s.Err(); err != nil {
+		return "", nil, err
+	}
+	if sameAsPrevious {
+		return line, nil, nil
+	}
+	return line, addrs, nil
+}
+
+// parseAdditionalSections parses any additional sections in the
+// profile, ignoring any unrecognized sections.
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+	// Skip forward until a memory map sentinel (or EOF).
+	for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+	}
+	if err := s.Err(); err != nil {
+		return err
+	}
+	return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// ParseMemoryMap should be called after setting on a profile to
+// associate locations to the corresponding mapping based on their
+// address.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+	s := bufio.NewScanner(rd)
+	return parseProcMapsFromScanner(s)
+}
+
+// parseProcMapsFromScanner reads mapping entries line by line,
+// silently skipping unrecognized lines. Lines of the form "attr=value"
+// define substitutions: later occurrences of "$attr" are replaced by
+// value before parsing.
+func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
+	var mapping []*Mapping
+
+	var attrs []string
+	const delimiter = "="
+	r := strings.NewReplacer()
+	for s.Scan() {
+		line := r.Replace(removeLoggingInfo(s.Text()))
+		m, err := parseMappingEntry(line)
+		if err != nil {
+			if err == errUnrecognized {
+				// Recognize assignments of the form: attr=value, and replace
+				// $attr with value on subsequent mappings.
+				if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
+					attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+					r = strings.NewReplacer(attrs...)
+				}
+				// Ignore any unrecognized entries
+				continue
+			}
+			return nil, err
+		}
+		// A nil mapping with nil error means a valid but skippable
+		// entry (e.g. non-executable).
+		if m == nil {
+			continue
+		}
+		mapping = append(mapping, m)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return mapping, nil
+}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+	// Strip everything up to and including the "file:line] " prefix.
+	if match := logInfoRE.FindStringIndex(line); match != nil {
+		return line[match[1]:]
+	}
+	return line
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+	return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
+}
+
+// ParseMemoryMapFromScanner parses a memory map in the format of
+// /proc/self/maps or a variety of legacy format, and overrides the
+// mappings in the current profile. It renumbers the samples and
+// locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
+	mapping, err := parseProcMapsFromScanner(s)
+	if err != nil {
+		return err
+	}
+	p.Mapping = append(p.Mapping, mapping...)
+	p.massageMappings()
+	// Renumber IDs and drop unreferenced entries now that the mapping
+	// table has changed.
+	p.remapLocationIDs()
+	p.remapFunctionIDs()
+	p.remapMappingIDs()
+	return nil
+}
+
+// parseMappingEntry parses one line of a memory map into a Mapping.
+// It returns (nil, nil) for valid but non-executable entries,
+// errUnrecognized for lines that match neither supported format.
+func parseMappingEntry(l string) (*Mapping, error) {
+	var start, end, perm, file, offset, buildID string
+	// Try the /proc/.../maps format first, then the brief legacy format.
+	if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
+		start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
+	} else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
+		start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
+	} else {
+		return nil, errUnrecognized
+	}
+
+	var err error
+	mapping := &Mapping{
+		File: file,
+		BuildID: buildID,
+	}
+	if perm != "" && !strings.Contains(perm, "x") {
+		// Skip non-executable entries.
+		return nil, nil
+	}
+	if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil {
+		return nil, errUnrecognized
+	}
+	if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil {
+		return nil, errUnrecognized
+	}
+	if offset != "" {
+		if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil {
+			return nil, errUnrecognized
+		}
+	}
+	return mapping, nil
+}
+
+// memoryMapSentinels are the header strings that introduce the memory
+// map section in legacy profiles.
+var memoryMapSentinels = []string{
+	"--- Memory map: ---",
+	"MAPPED_LIBRARIES:",
+}
+
+// isMemoryMapSentinel returns true if the string contains one of the
+// known sentinels for memory map information.
+func isMemoryMapSentinel(line string) bool {
+	for _, s := range memoryMapSentinels {
+		if strings.Contains(line, s) {
+			return true
+		}
+	}
+	return false
+}
+
+// addLegacyFrameInfo sets the profile's DropFrames/KeepFrames regexps
+// based on its sample types, so that allocator, lock, or profiler
+// internal frames are hidden by default.
+func (p *Profile) addLegacyFrameInfo() {
+	switch {
+	case isProfileType(p, heapzSampleTypes):
+		p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
+	case isProfileType(p, contentionzSampleTypes):
+		p.DropFrames, p.KeepFrames = lockRxStr, ""
+	default:
+		p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
+	}
+}
+
+// heapzSampleTypes lists the sample-type signatures that identify a
+// profile as a heap profile.
+var heapzSampleTypes = [][]string{
+	{"allocations", "size"}, // early Go pprof profiles
+	{"objects", "space"},
+	{"inuse_objects", "inuse_space"},
+	{"alloc_objects", "alloc_space"},
+	{"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles
+}
+
+// contentionzSampleTypes identifies contention/mutex profiles.
+var contentionzSampleTypes = [][]string{
+	{"contentions", "delay"},
+}
+
+// isProfileType reports whether p's sample types exactly match, in
+// order, any of the given type-name signatures.
+func isProfileType(p *Profile, types [][]string) bool {
+	st := p.SampleType
+nextType:
+	for _, t := range types {
+		if len(st) != len(t) {
+			continue
+		}
+
+		for i := range st {
+			if st[i].Type != t[i] {
+				continue nextType
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// allocRxStr matches allocator entry-point function names; these
+// frames are dropped from heap profiles by default (DropFrames).
+var allocRxStr = strings.Join([]string{
+	// POSIX entry points.
+	`calloc`,
+	`cfree`,
+	`malloc`,
+	`free`,
+	`memalign`,
+	`do_memalign`,
+	`(__)?posix_memalign`,
+	`pvalloc`,
+	`valloc`,
+	`realloc`,
+
+	// TC malloc.
+	`tcmalloc::.*`,
+	`tc_calloc`,
+	`tc_cfree`,
+	`tc_malloc`,
+	`tc_free`,
+	`tc_memalign`,
+	`tc_posix_memalign`,
+	`tc_pvalloc`,
+	`tc_valloc`,
+	`tc_realloc`,
+	`tc_new`,
+	`tc_delete`,
+	`tc_newarray`,
+	`tc_deletearray`,
+	`tc_new_nothrow`,
+	`tc_newarray_nothrow`,
+
+	// Memory-allocation routines on OS X.
+	`malloc_zone_malloc`,
+	`malloc_zone_calloc`,
+	`malloc_zone_valloc`,
+	`malloc_zone_realloc`,
+	`malloc_zone_memalign`,
+	`malloc_zone_free`,
+
+	// Go runtime
+	`runtime\..*`,
+
+	// Other misc. memory allocation routines
+	`BaseArena::.*`,
+	`(::)?do_malloc_no_errno`,
+	`(::)?do_malloc_pages`,
+	`(::)?do_malloc`,
+	`DoSampledAllocation`,
+	`MallocedMemBlock::MallocedMemBlock`,
+	`_M_allocate`,
+	`__builtin_(vec_)?delete`,
+	`__builtin_(vec_)?new`,
+	`__gnu_cxx::new_allocator::allocate`,
+	`__libc_malloc`,
+	`__malloc_alloc_template::allocate`,
+	`allocate`,
+	`cpp_alloc`,
+	`operator new(\[\])?`,
+	`simple_alloc::allocate`,
+}, `|`)
+
+// allocSkipRxStr matches frames that must be kept (KeepFrames) even
+// though allocRxStr would otherwise drop them.
+var allocSkipRxStr = strings.Join([]string{
+	// Preserve Go runtime frames that appear in the middle/bottom of
+	// the stack.
+	`runtime\.panic`,
+	`runtime\.reflectcall`,
+	`runtime\.call[0-9]*`,
+}, `|`)
+
+// cpuProfilerRxStr matches CPU-profiler internal frames (signal
+// handlers and sample recorders) dropped from CPU profiles by default.
+var cpuProfilerRxStr = strings.Join([]string{
+	`ProfileData::Add`,
+	`ProfileData::prof_handler`,
+	`CpuProfiler::prof_handler`,
+	`__pthread_sighandler`,
+	`__restore`,
+}, `|`)
+
+// lockRxStr matches lock/mutex implementation frames dropped from
+// contention profiles by default.
+var lockRxStr = strings.Join([]string{
+	`RecordLockProfileData`,
+	`(base::)?RecordLockProfileData.*`,
+	`(base::)?SubmitMutexProfileData.*`,
+	`(base::)?SubmitSpinLockProfileData.*`,
+	`(base::Mutex::)?AwaitCommon.*`,
+	`(base::Mutex::)?Unlock.*`,
+	`(base::Mutex::)?UnlockSlow.*`,
+	`(base::Mutex::)?ReaderUnlock.*`,
+	`(base::MutexLock::)?~MutexLock.*`,
+	`(Mutex::)?AwaitCommon.*`,
+	`(Mutex::)?Unlock.*`,
+	`(Mutex::)?UnlockSlow.*`,
+	`(Mutex::)?ReaderUnlock.*`,
+	`(MutexLock::)?~MutexLock.*`,
+	`(SpinLock::)?Unlock.*`,
+	`(SpinLock::)?SlowUnlock.*`,
+	`(SpinLockHolder::)?~SpinLockHolder.*`,
+}, `|`)
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/merge.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/merge.go
new file mode 100644
index 00000000000..eee0132e740
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/merge.go
@@ -0,0 +1,669 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Compact performs garbage collection on a profile to remove any
+// unreferenced fields. This is useful to reduce the size of a profile
+// after samples or locations have been removed.
+func (p *Profile) Compact() *Profile {
+ // Merging a single profile rebuilds every table from the reachable
+ // entities only, dropping anything unreferenced; error is impossible
+ // with exactly one input.
+ p, _ = Merge([]*Profile{p})
+ return p
+}
+
+// Merge merges all the profiles in srcs into a single Profile.
+// Returns a new profile independent of the input profiles. The merged
+// profile is compacted to eliminate unused samples, locations,
+// functions and mappings. Profiles must have identical profile sample
+// and period types or the merge will fail. profile.Period of the
+// resulting profile will be the maximum of all profiles, and
+// profile.TimeNanos will be the earliest nonzero one. Merges are
+// associative with the caveat of the first profile having some
+// specialization in how headers are combined. There may be other
+// subtleties now or in the future regarding associativity.
+func Merge(srcs []*Profile) (*Profile, error) {
+ if len(srcs) == 0 {
+ return nil, fmt.Errorf("no profiles to merge")
+ }
+ p, err := combineHeaders(srcs)
+ if err != nil {
+ return nil, err
+ }
+
+ // The first profile's sizes serve as capacity hints for the
+ // cross-profile memoization tables.
+ pm := &profileMerger{
+ p: p,
+ samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
+ locations: make(map[locationKey]*Location, len(srcs[0].Location)),
+ functions: make(map[functionKey]*Function, len(srcs[0].Function)),
+ mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
+ }
+
+ for _, src := range srcs {
+ // Clear the profile-specific hash tables
+ pm.locationsByID = makeLocationIDMap(len(src.Location))
+ pm.functionsByID = make(map[uint64]*Function, len(src.Function))
+ pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
+
+ if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
+ // The Mapping list has the property that the first mapping
+ // represents the main binary. Take the first Mapping we see,
+ // otherwise the operations below will add mappings in an
+ // arbitrary order.
+ pm.mapMapping(src.Mapping[0])
+ }
+
+ for _, s := range src.Sample {
+ if !isZeroSample(s) {
+ pm.mapSample(s)
+ }
+ }
+ }
+
+ for _, s := range p.Sample {
+ if isZeroSample(s) {
+ // If there are any zero samples, re-merge the profile to GC
+ // them.
+ return Merge([]*Profile{p})
+ }
+ }
+
+ return p, nil
+}
+
+// Normalize normalizes the source profile by multiplying each value in profile by the
+// ratio of the sum of the base profile's values of that sample type to the sum of the
+// source profile's value of that sample type.
+func (p *Profile) Normalize(pb *Profile) error {
+
+ if err := p.compatible(pb); err != nil {
+ return err
+ }
+
+ // Per-sample-type totals for the base profile pb.
+ baseVals := make([]int64, len(p.SampleType))
+ for _, s := range pb.Sample {
+ for i, v := range s.Value {
+ baseVals[i] += v
+ }
+ }
+
+ // Per-sample-type totals for the source profile p.
+ srcVals := make([]int64, len(p.SampleType))
+ for _, s := range p.Sample {
+ for i, v := range s.Value {
+ srcVals[i] += v
+ }
+ }
+
+ // Scale each sample type by base/src; a zero source total maps to a
+ // zero scale to avoid dividing by zero.
+ normScale := make([]float64, len(baseVals))
+ for i := range baseVals {
+ if srcVals[i] == 0 {
+ normScale[i] = 0.0
+ } else {
+ normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
+ }
+ }
+ p.ScaleN(normScale)
+ return nil
+}
+
+// isZeroSample reports whether every value in s is zero. Such samples
+// contribute nothing and are dropped during merging.
+func isZeroSample(s *Sample) bool {
+ for _, v := range s.Value {
+ if v != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// profileMerger accumulates entities from several source profiles into
+// a single output profile p, deduplicating via the memoization tables.
+type profileMerger struct {
+ p *Profile
+
+ // Memoization tables within a profile. Keyed by the source
+ // profile's IDs; reset before each source profile is processed.
+ locationsByID locationIDMap
+ functionsByID map[uint64]*Function
+ mappingsByID map[uint64]mapInfo
+
+ // Memoization tables for profile entities. Keyed by canonical
+ // content keys; shared across all source profiles.
+ samples map[sampleKey]*Sample
+ locations map[locationKey]*Location
+ functions map[functionKey]*Function
+ mappings map[mappingKey]*Mapping
+}
+
+// mapInfo pairs a merged mapping with the delta to add to a source
+// address to translate it into the merged mapping's address range.
+type mapInfo struct {
+ m *Mapping
+ offset int64
+}
+
+// mapSample returns the merged-profile sample corresponding to src.
+// An existing equivalent sample has src's values accumulated into it;
+// otherwise a new sample is created with deep-copied labels so the
+// result does not alias the source profile.
+func (pm *profileMerger) mapSample(src *Sample) *Sample {
+ // Check memoization table
+ k := pm.sampleKey(src)
+ if ss, ok := pm.samples[k]; ok {
+ for i, v := range src.Value {
+ ss.Value[i] += v
+ }
+ return ss
+ }
+
+ // Make new sample.
+ s := &Sample{
+ Location: make([]*Location, len(src.Location)),
+ Value: make([]int64, len(src.Value)),
+ Label: make(map[string][]string, len(src.Label)),
+ NumLabel: make(map[string][]int64, len(src.NumLabel)),
+ NumUnit: make(map[string][]string, len(src.NumLabel)),
+ }
+ for i, l := range src.Location {
+ s.Location[i] = pm.mapLocation(l)
+ }
+ for k, v := range src.Label {
+ vv := make([]string, len(v))
+ copy(vv, v)
+ s.Label[k] = vv
+ }
+ for k, v := range src.NumLabel {
+ u := src.NumUnit[k]
+ vv := make([]int64, len(v))
+ uu := make([]string, len(u))
+ copy(vv, v)
+ copy(uu, u)
+ s.NumLabel[k] = vv
+ s.NumUnit[k] = uu
+ }
+ copy(s.Value, src.Value)
+ pm.samples[k] = s
+ pm.p.Sample = append(pm.p.Sample, s)
+ return s
+}
+
+// sampleKey builds a canonical byte encoding of a sample's (remapped)
+// location IDs and its string/numeric labels, used to deduplicate
+// samples across profiles. Label keys are visited in sorted order so
+// equal samples always produce equal keys.
+func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
+ // Accumulate contents into a string.
+ var buf strings.Builder
+ buf.Grow(64) // Heuristic to avoid extra allocs
+
+ // encode a number
+ putNumber := func(v uint64) {
+ var num [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(num[:], v)
+ buf.Write(num[:n])
+ }
+
+ // encode a string prefixed with its length.
+ putDelimitedString := func(s string) {
+ putNumber(uint64(len(s)))
+ buf.WriteString(s)
+ }
+
+ for _, l := range sample.Location {
+ // Get the location in the merged profile, which may have a different ID.
+ if loc := pm.mapLocation(l); loc != nil {
+ putNumber(loc.ID)
+ }
+ }
+ putNumber(0) // Delimiter
+
+ for _, l := range sortedKeys1(sample.Label) {
+ putDelimitedString(l)
+ values := sample.Label[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putDelimitedString(v)
+ }
+ }
+
+ for _, l := range sortedKeys2(sample.NumLabel) {
+ putDelimitedString(l)
+ values := sample.NumLabel[l]
+ putNumber(uint64(len(values)))
+ for _, v := range values {
+ putNumber(uint64(v))
+ }
+ units := sample.NumUnit[l]
+ putNumber(uint64(len(units)))
+ for _, v := range units {
+ putDelimitedString(v)
+ }
+ }
+
+ return sampleKey(buf.String())
+}
+
+// sampleKey is the canonical encoding produced by profileMerger.sampleKey.
+type sampleKey string
+
+// sortedKeys1 returns the sorted keys found in a string->[]string map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys2 and made into a generic function.
+func sortedKeys1(m map[string][]string) []string {
+ // Returning nil for empty maps avoids an allocation in the common
+ // no-label case.
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
+//
+// Note: this is currently non-generic since github pprof runs golint,
+// which does not support generics. When that issue is fixed, it can
+// be merged with sortedKeys1 and made into a generic function.
+func sortedKeys2(m map[string][]int64) []string {
+ if len(m) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// mapLocation returns the merged-profile location corresponding to src,
+// creating it (and remapping its mapping and lines) on first use. Both
+// the per-profile ID table and the content-keyed table are consulted so
+// identical locations from different profiles collapse to one entry.
+func (pm *profileMerger) mapLocation(src *Location) *Location {
+ if src == nil {
+ return nil
+ }
+
+ if l := pm.locationsByID.get(src.ID); l != nil {
+ return l
+ }
+
+ mi := pm.mapMapping(src.Mapping)
+ l := &Location{
+ ID: uint64(len(pm.p.Location) + 1),
+ Mapping: mi.m,
+ Address: uint64(int64(src.Address) + mi.offset),
+ Line: make([]Line, len(src.Line)),
+ IsFolded: src.IsFolded,
+ }
+ for i, ln := range src.Line {
+ l.Line[i] = pm.mapLine(ln)
+ }
+ // Check memoization table. Must be done on the remapped location to
+ // account for the remapped mapping ID.
+ k := l.key()
+ if ll, ok := pm.locations[k]; ok {
+ pm.locationsByID.set(src.ID, ll)
+ return ll
+ }
+ pm.locationsByID.set(src.ID, l)
+ pm.locations[k] = l
+ pm.p.Location = append(pm.p.Location, l)
+ return l
+}
+
+// key generates locationKey to be used as a key for maps.
+func (l *Location) key() locationKey {
+ key := locationKey{
+ addr: l.Address,
+ isFolded: l.IsFolded,
+ }
+ if l.Mapping != nil {
+ // Normalizes address to handle address space randomization.
+ key.addr -= l.Mapping.Start
+ key.mappingID = l.Mapping.ID
+ }
+ // Each Line contributes three hex-encoded slots: function ID, line
+ // number, column number. Index with a stride of 3 to match the
+ // allocated size; the previous i*2 indexing made consecutive lines
+ // overwrite each other's slots (and left the tail empty), so
+ // distinct locations could produce identical keys.
+ lines := make([]string, len(l.Line)*3)
+ for i, line := range l.Line {
+ if line.Function != nil {
+ lines[i*3] = strconv.FormatUint(line.Function.ID, 16)
+ }
+ lines[i*3+1] = strconv.FormatInt(line.Line, 16)
+ lines[i*3+2] = strconv.FormatInt(line.Column, 16)
+ }
+ key.lines = strings.Join(lines, "|")
+ return key
+}
+
+// locationKey is the content key for deduplicating locations: the
+// mapping-relative address, owning mapping ID, encoded line info, and
+// the folded flag.
+type locationKey struct {
+ addr, mappingID uint64
+ lines string
+ isFolded bool
+}
+
+// mapMapping returns the merged-profile mapping (plus the address
+// offset between the source and merged ranges) for src, creating a new
+// mapping on first sight of its content key. A nil src yields a zero
+// mapInfo.
+func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
+ if src == nil {
+ return mapInfo{}
+ }
+
+ if mi, ok := pm.mappingsByID[src.ID]; ok {
+ return mi
+ }
+
+ // Check memoization tables.
+ mk := src.key()
+ if m, ok := pm.mappings[mk]; ok {
+ // Same content already merged; record how far src's addresses
+ // must be shifted to land in the merged mapping's range.
+ mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+ }
+ m := &Mapping{
+ ID: uint64(len(pm.p.Mapping) + 1),
+ Start: src.Start,
+ Limit: src.Limit,
+ Offset: src.Offset,
+ File: src.File,
+ KernelRelocationSymbol: src.KernelRelocationSymbol,
+ BuildID: src.BuildID,
+ HasFunctions: src.HasFunctions,
+ HasFilenames: src.HasFilenames,
+ HasLineNumbers: src.HasLineNumbers,
+ HasInlineFrames: src.HasInlineFrames,
+ }
+ pm.p.Mapping = append(pm.p.Mapping, m)
+
+ // Update memoization tables.
+ pm.mappings[mk] = m
+ mi := mapInfo{m, 0}
+ pm.mappingsByID[src.ID] = mi
+ return mi
+}
+
+// key generates encoded strings of Mapping to be used as a key for
+// maps.
+func (m *Mapping) key() mappingKey {
+ // Normalize addresses to handle address space randomization.
+ // Round up to next 4K boundary to avoid minor discrepancies.
+ const mapsizeRounding = 0x1000
+
+ size := m.Limit - m.Start
+ size = size + mapsizeRounding - 1
+ size = size - (size % mapsizeRounding)
+ key := mappingKey{
+ size: size,
+ offset: m.Offset,
+ }
+
+ // Prefer the build ID over the file name: it survives file renames
+ // and distinguishes rebuilt binaries at the same path.
+ switch {
+ case m.BuildID != "":
+ key.buildIDOrFile = m.BuildID
+ case m.File != "":
+ key.buildIDOrFile = m.File
+ default:
+ // A mapping containing neither build ID nor file name is a fake mapping. A
+ // key with empty buildIDOrFile is used for fake mappings so that they are
+ // treated as the same mapping during merging.
+ }
+ return key
+}
+
+// mappingKey is the content key for deduplicating mappings: rounded
+// size, file offset, and build ID (or file name as a fallback).
+type mappingKey struct {
+ size, offset uint64
+ buildIDOrFile string
+}
+
+// mapLine returns a copy of src whose Function has been remapped into
+// the merged profile; line and column numbers pass through unchanged.
+func (pm *profileMerger) mapLine(src Line) Line {
+ ln := Line{
+ Function: pm.mapFunction(src.Function),
+ Line: src.Line,
+ Column: src.Column,
+ }
+ return ln
+}
+
+// mapFunction returns the merged-profile function corresponding to src,
+// consulting first the per-profile ID table and then the content-keyed
+// table before creating a new entry with a fresh sequential ID.
+func (pm *profileMerger) mapFunction(src *Function) *Function {
+ if src == nil {
+ return nil
+ }
+ if f, ok := pm.functionsByID[src.ID]; ok {
+ return f
+ }
+ k := src.key()
+ if f, ok := pm.functions[k]; ok {
+ pm.functionsByID[src.ID] = f
+ return f
+ }
+ f := &Function{
+ ID: uint64(len(pm.p.Function) + 1),
+ Name: src.Name,
+ SystemName: src.SystemName,
+ Filename: src.Filename,
+ StartLine: src.StartLine,
+ }
+ pm.functions[k] = f
+ pm.functionsByID[src.ID] = f
+ pm.p.Function = append(pm.p.Function, f)
+ return f
+}
+
+// key generates a struct to be used as a key for maps.
+func (f *Function) key() functionKey {
+ return functionKey{
+ f.StartLine,
+ f.Name,
+ f.SystemName,
+ f.Filename,
+ }
+}
+
+// functionKey is the content key for deduplicating functions; it covers
+// every field of Function except the ID.
+type functionKey struct {
+ startLine int64
+ name, systemName, fileName string
+}
+
+// combineHeaders checks that all profiles can be merged and returns
+// their combined profile. TimeNanos is the earliest nonzero start time,
+// DurationNanos is the sum of durations, Period is the maximum period,
+// comments are deduplicated preserving first-seen order, and sample
+// types / drop-keep frames come from the first profile.
+func combineHeaders(srcs []*Profile) (*Profile, error) {
+ for _, s := range srcs[1:] {
+ if err := srcs[0].compatible(s); err != nil {
+ return nil, err
+ }
+ }
+
+ var timeNanos, durationNanos, period int64
+ var comments []string
+ seenComments := map[string]bool{}
+ var defaultSampleType string
+ for _, s := range srcs {
+ if timeNanos == 0 || s.TimeNanos < timeNanos {
+ timeNanos = s.TimeNanos
+ }
+ durationNanos += s.DurationNanos
+ if period == 0 || period < s.Period {
+ period = s.Period
+ }
+ for _, c := range s.Comments {
+ if seen := seenComments[c]; !seen {
+ comments = append(comments, c)
+ seenComments[c] = true
+ }
+ }
+ // First nonempty default sample type wins.
+ if defaultSampleType == "" {
+ defaultSampleType = s.DefaultSampleType
+ }
+ }
+
+ p := &Profile{
+ SampleType: make([]*ValueType, len(srcs[0].SampleType)),
+
+ DropFrames: srcs[0].DropFrames,
+ KeepFrames: srcs[0].KeepFrames,
+
+ TimeNanos: timeNanos,
+ DurationNanos: durationNanos,
+ PeriodType: srcs[0].PeriodType,
+ Period: period,
+
+ Comments: comments,
+ DefaultSampleType: defaultSampleType,
+ }
+ copy(p.SampleType, srcs[0].SampleType)
+ return p, nil
+}
+
+// compatible determines if two profiles can be compared/merged.
+// returns nil if the profiles are compatible; otherwise an error with
+// details on the incompatibility. Compatibility requires equal period
+// types and pairwise-equal sample types in the same order.
+func (p *Profile) compatible(pb *Profile) error {
+ if !equalValueType(p.PeriodType, pb.PeriodType) {
+ return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+ }
+
+ if len(p.SampleType) != len(pb.SampleType) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+
+ for i := range p.SampleType {
+ if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
+ return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+ }
+ }
+ return nil
+}
+
+// equalValueType returns true if the two value types are semantically
+// equal. It ignores the internal fields used during encode/decode.
+func equalValueType(st1, st2 *ValueType) bool {
+ return st1.Type == st2.Type && st1.Unit == st2.Unit
+}
+
+// locationIDMap is like a map[uint64]*Location, but provides efficiency for
+// ids that are densely numbered, which is often the case.
+type locationIDMap struct {
+ dense []*Location // indexed by id for id < len(dense)
+ sparse map[uint64]*Location // indexed by id for id >= len(dense)
+}
+
+// makeLocationIDMap returns a map sized for n densely numbered IDs;
+// anything beyond n spills into the sparse map.
+func makeLocationIDMap(n int) locationIDMap {
+ return locationIDMap{
+ dense: make([]*Location, n),
+ sparse: map[uint64]*Location{},
+ }
+}
+
+// get returns the location stored under id, or nil if absent.
+func (lm locationIDMap) get(id uint64) *Location {
+ if id < uint64(len(lm.dense)) {
+ return lm.dense[int(id)]
+ }
+ return lm.sparse[id]
+}
+
+// set stores loc under id, choosing the dense slice when id fits.
+func (lm locationIDMap) set(id uint64, loc *Location) {
+ if id < uint64(len(lm.dense)) {
+ lm.dense[id] = loc
+ return
+ }
+ lm.sparse[id] = loc
+}
+
+// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It
+// keeps sample types that appear in all profiles only and drops/reorders the
+// sample types as necessary.
+//
+// In the case of sample types order is not the same for given profiles the
+// order is derived from the first profile.
+//
+// Profiles are modified in-place.
+//
+// It returns an error if the sample type's intersection is empty.
+func CompatibilizeSampleTypes(ps []*Profile) error {
+ sTypes := commonSampleTypes(ps)
+ if len(sTypes) == 0 {
+ return fmt.Errorf("profiles have empty common sample type list")
+ }
+ // Rewrite each profile to the common sample-type list; an error here
+ // may leave earlier profiles already rewritten.
+ for _, p := range ps {
+ if err := compatibilizeSampleTypes(p, sTypes); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// commonSampleTypes returns sample types that appear in all profiles in the
+// order how they ordered in the first profile.
+//
+// NOTE(review): the count-based intersection assumes sample type names
+// are unique within each profile; a duplicated type in one profile
+// could be miscounted — confirm against upstream expectations.
+func commonSampleTypes(ps []*Profile) []string {
+ if len(ps) == 0 {
+ return nil
+ }
+ // Count occurrences across profiles; a type present in all of them
+ // reaches a count of len(ps).
+ sTypes := map[string]int{}
+ for _, p := range ps {
+ for _, st := range p.SampleType {
+ sTypes[st.Type]++
+ }
+ }
+ var res []string
+ for _, st := range ps[0].SampleType {
+ if sTypes[st.Type] == len(ps) {
+ res = append(res, st.Type)
+ }
+ }
+ return res
+}
+
+// compatibilizeSampleTypes drops sample types that are not present in sTypes
+// list and reorder them if needed.
+//
+// It sets DefaultSampleType to sType[0] if it is not in sType list.
+//
+// It assumes that all sample types from the sTypes list are present in the
+// given profile otherwise it returns an error.
+func compatibilizeSampleTypes(p *Profile, sTypes []string) error {
+ if len(sTypes) == 0 {
+ return fmt.Errorf("sample type list is empty")
+ }
+ defaultSampleType := sTypes[0]
+ // reMap[i] is the index in p.SampleType of the i-th requested type.
+ reMap, needToModify := make([]int, len(sTypes)), false
+ for i, st := range sTypes {
+ if st == p.DefaultSampleType {
+ defaultSampleType = p.DefaultSampleType
+ }
+ idx := searchValueType(p.SampleType, st)
+ if idx < 0 {
+ return fmt.Errorf("%q sample type is not found in profile", st)
+ }
+ reMap[i] = idx
+ if idx != i {
+ needToModify = true
+ }
+ }
+ // Already in the requested order with no extra types: nothing to do.
+ if !needToModify && len(sTypes) == len(p.SampleType) {
+ return nil
+ }
+ p.DefaultSampleType = defaultSampleType
+ oldSampleTypes := p.SampleType
+ p.SampleType = make([]*ValueType, len(sTypes))
+ for i, idx := range reMap {
+ p.SampleType[i] = oldSampleTypes[idx]
+ }
+ // Shuffle each sample's values through a scratch slice, then shrink
+ // the sample in place to the surviving types.
+ values := make([]int64, len(sTypes))
+ for _, s := range p.Sample {
+ for i, idx := range reMap {
+ values[i] = s.Value[idx]
+ }
+ s.Value = s.Value[:len(values)]
+ copy(s.Value, values)
+ }
+ return nil
+}
+
+// searchValueType returns the index of the first value type in vts
+// whose Type equals s, or -1 if none matches.
+func searchValueType(vts []*ValueType, s string) int {
+ for i, vt := range vts {
+ if vt.Type == s {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/profile.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/profile.go
new file mode 100644
index 00000000000..62df80a5563
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/profile.go
@@ -0,0 +1,864 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package profile provides a representation of profile.proto and
+// methods to encode/decode profiles in this format.
+package profile
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Profile is an in-memory representation of profile.proto.
+type Profile struct {
+ SampleType []*ValueType
+ DefaultSampleType string
+ Sample []*Sample
+ Mapping []*Mapping
+ Location []*Location
+ Function []*Function
+ Comments []string
+
+ DropFrames string
+ KeepFrames string
+
+ TimeNanos int64
+ DurationNanos int64
+ PeriodType *ValueType
+ Period int64
+
+ // The following fields are modified during encoding and copying,
+ // so are protected by a Mutex.
+ encodeMu sync.Mutex
+
+ // Fields with an X suffix hold string-table indices used while
+ // encoding/decoding the protobuf form.
+ commentX []int64
+ dropFramesX int64
+ keepFramesX int64
+ stringTable []string
+ defaultSampleTypeX int64
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+ Type string // cpu, wall, inuse_space, etc
+ Unit string // seconds, nanoseconds, bytes, etc
+
+ // String-table indices for Type and Unit used by the codec.
+ typeX int64
+ unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+ Location []*Location
+ Value []int64
+ // Label is a per-label-key map to values for string labels.
+ //
+ // In general, having multiple values for the given label key is strongly
+ // discouraged - see docs for the sample label field in profile.proto. The
+ // main reason this unlikely state is tracked here is to make the
+ // decoding->encoding roundtrip not lossy. But we expect that the value
+ // slices present in this map are always of length 1.
+ Label map[string][]string
+ // NumLabel is a per-label-key map to values for numeric labels. See a note
+ // above on handling multiple values for a label.
+ NumLabel map[string][]int64
+ // NumUnit is a per-label-key map to the unit names of corresponding numeric
+ // label values. The unit info may be missing even if the label is in
+ // NumLabel, see the docs in profile.proto for details. When the value is
+ // slice is present and not nil, its length must be equal to the length of
+ // the corresponding value slice in NumLabel.
+ NumUnit map[string][]string
+
+ // Codec scratch: referenced location IDs and encoded labels.
+ locationIDX []uint64
+ labelX []label
+}
+
+// label corresponds to Profile.Label
+type label struct {
+ keyX int64
+ // Exactly one of the two following values must be set
+ strX int64
+ numX int64 // Integer value for this label
+ // can be set if numX has value
+ unitX int64
+}
+
+// Mapping corresponds to Profile.Mapping
+type Mapping struct {
+ ID uint64
+ Start uint64
+ Limit uint64
+ Offset uint64
+ File string
+ BuildID string
+ HasFunctions bool
+ HasFilenames bool
+ HasLineNumbers bool
+ HasInlineFrames bool
+
+ // String-table indices for File and BuildID used by the codec.
+ fileX int64
+ buildIDX int64
+
+ // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
+ // For linux kernel mappings generated by some tools, correct symbolization depends
+ // on knowing which of the two possible relocation symbols was used for `Start`.
+ // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
+ //
+ // Note, this public field is not persisted in the proto. For the purposes of
+ // copying / merging / hashing profiles, it is considered subsumed by `File`.
+ KernelRelocationSymbol string
+}
+
+// Location corresponds to Profile.Location
+type Location struct {
+ ID uint64
+ Mapping *Mapping
+ Address uint64
+ Line []Line
+ IsFolded bool
+
+ // ID of Mapping as stored in the proto, resolved during decode.
+ mappingIDX uint64
+}
+
+// Line corresponds to Profile.Line
+type Line struct {
+ Function *Function
+ Line int64
+ Column int64
+
+ // ID of Function as stored in the proto, resolved during decode.
+ functionIDX uint64
+}
+
+// Function corresponds to Profile.Function
+type Function struct {
+ ID uint64
+ Name string
+ SystemName string
+ Filename string
+ StartLine int64
+
+ // String-table indices for the name/filename fields used by the codec.
+ nameX int64
+ systemNameX int64
+ filenameX int64
+}
+
+// Parse parses a profile and checks for its validity. The input
+// may be a gzip-compressed encoded protobuf or one of many legacy
+// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) {
+ // Reads the whole input into memory; see ParseData for the actual
+ // format detection and validation.
+ data, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ return ParseData(data)
+}
+
+// ParseData parses a profile from a buffer and checks for its
+// validity. Gzip-compressed input is transparently decompressed, the
+// protobuf form is tried first, and legacy text formats are used as a
+// fallback.
+func ParseData(data []byte) (*Profile, error) {
+ var p *Profile
+ var err error
+ // 0x1f 0x8b is the gzip magic number.
+ if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
+ gz, err := gzip.NewReader(bytes.NewBuffer(data))
+ if err == nil {
+ data, err = io.ReadAll(gz)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("decompressing profile: %v", err)
+ }
+ }
+ // errNoData/errConcatProfile are reported as-is rather than being
+ // masked by a legacy-parse attempt.
+ if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
+ p, err = parseLegacy(data)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("parsing profile: %v", err)
+ }
+
+ if err := p.CheckValid(); err != nil {
+ return nil, fmt.Errorf("malformed profile: %v", err)
+ }
+ return p, nil
+}
+
+// Sentinel parse errors. errUnrecognized lets parseLegacy fall through
+// to the next candidate parser; the others abort parsing immediately.
+var errUnrecognized = fmt.Errorf("unrecognized profile format")
+var errMalformed = fmt.Errorf("malformed profile format")
+var errNoData = fmt.Errorf("empty input file")
+var errConcatProfile = fmt.Errorf("concatenated profiles detected")
+
+// parseLegacy tries each legacy text-format parser in turn, returning
+// the first successful parse (with legacy frame-trimming info added).
+// A parser signals "not my format" with errUnrecognized; any other
+// error is treated as fatal for the whole parse.
+func parseLegacy(data []byte) (*Profile, error) {
+ parsers := []func([]byte) (*Profile, error){
+ parseCPU,
+ parseHeap,
+ parseGoCount, // goroutine, threadcreate
+ parseThread,
+ parseContention,
+ parseJavaProfile,
+ }
+
+ for _, parser := range parsers {
+ p, err := parser(data)
+ if err == nil {
+ p.addLegacyFrameInfo()
+ return p, nil
+ }
+ if err != errUnrecognized {
+ return nil, err
+ }
+ }
+ return nil, errUnrecognized
+}
+
+// ParseUncompressed parses an uncompressed protobuf into a profile.
+func ParseUncompressed(data []byte) (*Profile, error) {
+ if len(data) == 0 {
+ return nil, errNoData
+ }
+ p := &Profile{}
+ if err := unmarshal(data, p); err != nil {
+ return nil, err
+ }
+
+ // Resolve string-table indices and cross-references (the *X fields)
+ // into the exported in-memory representation.
+ if err := p.postDecode(); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+// libRx matches shared-library file names (".so" or ".so.N" style),
+// which are excluded when guessing the main binary below.
+var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
+
+// massageMappings applies heuristic-based changes to the profile
+// mappings to account for quirks of some environments.
+func (p *Profile) massageMappings() {
+ // Merge adjacent regions with matching names, checking that the offsets match
+ if len(p.Mapping) > 1 {
+ mappings := []*Mapping{p.Mapping[0]}
+ for _, m := range p.Mapping[1:] {
+ lm := mappings[len(mappings)-1]
+ if adjacent(lm, m) {
+ lm.Limit = m.Limit
+ if m.File != "" {
+ lm.File = m.File
+ }
+ if m.BuildID != "" {
+ lm.BuildID = m.BuildID
+ }
+ p.updateLocationMapping(m, lm)
+ continue
+ }
+ mappings = append(mappings, m)
+ }
+ p.Mapping = mappings
+ }
+
+ // Use heuristics to identify main binary and move it to the top of the list of mappings
+ for i, m := range p.Mapping {
+ file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
+ if len(file) == 0 {
+ continue
+ }
+ if len(libRx.FindStringSubmatch(file)) > 0 {
+ continue
+ }
+ // Bracketed names are pseudo-mappings (e.g. "[vdso]"), not a
+ // real binary on disk.
+ if file[0] == '[' {
+ continue
+ }
+ // Swap what we guess is main to position 0.
+ p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
+ break
+ }
+
+ // Keep the mapping IDs neatly sorted
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+// adjacent returns whether two mapping entries represent the same
+// mapping that has been split into two. Check that their addresses are adjacent,
+// and if the offsets match, if they are available.
+func adjacent(m1, m2 *Mapping) bool {
+ // File/BuildID only disqualify a match when both sides have them;
+ // a missing value on either side is treated as a wildcard.
+ if m1.File != "" && m2.File != "" {
+ if m1.File != m2.File {
+ return false
+ }
+ }
+ if m1.BuildID != "" && m2.BuildID != "" {
+ if m1.BuildID != m2.BuildID {
+ return false
+ }
+ }
+ if m1.Limit != m2.Start {
+ return false
+ }
+ if m1.Offset != 0 && m2.Offset != 0 {
+ // m2 must continue exactly where m1's file range left off.
+ offset := m1.Offset + (m1.Limit - m1.Start)
+ if offset != m2.Offset {
+ return false
+ }
+ }
+ return true
+}
+
+// updateLocationMapping repoints every location that references the
+// mapping `from` at the mapping `to`; used after merging split mappings.
+func (p *Profile) updateLocationMapping(from, to *Mapping) {
+ for _, l := range p.Location {
+ if l.Mapping == from {
+ l.Mapping = to
+ }
+ }
+}
+
+// serialize marshals p to the protobuf wire format. The encode mutex
+// is held because preEncode mutates the internal *X codec fields.
+func serialize(p *Profile) []byte {
+ p.encodeMu.Lock()
+ p.preEncode()
+ b := marshal(p)
+ p.encodeMu.Unlock()
+ return b
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+ zw := gzip.NewWriter(w)
+ // Close flushes the gzip footer; its error is ignored here in favor
+ // of the Write error.
+ defer zw.Close()
+ _, err := zw.Write(serialize(p))
+ return err
+}
+
+// WriteUncompressed writes the profile as a marshaled protobuf.
+func (p *Profile) WriteUncompressed(w io.Writer) error {
+ _, err := w.Write(serialize(p))
+ return err
+}
+
+// CheckValid tests whether the profile is valid. Checks include, but are
+// not limited to:
+// - len(Profile.Sample[n].value) == len(Profile.value_unit)
+// - Sample.id has a corresponding Profile.Location
+func (p *Profile) CheckValid() error {
+ // Check that sample values are consistent
+ sampleLen := len(p.SampleType)
+ if sampleLen == 0 && len(p.Sample) != 0 {
+ return fmt.Errorf("missing sample type information")
+ }
+ for _, s := range p.Sample {
+ if s == nil {
+ return fmt.Errorf("profile has nil sample")
+ }
+ if len(s.Value) != sampleLen {
+ return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
+ }
+ for _, l := range s.Location {
+ if l == nil {
+ return fmt.Errorf("sample has nil location")
+ }
+ }
+ }
+
+ // Check that all mappings/locations/functions are in the tables
+ // Check that there are no duplicate ids
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ for _, m := range p.Mapping {
+ if m == nil {
+ return fmt.Errorf("profile has nil mapping")
+ }
+ // ID 0 is reserved as the proto's "unset" value.
+ if m.ID == 0 {
+ return fmt.Errorf("found mapping with reserved ID=0")
+ }
+ if mappings[m.ID] != nil {
+ return fmt.Errorf("multiple mappings with same id: %d", m.ID)
+ }
+ mappings[m.ID] = m
+ }
+ functions := make(map[uint64]*Function, len(p.Function))
+ for _, f := range p.Function {
+ if f == nil {
+ return fmt.Errorf("profile has nil function")
+ }
+ if f.ID == 0 {
+ return fmt.Errorf("found function with reserved ID=0")
+ }
+ if functions[f.ID] != nil {
+ return fmt.Errorf("multiple functions with same id: %d", f.ID)
+ }
+ functions[f.ID] = f
+ }
+ locations := make(map[uint64]*Location, len(p.Location))
+ for _, l := range p.Location {
+ if l == nil {
+ return fmt.Errorf("profile has nil location")
+ }
+ if l.ID == 0 {
+ return fmt.Errorf("found location with reserved id=0")
+ }
+ if locations[l.ID] != nil {
+ return fmt.Errorf("multiple locations with same id: %d", l.ID)
+ }
+ locations[l.ID] = l
+ // A location's mapping/functions must be the exact table entries
+ // (pointer identity), not merely entries with matching IDs.
+ if m := l.Mapping; m != nil {
+ if m.ID == 0 || mappings[m.ID] != m {
+ return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
+ }
+ }
+ for _, ln := range l.Line {
+ f := ln.Function
+ if f == nil {
+ return fmt.Errorf("location id: %d has a line with nil function", l.ID)
+ }
+ if f.ID == 0 || functions[f.ID] != f {
+ return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
+ }
+ }
+ }
+ return nil
+}
+
+// Aggregate merges the locations in the profile into equivalence
+// classes preserving the request attributes. It also updates the
+// samples to point to the merged locations. Each boolean selects an
+// attribute to KEEP; attributes set to false are blanked out so that
+// locations differing only in them become equivalent.
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
+ for _, m := range p.Mapping {
+ m.HasInlineFrames = m.HasInlineFrames && inlineFrame
+ m.HasFunctions = m.HasFunctions && function
+ m.HasFilenames = m.HasFilenames && filename
+ m.HasLineNumbers = m.HasLineNumbers && linenumber
+ }
+
+ // Aggregate functions
+ if !function || !filename {
+ for _, f := range p.Function {
+ if !function {
+ f.Name = ""
+ f.SystemName = ""
+ }
+ if !filename {
+ f.Filename = ""
+ }
+ }
+ }
+
+ // Aggregate locations
+ if !inlineFrame || !address || !linenumber || !columnnumber {
+ for _, l := range p.Location {
+ // Without inline frames keep only the outermost (last) line.
+ if !inlineFrame && len(l.Line) > 1 {
+ l.Line = l.Line[len(l.Line)-1:]
+ }
+ // Dropping line numbers implies dropping columns too.
+ if !linenumber {
+ for i := range l.Line {
+ l.Line[i].Line = 0
+ l.Line[i].Column = 0
+ }
+ }
+ if !columnnumber {
+ for i := range l.Line {
+ l.Line[i].Column = 0
+ }
+ }
+ if !address {
+ l.Address = 0
+ }
+ }
+ }
+
+ return p.CheckValid()
+}
+
+// NumLabelUnits returns a map of numeric label keys to the units
+// associated with those keys and a map of those keys to any units
+// that were encountered but not used.
+// Unit for a given key is the first encountered unit for that key. If multiple
+// units are encountered for values paired with a particular key, then the first
+// unit encountered is used and all other units are returned in sorted order
+// in map of ignored units.
+// If no units are encountered for a particular key, the unit is then inferred
+// based on the key.
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
+ numLabelUnits := map[string]string{}
+ ignoredUnits := map[string]map[string]bool{}
+ encounteredKeys := map[string]bool{}
+
+ // Determine units based on numeric tags for each sample.
+ for _, s := range p.Sample {
+ for k := range s.NumLabel {
+ encounteredKeys[k] = true
+ for _, unit := range s.NumUnit[k] {
+ if unit == "" {
+ continue
+ }
+ // First unit seen wins; later disagreeing units are
+ // collected for reporting.
+ if wantUnit, ok := numLabelUnits[k]; !ok {
+ numLabelUnits[k] = unit
+ } else if wantUnit != unit {
+ if v, ok := ignoredUnits[k]; ok {
+ v[unit] = true
+ } else {
+ ignoredUnits[k] = map[string]bool{unit: true}
+ }
+ }
+ }
+ }
+ }
+ // Infer units for keys without any units associated with
+ // numeric tag values.
+ for key := range encounteredKeys {
+ unit := numLabelUnits[key]
+ if unit == "" {
+ switch key {
+ case "alignment", "request":
+ numLabelUnits[key] = "bytes"
+ default:
+ numLabelUnits[key] = key
+ }
+ }
+ }
+
+ // Copy ignored units into more readable format
+ unitsIgnored := make(map[string][]string, len(ignoredUnits))
+ for key, values := range ignoredUnits {
+ units := make([]string, len(values))
+ i := 0
+ for unit := range values {
+ units[i] = unit
+ i++
+ }
+ sort.Strings(units)
+ unitsIgnored[key] = units
+ }
+
+ return numLabelUnits, unitsIgnored
+}
+
+// String dumps a text representation of a profile. Intended mainly
+// for debugging purposes.
+func (p *Profile) String() string {
+ ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
+ for _, c := range p.Comments {
+ ss = append(ss, "Comment: "+c)
+ }
+ if pt := p.PeriodType; pt != nil {
+ ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
+ }
+ ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
+ if p.TimeNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
+ }
+ if p.DurationNanos != 0 {
+ ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
+ }
+
+ ss = append(ss, "Samples:")
+ var sh1 string
+ for _, s := range p.SampleType {
+ dflt := ""
+ if s.Type == p.DefaultSampleType {
+ dflt = "[dflt]"
+ }
+ sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
+ }
+ ss = append(ss, strings.TrimSpace(sh1))
+ for _, s := range p.Sample {
+ ss = append(ss, s.string())
+ }
+
+ ss = append(ss, "Locations")
+ for _, l := range p.Location {
+ ss = append(ss, l.string())
+ }
+
+ ss = append(ss, "Mappings")
+ for _, m := range p.Mapping {
+ ss = append(ss, m.string())
+ }
+
+ return strings.Join(ss, "\n") + "\n"
+}
+
+// string dumps a text representation of a mapping. Intended mainly
+// for debugging purposes.
+func (m *Mapping) string() string {
+ bits := ""
+ if m.HasFunctions {
+ bits = bits + "[FN]"
+ }
+ if m.HasFilenames {
+ bits = bits + "[FL]"
+ }
+ if m.HasLineNumbers {
+ bits = bits + "[LN]"
+ }
+ if m.HasInlineFrames {
+ bits = bits + "[IN]"
+ }
+ return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
+ m.ID,
+ m.Start, m.Limit, m.Offset,
+ m.File,
+ m.BuildID,
+ bits)
+}
+
+// string dumps a text representation of a location. Intended mainly
+// for debugging purposes.
+func (l *Location) string() string {
+ ss := []string{}
+ locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
+ if m := l.Mapping; m != nil {
+ locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
+ }
+ if l.IsFolded {
+ locStr = locStr + "[F] "
+ }
+ if len(l.Line) == 0 {
+ ss = append(ss, locStr)
+ }
+ for li := range l.Line {
+ lnStr := "??"
+ if fn := l.Line[li].Function; fn != nil {
+ lnStr = fmt.Sprintf("%s %s:%d:%d s=%d",
+ fn.Name,
+ fn.Filename,
+ l.Line[li].Line,
+ l.Line[li].Column,
+ fn.StartLine)
+ if fn.Name != fn.SystemName {
+ lnStr = lnStr + "(" + fn.SystemName + ")"
+ }
+ }
+ ss = append(ss, locStr+lnStr)
+ // Do not print location details past the first line
+ locStr = " "
+ }
+ return strings.Join(ss, "\n")
+}
+
+// string dumps a text representation of a sample. Intended mainly
+// for debugging purposes.
+func (s *Sample) string() string {
+ ss := []string{}
+ var sv string
+ for _, v := range s.Value {
+ sv = fmt.Sprintf("%s %10d", sv, v)
+ }
+ sv = sv + ": "
+ for _, l := range s.Location {
+ sv = sv + fmt.Sprintf("%d ", l.ID)
+ }
+ ss = append(ss, sv)
+ const labelHeader = " "
+ if len(s.Label) > 0 {
+ ss = append(ss, labelHeader+labelsToString(s.Label))
+ }
+ if len(s.NumLabel) > 0 {
+ ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
+ }
+ return strings.Join(ss, "\n")
+}
+
+// labelsToString returns a string representation of a
+// map representing labels.
+func labelsToString(labels map[string][]string) string {
+ ls := []string{}
+ for k, v := range labels {
+ ls = append(ls, fmt.Sprintf("%s:%v", k, v))
+ }
+ sort.Strings(ls)
+ return strings.Join(ls, " ")
+}
+
+// numLabelsToString returns a string representation of a map
+// representing numeric labels.
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
+ ls := []string{}
+ for k, v := range numLabels {
+ units := numUnits[k]
+ var labelString string
+ if len(units) == len(v) {
+ values := make([]string, len(v))
+ for i, vv := range v {
+ values[i] = fmt.Sprintf("%d %s", vv, units[i])
+ }
+ labelString = fmt.Sprintf("%s:%v", k, values)
+ } else {
+ labelString = fmt.Sprintf("%s:%v", k, v)
+ }
+ ls = append(ls, labelString)
+ }
+ sort.Strings(ls)
+ return strings.Join(ls, " ")
+}
+
+// SetLabel sets the specified key to the specified value for all samples in the
+// profile.
+func (p *Profile) SetLabel(key string, value []string) {
+ for _, sample := range p.Sample {
+ if sample.Label == nil {
+ sample.Label = map[string][]string{key: value}
+ } else {
+ sample.Label[key] = value
+ }
+ }
+}
+
+// RemoveLabel removes all labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.Label, key)
+ }
+}
+
+// HasLabel returns true if a sample has a label with indicated key and value.
+func (s *Sample) HasLabel(key, value string) bool {
+ for _, v := range s.Label[key] {
+ if v == value {
+ return true
+ }
+ }
+ return false
+}
+
+// SetNumLabel sets the specified key to the specified value for all samples in the
+// profile. "unit" is a slice that describes the units that each corresponding member
+// of "values" is measured in (e.g. bytes or seconds). If there is no relevant
+// unit for a given value, that member of "unit" should be the empty string.
+// "unit" must either have the same length as "value", or be nil.
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) {
+ for _, sample := range p.Sample {
+ if sample.NumLabel == nil {
+ sample.NumLabel = map[string][]int64{key: value}
+ } else {
+ sample.NumLabel[key] = value
+ }
+ if sample.NumUnit == nil {
+ sample.NumUnit = map[string][]string{key: unit}
+ } else {
+ sample.NumUnit[key] = unit
+ }
+ }
+}
+
+// RemoveNumLabel removes all numerical labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveNumLabel(key string) {
+ for _, sample := range p.Sample {
+ delete(sample.NumLabel, key)
+ delete(sample.NumUnit, key)
+ }
+}
+
+// DiffBaseSample returns true if a sample belongs to the diff base and false
+// otherwise.
+func (s *Sample) DiffBaseSample() bool {
+ return s.HasLabel("pprof::base", "true")
+}
+
+// Scale multiplies all sample values in a profile by a constant and keeps
+// only samples that have at least one non-zero value.
+func (p *Profile) Scale(ratio float64) {
+ if ratio == 1 {
+ return
+ }
+ ratios := make([]float64, len(p.SampleType))
+ for i := range p.SampleType {
+ ratios[i] = ratio
+ }
+ p.ScaleN(ratios)
+}
+
+// ScaleN multiplies each sample values in a sample by a different amount
+// and keeps only samples that have at least one non-zero value.
+func (p *Profile) ScaleN(ratios []float64) error {
+ if len(p.SampleType) != len(ratios) {
+ return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
+ }
+ allOnes := true
+ for _, r := range ratios {
+ if r != 1 {
+ allOnes = false
+ break
+ }
+ }
+ if allOnes {
+ return nil
+ }
+ fillIdx := 0
+ for _, s := range p.Sample {
+ keepSample := false
+ for i, v := range s.Value {
+ if ratios[i] != 1 {
+ val := int64(math.Round(float64(v) * ratios[i]))
+ s.Value[i] = val
+ keepSample = keepSample || val != 0
+ }
+ }
+ if keepSample {
+ p.Sample[fillIdx] = s
+ fillIdx++
+ }
+ }
+ p.Sample = p.Sample[:fillIdx]
+ return nil
+}
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && !l.Mapping.HasFunctions {
+ return false
+ }
+ }
+ return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+ for _, l := range p.Location {
+ if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+ return false
+ }
+ }
+ return true
+}
+
+// Unsymbolizable returns true if a mapping points to a binary for which
+// locations can't be symbolized in principle, at least now. Examples are
+// "[vdso]", [vsyscall]" and some others, see the code.
+func (m *Mapping) Unsymbolizable() bool {
+ name := filepath.Base(m.File)
+ return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
+}
+
+// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile {
+ pp := &Profile{}
+ if err := unmarshal(serialize(p), pp); err != nil {
+ panic(err)
+ }
+ if err := pp.postDecode(); err != nil {
+ panic(err)
+ }
+
+ return pp
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/proto.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/proto.go
new file mode 100644
index 00000000000..a15696ba16f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/proto.go
@@ -0,0 +1,367 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a simple protocol buffer encoder and decoder.
+// The format is described at
+// https://developers.google.com/protocol-buffers/docs/encoding
+//
+// A protocol message must implement the message interface:
+// decoder() []decoder
+// encode(*buffer)
+//
+// The decode method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
+
+package profile
+
+import (
+ "errors"
+ "fmt"
+)
+
+type buffer struct {
+ field int // field tag
+ typ int // proto wire type code for field
+ u64 uint64
+ data []byte
+ tmp [16]byte
+ tmpLines []Line // temporary storage used while decoding "repeated Line".
+}
+
+type decoder func(*buffer, message) error
+
+type message interface {
+ decoder() []decoder
+ encode(*buffer)
+}
+
+func marshal(m message) []byte {
+ var b buffer
+ m.encode(&b)
+ return b.data
+}
+
+func encodeVarint(b *buffer, x uint64) {
+ for x >= 128 {
+ b.data = append(b.data, byte(x)|0x80)
+ x >>= 7
+ }
+ b.data = append(b.data, byte(x))
+}
+
+func encodeLength(b *buffer, tag int, len int) {
+ encodeVarint(b, uint64(tag)<<3|2)
+ encodeVarint(b, uint64(len))
+}
+
+func encodeUint64(b *buffer, tag int, x uint64) {
+ // append varint to b.data
+ encodeVarint(b, uint64(tag)<<3)
+ encodeVarint(b, x)
+}
+
+func encodeUint64s(b *buffer, tag int, x []uint64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, u)
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeUint64(b, tag, u)
+ }
+}
+
+func encodeUint64Opt(b *buffer, tag int, x uint64) {
+ if x == 0 {
+ return
+ }
+ encodeUint64(b, tag, x)
+}
+
+func encodeInt64(b *buffer, tag int, x int64) {
+ u := uint64(x)
+ encodeUint64(b, tag, u)
+}
+
+func encodeInt64s(b *buffer, tag int, x []int64) {
+ if len(x) > 2 {
+ // Use packed encoding
+ n1 := len(b.data)
+ for _, u := range x {
+ encodeVarint(b, uint64(u))
+ }
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+ return
+ }
+ for _, u := range x {
+ encodeInt64(b, tag, u)
+ }
+}
+
+func encodeInt64Opt(b *buffer, tag int, x int64) {
+ if x == 0 {
+ return
+ }
+ encodeInt64(b, tag, x)
+}
+
+func encodeString(b *buffer, tag int, x string) {
+ encodeLength(b, tag, len(x))
+ b.data = append(b.data, x...)
+}
+
+func encodeStrings(b *buffer, tag int, x []string) {
+ for _, s := range x {
+ encodeString(b, tag, s)
+ }
+}
+
+func encodeBool(b *buffer, tag int, x bool) {
+ if x {
+ encodeUint64(b, tag, 1)
+ } else {
+ encodeUint64(b, tag, 0)
+ }
+}
+
+func encodeBoolOpt(b *buffer, tag int, x bool) {
+ if x {
+ encodeBool(b, tag, x)
+ }
+}
+
+func encodeMessage(b *buffer, tag int, m message) {
+ n1 := len(b.data)
+ m.encode(b)
+ n2 := len(b.data)
+ encodeLength(b, tag, n2-n1)
+ n3 := len(b.data)
+ copy(b.tmp[:], b.data[n2:n3])
+ copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+ copy(b.data[n1:], b.tmp[:n3-n2])
+}
+
+func unmarshal(data []byte, m message) (err error) {
+ b := buffer{data: data, typ: 2}
+ return decodeMessage(&b, m)
+}
+
+func le64(p []byte) uint64 {
+ return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+func le32(p []byte) uint32 {
+ return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
+func decodeVarint(data []byte) (uint64, []byte, error) {
+ var u uint64
+ for i := 0; ; i++ {
+ if i >= 10 || i >= len(data) {
+ return 0, nil, errors.New("bad varint")
+ }
+ u |= uint64(data[i]&0x7F) << uint(7*i)
+ if data[i]&0x80 == 0 {
+ return u, data[i+1:], nil
+ }
+ }
+}
+
+func decodeField(b *buffer, data []byte) ([]byte, error) {
+ x, data, err := decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ b.field = int(x >> 3)
+ b.typ = int(x & 7)
+ b.data = nil
+ b.u64 = 0
+ switch b.typ {
+ case 0:
+ b.u64, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ case 1:
+ if len(data) < 8 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = le64(data[:8])
+ data = data[8:]
+ case 2:
+ var n uint64
+ n, data, err = decodeVarint(data)
+ if err != nil {
+ return nil, err
+ }
+ if n > uint64(len(data)) {
+ return nil, errors.New("too much data")
+ }
+ b.data = data[:n]
+ data = data[n:]
+ case 5:
+ if len(data) < 4 {
+ return nil, errors.New("not enough data")
+ }
+ b.u64 = uint64(le32(data[:4]))
+ data = data[4:]
+ default:
+ return nil, fmt.Errorf("unknown wire type: %d", b.typ)
+ }
+
+ return data, nil
+}
+
+func checkType(b *buffer, typ int) error {
+ if b.typ != typ {
+ return errors.New("type mismatch")
+ }
+ return nil
+}
+
+func decodeMessage(b *buffer, m message) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ dec := m.decoder()
+ data := b.data
+ for len(data) > 0 {
+ // pull varint field# + type
+ var err error
+ data, err = decodeField(b, data)
+ if err != nil {
+ return err
+ }
+ if b.field >= len(dec) || dec[b.field] == nil {
+ continue
+ }
+ if err := dec[b.field](b, m); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeInt64(b *buffer, x *int64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = int64(b.u64)
+ return nil
+}
+
+func decodeInt64s(b *buffer, x *[]int64) error {
+ if b.typ == 2 {
+ // Packed encoding
+ data := b.data
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, int64(u))
+ }
+ return nil
+ }
+ var i int64
+ if err := decodeInt64(b, &i); err != nil {
+ return err
+ }
+ *x = append(*x, i)
+ return nil
+}
+
+func decodeUint64(b *buffer, x *uint64) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ *x = b.u64
+ return nil
+}
+
+func decodeUint64s(b *buffer, x *[]uint64) error {
+ if b.typ == 2 {
+ data := b.data
+ // Packed encoding
+ for len(data) > 0 {
+ var u uint64
+ var err error
+
+ if u, data, err = decodeVarint(data); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ }
+ return nil
+ }
+ var u uint64
+ if err := decodeUint64(b, &u); err != nil {
+ return err
+ }
+ *x = append(*x, u)
+ return nil
+}
+
+func decodeString(b *buffer, x *string) error {
+ if err := checkType(b, 2); err != nil {
+ return err
+ }
+ *x = string(b.data)
+ return nil
+}
+
+func decodeStrings(b *buffer, x *[]string) error {
+ var s string
+ if err := decodeString(b, &s); err != nil {
+ return err
+ }
+ *x = append(*x, s)
+ return nil
+}
+
+func decodeBool(b *buffer, x *bool) error {
+ if err := checkType(b, 0); err != nil {
+ return err
+ }
+ if int64(b.u64) == 0 {
+ *x = false
+ } else {
+ *x = true
+ }
+ return nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/prune.go b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/prune.go
new file mode 100644
index 00000000000..b2f9fd54660
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/google/pprof/profile/prune.go
@@ -0,0 +1,194 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var (
+ reservedNames = []string{"(anonymous namespace)", "operator()"}
+ bracketRx = func() *regexp.Regexp {
+ var quotedNames []string
+ for _, name := range append(reservedNames, "(") {
+ quotedNames = append(quotedNames, regexp.QuoteMeta(name))
+ }
+ return regexp.MustCompile(strings.Join(quotedNames, "|"))
+ }()
+)
+
+// simplifyFunc does some primitive simplification of function names.
+func simplifyFunc(f string) string {
+ // Account for leading '.' on the PPC ELF v1 ABI.
+ funcName := strings.TrimPrefix(f, ".")
+ // Account for unsimplified names -- try to remove the argument list by trimming
+ // starting from the first '(', but skipping reserved names that have '('.
+ for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
+ foundReserved := false
+ for _, res := range reservedNames {
+ if funcName[ind[0]:ind[1]] == res {
+ foundReserved = true
+ break
+ }
+ }
+ if !foundReserved {
+ funcName = funcName[:ind[0]]
+ break
+ }
+ }
+ return funcName
+}
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+ prune := make(map[uint64]bool)
+ pruneBeneath := make(map[uint64]bool)
+
+ // simplifyFunc can be expensive, so cache results.
+ // Note that the same function name can be encountered many times due
+ // different lines and addresses in the same function.
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune
+ pruneFromHere := func(s string) bool {
+ if r, ok := pruneCache[s]; ok {
+ return r
+ }
+ funcName := simplifyFunc(s)
+ if dropRx.MatchString(funcName) {
+ if keepRx == nil || !keepRx.MatchString(funcName) {
+ pruneCache[s] = true
+ return true
+ }
+ }
+ pruneCache[s] = false
+ return false
+ }
+
+ for _, loc := range p.Location {
+ var i int
+ for i = len(loc.Line) - 1; i >= 0; i-- {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ if pruneFromHere(fn.Name) {
+ break
+ }
+ }
+ }
+
+ if i >= 0 {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+
+ // Remove the matching location.
+ if i == len(loc.Line)-1 {
+ // Matched the top entry: prune the whole location.
+ prune[loc.ID] = true
+ } else {
+ loc.Line = loc.Line[i+1:]
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the root to the leaves to find the prune location.
+ // Do not prune frames before the first user frame, to avoid
+ // pruning everything.
+ foundUser := false
+ for i := len(sample.Location) - 1; i >= 0; i-- {
+ id := sample.Location[i].ID
+ if !prune[id] && !pruneBeneath[id] {
+ foundUser = true
+ continue
+ }
+ if !foundUser {
+ continue
+ }
+ if prune[id] {
+ sample.Location = sample.Location[i+1:]
+ break
+ }
+ if pruneBeneath[id] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
+
+// RemoveUninteresting prunes and elides profiles using built-in
+// tables of uninteresting function names.
+func (p *Profile) RemoveUninteresting() error {
+ var keep, drop *regexp.Regexp
+ var err error
+
+ if p.DropFrames != "" {
+ if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
+ }
+ if p.KeepFrames != "" {
+ if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
+ return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
+ }
+ }
+ p.Prune(drop, keep)
+ }
+ return nil
+}
+
+// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
+//
+// Please see the example below to understand this method as well as
+// the difference from Prune method.
+//
+// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
+//
+// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
+// Prune(A, nil) returns [B,C,B,D] by removing A itself.
+//
+// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
+// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
+func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
+ pruneBeneath := make(map[uint64]bool)
+
+ for _, loc := range p.Location {
+ for i := 0; i < len(loc.Line); i++ {
+ if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+ funcName := simplifyFunc(fn.Name)
+ if dropRx.MatchString(funcName) {
+ // Found matching entry to prune.
+ pruneBeneath[loc.ID] = true
+ loc.Line = loc.Line[i:]
+ break
+ }
+ }
+ }
+ }
+
+ // Prune locs from each Sample
+ for _, sample := range p.Sample {
+ // Scan from the bottom leaf to the root to find the prune location.
+ for i, loc := range sample.Location {
+ if pruneBeneath[loc.ID] {
+ sample.Location = sample.Location[i:]
+ break
+ }
+ }
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/.editorconfig b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/.editorconfig
new file mode 100644
index 00000000000..c6b74c3e0d0
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/.editorconfig
@@ -0,0 +1,20 @@
+; https://editorconfig.org/
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 2
+
+[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+indent_size = 4
+trim_trailing_whitespace = false
+
+eclint_indent_style = unset
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/.gitignore b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/.gitignore
new file mode 100644
index 00000000000..84039fec687
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/.gitignore
@@ -0,0 +1 @@
+coverage.coverprofile
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/AUTHORS b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/AUTHORS
deleted file mode 100644
index b722392ee59..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/AUTHORS
+++ /dev/null
@@ -1,8 +0,0 @@
-# This is the official list of gorilla/mux authors for copyright purposes.
-#
-# Please keep the list sorted.
-
-Google LLC (https://opensource.google.com/)
-Kamil Kisielk
-Matt Silverlock
-Rodrigo Moraes (https://github.com/moraes)
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/LICENSE b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/LICENSE
index 6903df6386e..bb9d80bc9b6 100644
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/LICENSE
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
+Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/Makefile b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/Makefile
new file mode 100644
index 00000000000..98f5ab75f9d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/Makefile
@@ -0,0 +1,34 @@
+GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
+GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
+
+GO_SEC=$(shell which gosec 2> /dev/null || echo '')
+GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
+
+GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
+GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
+
+.PHONY: golangci-lint
+golangci-lint:
+ $(if $(GO_LINT), ,go install $(GO_LINT_URI))
+ @echo "##### Running golangci-lint"
+ golangci-lint run -v
+
+.PHONY: gosec
+gosec:
+ $(if $(GO_SEC), ,go install $(GO_SEC_URI))
+ @echo "##### Running gosec"
+ gosec ./...
+
+.PHONY: govulncheck
+govulncheck:
+ $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
+ @echo "##### Running govulncheck"
+ govulncheck ./...
+
+.PHONY: verify
+verify: golangci-lint gosec govulncheck
+
+.PHONY: test
+test:
+ @echo "##### Running tests"
+ go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/README.md b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/README.md
index 35eea9f1063..382513d57c4 100644
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/README.md
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/README.md
@@ -1,12 +1,12 @@
# gorilla/mux
-[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
-[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
-[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
+![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg)
+[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux)
+[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
-![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png)
-https://www.gorillatoolkit.org/pkg/mux
+![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
their respective handler.
@@ -247,32 +247,25 @@ type spaHandler struct {
// file located at the index path on the SPA handler will be served. This
// is suitable behavior for serving an SPA (single page application).
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- // get the absolute path to prevent directory traversal
- path, err := filepath.Abs(r.URL.Path)
- if err != nil {
- // if we failed to get the absolute path respond with a 400 bad request
- // and stop
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- // prepend the path with the path to the static directory
- path = filepath.Join(h.staticPath, path)
+ // Join internally call path.Clean to prevent directory traversal
+ path := filepath.Join(h.staticPath, r.URL.Path)
- // check whether a file exists at the given path
- _, err = os.Stat(path)
- if os.IsNotExist(err) {
- // file does not exist, serve index.html
+ // check whether a file exists or is a directory at the given path
+ fi, err := os.Stat(path)
+ if os.IsNotExist(err) || fi.IsDir() {
+ // file does not exist or path is a directory, serve index.html
http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
return
- } else if err != nil {
- // if we got an error (that wasn't that the file doesn't exist) stating the
- // file, return a 500 internal server error and stop
+ }
+
+ if err != nil {
+ // if we got an error (that wasn't that the file doesn't exist) stating the
+ // file, return a 500 internal server error and stop
http.Error(w, err.Error(), http.StatusInternalServerError)
- return
+ return
}
- // otherwise, use http.FileServer to serve the static dir
+ // otherwise, use http.FileServer to serve the static file
http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
}
@@ -375,6 +368,19 @@ url, err := r.Get("article").URL("subdomain", "news",
"id", "42")
```
+To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available:
+```go
+r := mux.NewRouter()
+r.Host("{domain}").
+ Path("/{group}/{item_id}").
+ Queries("some_data1", "{some_data1}").
+ Queries("some_data2", "{some_data2}").
+ Name("article")
+
+// Will print [domain group item_id some_data1 some_data2]
+fmt.Println(r.Get("article").GetVarNames())
+
+```
### Walking Routes
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
@@ -572,7 +578,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler
r := mux.NewRouter()
r.HandleFunc("/", handler)
-amw := authenticationMiddleware{}
+amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
amw.Populate()
r.Use(amw.Middleware)
@@ -758,7 +764,8 @@ func TestMetricsHandler(t *testing.T) {
rr := httptest.NewRecorder()
- // Need to create a router that we can pass the request through so that the vars will be added to the context
+ // To add the vars to the context,
+ // we need to create a router through which we can pass the request.
router := mux.NewRouter()
router.HandleFunc("/metrics/{type}", MetricsHandler)
router.ServeHTTP(rr, req)
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/doc.go b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/doc.go
index bd5a38b55d8..80601351fd0 100644
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/doc.go
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/doc.go
@@ -10,18 +10,18 @@ http.ServeMux, mux.Router matches incoming requests against a list of
registered routes and calls a handler for the route that matches the URL
or other conditions. The main features are:
- * Requests can be matched based on URL host, path, path prefix, schemes,
- header and query values, HTTP methods or using custom matchers.
- * URL hosts, paths and query values can have variables with an optional
- regular expression.
- * Registered URLs can be built, or "reversed", which helps maintaining
- references to resources.
- * Routes can be used as subrouters: nested routes are only tested if the
- parent route matches. This is useful to define groups of routes that
- share common conditions like a host, a path prefix or other repeated
- attributes. As a bonus, this optimizes request matching.
- * It implements the http.Handler interface so it is compatible with the
- standard http.ServeMux.
+ - Requests can be matched based on URL host, path, path prefix, schemes,
+ header and query values, HTTP methods or using custom matchers.
+ - URL hosts, paths and query values can have variables with an optional
+ regular expression.
+ - Registered URLs can be built, or "reversed", which helps maintaining
+ references to resources.
+ - Routes can be used as subrouters: nested routes are only tested if the
+ parent route matches. This is useful to define groups of routes that
+ share common conditions like a host, a path prefix or other repeated
+ attributes. As a bonus, this optimizes request matching.
+ - It implements the http.Handler interface so it is compatible with the
+ standard http.ServeMux.
Let's start registering a couple of URL paths and handlers:
@@ -301,6 +301,5 @@ A more complex authentication middleware, which maps session token to users, cou
r.Use(amw.Middleware)
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
-
*/
package mux
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/go.mod b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/go.mod
index df170a39940..7c6f375b81e 100644
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/go.mod
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/go.mod
@@ -1,3 +1,3 @@
module github.com/gorilla/mux
-go 1.12
+go 1.20
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/mux.go b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/mux.go
index c9ba6470735..1e089906fad 100644
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/mux.go
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/mux.go
@@ -31,24 +31,26 @@ func NewRouter() *Router {
// It implements the http.Handler interface, so it can be registered to serve
// requests:
//
-// var router = mux.NewRouter()
+// var router = mux.NewRouter()
//
-// func main() {
-// http.Handle("/", router)
-// }
+// func main() {
+// http.Handle("/", router)
+// }
//
// Or, for Google App Engine, register it in a init() function:
//
-// func init() {
-// http.Handle("/", router)
-// }
+// func init() {
+// http.Handle("/", router)
+// }
//
// This will send all incoming requests to the router.
type Router struct {
// Configurable Handler to be used when no route matches.
+ // This can be used to render your own 404 Not Found errors.
NotFoundHandler http.Handler
// Configurable Handler to be used when the request method does not match the route.
+ // This can be used to render your own 405 Method Not Allowed errors.
MethodNotAllowedHandler http.Handler
// Routes to be matched, in order.
@@ -435,8 +437,7 @@ func Vars(r *http.Request) map[string]string {
// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
-// after the handler returns, unless the KeepContext option is set on the
-// Router.
+// after the handler returns.
func CurrentRoute(r *http.Request) *Route {
if rv := r.Context().Value(routeKey); rv != nil {
return rv.(*Route)
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/regexp.go b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/regexp.go
index 96dd94ad130..5d05cfa0e9e 100644
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/regexp.go
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/regexp.go
@@ -22,10 +22,10 @@ type routeRegexpOptions struct {
type regexpType int
const (
- regexpTypePath regexpType = 0
- regexpTypeHost regexpType = 1
- regexpTypePrefix regexpType = 2
- regexpTypeQuery regexpType = 3
+ regexpTypePath regexpType = iota
+ regexpTypeHost
+ regexpTypePrefix
+ regexpTypeQuery
)
// newRouteRegexp parses a route template and returns a routeRegexp,
@@ -195,7 +195,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
// url builds a URL part using the given values.
func (r *routeRegexp) url(values map[string]string) (string, error) {
- urlValues := make([]interface{}, len(r.varsN), len(r.varsN))
+ urlValues := make([]interface{}, len(r.varsN))
for k, v := range r.varsN {
value, ok := values[v]
if !ok {
@@ -325,6 +325,12 @@ func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
host := getHost(req)
+ if v.host.wildcardHostPort {
+ // Don't be strict on the port match
+ if i := strings.Index(host, ":"); i != -1 {
+ host = host[:i]
+ }
+ }
matches := v.host.regexp.FindStringSubmatchIndex(host)
if len(matches) > 0 {
extractVars(host, matches, v.host.varsN, m.Vars)
diff --git a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/route.go b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/route.go
index 750afe570d0..e8f11df221f 100644
--- a/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/route.go
+++ b/integration/assets/hydrabroker/vendor/github.com/gorilla/mux/route.go
@@ -64,8 +64,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
match.MatchErr = nil
}
- matchErr = nil
+ matchErr = nil // nolint:ineffassign
return false
+ } else {
+ // Multiple routes may share the same path but use different HTTP methods. For instance:
+ // Route 1: POST "/users/{id}".
+ // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+".
+ //
+ // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2",
+ // The router should return a "Not Found" error as no route fully matches this request.
+ if match.MatchErr == ErrMethodMismatch {
+ match.MatchErr = nil
+ }
}
}
@@ -230,9 +240,9 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
// Headers adds a matcher for request header values.
// It accepts a sequence of key/value pairs to be matched. For example:
//
-// r := mux.NewRouter()
-// r.Headers("Content-Type", "application/json",
-// "X-Requested-With", "XMLHttpRequest")
+// r := mux.NewRouter().NewRoute()
+// r.Headers("Content-Type", "application/json",
+// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request header values match.
// If the value is an empty string, it will match any value if the key is set.
@@ -255,9 +265,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
// support. For example:
//
-// r := mux.NewRouter()
-// r.HeadersRegexp("Content-Type", "application/(text|json)",
-// "X-Requested-With", "XMLHttpRequest")
+// r := mux.NewRouter().NewRoute()
+// r.HeadersRegexp("Content-Type", "application/(text|json)",
+// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both the request header matches both regular expressions.
// If the value is an empty string, it will match any value if the key is set.
@@ -283,10 +293,10 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route {
//
// For example:
//
-// r := mux.NewRouter()
-// r.Host("www.example.com")
-// r.Host("{subdomain}.domain.com")
-// r.Host("{subdomain:[a-z]+}.domain.com")
+// r := mux.NewRouter().NewRoute()
+// r.Host("www.example.com")
+// r.Host("{subdomain}.domain.com")
+// r.Host("{subdomain:[a-z]+}.domain.com")
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
@@ -342,11 +352,11 @@ func (r *Route) Methods(methods ...string) *Route {
//
// For example:
//
-// r := mux.NewRouter()
-// r.Path("/products/").Handler(ProductsHandler)
-// r.Path("/products/{key}").Handler(ProductsHandler)
-// r.Path("/articles/{category}/{id:[0-9]+}").
-// Handler(ArticleHandler)
+// r := mux.NewRouter().NewRoute()
+// r.Path("/products/").Handler(ProductsHandler)
+// r.Path("/products/{key}").Handler(ProductsHandler)
+// r.Path("/articles/{category}/{id:[0-9]+}").
+// Handler(ArticleHandler)
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
@@ -377,8 +387,8 @@ func (r *Route) PathPrefix(tpl string) *Route {
// It accepts a sequence of key/value pairs. Values may define variables.
// For example:
//
-// r := mux.NewRouter()
-// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+// r := mux.NewRouter().NewRoute()
+// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
//
// The above route will only match if the URL contains the defined queries
// values, e.g.: ?foo=bar&id=42.
@@ -473,11 +483,11 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
//
// It will test the inner routes only if the parent route matched. For example:
//
-// r := mux.NewRouter()
-// s := r.Host("www.example.com").Subrouter()
-// s.HandleFunc("/products/", ProductsHandler)
-// s.HandleFunc("/products/{key}", ProductHandler)
-// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+// r := mux.NewRouter().NewRoute()
+// s := r.Host("www.example.com").Subrouter()
+// s.HandleFunc("/products/", ProductsHandler)
+// s.HandleFunc("/products/{key}", ProductHandler)
+// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
//
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
@@ -497,36 +507,36 @@ func (r *Route) Subrouter() *Router {
// It accepts a sequence of key/value pairs for the route variables. For
// example, given this route:
//
-// r := mux.NewRouter()
-// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-// Name("article")
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Name("article")
//
// ...a URL for it can be built using:
//
-// url, err := r.Get("article").URL("category", "technology", "id", "42")
+// url, err := r.Get("article").URL("category", "technology", "id", "42")
//
// ...which will return an url.URL with the following path:
//
-// "/articles/technology/42"
+// "/articles/technology/42"
//
// This also works for host variables:
//
-// r := mux.NewRouter()
-// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
-// Host("{subdomain}.domain.com").
-// Name("article")
+// r := mux.NewRouter()
+// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+// Host("{subdomain}.domain.com").
+// Name("article")
//
-// // url.String() will be "http://news.domain.com/articles/technology/42"
-// url, err := r.Get("article").URL("subdomain", "news",
-// "category", "technology",
-// "id", "42")
+// // url.String() will be "http://news.domain.com/articles/technology/42"
+// url, err := r.Get("article").URL("subdomain", "news",
+// "category", "technology",
+// "id", "42")
//
// The scheme of the resulting url will be the first argument that was passed to Schemes:
//
-// // url.String() will be "https://example.com"
-// r := mux.NewRouter()
-// url, err := r.Host("example.com")
-// .Schemes("https", "http").URL()
+// // url.String() will be "https://example.com"
+// r := mux.NewRouter().NewRoute()
+// url, err := r.Host("example.com")
+// .Schemes("https", "http").URL()
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
@@ -718,6 +728,25 @@ func (r *Route) GetHostTemplate() (string, error) {
return r.regexp.host.template, nil
}
+// GetVarNames returns the names of all variables added by regexp matchers
+// These can be used to know which route variables should be passed into r.URL()
+func (r *Route) GetVarNames() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ var varNames []string
+ if r.regexp.host != nil {
+ varNames = append(varNames, r.regexp.host.varsN...)
+ }
+ if r.regexp.path != nil {
+ varNames = append(varNames, r.regexp.path.varsN...)
+ }
+ for _, regx := range r.regexp.queries {
+ varNames = append(varNames, regx.varsN...)
+ }
+ return varNames, nil
+}
+
// prepareVars converts the route variable pairs into a map. If the route has a
// BuildVarsFunc, it is invoked.
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/.gitignore b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/.gitignore
deleted file mode 100644
index 6d9953c3c6a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-.test
-.go
-
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/.travis.yml b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/.travis.yml
deleted file mode 100644
index 9cf8bb7fc5f..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-script:
- - go test -race -v ./...
-
-go:
- - 1.4
- - 1.5
- - 1.6
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-install:
- - go get gopkg.in/fsnotify.v1
- - go get gopkg.in/tomb.v1
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/CHANGES.md b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/CHANGES.md
deleted file mode 100644
index 422790c073d..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/CHANGES.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# API v1 (gopkg.in/hpcloud/tail.v1)
-
-## April, 2016
-
-* Migrated to godep, as depman is not longer supported
-* Introduced golang vendoring feature
-* Fixed issue [#57](https://github.com/hpcloud/tail/issues/57) related to reopen deleted file
-
-## July, 2015
-
-* Fix inotify watcher leak; remove `Cleanup` (#51)
-
-# API v0 (gopkg.in/hpcloud/tail.v0)
-
-## June, 2015
-
-* Don't return partial lines (PR #40)
-* Use stable version of fsnotify (#46)
-
-## July, 2014
-
-* Fix tail for Windows (PR #36)
-
-## May, 2014
-
-* Improved rate limiting using leaky bucket (PR #29)
-* Fix odd line splitting (PR #30)
-
-## Apr, 2014
-
-* LimitRate now discards read buffer (PR #28)
-* allow reading of longer lines if MaxLineSize is unset (PR #24)
-* updated deps.json to latest fsnotify (441bbc86b1)
-
-## Feb, 2014
-
-* added `Config.Logger` to suppress library logging
-
-## Nov, 2013
-
-* add Cleanup to remove leaky inotify watches (PR #20)
-
-## Aug, 2013
-
-* redesigned Location field (PR #12)
-* add tail.Tell (PR #14)
-
-## July, 2013
-
-* Rate limiting (PR #10)
-
-## May, 2013
-
-* Detect file deletions/renames in polling file watcher (PR #1)
-* Detect file truncation
-* Fix potential race condition when reopening the file (issue 5)
-* Fix potential blocking of `tail.Stop` (issue 4)
-* Fix uncleaned up ChangeEvents goroutines after calling tail.Stop
-* Support Follow=false
-
-## Feb, 2013
-
-* Initial open source release
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/Dockerfile b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/Dockerfile
deleted file mode 100644
index cd297b940a9..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/Dockerfile
+++ /dev/null
@@ -1,19 +0,0 @@
-FROM golang
-
-RUN mkdir -p $GOPATH/src/github.com/hpcloud/tail/
-ADD . $GOPATH/src/github.com/hpcloud/tail/
-
-# expecting to fetch dependencies successfully.
-RUN go get -v github.com/hpcloud/tail
-
-# expecting to run the test successfully.
-RUN go test -v github.com/hpcloud/tail
-
-# expecting to install successfully
-RUN go install -v github.com/hpcloud/tail
-RUN go install -v github.com/hpcloud/tail/cmd/gotail
-
-RUN $GOPATH/bin/gotail -h || true
-
-ENV PATH $GOPATH/bin:$PATH
-CMD ["gotail"]
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/Makefile b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/Makefile
deleted file mode 100644
index 6591b24fc11..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-default: test
-
-test: *.go
- go test -v -race ./...
-
-fmt:
- gofmt -w .
-
-# Run the test in an isolated environment.
-fulltest:
- docker build -t hpcloud/tail .
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/README.md b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/README.md
deleted file mode 100644
index fb7fbc26c67..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/README.md
+++ /dev/null
@@ -1,28 +0,0 @@
-[![Build Status](https://travis-ci.org/hpcloud/tail.svg)](https://travis-ci.org/hpcloud/tail)
-[![Build status](https://ci.appveyor.com/api/projects/status/kohpsf3rvhjhrox6?svg=true)](https://ci.appveyor.com/project/HelionCloudFoundry/tail)
-
-# Go package for tail-ing files
-
-A Go package striving to emulate the features of the BSD `tail` program.
-
-```Go
-t, err := tail.TailFile("/var/log/nginx.log", tail.Config{Follow: true})
-for line := range t.Lines {
- fmt.Println(line.Text)
-}
-```
-
-See [API documentation](http://godoc.org/github.com/hpcloud/tail).
-
-## Log rotation
-
-Tail comes with full support for truncation/move detection as it is
-designed to work with log rotation tools.
-
-## Installing
-
- go get github.com/hpcloud/tail/...
-
-## Windows support
-
-This package [needs assistance](https://github.com/hpcloud/tail/labels/Windows) for full Windows support.
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/appveyor.yml b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/appveyor.yml
deleted file mode 100644
index d370055b6f0..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/appveyor.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-version: 0.{build}
-skip_tags: true
-cache: C:\Users\appveyor\AppData\Local\NuGet\Cache
-build_script:
-- SET GOPATH=c:\workspace
-- go test -v -race ./...
-test: off
-clone_folder: c:\workspace\src\github.com\hpcloud\tail
-branches:
- only:
- - master
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/Licence b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/Licence
deleted file mode 100644
index 434aab19f1a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/Licence
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright (C) 2013 99designs
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
deleted file mode 100644
index 358b69e7f5c..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/leakybucket.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
-package ratelimiter
-
-import (
- "time"
-)
-
-type LeakyBucket struct {
- Size uint16
- Fill float64
- LeakInterval time.Duration // time.Duration for 1 unit of size to leak
- Lastupdate time.Time
- Now func() time.Time
-}
-
-func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
- bucket := LeakyBucket{
- Size: size,
- Fill: 0,
- LeakInterval: leakInterval,
- Now: time.Now,
- Lastupdate: time.Now(),
- }
-
- return &bucket
-}
-
-func (b *LeakyBucket) updateFill() {
- now := b.Now()
- if b.Fill > 0 {
- elapsed := now.Sub(b.Lastupdate)
-
- b.Fill -= float64(elapsed) / float64(b.LeakInterval)
- if b.Fill < 0 {
- b.Fill = 0
- }
- }
- b.Lastupdate = now
-}
-
-func (b *LeakyBucket) Pour(amount uint16) bool {
- b.updateFill()
-
- var newfill float64 = b.Fill + float64(amount)
-
- if newfill > float64(b.Size) {
- return false
- }
-
- b.Fill = newfill
-
- return true
-}
-
-// The time at which this bucket will be completely drained
-func (b *LeakyBucket) DrainedAt() time.Time {
- return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
-}
-
-// The duration until this bucket is completely drained
-func (b *LeakyBucket) TimeToDrain() time.Duration {
- return b.DrainedAt().Sub(b.Now())
-}
-
-func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
- return b.Now().Sub(b.Lastupdate)
-}
-
-type LeakyBucketSer struct {
- Size uint16
- Fill float64
- LeakInterval time.Duration // time.Duration for 1 unit of size to leak
- Lastupdate time.Time
-}
-
-func (b *LeakyBucket) Serialise() *LeakyBucketSer {
- bucket := LeakyBucketSer{
- Size: b.Size,
- Fill: b.Fill,
- LeakInterval: b.LeakInterval,
- Lastupdate: b.Lastupdate,
- }
-
- return &bucket
-}
-
-func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
- bucket := LeakyBucket{
- Size: b.Size,
- Fill: b.Fill,
- LeakInterval: b.LeakInterval,
- Lastupdate: b.Lastupdate,
- Now: time.Now,
- }
-
- return &bucket
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/memory.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/memory.go
deleted file mode 100644
index 8f6a5784a9a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/memory.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package ratelimiter
-
-import (
- "errors"
- "time"
-)
-
-const GC_SIZE int = 100
-
-type Memory struct {
- store map[string]LeakyBucket
- lastGCCollected time.Time
-}
-
-func NewMemory() *Memory {
- m := new(Memory)
- m.store = make(map[string]LeakyBucket)
- m.lastGCCollected = time.Now()
- return m
-}
-
-func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
-
- bucket, ok := m.store[key]
- if !ok {
- return nil, errors.New("miss")
- }
-
- return &bucket, nil
-}
-
-func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
-
- if len(m.store) > GC_SIZE {
- m.GarbageCollect()
- }
-
- m.store[key] = bucket
-
- return nil
-}
-
-func (m *Memory) GarbageCollect() {
- now := time.Now()
-
- // rate limit GC to once per minute
- if now.Add(60*time.Second).Unix() > m.lastGCCollected.Unix() {
-
- for key, bucket := range m.store {
- // if the bucket is drained, then GC
- if bucket.DrainedAt().Unix() > now.Unix() {
- delete(m.store, key)
- }
- }
-
- m.lastGCCollected = now
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/storage.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/storage.go
deleted file mode 100644
index 89b2fe8826e..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/ratelimiter/storage.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package ratelimiter
-
-type Storage interface {
- GetBucketFor(string) (*LeakyBucket, error)
- SetBucketFor(string, LeakyBucket) error
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail.go
deleted file mode 100644
index 2d252d60572..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail.go
+++ /dev/null
@@ -1,438 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package tail
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "strings"
- "sync"
- "time"
-
- "github.com/hpcloud/tail/ratelimiter"
- "github.com/hpcloud/tail/util"
- "github.com/hpcloud/tail/watch"
- "gopkg.in/tomb.v1"
-)
-
-var (
- ErrStop = fmt.Errorf("tail should now stop")
-)
-
-type Line struct {
- Text string
- Time time.Time
- Err error // Error from tail
-}
-
-// NewLine returns a Line with present time.
-func NewLine(text string) *Line {
- return &Line{text, time.Now(), nil}
-}
-
-// SeekInfo represents arguments to `os.Seek`
-type SeekInfo struct {
- Offset int64
- Whence int // os.SEEK_*
-}
-
-type logger interface {
- Fatal(v ...interface{})
- Fatalf(format string, v ...interface{})
- Fatalln(v ...interface{})
- Panic(v ...interface{})
- Panicf(format string, v ...interface{})
- Panicln(v ...interface{})
- Print(v ...interface{})
- Printf(format string, v ...interface{})
- Println(v ...interface{})
-}
-
-// Config is used to specify how a file must be tailed.
-type Config struct {
- // File-specifc
- Location *SeekInfo // Seek to this location before tailing
- ReOpen bool // Reopen recreated files (tail -F)
- MustExist bool // Fail early if the file does not exist
- Poll bool // Poll for file changes instead of using inotify
- Pipe bool // Is a named pipe (mkfifo)
- RateLimiter *ratelimiter.LeakyBucket
-
- // Generic IO
- Follow bool // Continue looking for new lines (tail -f)
- MaxLineSize int // If non-zero, split longer lines into multiple lines
-
- // Logger, when nil, is set to tail.DefaultLogger
- // To disable logging: set field to tail.DiscardingLogger
- Logger logger
-}
-
-type Tail struct {
- Filename string
- Lines chan *Line
- Config
-
- file *os.File
- reader *bufio.Reader
-
- watcher watch.FileWatcher
- changes *watch.FileChanges
-
- tomb.Tomb // provides: Done, Kill, Dying
-
- lk sync.Mutex
-}
-
-var (
- // DefaultLogger is used when Config.Logger == nil
- DefaultLogger = log.New(os.Stderr, "", log.LstdFlags)
- // DiscardingLogger can be used to disable logging output
- DiscardingLogger = log.New(ioutil.Discard, "", 0)
-)
-
-// TailFile begins tailing the file. Output stream is made available
-// via the `Tail.Lines` channel. To handle errors during tailing,
-// invoke the `Wait` or `Err` method after finishing reading from the
-// `Lines` channel.
-func TailFile(filename string, config Config) (*Tail, error) {
- if config.ReOpen && !config.Follow {
- util.Fatal("cannot set ReOpen without Follow.")
- }
-
- t := &Tail{
- Filename: filename,
- Lines: make(chan *Line),
- Config: config,
- }
-
- // when Logger was not specified in config, use default logger
- if t.Logger == nil {
- t.Logger = log.New(os.Stderr, "", log.LstdFlags)
- }
-
- if t.Poll {
- t.watcher = watch.NewPollingFileWatcher(filename)
- } else {
- t.watcher = watch.NewInotifyFileWatcher(filename)
- }
-
- if t.MustExist {
- var err error
- t.file, err = OpenFile(t.Filename)
- if err != nil {
- return nil, err
- }
- }
-
- go t.tailFileSync()
-
- return t, nil
-}
-
-// Return the file's current position, like stdio's ftell().
-// But this value is not very accurate.
-// it may readed one line in the chan(tail.Lines),
-// so it may lost one line.
-func (tail *Tail) Tell() (offset int64, err error) {
- if tail.file == nil {
- return
- }
- offset, err = tail.file.Seek(0, os.SEEK_CUR)
- if err != nil {
- return
- }
-
- tail.lk.Lock()
- defer tail.lk.Unlock()
- if tail.reader == nil {
- return
- }
-
- offset -= int64(tail.reader.Buffered())
- return
-}
-
-// Stop stops the tailing activity.
-func (tail *Tail) Stop() error {
- tail.Kill(nil)
- return tail.Wait()
-}
-
-// StopAtEOF stops tailing as soon as the end of the file is reached.
-func (tail *Tail) StopAtEOF() error {
- tail.Kill(errStopAtEOF)
- return tail.Wait()
-}
-
-var errStopAtEOF = errors.New("tail: stop at eof")
-
-func (tail *Tail) close() {
- close(tail.Lines)
- tail.closeFile()
-}
-
-func (tail *Tail) closeFile() {
- if tail.file != nil {
- tail.file.Close()
- tail.file = nil
- }
-}
-
-func (tail *Tail) reopen() error {
- tail.closeFile()
- for {
- var err error
- tail.file, err = OpenFile(tail.Filename)
- if err != nil {
- if os.IsNotExist(err) {
- tail.Logger.Printf("Waiting for %s to appear...", tail.Filename)
- if err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {
- if err == tomb.ErrDying {
- return err
- }
- return fmt.Errorf("Failed to detect creation of %s: %s", tail.Filename, err)
- }
- continue
- }
- return fmt.Errorf("Unable to open file %s: %s", tail.Filename, err)
- }
- break
- }
- return nil
-}
-
-func (tail *Tail) readLine() (string, error) {
- tail.lk.Lock()
- line, err := tail.reader.ReadString('\n')
- tail.lk.Unlock()
- if err != nil {
- // Note ReadString "returns the data read before the error" in
- // case of an error, including EOF, so we return it as is. The
- // caller is expected to process it if err is EOF.
- return line, err
- }
-
- line = strings.TrimRight(line, "\n")
-
- return line, err
-}
-
-func (tail *Tail) tailFileSync() {
- defer tail.Done()
- defer tail.close()
-
- if !tail.MustExist {
- // deferred first open.
- err := tail.reopen()
- if err != nil {
- if err != tomb.ErrDying {
- tail.Kill(err)
- }
- return
- }
- }
-
- // Seek to requested location on first open of the file.
- if tail.Location != nil {
- _, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)
- tail.Logger.Printf("Seeked %s - %+v\n", tail.Filename, tail.Location)
- if err != nil {
- tail.Killf("Seek error on %s: %s", tail.Filename, err)
- return
- }
- }
-
- tail.openReader()
-
- var offset int64 = 0
- var err error
-
- // Read line by line.
- for {
- // do not seek in named pipes
- if !tail.Pipe {
- // grab the position in case we need to back up in the event of a half-line
- offset, err = tail.Tell()
- if err != nil {
- tail.Kill(err)
- return
- }
- }
-
- line, err := tail.readLine()
-
- // Process `line` even if err is EOF.
- if err == nil {
- cooloff := !tail.sendLine(line)
- if cooloff {
- // Wait a second before seeking till the end of
- // file when rate limit is reached.
- msg := fmt.Sprintf(
- "Too much log activity; waiting a second " +
- "before resuming tailing")
- tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}
- select {
- case <-time.After(time.Second):
- case <-tail.Dying():
- return
- }
- if err := tail.seekEnd(); err != nil {
- tail.Kill(err)
- return
- }
- }
- } else if err == io.EOF {
- if !tail.Follow {
- if line != "" {
- tail.sendLine(line)
- }
- return
- }
-
- if tail.Follow && line != "" {
- // this has the potential to never return the last line if
- // it's not followed by a newline; seems a fair trade here
- err := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})
- if err != nil {
- tail.Kill(err)
- return
- }
- }
-
- // When EOF is reached, wait for more data to become
- // available. Wait strategy is based on the `tail.watcher`
- // implementation (inotify or polling).
- err := tail.waitForChanges()
- if err != nil {
- if err != ErrStop {
- tail.Kill(err)
- }
- return
- }
- } else {
- // non-EOF error
- tail.Killf("Error reading %s: %s", tail.Filename, err)
- return
- }
-
- select {
- case <-tail.Dying():
- if tail.Err() == errStopAtEOF {
- continue
- }
- return
- default:
- }
- }
-}
-
-// waitForChanges waits until the file has been appended, deleted,
-// moved or truncated. When moved or deleted - the file will be
-// reopened if ReOpen is true. Truncated files are always reopened.
-func (tail *Tail) waitForChanges() error {
- if tail.changes == nil {
- pos, err := tail.file.Seek(0, os.SEEK_CUR)
- if err != nil {
- return err
- }
- tail.changes, err = tail.watcher.ChangeEvents(&tail.Tomb, pos)
- if err != nil {
- return err
- }
- }
-
- select {
- case <-tail.changes.Modified:
- return nil
- case <-tail.changes.Deleted:
- tail.changes = nil
- if tail.ReOpen {
- // XXX: we must not log from a library.
- tail.Logger.Printf("Re-opening moved/deleted file %s ...", tail.Filename)
- if err := tail.reopen(); err != nil {
- return err
- }
- tail.Logger.Printf("Successfully reopened %s", tail.Filename)
- tail.openReader()
- return nil
- } else {
- tail.Logger.Printf("Stopping tail as file no longer exists: %s", tail.Filename)
- return ErrStop
- }
- case <-tail.changes.Truncated:
- // Always reopen truncated files (Follow is true)
- tail.Logger.Printf("Re-opening truncated file %s ...", tail.Filename)
- if err := tail.reopen(); err != nil {
- return err
- }
- tail.Logger.Printf("Successfully reopened truncated %s", tail.Filename)
- tail.openReader()
- return nil
- case <-tail.Dying():
- return ErrStop
- }
- panic("unreachable")
-}
-
-func (tail *Tail) openReader() {
- if tail.MaxLineSize > 0 {
- // add 2 to account for newline characters
- tail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)
- } else {
- tail.reader = bufio.NewReader(tail.file)
- }
-}
-
-func (tail *Tail) seekEnd() error {
- return tail.seekTo(SeekInfo{Offset: 0, Whence: os.SEEK_END})
-}
-
-func (tail *Tail) seekTo(pos SeekInfo) error {
- _, err := tail.file.Seek(pos.Offset, pos.Whence)
- if err != nil {
- return fmt.Errorf("Seek error on %s: %s", tail.Filename, err)
- }
- // Reset the read buffer whenever the file is re-seek'ed
- tail.reader.Reset(tail.file)
- return nil
-}
-
-// sendLine sends the line(s) to Lines channel, splitting longer lines
-// if necessary. Return false if rate limit is reached.
-func (tail *Tail) sendLine(line string) bool {
- now := time.Now()
- lines := []string{line}
-
- // Split longer lines
- if tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {
- lines = util.PartitionString(line, tail.MaxLineSize)
- }
-
- for _, line := range lines {
- tail.Lines <- &Line{line, now, nil}
- }
-
- if tail.Config.RateLimiter != nil {
- ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
- if !ok {
- tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
- tail.Filename)
- return false
- }
- }
-
- return true
-}
-
-// Cleanup removes inotify watches added by the tail package. This function is
-// meant to be invoked from a process's exit handler. Linux kernel may not
-// automatically remove inotify watches after the process exits.
-func (tail *Tail) Cleanup() {
- watch.Cleanup(tail.Filename)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail_posix.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail_posix.go
deleted file mode 100644
index 34423dd806a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail_posix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build linux || darwin || freebsd || netbsd || openbsd
-// +build linux darwin freebsd netbsd openbsd
-
-package tail
-
-import (
- "os"
-)
-
-func OpenFile(name string) (file *os.File, err error) {
- return os.Open(name)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail_windows.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail_windows.go
deleted file mode 100644
index cf23482745e..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/tail_windows.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build windows
-// +build windows
-
-package tail
-
-import (
- "github.com/hpcloud/tail/winfile"
- "os"
-)
-
-func OpenFile(name string) (file *os.File, err error) {
- return winfile.OpenFile(name, os.O_RDONLY, 0)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/util/util.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/util/util.go
deleted file mode 100644
index 54151fe39f1..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/util/util.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package util
-
-import (
- "fmt"
- "log"
- "os"
- "runtime/debug"
-)
-
-type Logger struct {
- *log.Logger
-}
-
-var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
-
-// fatal is like panic except it displays only the current goroutine's stack.
-func Fatal(format string, v ...interface{}) {
- // https://github.com/hpcloud/log/blob/master/log.go#L45
- LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
- os.Exit(1)
-}
-
-// partitionString partitions the string into chunks of given size,
-// with the last chunk of variable size.
-func PartitionString(s string, chunkSize int) []string {
- if chunkSize <= 0 {
- panic("invalid chunkSize")
- }
- length := len(s)
- chunks := 1 + length/chunkSize
- start := 0
- end := chunkSize
- parts := make([]string, 0, chunks)
- for {
- if end > length {
- end = length
- }
- parts = append(parts, s[start:end])
- if end == length {
- break
- }
- start, end = end, end+chunkSize
- }
- return parts
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/filechanges.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/filechanges.go
deleted file mode 100644
index 3ce5dcecbb2..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/filechanges.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package watch
-
-type FileChanges struct {
- Modified chan bool // Channel to get notified of modifications
- Truncated chan bool // Channel to get notified of truncations
- Deleted chan bool // Channel to get notified of deletions/renames
-}
-
-func NewFileChanges() *FileChanges {
- return &FileChanges{
- make(chan bool), make(chan bool), make(chan bool)}
-}
-
-func (fc *FileChanges) NotifyModified() {
- sendOnlyIfEmpty(fc.Modified)
-}
-
-func (fc *FileChanges) NotifyTruncated() {
- sendOnlyIfEmpty(fc.Truncated)
-}
-
-func (fc *FileChanges) NotifyDeleted() {
- sendOnlyIfEmpty(fc.Deleted)
-}
-
-// sendOnlyIfEmpty sends on a bool channel only if the channel has no
-// backlog to be read by other goroutines. This concurrency pattern
-// can be used to notify other goroutines if and only if they are
-// looking for it (i.e., subsequent notifications can be compressed
-// into one).
-func sendOnlyIfEmpty(ch chan bool) {
- select {
- case ch <- true:
- default:
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/inotify.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/inotify.go
deleted file mode 100644
index 4478f1e1a01..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/inotify.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/hpcloud/tail/util"
-
- "gopkg.in/fsnotify.v1"
- "gopkg.in/tomb.v1"
-)
-
-// InotifyFileWatcher uses inotify to monitor file changes.
-type InotifyFileWatcher struct {
- Filename string
- Size int64
-}
-
-func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
- fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
- return fw
-}
-
-func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
- err := WatchCreate(fw.Filename)
- if err != nil {
- return err
- }
- defer RemoveWatchCreate(fw.Filename)
-
- // Do a real check now as the file might have been created before
- // calling `WatchFlags` above.
- if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
- // file exists, or stat returned an error.
- return err
- }
-
- events := Events(fw.Filename)
-
- for {
- select {
- case evt, ok := <-events:
- if !ok {
- return fmt.Errorf("inotify watcher has been closed")
- }
- evtName, err := filepath.Abs(evt.Name)
- if err != nil {
- return err
- }
- fwFilename, err := filepath.Abs(fw.Filename)
- if err != nil {
- return err
- }
- if evtName == fwFilename {
- return nil
- }
- case <-t.Dying():
- return tomb.ErrDying
- }
- }
- panic("unreachable")
-}
-
-func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
- err := Watch(fw.Filename)
- if err != nil {
- return nil, err
- }
-
- changes := NewFileChanges()
- fw.Size = pos
-
- go func() {
- defer RemoveWatch(fw.Filename)
-
- events := Events(fw.Filename)
-
- for {
- prevSize := fw.Size
-
- var evt fsnotify.Event
- var ok bool
-
- select {
- case evt, ok = <-events:
- if !ok {
- return
- }
- case <-t.Dying():
- return
- }
-
- switch {
- case evt.Op&fsnotify.Remove == fsnotify.Remove:
- fallthrough
-
- case evt.Op&fsnotify.Rename == fsnotify.Rename:
- changes.NotifyDeleted()
- return
-
- case evt.Op&fsnotify.Write == fsnotify.Write:
- fi, err := os.Stat(fw.Filename)
- if err != nil {
- if os.IsNotExist(err) {
- changes.NotifyDeleted()
- return
- }
- // XXX: report this error back to the user
- util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
- }
- fw.Size = fi.Size()
-
- if prevSize > 0 && prevSize > fw.Size {
- changes.NotifyTruncated()
- } else {
- changes.NotifyModified()
- }
- prevSize = fw.Size
- }
- }
- }()
-
- return changes, nil
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
deleted file mode 100644
index 03be4275ca2..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/inotify_tracker.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "log"
- "os"
- "path/filepath"
- "sync"
- "syscall"
-
- "github.com/hpcloud/tail/util"
-
- "gopkg.in/fsnotify.v1"
-)
-
-type InotifyTracker struct {
- mux sync.Mutex
- watcher *fsnotify.Watcher
- chans map[string]chan fsnotify.Event
- done map[string]chan bool
- watchNums map[string]int
- watch chan *watchInfo
- remove chan *watchInfo
- error chan error
-}
-
-type watchInfo struct {
- op fsnotify.Op
- fname string
-}
-
-func (this *watchInfo) isCreate() bool {
- return this.op == fsnotify.Create
-}
-
-var (
- // globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
- shared *InotifyTracker
-
- // these are used to ensure the shared InotifyTracker is run exactly once
- once = sync.Once{}
- goRun = func() {
- shared = &InotifyTracker{
- mux: sync.Mutex{},
- chans: make(map[string]chan fsnotify.Event),
- done: make(map[string]chan bool),
- watchNums: make(map[string]int),
- watch: make(chan *watchInfo),
- remove: make(chan *watchInfo),
- error: make(chan error),
- }
- go shared.run()
- }
-
- logger = log.New(os.Stderr, "", log.LstdFlags)
-)
-
-// Watch signals the run goroutine to begin watching the input filename
-func Watch(fname string) error {
- return watch(&watchInfo{
- fname: fname,
- })
-}
-
-// Watch create signals the run goroutine to begin watching the input filename
-// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate
-func WatchCreate(fname string) error {
- return watch(&watchInfo{
- op: fsnotify.Create,
- fname: fname,
- })
-}
-
-func watch(winfo *watchInfo) error {
- // start running the shared InotifyTracker if not already running
- once.Do(goRun)
-
- winfo.fname = filepath.Clean(winfo.fname)
- shared.watch <- winfo
- return <-shared.error
-}
-
-// RemoveWatch signals the run goroutine to remove the watch for the input filename
-func RemoveWatch(fname string) {
- remove(&watchInfo{
- fname: fname,
- })
-}
-
-// RemoveWatch create signals the run goroutine to remove the watch for the input filename
-func RemoveWatchCreate(fname string) {
- remove(&watchInfo{
- op: fsnotify.Create,
- fname: fname,
- })
-}
-
-func remove(winfo *watchInfo) {
- // start running the shared InotifyTracker if not already running
- once.Do(goRun)
-
- winfo.fname = filepath.Clean(winfo.fname)
- shared.mux.Lock()
- done := shared.done[winfo.fname]
- if done != nil {
- delete(shared.done, winfo.fname)
- close(done)
- }
-
- fname := winfo.fname
- if winfo.isCreate() {
- // Watch for new files to be created in the parent directory.
- fname = filepath.Dir(fname)
- }
- shared.watchNums[fname]--
- watchNum := shared.watchNums[fname]
- if watchNum == 0 {
- delete(shared.watchNums, fname)
- }
- shared.mux.Unlock()
-
- // If we were the last ones to watch this file, unsubscribe from inotify.
- // This needs to happen after releasing the lock because fsnotify waits
- // synchronously for the kernel to acknowledge the removal of the watch
- // for this file, which causes us to deadlock if we still held the lock.
- if watchNum == 0 {
- shared.watcher.Remove(fname)
- }
- shared.remove <- winfo
-}
-
-// Events returns a channel to which FileEvents corresponding to the input filename
-// will be sent. This channel will be closed when removeWatch is called on this
-// filename.
-func Events(fname string) <-chan fsnotify.Event {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- return shared.chans[fname]
-}
-
-// Cleanup removes the watch for the input filename if necessary.
-func Cleanup(fname string) {
- RemoveWatch(fname)
-}
-
-// watchFlags calls fsnotify.WatchFlags for the input filename and flags, creating
-// a new Watcher if the previous Watcher was closed.
-func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- if shared.chans[winfo.fname] == nil {
- shared.chans[winfo.fname] = make(chan fsnotify.Event)
- shared.done[winfo.fname] = make(chan bool)
- }
-
- fname := winfo.fname
- if winfo.isCreate() {
- // Watch for new files to be created in the parent directory.
- fname = filepath.Dir(fname)
- }
-
- // already in inotify watch
- if shared.watchNums[fname] > 0 {
- shared.watchNums[fname]++
- if winfo.isCreate() {
- shared.watchNums[winfo.fname]++
- }
- return nil
- }
-
- err := shared.watcher.Add(fname)
- if err == nil {
- shared.watchNums[fname]++
- if winfo.isCreate() {
- shared.watchNums[winfo.fname]++
- }
- }
- return err
-}
-
-// removeWatch calls fsnotify.RemoveWatch for the input filename and closes the
-// corresponding events channel.
-func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
- shared.mux.Lock()
- defer shared.mux.Unlock()
-
- ch := shared.chans[winfo.fname]
- if ch == nil {
- return
- }
-
- delete(shared.chans, winfo.fname)
- close(ch)
-
- if !winfo.isCreate() {
- return
- }
-
- shared.watchNums[winfo.fname]--
- if shared.watchNums[winfo.fname] == 0 {
- delete(shared.watchNums, winfo.fname)
- }
-}
-
-// sendEvent sends the input event to the appropriate Tail.
-func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
- name := filepath.Clean(event.Name)
-
- shared.mux.Lock()
- ch := shared.chans[name]
- done := shared.done[name]
- shared.mux.Unlock()
-
- if ch != nil && done != nil {
- select {
- case ch <- event:
- case <-done:
- }
- }
-}
-
-// run starts the goroutine in which the shared struct reads events from its
-// Watcher's Event channel and sends the events to the appropriate Tail.
-func (shared *InotifyTracker) run() {
- watcher, err := fsnotify.NewWatcher()
- if err != nil {
- util.Fatal("failed to create Watcher")
- }
- shared.watcher = watcher
-
- for {
- select {
- case winfo := <-shared.watch:
- shared.error <- shared.addWatch(winfo)
-
- case winfo := <-shared.remove:
- shared.removeWatch(winfo)
-
- case event, open := <-shared.watcher.Events:
- if !open {
- return
- }
- shared.sendEvent(event)
-
- case err, open := <-shared.watcher.Errors:
- if !open {
- return
- } else if err != nil {
- sysErr, ok := err.(*os.SyscallError)
- if !ok || sysErr.Err != syscall.EINTR {
- logger.Printf("Error in Watcher Error channel: %s", err)
- }
- }
- }
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/polling.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/polling.go
deleted file mode 100644
index 49491f21dbf..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/polling.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import (
- "os"
- "runtime"
- "time"
-
- "github.com/hpcloud/tail/util"
- "gopkg.in/tomb.v1"
-)
-
-// PollingFileWatcher polls the file for changes.
-type PollingFileWatcher struct {
- Filename string
- Size int64
-}
-
-func NewPollingFileWatcher(filename string) *PollingFileWatcher {
- fw := &PollingFileWatcher{filename, 0}
- return fw
-}
-
-var POLL_DURATION time.Duration
-
-func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
- for {
- if _, err := os.Stat(fw.Filename); err == nil {
- return nil
- } else if !os.IsNotExist(err) {
- return err
- }
- select {
- case <-time.After(POLL_DURATION):
- continue
- case <-t.Dying():
- return tomb.ErrDying
- }
- }
- panic("unreachable")
-}
-
-func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
- origFi, err := os.Stat(fw.Filename)
- if err != nil {
- return nil, err
- }
-
- changes := NewFileChanges()
- var prevModTime time.Time
-
- // XXX: use tomb.Tomb to cleanly manage these goroutines. replace
- // the fatal (below) with tomb's Kill.
-
- fw.Size = pos
-
- go func() {
- prevSize := fw.Size
- for {
- select {
- case <-t.Dying():
- return
- default:
- }
-
- time.Sleep(POLL_DURATION)
- fi, err := os.Stat(fw.Filename)
- if err != nil {
- // Windows cannot delete a file if a handle is still open (tail keeps one open)
- // so it gives access denied to anything trying to read it until all handles are released.
- if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
- // File does not exist (has been deleted).
- changes.NotifyDeleted()
- return
- }
-
- // XXX: report this error back to the user
- util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
- }
-
- // File got moved/renamed?
- if !os.SameFile(origFi, fi) {
- changes.NotifyDeleted()
- return
- }
-
- // File got truncated?
- fw.Size = fi.Size()
- if prevSize > 0 && prevSize > fw.Size {
- changes.NotifyTruncated()
- prevSize = fw.Size
- continue
- }
- // File got bigger?
- if prevSize > 0 && prevSize < fw.Size {
- changes.NotifyModified()
- prevSize = fw.Size
- continue
- }
- prevSize = fw.Size
-
- // File was appended to (changed)?
- modTime := fi.ModTime()
- if modTime != prevModTime {
- prevModTime = modTime
- changes.NotifyModified()
- }
- }
- }()
-
- return changes, nil
-}
-
-func init() {
- POLL_DURATION = 250 * time.Millisecond
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/watch.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/watch.go
deleted file mode 100644
index 2e1783ef0aa..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/watch/watch.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2015 HPE Software Inc. All rights reserved.
-// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
-
-package watch
-
-import "gopkg.in/tomb.v1"
-
-// FileWatcher monitors file-level events.
-type FileWatcher interface {
- // BlockUntilExists blocks until the file comes into existence.
- BlockUntilExists(*tomb.Tomb) error
-
- // ChangeEvents reports on changes to a file, be it modification,
- // deletion, renames or truncations. Returned FileChanges group of
- // channels will be closed, thus become unusable, after a deletion
- // or truncation event.
- // In order to properly report truncations, ChangeEvents requires
- // the caller to pass their current offset in the file.
- ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/winfile/winfile.go b/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/winfile/winfile.go
deleted file mode 100644
index 965794105be..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/hpcloud/tail/winfile/winfile.go
+++ /dev/null
@@ -1,93 +0,0 @@
-//go:build windows
-// +build windows
-
-package winfile
-
-import (
- "os"
- "syscall"
- "unsafe"
-)
-
-// issue also described here
-//https://codereview.appspot.com/8203043/
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L218
-func Open(path string, mode int, perm uint32) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- var access uint32
- switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
- case syscall.O_RDONLY:
- access = syscall.GENERIC_READ
- case syscall.O_WRONLY:
- access = syscall.GENERIC_WRITE
- case syscall.O_RDWR:
- access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
- }
- if mode&syscall.O_CREAT != 0 {
- access |= syscall.GENERIC_WRITE
- }
- if mode&syscall.O_APPEND != 0 {
- access &^= syscall.GENERIC_WRITE
- access |= syscall.FILE_APPEND_DATA
- }
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)
- var sa *syscall.SecurityAttributes
- if mode&syscall.O_CLOEXEC == 0 {
- sa = makeInheritSa()
- }
- var createmode uint32
- switch {
- case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
- createmode = syscall.CREATE_NEW
- case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
- createmode = syscall.CREATE_ALWAYS
- case mode&syscall.O_CREAT == syscall.O_CREAT:
- createmode = syscall.OPEN_ALWAYS
- case mode&syscall.O_TRUNC == syscall.O_TRUNC:
- createmode = syscall.TRUNCATE_EXISTING
- default:
- createmode = syscall.OPEN_EXISTING
- }
- h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0)
- return h, e
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/syscall/syscall_windows.go#L211
-func makeInheritSa() *syscall.SecurityAttributes {
- var sa syscall.SecurityAttributes
- sa.Length = uint32(unsafe.Sizeof(sa))
- sa.InheritHandle = 1
- return &sa
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_windows.go#L133
-func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {
- r, e := Open(name, flag|syscall.O_CLOEXEC, syscallMode(perm))
- if e != nil {
- return nil, e
- }
- return os.NewFile(uintptr(r), name), nil
-}
-
-// https://github.com/jnwhiteh/golang/blob/master/src/pkg/os/file_posix.go#L61
-func syscallMode(i os.FileMode) (o uint32) {
- o |= uint32(i.Perm())
- if i&os.ModeSetuid != 0 {
- o |= syscall.S_ISUID
- }
- if i&os.ModeSetgid != 0 {
- o |= syscall.S_ISGID
- }
- if i&os.ModeSticky != 0 {
- o |= syscall.S_ISVTX
- }
- // No mapping for Go's ModeTemporary (plan9 only).
- return
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/.gitignore b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/.gitignore
index 5bcf4bade0b..427454f8f1a 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/.gitignore
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/.gitignore
@@ -8,4 +8,6 @@
*.out
*.txt
-vendor/
\ No newline at end of file
+vendor/
+/removecomments
+/snake2camel
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/.travis.yml b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/.travis.yml
deleted file mode 100644
index e56cf7cc066..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-
-go:
- - 1.9.x
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - 1.13.x
- - tip
-
-before_install:
- - go get -t -v ./...
-
-script:
- - go test -race -coverprofile=coverage.txt -covermode=atomic
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/README.md b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/README.md
index cc902ec0e37..619475bfbba 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/README.md
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/README.md
@@ -1,22 +1,35 @@
-[![Build](https://img.shields.io/travis/leodido/go-urn/master.svg?style=for-the-badge)](https://travis-ci.org/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn)
+[![Build](https://img.shields.io/circleci/build/github/leodido/go-urn?style=for-the-badge)](https://app.circleci.com/pipelines/github/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn)
**A parser for URNs**.
-> As seen on [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1).
+> As seen on [RFC 2141](https://datatracker.ietf.org/doc/html/rfc2141), [RFC 7643](https://datatracker.ietf.org/doc/html/rfc7643#section-10), and on [RFC 8141](https://datatracker.ietf.org/doc/html/rfc8141).
[API documentation](https://godoc.org/github.com/leodido/go-urn).
+Starting with version 1.3 this library also supports [RFC 7643 SCIM URNs](https://datatracker.ietf.org/doc/html/rfc7643#section-10).
+
+Starting with version 1.4 this library also supports [RFC 8141 URNs (2017)](https://datatracker.ietf.org/doc/html/rfc8141).
+
## Installation
```
go get github.com/leodido/go-urn
```
+## Features
+
+1. RFC 2141 URNs parsing (default)
+2. RFC 8141 URNs parsing (supersedes RFC 2141)
+3. RFC 7643 SCIM URNs parsing
+4. Normalization as per RFCs
+5. Lexical equivalence as per RFCs
+6. Precise, fine-grained errors
+
## Performances
This implementation results to be really fast.
-Usually below ½ microsecond on my machine[1](#mymachine) .
+Usually below 400 ns on my machine[1](#mymachine) .
Notice it also performs, while parsing:
@@ -24,32 +37,117 @@ Notice it also performs, while parsing:
2. specific-string normalization
```
-ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op
-ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op
-ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op
-ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op
-ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op
-ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op
-ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op
-ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op
-ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op
-ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op
-ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op
-no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op
-no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op
-no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op
-no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op
-no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op
-no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op
-no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op
-no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op
-no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op
+ok/00/urn:a:b______________________________________/-10 51372006 109.0 ns/op 275 B/op 3 allocs/op
+ok/01/URN:foo:a123,456_____________________________/-10 36024072 160.8 ns/op 296 B/op 6 allocs/op
+ok/02/urn:foo:a123%2C456___________________________/-10 31901007 188.4 ns/op 320 B/op 7 allocs/op
+ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-10 22736756 266.6 ns/op 376 B/op 6 allocs/op
+ok/04/urn:ietf:params:scim:schemas:extension:enterp/-10 18291859 335.2 ns/op 408 B/op 6 allocs/op
+ok/05/urn:ietf:params:scim:schemas:extension:enterp/-10 15283087 379.4 ns/op 440 B/op 6 allocs/op
+ok/06/urn:burnout:nss______________________________/-10 39407593 155.1 ns/op 288 B/op 6 allocs/op
+ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-10 27832718 211.4 ns/op 307 B/op 4 allocs/op
+ok/08/urn:urnurnurn:urn____________________________/-10 33269596 168.1 ns/op 293 B/op 6 allocs/op
+ok/09/urn:ciao:!!*_________________________________/-10 41100675 148.8 ns/op 288 B/op 6 allocs/op
+ok/10/urn:ciao:=@__________________________________/-10 37214253 149.7 ns/op 284 B/op 6 allocs/op
+ok/11/urn:ciao:@!=%2C(xyz)+a,b.*@g=$_'_____________/-10 26534240 229.8 ns/op 336 B/op 7 allocs/op
+ok/12/URN:x:abc%1Dz%2F%3az_________________________/-10 28166396 211.8 ns/op 336 B/op 7 allocs/op
+no/13/URN:---xxx:x_________________________________/-10 23635159 255.6 ns/op 419 B/op 5 allocs/op
+no/14/urn::colon:nss_______________________________/-10 23594779 258.4 ns/op 419 B/op 5 allocs/op
+no/15/URN:@,:x_____________________________________/-10 23742535 261.5 ns/op 419 B/op 5 allocs/op
+no/16/URN:URN:NSS__________________________________/-10 27432714 223.3 ns/op 371 B/op 5 allocs/op
+no/17/urn:UrN:NSS__________________________________/-10 26922117 224.9 ns/op 371 B/op 5 allocs/op
+no/18/urn:a:%______________________________________/-10 24926733 224.6 ns/op 371 B/op 5 allocs/op
+no/19/urn:urn:NSS__________________________________/-10 27652641 220.7 ns/op 371 B/op 5 allocs/op
+```
+
+* [1] : Apple M1 Pro
+
+
+## Example
+
+For more examples take a look at the [examples file](examples_test.go).
+
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/leodido/go-urn"
+)
+
+func main() {
+ var uid = "URN:foo:a123,456"
+
+ // Parse the input string as a RFC 2141 URN only
+ u, e := urn.NewMachine().Parse(uid)
+ if e != nil {
+ fmt.Errorf(err)
+
+ return
+ }
+
+ fmt.Println(u.ID)
+ fmt.Println(u.SS)
+
+ // Output:
+ // foo
+ // a123,456
+}
+```
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/leodido/go-urn"
+)
+
+func main() {
+ var uid = "URN:foo:a123,456"
+
+ // Parse the input string as a RFC 2141 URN only
+ u, ok := urn.Parse([]byte(uid))
+ if !ok {
+ panic("error parsing urn")
+ }
+
+ fmt.Println(u.ID)
+ fmt.Println(u.SS)
+
+ // Output:
+ // foo
+ // a123,456
+}
```
----
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/leodido/go-urn"
+)
+
+func main() {
+ input := "urn:ietf:params:scim:api:messages:2.0:ListResponse"
-* [1] : Intel Core i7-7600U CPU @ 2.80GHz
+ // Parsing the input string as a RFC 7643 SCIM URN
+ u, ok := urn.Parse([]byte(input), urn.WithParsingMode(urn.RFC7643Only))
+ if !ok {
+ panic("error parsing urn")
+ }
----
+ fmt.Println(u.IsSCIM())
+ scim := u.SCIM()
+ fmt.Println(scim.Type.String())
+ fmt.Println(scim.Name)
+ fmt.Println(scim.Other)
-[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon)
\ No newline at end of file
+ // Output:
+ // true
+ // api
+ // messages
+ // 2.0:ListResponse
+}
+```
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.mod b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.mod
index 65bc1caf290..779cc5d581a 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.mod
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.mod
@@ -1,5 +1,11 @@
module github.com/leodido/go-urn
-go 1.13
+go 1.18
-require github.com/stretchr/testify v1.4.0
+require github.com/stretchr/testify v1.8.4
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.sum b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.sum
index 8fdee5854f1..fa4b6e6825c 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.sum
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/go.sum
@@ -1,11 +1,10 @@
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/kind.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/kind.go
new file mode 100644
index 00000000000..f5e140f0a43
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/kind.go
@@ -0,0 +1,10 @@
+package urn
+
+type Kind int
+
+const (
+ NONE Kind = iota
+ RFC2141
+ RFC7643
+ RFC8141
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go
index fe5a0cc8611..aec1ba69cb2 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go
@@ -2,27 +2,98 @@ package urn
import (
"fmt"
+
+ scimschema "github.com/leodido/go-urn/scim/schema"
)
var (
- errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]"
- errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]"
- errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]"
- errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]"
- errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]"
- errParse = "parsing error [col %d]"
+ errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]"
+ errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its beginning) [col %d]"
+ errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]"
+ errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]"
+ errHex = "expecting the percent encoded chars to be well-formed (%%alnum{2}) [col %d]"
+ errSCIMNamespace = "expecing the SCIM namespace identifier (ietf:params:scim) [col %d]"
+ errSCIMType = "expecting a correct SCIM type (schemas, api, param) [col %d]"
+ errSCIMName = "expecting one or more alnum char in the SCIM name part [col %d]"
+ errSCIMOther = "expecting a well-formed other SCIM part [col %d]"
+ errSCIMOtherIncomplete = "expecting a not empty SCIM other part after colon [col %d]"
+ err8141InformalID = "informal URN namespace must be in the form urn-[1-9][0-9] [col %d]"
+ err8141SpecificString = "expecting the specific string to contain alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] not in first position) chars [col %d]"
+ err8141Identifier = "expecting the indentifier to be a string with (length 2 to 32 chars) containing alnum (or dashes) not starting or ending with a dash [col %d]"
+ err8141RComponentStart = "expecting only one r-component (starting with the ?+ sequence) [col %d]"
+ err8141QComponentStart = "expecting only one q-component (starting with the ?= sequence) [col %d]"
+ err8141MalformedRComp = "expecting a non-empty r-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]"
+ err8141MalformedQComp = "expecting a non-empty q-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]"
)
+var _toStateActions []byte = []byte{
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 33, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+}
+
+var _eofActions []byte = []byte{
+ 0, 1, 1, 1, 1, 4, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 8, 9,
+ 9, 4, 4, 11, 1, 1, 1, 1,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 14, 14, 14, 14, 16, 18, 20,
+ 20, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 1, 1, 1, 1, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 24, 24, 25, 25, 0, 26, 28,
+ 28, 29, 29, 30, 30, 26, 26, 31,
+ 31, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 21,
+ 21, 22, 22, 22, 34, 34, 35, 37,
+ 37, 38, 40, 41, 41, 38, 42, 42,
+ 42, 44, 42, 48, 48, 48, 50, 44,
+ 50, 0,
+}
const start int = 1
-const firstFinal int = 44
+const firstFinal int = 172
-const enFail int = 46
+const enScimOnly int = 44
+const enRfc8141Only int = 83
+const enFail int = 193
const enMain int = 1
// Machine is the interface representing the FSM
type Machine interface {
Error() error
Parse(input []byte) (*URN, error)
+ WithParsingMode(ParsingMode)
}
type machine struct {
@@ -30,12 +101,24 @@ type machine struct {
cs int
p, pe, eof, pb int
err error
- tolower []int
+ startParsingAt int
+ parsingMode ParsingMode
+ parsingModeSet bool
}
// NewMachine creates a new FSM able to parse RFC 2141 strings.
-func NewMachine() Machine {
- m := &machine{}
+func NewMachine(options ...Option) Machine {
+ m := &machine{
+ parsingModeSet: false,
+ }
+
+ for _, o := range options {
+ o(m)
+ }
+ // Set default parsing mode
+ if !m.parsingModeSet {
+ m.WithParsingMode(DefaultParsingMode)
+ }
return m
}
@@ -51,7 +134,7 @@ func (m *machine) text() []byte {
return m.data[m.pb:m.p]
}
-// Parse parses the input byte array as a RFC 2141 string.
+// Parse parses the input byte array as a RFC 2141 or RFC7643 string.
func (m *machine) Parse(input []byte) (*URN, error) {
m.data = input
m.p = 0
@@ -59,1622 +142,4881 @@ func (m *machine) Parse(input []byte) (*URN, error) {
m.pe = len(input)
m.eof = len(input)
m.err = nil
- m.tolower = []int{}
- output := &URN{}
-
- {
- m.cs = start
+ m.cs = m.startParsingAt
+ output := &URN{
+ tolower: []int{},
}
-
{
if (m.p) == (m.pe) {
goto _testEof
}
+ if m.cs == 0 {
+ goto _out
+ }
+ _resume:
switch m.cs {
case 1:
- goto stCase1
+ switch (m.data)[(m.p)] {
+ case 85:
+ goto tr1
+ case 117:
+ goto tr1
+ }
+ goto tr0
case 0:
- goto stCase0
+ goto _out
case 2:
- goto stCase2
+ switch (m.data)[(m.p)] {
+ case 82:
+ goto tr2
+ case 114:
+ goto tr2
+ }
+ goto tr0
case 3:
- goto stCase3
+ switch (m.data)[(m.p)] {
+ case 78:
+ goto tr3
+ case 110:
+ goto tr3
+ }
+ goto tr0
case 4:
- goto stCase4
+ if (m.data)[(m.p)] == 58 {
+ goto tr4
+ }
+ goto tr0
case 5:
- goto stCase5
+ switch (m.data)[(m.p)] {
+ case 85:
+ goto tr7
+ case 117:
+ goto tr7
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr6
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr6
+ }
+ default:
+ goto tr6
+ }
+ goto tr5
case 6:
- goto stCase6
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr9
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr9
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr9
+ }
+ default:
+ goto tr9
+ }
+ goto tr8
case 7:
- goto stCase7
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr11
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr11
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr11
+ }
+ default:
+ goto tr11
+ }
+ goto tr8
case 8:
- goto stCase8
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr12
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr12
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr12
+ }
+ default:
+ goto tr12
+ }
+ goto tr8
case 9:
- goto stCase9
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr13
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr13
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr13
+ }
+ default:
+ goto tr13
+ }
+ goto tr8
case 10:
- goto stCase10
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr14
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr14
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr14
+ }
+ default:
+ goto tr14
+ }
+ goto tr8
case 11:
- goto stCase11
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr15
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr15
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr15
+ }
+ default:
+ goto tr15
+ }
+ goto tr8
case 12:
- goto stCase12
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr16
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr16
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr16
+ }
+ default:
+ goto tr16
+ }
+ goto tr8
case 13:
- goto stCase13
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr17
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr17
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr17
+ }
+ default:
+ goto tr17
+ }
+ goto tr8
case 14:
- goto stCase14
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr18
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr18
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr18
+ }
+ default:
+ goto tr18
+ }
+ goto tr8
case 15:
- goto stCase15
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr19
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr19
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr19
+ }
+ default:
+ goto tr19
+ }
+ goto tr8
case 16:
- goto stCase16
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr20
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr20
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr20
+ }
+ default:
+ goto tr20
+ }
+ goto tr8
case 17:
- goto stCase17
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr21
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr21
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr21
+ }
+ default:
+ goto tr21
+ }
+ goto tr8
case 18:
- goto stCase18
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr22
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr22
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr22
+ }
+ default:
+ goto tr22
+ }
+ goto tr8
case 19:
- goto stCase19
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr23
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr23
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr23
+ }
+ default:
+ goto tr23
+ }
+ goto tr8
case 20:
- goto stCase20
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr24
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr24
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr24
+ }
+ default:
+ goto tr24
+ }
+ goto tr8
case 21:
- goto stCase21
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr25
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr25
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr25
+ }
+ default:
+ goto tr25
+ }
+ goto tr8
case 22:
- goto stCase22
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr26
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr26
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr26
+ }
+ default:
+ goto tr26
+ }
+ goto tr8
case 23:
- goto stCase23
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr27
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr27
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr27
+ }
+ default:
+ goto tr27
+ }
+ goto tr8
case 24:
- goto stCase24
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr28
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr28
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr28
+ }
+ default:
+ goto tr28
+ }
+ goto tr8
case 25:
- goto stCase25
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr29
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr29
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr29
+ }
+ default:
+ goto tr29
+ }
+ goto tr8
case 26:
- goto stCase26
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr30
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr30
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr30
+ }
+ default:
+ goto tr30
+ }
+ goto tr8
case 27:
- goto stCase27
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr31
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr31
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr31
+ }
+ default:
+ goto tr31
+ }
+ goto tr8
case 28:
- goto stCase28
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr32
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr32
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr32
+ }
+ default:
+ goto tr32
+ }
+ goto tr8
case 29:
- goto stCase29
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr33
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr33
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr33
+ }
+ default:
+ goto tr33
+ }
+ goto tr8
case 30:
- goto stCase30
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr34
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr34
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr34
+ }
+ default:
+ goto tr34
+ }
+ goto tr8
case 31:
- goto stCase31
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr35
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr35
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr35
+ }
+ default:
+ goto tr35
+ }
+ goto tr8
case 32:
- goto stCase32
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr36
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr36
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr36
+ }
+ default:
+ goto tr36
+ }
+ goto tr8
case 33:
- goto stCase33
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr37
+ case 58:
+ goto tr10
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr37
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr37
+ }
+ default:
+ goto tr37
+ }
+ goto tr8
case 34:
- goto stCase34
- case 35:
- goto stCase35
- case 36:
- goto stCase36
- case 37:
- goto stCase37
- case 38:
- goto stCase38
- case 44:
- goto stCase44
- case 39:
- goto stCase39
- case 40:
- goto stCase40
- case 45:
- goto stCase45
- case 41:
- goto stCase41
- case 42:
- goto stCase42
- case 43:
- goto stCase43
- case 46:
- goto stCase46
- }
- goto stOut
- stCase1:
- switch (m.data)[(m.p)] {
- case 85:
- goto tr1
- case 117:
- goto tr1
- }
- goto tr0
- tr0:
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr3:
-
- m.err = fmt.Errorf(errPrefix, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr6:
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr41:
-
- m.err = fmt.Errorf(errSpecificString, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr44:
-
- m.err = fmt.Errorf(errHex, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errSpecificString, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr50:
-
- m.err = fmt.Errorf(errPrefix, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- tr52:
-
- m.err = fmt.Errorf(errNoUrnWithinID, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errIdentifier, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- m.err = fmt.Errorf(errParse, m.p)
- (m.p)--
-
- {
- goto st46
- }
-
- goto st0
- stCase0:
- st0:
- m.cs = 0
- goto _out
- tr1:
-
- m.pb = m.p
-
- goto st2
- st2:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof2
- }
- stCase2:
- switch (m.data)[(m.p)] {
- case 82:
- goto st3
- case 114:
- goto st3
- }
- goto tr0
- st3:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof3
- }
- stCase3:
- switch (m.data)[(m.p)] {
- case 78:
- goto st4
- case 110:
- goto st4
- }
- goto tr3
- st4:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof4
- }
- stCase4:
- if (m.data)[(m.p)] == 58 {
- goto tr5
- }
- goto tr0
- tr5:
-
- output.prefix = string(m.text())
-
- goto st5
- st5:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof5
- }
- stCase5:
- switch (m.data)[(m.p)] {
- case 85:
- goto tr8
- case 117:
- goto tr8
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto tr7
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr38
+ case 58:
+ goto tr10
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto tr7
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr38
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr38
+ }
+ default:
+ goto tr38
}
- default:
- goto tr7
- }
- goto tr6
- tr7:
-
- m.pb = m.p
-
- goto st6
- st6:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof6
- }
- stCase6:
- switch (m.data)[(m.p)] {
- case 45:
- goto st7
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st7
+ goto tr8
+ case 35:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr39
+ case 58:
+ goto tr10
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st7
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr39
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr39
+ }
+ default:
+ goto tr39
}
- default:
- goto st7
- }
- goto tr6
- st7:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof7
- }
- stCase7:
- switch (m.data)[(m.p)] {
- case 45:
- goto st8
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st8
+ goto tr8
+ case 36:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr40
+ case 58:
+ goto tr10
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st8
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr40
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr40
+ }
+ default:
+ goto tr40
}
- default:
- goto st8
- }
- goto tr6
- st8:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof8
- }
- stCase8:
- switch (m.data)[(m.p)] {
- case 45:
- goto st9
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st9
+ goto tr8
+ case 37:
+ if (m.data)[(m.p)] == 58 {
+ goto tr10
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st9
+ goto tr8
+ case 38:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr42
+ case 36:
+ goto tr42
+ case 37:
+ goto tr43
+ case 61:
+ goto tr42
+ case 95:
+ goto tr42
}
- default:
- goto st9
- }
- goto tr6
- st9:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof9
- }
- stCase9:
- switch (m.data)[(m.p)] {
- case 45:
- goto st10
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st10
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr42
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr42
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr42
+ }
+ default:
+ goto tr42
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st10
+ goto tr41
+ case 172:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr212
+ case 36:
+ goto tr212
+ case 37:
+ goto tr213
+ case 61:
+ goto tr212
+ case 95:
+ goto tr212
}
- default:
- goto st10
- }
- goto tr6
- st10:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof10
- }
- stCase10:
- switch (m.data)[(m.p)] {
- case 45:
- goto st11
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st11
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr212
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr212
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr212
+ }
+ default:
+ goto tr212
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st11
+ goto tr41
+ case 39:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr45
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr45
+ }
+ default:
+ goto tr46
}
- default:
- goto st11
- }
- goto tr6
- st11:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof11
- }
- stCase11:
- switch (m.data)[(m.p)] {
- case 45:
- goto st12
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st12
+ goto tr44
+ case 40:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr47
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr47
+ }
+ default:
+ goto tr48
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st12
+ goto tr44
+ case 173:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr212
+ case 36:
+ goto tr212
+ case 37:
+ goto tr213
+ case 61:
+ goto tr212
+ case 95:
+ goto tr212
}
- default:
- goto st12
- }
- goto tr6
- st12:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof12
- }
- stCase12:
- switch (m.data)[(m.p)] {
- case 45:
- goto st13
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st13
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr212
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr212
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr212
+ }
+ default:
+ goto tr212
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st13
+ goto tr44
+ case 41:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr9
+ case 58:
+ goto tr10
+ case 82:
+ goto tr49
+ case 114:
+ goto tr49
}
- default:
- goto st13
- }
- goto tr6
- st13:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof13
- }
- stCase13:
- switch (m.data)[(m.p)] {
- case 45:
- goto st14
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st14
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr9
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr9
+ }
+ default:
+ goto tr9
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st14
+ goto tr5
+ case 42:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr11
+ case 58:
+ goto tr10
+ case 78:
+ goto tr50
+ case 110:
+ goto tr50
}
- default:
- goto st14
- }
- goto tr6
- st14:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof14
- }
- stCase14:
- switch (m.data)[(m.p)] {
- case 45:
- goto st15
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st15
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr11
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr11
+ }
+ default:
+ goto tr11
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st15
+ goto tr5
+ case 43:
+ if (m.data)[(m.p)] == 45 {
+ goto tr12
}
- default:
- goto st15
- }
- goto tr6
- st15:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof15
- }
- stCase15:
- switch (m.data)[(m.p)] {
- case 45:
- goto st16
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st16
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr12
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr12
+ }
+ default:
+ goto tr12
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st16
+ goto tr51
+ case 44:
+ switch (m.data)[(m.p)] {
+ case 85:
+ goto tr52
+ case 117:
+ goto tr52
}
- default:
- goto st16
- }
- goto tr6
- st16:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof16
- }
- stCase16:
- switch (m.data)[(m.p)] {
+ goto tr0
case 45:
- goto st17
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st17
+ switch (m.data)[(m.p)] {
+ case 82:
+ goto tr53
+ case 114:
+ goto tr53
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st17
+ goto tr0
+ case 46:
+ switch (m.data)[(m.p)] {
+ case 78:
+ goto tr54
+ case 110:
+ goto tr54
}
- default:
- goto st17
- }
- goto tr6
- st17:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof17
- }
- stCase17:
- switch (m.data)[(m.p)] {
- case 45:
- goto st18
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st18
+ goto tr0
+ case 47:
+ if (m.data)[(m.p)] == 58 {
+ goto tr55
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st18
+ goto tr0
+ case 48:
+ if (m.data)[(m.p)] == 105 {
+ goto tr57
}
- default:
- goto st18
- }
- goto tr6
- st18:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof18
- }
- stCase18:
- switch (m.data)[(m.p)] {
- case 45:
- goto st19
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st19
+ goto tr56
+ case 49:
+ if (m.data)[(m.p)] == 101 {
+ goto tr58
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st19
+ goto tr56
+ case 50:
+ if (m.data)[(m.p)] == 116 {
+ goto tr59
}
- default:
- goto st19
- }
- goto tr6
- st19:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof19
- }
- stCase19:
- switch (m.data)[(m.p)] {
- case 45:
- goto st20
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st20
+ goto tr56
+ case 51:
+ if (m.data)[(m.p)] == 102 {
+ goto tr60
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st20
+ goto tr56
+ case 52:
+ if (m.data)[(m.p)] == 58 {
+ goto tr61
}
- default:
- goto st20
- }
- goto tr6
- st20:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof20
- }
- stCase20:
- switch (m.data)[(m.p)] {
- case 45:
- goto st21
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st21
+ goto tr56
+ case 53:
+ if (m.data)[(m.p)] == 112 {
+ goto tr62
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st21
+ goto tr56
+ case 54:
+ if (m.data)[(m.p)] == 97 {
+ goto tr63
}
- default:
- goto st21
- }
- goto tr6
- st21:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof21
- }
- stCase21:
- switch (m.data)[(m.p)] {
- case 45:
- goto st22
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st22
+ goto tr56
+ case 55:
+ if (m.data)[(m.p)] == 114 {
+ goto tr64
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st22
+ goto tr56
+ case 56:
+ if (m.data)[(m.p)] == 97 {
+ goto tr65
}
- default:
- goto st22
- }
- goto tr6
- st22:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof22
- }
- stCase22:
- switch (m.data)[(m.p)] {
- case 45:
- goto st23
+ goto tr56
+ case 57:
+ if (m.data)[(m.p)] == 109 {
+ goto tr66
+ }
+ goto tr56
case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st23
+ if (m.data)[(m.p)] == 115 {
+ goto tr67
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st23
+ goto tr56
+ case 59:
+ if (m.data)[(m.p)] == 58 {
+ goto tr68
}
- default:
- goto st23
- }
- goto tr6
- st23:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof23
- }
- stCase23:
- switch (m.data)[(m.p)] {
- case 45:
- goto st24
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st24
+ goto tr56
+ case 60:
+ if (m.data)[(m.p)] == 115 {
+ goto tr69
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st24
+ goto tr56
+ case 61:
+ if (m.data)[(m.p)] == 99 {
+ goto tr70
}
- default:
- goto st24
- }
- goto tr6
- st24:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof24
- }
- stCase24:
- switch (m.data)[(m.p)] {
- case 45:
- goto st25
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st25
+ goto tr56
+ case 62:
+ if (m.data)[(m.p)] == 105 {
+ goto tr71
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st25
+ goto tr56
+ case 63:
+ if (m.data)[(m.p)] == 109 {
+ goto tr72
}
- default:
- goto st25
- }
- goto tr6
- st25:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof25
- }
- stCase25:
- switch (m.data)[(m.p)] {
- case 45:
- goto st26
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st26
+ goto tr56
+ case 64:
+ if (m.data)[(m.p)] == 58 {
+ goto tr73
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st26
+ goto tr56
+ case 65:
+ switch (m.data)[(m.p)] {
+ case 97:
+ goto tr75
+ case 112:
+ goto tr76
+ case 115:
+ goto tr77
}
- default:
- goto st26
- }
- goto tr6
- st26:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof26
- }
- stCase26:
- switch (m.data)[(m.p)] {
- case 45:
- goto st27
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st27
+ goto tr74
+ case 66:
+ if (m.data)[(m.p)] == 112 {
+ goto tr78
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st27
+ goto tr74
+ case 67:
+ if (m.data)[(m.p)] == 105 {
+ goto tr79
}
- default:
- goto st27
- }
- goto tr6
- st27:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof27
- }
- stCase27:
- switch (m.data)[(m.p)] {
- case 45:
- goto st28
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st28
+ goto tr74
+ case 68:
+ if (m.data)[(m.p)] == 58 {
+ goto tr80
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st28
+ goto tr74
+ case 69:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr82
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr82
+ }
+ default:
+ goto tr82
}
- default:
- goto st28
- }
- goto tr6
- st28:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof28
- }
- stCase28:
- switch (m.data)[(m.p)] {
- case 45:
- goto st29
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st29
+ goto tr81
+ case 174:
+ if (m.data)[(m.p)] == 58 {
+ goto tr215
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st29
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr214
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr214
+ }
+ default:
+ goto tr214
}
- default:
- goto st29
- }
- goto tr6
- st29:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof29
- }
- stCase29:
- switch (m.data)[(m.p)] {
- case 45:
- goto st30
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st30
+ goto tr81
+ case 70:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr84
+ case 36:
+ goto tr84
+ case 37:
+ goto tr85
+ case 61:
+ goto tr84
+ case 95:
+ goto tr84
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st30
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr84
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr84
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr84
+ }
+ default:
+ goto tr84
}
- default:
- goto st30
- }
- goto tr6
- st30:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof30
- }
- stCase30:
- switch (m.data)[(m.p)] {
- case 45:
- goto st31
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st31
+ goto tr83
+ case 175:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr216
+ case 36:
+ goto tr216
+ case 37:
+ goto tr217
+ case 61:
+ goto tr216
+ case 95:
+ goto tr216
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st31
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr216
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr216
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr216
+ }
+ default:
+ goto tr216
}
- default:
- goto st31
- }
- goto tr6
- st31:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof31
- }
- stCase31:
- switch (m.data)[(m.p)] {
- case 45:
- goto st32
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st32
+ goto tr83
+ case 71:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr87
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr87
+ }
+ default:
+ goto tr88
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st32
+ goto tr86
+ case 72:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr89
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr89
+ }
+ default:
+ goto tr90
}
- default:
- goto st32
- }
- goto tr6
- st32:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof32
- }
- stCase32:
- switch (m.data)[(m.p)] {
- case 45:
- goto st33
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st33
+ goto tr86
+ case 176:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr216
+ case 36:
+ goto tr216
+ case 37:
+ goto tr217
+ case 61:
+ goto tr216
+ case 95:
+ goto tr216
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st33
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr216
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr216
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr216
+ }
+ default:
+ goto tr216
}
- default:
- goto st33
- }
- goto tr6
- st33:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof33
- }
- stCase33:
- switch (m.data)[(m.p)] {
- case 45:
- goto st34
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st34
+ goto tr86
+ case 73:
+ if (m.data)[(m.p)] == 97 {
+ goto tr91
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st34
+ goto tr74
+ case 74:
+ if (m.data)[(m.p)] == 114 {
+ goto tr92
}
- default:
- goto st34
- }
- goto tr6
- st34:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof34
- }
- stCase34:
- switch (m.data)[(m.p)] {
- case 45:
- goto st35
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st35
+ goto tr74
+ case 75:
+ if (m.data)[(m.p)] == 97 {
+ goto tr93
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st35
+ goto tr74
+ case 76:
+ if (m.data)[(m.p)] == 109 {
+ goto tr79
}
- default:
- goto st35
- }
- goto tr6
- st35:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof35
- }
- stCase35:
- switch (m.data)[(m.p)] {
- case 45:
- goto st36
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st36
+ goto tr74
+ case 77:
+ if (m.data)[(m.p)] == 99 {
+ goto tr94
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st36
+ goto tr74
+ case 78:
+ if (m.data)[(m.p)] == 104 {
+ goto tr95
}
- default:
- goto st36
- }
- goto tr6
- st36:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof36
- }
- stCase36:
- switch (m.data)[(m.p)] {
- case 45:
- goto st37
- case 58:
- goto tr10
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st37
+ goto tr74
+ case 79:
+ if (m.data)[(m.p)] == 101 {
+ goto tr96
}
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st37
+ goto tr74
+ case 80:
+ if (m.data)[(m.p)] == 109 {
+ goto tr97
}
- default:
- goto st37
- }
- goto tr6
- st37:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof37
- }
- stCase37:
- if (m.data)[(m.p)] == 58 {
- goto tr10
- }
- goto tr6
- tr10:
-
- output.ID = string(m.text())
-
- goto st38
- st38:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof38
- }
- stCase38:
- switch (m.data)[(m.p)] {
- case 33:
- goto tr42
- case 36:
- goto tr42
- case 37:
- goto tr43
- case 61:
- goto tr42
- case 95:
- goto tr42
- }
- switch {
- case (m.data)[(m.p)] < 48:
- if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
- goto tr42
+ goto tr74
+ case 81:
+ if (m.data)[(m.p)] == 97 {
+ goto tr98
+ }
+ goto tr74
+ case 82:
+ if (m.data)[(m.p)] == 115 {
+ goto tr79
+ }
+ goto tr74
+ case 83:
+ switch (m.data)[(m.p)] {
+ case 85:
+ goto tr99
+ case 117:
+ goto tr99
+ }
+ goto tr0
+ case 84:
+ switch (m.data)[(m.p)] {
+ case 82:
+ goto tr100
+ case 114:
+ goto tr100
+ }
+ goto tr0
+ case 85:
+ switch (m.data)[(m.p)] {
+ case 78:
+ goto tr101
+ case 110:
+ goto tr101
+ }
+ goto tr0
+ case 86:
+ if (m.data)[(m.p)] == 58 {
+ goto tr102
+ }
+ goto tr0
+ case 87:
+ switch (m.data)[(m.p)] {
+ case 85:
+ goto tr105
+ case 117:
+ goto tr105
}
- case (m.data)[(m.p)] > 59:
switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr104
+ }
case (m.data)[(m.p)] > 90:
if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto tr42
+ goto tr104
}
- case (m.data)[(m.p)] >= 64:
- goto tr42
+ default:
+ goto tr104
}
- default:
- goto tr42
- }
- goto tr41
- tr42:
-
- m.pb = m.p
-
- goto st44
- st44:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof44
- }
- stCase44:
- switch (m.data)[(m.p)] {
- case 33:
- goto st44
- case 36:
- goto st44
- case 37:
- goto st39
- case 61:
- goto st44
+ goto tr103
+ case 88:
+ if (m.data)[(m.p)] == 45 {
+ goto tr107
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr108
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr108
+ }
+ default:
+ goto tr108
+ }
+ goto tr106
+ case 89:
+ if (m.data)[(m.p)] == 45 {
+ goto tr109
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr110
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr110
+ }
+ default:
+ goto tr110
+ }
+ goto tr106
+ case 90:
+ if (m.data)[(m.p)] == 45 {
+ goto tr111
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr112
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr112
+ }
+ default:
+ goto tr112
+ }
+ goto tr106
+ case 91:
+ if (m.data)[(m.p)] == 45 {
+ goto tr113
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr114
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr114
+ }
+ default:
+ goto tr114
+ }
+ goto tr106
+ case 92:
+ if (m.data)[(m.p)] == 45 {
+ goto tr115
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr116
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr116
+ }
+ default:
+ goto tr116
+ }
+ goto tr106
+ case 93:
+ if (m.data)[(m.p)] == 45 {
+ goto tr117
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr118
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr118
+ }
+ default:
+ goto tr118
+ }
+ goto tr106
+ case 94:
+ if (m.data)[(m.p)] == 45 {
+ goto tr119
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr120
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr120
+ }
+ default:
+ goto tr120
+ }
+ goto tr106
case 95:
- goto st44
- }
- switch {
- case (m.data)[(m.p)] < 48:
- if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
- goto st44
+ if (m.data)[(m.p)] == 45 {
+ goto tr121
}
- case (m.data)[(m.p)] > 59:
switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr122
+ }
case (m.data)[(m.p)] > 90:
if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st44
+ goto tr122
}
- case (m.data)[(m.p)] >= 64:
- goto st44
+ default:
+ goto tr122
}
- default:
- goto st44
- }
- goto tr41
- tr43:
+ goto tr106
+ case 96:
+ if (m.data)[(m.p)] == 45 {
+ goto tr123
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr124
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr124
+ }
+ default:
+ goto tr124
+ }
+ goto tr106
+ case 97:
+ if (m.data)[(m.p)] == 45 {
+ goto tr125
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr126
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr126
+ }
+ default:
+ goto tr126
+ }
+ goto tr106
+ case 98:
+ if (m.data)[(m.p)] == 45 {
+ goto tr127
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr128
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr128
+ }
+ default:
+ goto tr128
+ }
+ goto tr106
+ case 99:
+ if (m.data)[(m.p)] == 45 {
+ goto tr129
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr130
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr130
+ }
+ default:
+ goto tr130
+ }
+ goto tr106
+ case 100:
+ if (m.data)[(m.p)] == 45 {
+ goto tr131
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr132
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr132
+ }
+ default:
+ goto tr132
+ }
+ goto tr106
+ case 101:
+ if (m.data)[(m.p)] == 45 {
+ goto tr133
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr134
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr134
+ }
+ default:
+ goto tr134
+ }
+ goto tr106
+ case 102:
+ if (m.data)[(m.p)] == 45 {
+ goto tr135
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr136
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr136
+ }
+ default:
+ goto tr136
+ }
+ goto tr106
+ case 103:
+ if (m.data)[(m.p)] == 45 {
+ goto tr137
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr138
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr138
+ }
+ default:
+ goto tr138
+ }
+ goto tr106
+ case 104:
+ if (m.data)[(m.p)] == 45 {
+ goto tr139
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr140
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr140
+ }
+ default:
+ goto tr140
+ }
+ goto tr106
+ case 105:
+ if (m.data)[(m.p)] == 45 {
+ goto tr141
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr142
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr142
+ }
+ default:
+ goto tr142
+ }
+ goto tr106
+ case 106:
+ if (m.data)[(m.p)] == 45 {
+ goto tr143
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr144
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr144
+ }
+ default:
+ goto tr144
+ }
+ goto tr106
+ case 107:
+ if (m.data)[(m.p)] == 45 {
+ goto tr145
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr146
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr146
+ }
+ default:
+ goto tr146
+ }
+ goto tr106
+ case 108:
+ if (m.data)[(m.p)] == 45 {
+ goto tr147
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr148
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr148
+ }
+ default:
+ goto tr148
+ }
+ goto tr106
+ case 109:
+ if (m.data)[(m.p)] == 45 {
+ goto tr149
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr150
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr150
+ }
+ default:
+ goto tr150
+ }
+ goto tr106
+ case 110:
+ if (m.data)[(m.p)] == 45 {
+ goto tr151
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr152
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr152
+ }
+ default:
+ goto tr152
+ }
+ goto tr106
+ case 111:
+ if (m.data)[(m.p)] == 45 {
+ goto tr153
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr154
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr154
+ }
+ default:
+ goto tr154
+ }
+ goto tr106
+ case 112:
+ if (m.data)[(m.p)] == 45 {
+ goto tr155
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr156
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr156
+ }
+ default:
+ goto tr156
+ }
+ goto tr106
+ case 113:
+ if (m.data)[(m.p)] == 45 {
+ goto tr157
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr158
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr158
+ }
+ default:
+ goto tr158
+ }
+ goto tr106
+ case 114:
+ if (m.data)[(m.p)] == 45 {
+ goto tr159
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr160
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr160
+ }
+ default:
+ goto tr160
+ }
+ goto tr106
+ case 115:
+ if (m.data)[(m.p)] == 45 {
+ goto tr161
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr162
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr162
+ }
+ default:
+ goto tr162
+ }
+ goto tr106
+ case 116:
+ if (m.data)[(m.p)] == 45 {
+ goto tr163
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr164
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr164
+ }
+ default:
+ goto tr164
+ }
+ goto tr106
+ case 117:
+ if (m.data)[(m.p)] == 45 {
+ goto tr165
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr166
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr166
+ }
+ default:
+ goto tr166
+ }
+ goto tr106
+ case 118:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr167
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr167
+ }
+ default:
+ goto tr167
+ }
+ goto tr106
+ case 119:
+ if (m.data)[(m.p)] == 58 {
+ goto tr168
+ }
+ goto tr106
+ case 120:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr170
+ case 37:
+ goto tr171
+ case 61:
+ goto tr170
+ case 95:
+ goto tr170
+ case 126:
+ goto tr170
+ }
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr170
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr170
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr170
+ }
+ default:
+ goto tr170
+ }
+ goto tr169
+ case 177:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr218
+ case 35:
+ goto tr219
+ case 37:
+ goto tr220
+ case 61:
+ goto tr218
+ case 63:
+ goto tr221
+ case 95:
+ goto tr218
+ case 126:
+ goto tr218
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr218
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr218
+ }
+ default:
+ goto tr218
+ }
+ goto tr169
+ case 178:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr222
+ case 37:
+ goto tr223
+ case 61:
+ goto tr222
+ case 95:
+ goto tr222
+ case 126:
+ goto tr222
+ }
+ switch {
+ case (m.data)[(m.p)] < 63:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr222
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr222
+ }
+ default:
+ goto tr222
+ }
+ goto tr183
+ case 179:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr224
+ case 37:
+ goto tr225
+ case 61:
+ goto tr224
+ case 95:
+ goto tr224
+ case 126:
+ goto tr224
+ }
+ switch {
+ case (m.data)[(m.p)] < 63:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr224
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr224
+ }
+ default:
+ goto tr224
+ }
+ goto tr183
+ case 121:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr173
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr173
+ }
+ default:
+ goto tr174
+ }
+ goto tr172
+ case 122:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr175
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr175
+ }
+ default:
+ goto tr176
+ }
+ goto tr172
+ case 180:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr224
+ case 37:
+ goto tr225
+ case 61:
+ goto tr224
+ case 95:
+ goto tr224
+ case 126:
+ goto tr224
+ }
+ switch {
+ case (m.data)[(m.p)] < 63:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr224
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr224
+ }
+ default:
+ goto tr224
+ }
+ goto tr172
+ case 123:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr178
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr178
+ }
+ default:
+ goto tr179
+ }
+ goto tr177
+ case 124:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr180
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr180
+ }
+ default:
+ goto tr181
+ }
+ goto tr177
+ case 181:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr218
+ case 35:
+ goto tr219
+ case 37:
+ goto tr220
+ case 61:
+ goto tr218
+ case 63:
+ goto tr221
+ case 95:
+ goto tr218
+ case 126:
+ goto tr218
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr218
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr218
+ }
+ default:
+ goto tr218
+ }
+ goto tr177
+ case 125:
+ switch (m.data)[(m.p)] {
+ case 43:
+ goto tr182
+ case 61:
+ goto tr184
+ }
+ goto tr183
+ case 126:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr186
+ case 37:
+ goto tr187
+ case 61:
+ goto tr186
+ case 63:
+ goto tr188
+ case 95:
+ goto tr186
+ case 126:
+ goto tr186
+ }
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr186
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr186
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr186
+ }
+ default:
+ goto tr186
+ }
+ goto tr185
+ case 182:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr226
+ case 35:
+ goto tr227
+ case 37:
+ goto tr228
+ case 61:
+ goto tr226
+ case 63:
+ goto tr229
+ case 95:
+ goto tr226
+ case 126:
+ goto tr226
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr226
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr226
+ }
+ default:
+ goto tr226
+ }
+ goto tr185
+ case 127:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr190
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr190
+ }
+ default:
+ goto tr191
+ }
+ goto tr189
+ case 128:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr192
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr192
+ }
+ default:
+ goto tr193
+ }
+ goto tr189
+ case 183:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr226
+ case 35:
+ goto tr227
+ case 37:
+ goto tr228
+ case 61:
+ goto tr226
+ case 63:
+ goto tr229
+ case 95:
+ goto tr226
+ case 126:
+ goto tr226
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr226
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr226
+ }
+ default:
+ goto tr226
+ }
+ goto tr189
+ case 184:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr226
+ case 35:
+ goto tr227
+ case 37:
+ goto tr228
+ case 43:
+ goto tr230
+ case 61:
+ goto tr231
+ case 63:
+ goto tr229
+ case 95:
+ goto tr226
+ case 126:
+ goto tr226
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr226
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr226
+ }
+ default:
+ goto tr226
+ }
+ goto tr185
+ case 185:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr232
+ case 35:
+ goto tr233
+ case 37:
+ goto tr234
+ case 47:
+ goto tr226
+ case 61:
+ goto tr232
+ case 63:
+ goto tr235
+ case 95:
+ goto tr232
+ case 126:
+ goto tr232
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr232
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr232
+ }
+ default:
+ goto tr232
+ }
+ goto tr185
+ case 186:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr204
+ case 35:
+ goto tr227
+ case 37:
+ goto tr237
+ case 47:
+ goto tr226
+ case 61:
+ goto tr204
+ case 63:
+ goto tr229
+ case 95:
+ goto tr204
+ case 126:
+ goto tr204
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr204
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr204
+ }
+ default:
+ goto tr204
+ }
+ goto tr236
+ case 187:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr238
+ case 35:
+ goto tr239
+ case 37:
+ goto tr240
+ case 61:
+ goto tr238
+ case 63:
+ goto tr241
+ case 95:
+ goto tr238
+ case 126:
+ goto tr238
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr238
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr238
+ }
+ default:
+ goto tr238
+ }
+ goto tr203
+ case 129:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr195
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr195
+ }
+ default:
+ goto tr196
+ }
+ goto tr194
+ case 130:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr197
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr197
+ }
+ default:
+ goto tr198
+ }
+ goto tr194
+ case 188:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr238
+ case 35:
+ goto tr239
+ case 37:
+ goto tr240
+ case 61:
+ goto tr238
+ case 63:
+ goto tr241
+ case 95:
+ goto tr238
+ case 126:
+ goto tr238
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr238
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr238
+ }
+ default:
+ goto tr238
+ }
+ goto tr194
+ case 189:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr238
+ case 35:
+ goto tr239
+ case 37:
+ goto tr240
+ case 61:
+ goto tr242
+ case 63:
+ goto tr241
+ case 95:
+ goto tr238
+ case 126:
+ goto tr238
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr238
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr238
+ }
+ default:
+ goto tr238
+ }
+ goto tr203
+ case 190:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr243
+ case 35:
+ goto tr244
+ case 37:
+ goto tr245
+ case 47:
+ goto tr238
+ case 61:
+ goto tr243
+ case 63:
+ goto tr246
+ case 95:
+ goto tr243
+ case 126:
+ goto tr243
+ }
+ switch {
+ case (m.data)[(m.p)] < 64:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 59 {
+ goto tr243
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr243
+ }
+ default:
+ goto tr243
+ }
+ goto tr203
+ case 131:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr200
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr200
+ }
+ default:
+ goto tr201
+ }
+ goto tr199
+ case 132:
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr197
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr197
+ }
+ default:
+ goto tr198
+ }
+ goto tr199
+ case 133:
+ if (m.data)[(m.p)] == 43 {
+ goto tr202
+ }
+ goto tr185
+ case 191:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr232
+ case 35:
+ goto tr233
+ case 37:
+ goto tr234
+ case 61:
+ goto tr232
+ case 63:
+ goto tr247
+ case 95:
+ goto tr232
+ case 126:
+ goto tr232
+ }
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr232
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr232
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr232
+ }
+ default:
+ goto tr232
+ }
+ goto tr185
+ case 134:
+ switch (m.data)[(m.p)] {
+ case 43:
+ goto tr202
+ case 61:
+ goto tr184
+ }
+ goto tr185
+ case 135:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr204
+ case 37:
+ goto tr205
+ case 61:
+ goto tr204
+ case 63:
+ goto tr206
+ case 95:
+ goto tr204
+ case 126:
+ goto tr204
+ }
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr204
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr204
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr204
+ }
+ default:
+ goto tr204
+ }
+ goto tr203
+ case 136:
+ if (m.data)[(m.p)] == 61 {
+ goto tr207
+ }
+ goto tr203
+ case 192:
+ switch (m.data)[(m.p)] {
+ case 33:
+ goto tr243
+ case 35:
+ goto tr244
+ case 37:
+ goto tr245
+ case 61:
+ goto tr243
+ case 63:
+ goto tr248
+ case 95:
+ goto tr243
+ case 126:
+ goto tr243
+ }
+ switch {
+ case (m.data)[(m.p)] < 48:
+ if 36 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
+ goto tr243
+ }
+ case (m.data)[(m.p)] > 59:
+ switch {
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr243
+ }
+ case (m.data)[(m.p)] >= 64:
+ goto tr243
+ }
+ default:
+ goto tr243
+ }
+ goto tr203
+ case 137:
+ if (m.data)[(m.p)] == 58 {
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr167
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr167
+ }
+ default:
+ goto tr167
+ }
+ goto tr106
+ case 138:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr165
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr166
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr166
+ }
+ default:
+ goto tr166
+ }
+ goto tr106
+ case 139:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr163
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr164
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr164
+ }
+ default:
+ goto tr164
+ }
+ goto tr106
+ case 140:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr161
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr162
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr162
+ }
+ default:
+ goto tr162
+ }
+ goto tr106
+ case 141:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr159
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr160
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr160
+ }
+ default:
+ goto tr160
+ }
+ goto tr106
+ case 142:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr157
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr158
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr158
+ }
+ default:
+ goto tr158
+ }
+ goto tr106
+ case 143:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr155
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr156
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr156
+ }
+ default:
+ goto tr156
+ }
+ goto tr106
+ case 144:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr153
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr154
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr154
+ }
+ default:
+ goto tr154
+ }
+ goto tr106
+ case 145:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr151
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr152
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr152
+ }
+ default:
+ goto tr152
+ }
+ goto tr106
+ case 146:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr149
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr150
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr150
+ }
+ default:
+ goto tr150
+ }
+ goto tr106
+ case 147:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr147
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr148
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr148
+ }
+ default:
+ goto tr148
+ }
+ goto tr106
+ case 148:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr145
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr146
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr146
+ }
+ default:
+ goto tr146
+ }
+ goto tr106
+ case 149:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr143
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr144
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr144
+ }
+ default:
+ goto tr144
+ }
+ goto tr106
+ case 150:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr141
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr142
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr142
+ }
+ default:
+ goto tr142
+ }
+ goto tr106
+ case 151:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr139
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr140
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr140
+ }
+ default:
+ goto tr140
+ }
+ goto tr106
+ case 152:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr137
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr138
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr138
+ }
+ default:
+ goto tr138
+ }
+ goto tr106
+ case 153:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr135
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr136
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr136
+ }
+ default:
+ goto tr136
+ }
+ goto tr106
+ case 154:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr133
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr134
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr134
+ }
+ default:
+ goto tr134
+ }
+ goto tr106
+ case 155:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr131
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr132
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr132
+ }
+ default:
+ goto tr132
+ }
+ goto tr106
+ case 156:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr129
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr130
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr130
+ }
+ default:
+ goto tr130
+ }
+ goto tr106
+ case 157:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr127
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr128
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr128
+ }
+ default:
+ goto tr128
+ }
+ goto tr106
+ case 158:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr125
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr126
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr126
+ }
+ default:
+ goto tr126
+ }
+ goto tr106
+ case 159:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr123
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr124
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr124
+ }
+ default:
+ goto tr124
+ }
+ goto tr106
+ case 160:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr121
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr122
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr122
+ }
+ default:
+ goto tr122
+ }
+ goto tr106
+ case 161:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr119
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr120
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr120
+ }
+ default:
+ goto tr120
+ }
+ goto tr106
+ case 162:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr117
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr118
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr118
+ }
+ default:
+ goto tr118
+ }
+ goto tr106
+ case 163:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr115
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr116
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr116
+ }
+ default:
+ goto tr116
+ }
+ goto tr106
+ case 164:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr113
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr114
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr114
+ }
+ default:
+ goto tr114
+ }
+ goto tr106
+ case 165:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr111
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr112
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr112
+ }
+ default:
+ goto tr112
+ }
+ goto tr106
+ case 166:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr109
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr110
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr110
+ }
+ default:
+ goto tr110
+ }
+ goto tr106
+ case 167:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr107
+ case 82:
+ goto tr208
+ case 114:
+ goto tr208
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr108
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr108
+ }
+ default:
+ goto tr108
+ }
+ goto tr103
+ case 168:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr109
+ case 58:
+ goto tr168
+ case 78:
+ goto tr209
+ case 110:
+ goto tr209
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr110
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr110
+ }
+ default:
+ goto tr110
+ }
+ goto tr103
+ case 169:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr210
+ case 58:
+ goto tr168
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr112
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr112
+ }
+ default:
+ goto tr112
+ }
+ goto tr106
+ case 170:
+ switch (m.data)[(m.p)] {
+ case 45:
+ goto tr113
+ case 48:
+ goto tr211
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 49 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr114
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr211
+ }
+ default:
+ goto tr211
+ }
+ goto tr106
+ case 171:
+ if (m.data)[(m.p)] == 45 {
+ goto tr115
+ }
+ switch {
+ case (m.data)[(m.p)] < 65:
+ if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
+ goto tr116
+ }
+ case (m.data)[(m.p)] > 90:
+ if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
+ goto tr116
+ }
+ default:
+ goto tr116
+ }
+ goto tr106
+ case 193:
+ switch (m.data)[(m.p)] {
+ case 10:
+ goto tr183
+ case 13:
+ goto tr183
+ }
+ goto tr249
+ }
+
+ tr183:
+ m.cs = 0
+ goto _again
+ tr0:
+ m.cs = 0
+ goto f0
+ tr5:
+ m.cs = 0
+ goto f3
+ tr8:
+ m.cs = 0
+ goto f5
+ tr41:
+ m.cs = 0
+ goto f7
+ tr44:
+ m.cs = 0
+ goto f8
+ tr51:
+ m.cs = 0
+ goto f10
+ tr56:
+ m.cs = 0
+ goto f11
+ tr74:
+ m.cs = 0
+ goto f13
+ tr81:
+ m.cs = 0
+ goto f15
+ tr83:
+ m.cs = 0
+ goto f17
+ tr86:
+ m.cs = 0
+ goto f19
+ tr103:
+ m.cs = 0
+ goto f20
+ tr106:
+ m.cs = 0
+ goto f21
+ tr169:
+ m.cs = 0
+ goto f22
+ tr172:
+ m.cs = 0
+ goto f23
+ tr177:
+ m.cs = 0
+ goto f24
+ tr185:
+ m.cs = 0
+ goto f25
+ tr189:
+ m.cs = 0
+ goto f27
+ tr194:
+ m.cs = 0
+ goto f28
+ tr199:
+ m.cs = 0
+ goto f29
+ tr203:
+ m.cs = 0
+ goto f30
+ tr236:
+ m.cs = 0
+ goto f46
+ tr1:
+ m.cs = 2
+ goto f1
+ tr2:
+ m.cs = 3
+ goto _again
+ tr3:
+ m.cs = 4
+ goto _again
+ tr4:
+ m.cs = 5
+ goto f2
+ tr6:
+ m.cs = 6
+ goto f4
+ tr9:
+ m.cs = 7
+ goto _again
+ tr11:
+ m.cs = 8
+ goto _again
+ tr12:
+ m.cs = 9
+ goto _again
+ tr13:
+ m.cs = 10
+ goto _again
+ tr14:
+ m.cs = 11
+ goto _again
+ tr15:
+ m.cs = 12
+ goto _again
+ tr16:
+ m.cs = 13
+ goto _again
+ tr17:
+ m.cs = 14
+ goto _again
+ tr18:
+ m.cs = 15
+ goto _again
+ tr19:
+ m.cs = 16
+ goto _again
+ tr20:
+ m.cs = 17
+ goto _again
+ tr21:
+ m.cs = 18
+ goto _again
+ tr22:
+ m.cs = 19
+ goto _again
+ tr23:
+ m.cs = 20
+ goto _again
+ tr24:
+ m.cs = 21
+ goto _again
+ tr25:
+ m.cs = 22
+ goto _again
+ tr26:
+ m.cs = 23
+ goto _again
+ tr27:
+ m.cs = 24
+ goto _again
+ tr28:
+ m.cs = 25
+ goto _again
+ tr29:
+ m.cs = 26
+ goto _again
+ tr30:
+ m.cs = 27
+ goto _again
+ tr31:
+ m.cs = 28
+ goto _again
+ tr32:
+ m.cs = 29
+ goto _again
+ tr33:
+ m.cs = 30
+ goto _again
+ tr34:
+ m.cs = 31
+ goto _again
+ tr35:
+ m.cs = 32
+ goto _again
+ tr36:
+ m.cs = 33
+ goto _again
+ tr37:
+ m.cs = 34
+ goto _again
+ tr38:
+ m.cs = 35
+ goto _again
+ tr39:
+ m.cs = 36
+ goto _again
+ tr40:
+ m.cs = 37
+ goto _again
+ tr10:
+ m.cs = 38
+ goto f6
+ tr213:
+ m.cs = 39
+ goto _again
+ tr43:
+ m.cs = 39
+ goto f4
+ tr45:
+ m.cs = 40
+ goto _again
+ tr46:
+ m.cs = 40
+ goto f9
+ tr7:
+ m.cs = 41
+ goto f1
+ tr49:
+ m.cs = 42
+ goto _again
+ tr50:
+ m.cs = 43
+ goto _again
+ tr52:
+ m.cs = 45
+ goto f1
+ tr53:
+ m.cs = 46
+ goto _again
+ tr54:
+ m.cs = 47
+ goto _again
+ tr55:
+ m.cs = 48
+ goto f2
+ tr57:
+ m.cs = 49
+ goto f4
+ tr58:
+ m.cs = 50
+ goto _again
+ tr59:
+ m.cs = 51
+ goto _again
+ tr60:
+ m.cs = 52
+ goto _again
+ tr61:
+ m.cs = 53
+ goto _again
+ tr62:
+ m.cs = 54
+ goto _again
+ tr63:
+ m.cs = 55
+ goto _again
+ tr64:
+ m.cs = 56
+ goto _again
+ tr65:
+ m.cs = 57
+ goto _again
+ tr66:
+ m.cs = 58
+ goto _again
+ tr67:
+ m.cs = 59
+ goto _again
+ tr68:
+ m.cs = 60
+ goto _again
+ tr69:
+ m.cs = 61
+ goto _again
+ tr70:
+ m.cs = 62
+ goto _again
+ tr71:
+ m.cs = 63
+ goto _again
+ tr72:
+ m.cs = 64
+ goto _again
+ tr73:
+ m.cs = 65
+ goto f12
+ tr75:
+ m.cs = 66
+ goto f4
+ tr78:
+ m.cs = 67
+ goto _again
+ tr79:
+ m.cs = 68
+ goto _again
+ tr80:
+ m.cs = 69
+ goto f14
+ tr215:
+ m.cs = 70
+ goto f35
+ tr217:
+ m.cs = 71
+ goto _again
+ tr85:
+ m.cs = 71
+ goto f18
+ tr87:
+ m.cs = 72
+ goto _again
+ tr88:
+ m.cs = 72
+ goto f9
+ tr76:
+ m.cs = 73
+ goto f4
+ tr91:
+ m.cs = 74
+ goto _again
+ tr92:
+ m.cs = 75
+ goto _again
+ tr93:
+ m.cs = 76
+ goto _again
+ tr77:
+ m.cs = 77
+ goto f4
+ tr94:
+ m.cs = 78
+ goto _again
+ tr95:
+ m.cs = 79
+ goto _again
+ tr96:
+ m.cs = 80
+ goto _again
+ tr97:
+ m.cs = 81
+ goto _again
+ tr98:
+ m.cs = 82
+ goto _again
+ tr99:
+ m.cs = 84
+ goto f1
+ tr100:
+ m.cs = 85
+ goto _again
+ tr101:
+ m.cs = 86
+ goto _again
+ tr102:
+ m.cs = 87
+ goto f2
+ tr104:
+ m.cs = 88
+ goto f4
+ tr107:
+ m.cs = 89
+ goto _again
+ tr109:
+ m.cs = 90
+ goto _again
+ tr111:
+ m.cs = 91
+ goto _again
+ tr113:
+ m.cs = 92
+ goto _again
+ tr115:
+ m.cs = 93
+ goto _again
+ tr117:
+ m.cs = 94
+ goto _again
+ tr119:
+ m.cs = 95
+ goto _again
+ tr121:
+ m.cs = 96
+ goto _again
+ tr123:
+ m.cs = 97
+ goto _again
+ tr125:
+ m.cs = 98
+ goto _again
+ tr127:
+ m.cs = 99
+ goto _again
+ tr129:
+ m.cs = 100
+ goto _again
+ tr131:
+ m.cs = 101
+ goto _again
+ tr133:
+ m.cs = 102
+ goto _again
+ tr135:
+ m.cs = 103
+ goto _again
+ tr137:
+ m.cs = 104
+ goto _again
+ tr139:
+ m.cs = 105
+ goto _again
+ tr141:
+ m.cs = 106
+ goto _again
+ tr143:
+ m.cs = 107
+ goto _again
+ tr145:
+ m.cs = 108
+ goto _again
+ tr147:
+ m.cs = 109
+ goto _again
+ tr149:
+ m.cs = 110
+ goto _again
+ tr151:
+ m.cs = 111
+ goto _again
+ tr153:
+ m.cs = 112
+ goto _again
+ tr155:
+ m.cs = 113
+ goto _again
+ tr157:
+ m.cs = 114
+ goto _again
+ tr159:
+ m.cs = 115
+ goto _again
+ tr161:
+ m.cs = 116
+ goto _again
+ tr163:
+ m.cs = 117
+ goto _again
+ tr165:
+ m.cs = 118
+ goto _again
+ tr167:
+ m.cs = 119
+ goto _again
+ tr168:
+ m.cs = 120
+ goto f6
+ tr225:
+ m.cs = 121
+ goto _again
+ tr223:
+ m.cs = 121
+ goto f4
+ tr173:
+ m.cs = 122
+ goto _again
+ tr174:
+ m.cs = 122
+ goto f9
+ tr220:
+ m.cs = 123
+ goto _again
+ tr171:
+ m.cs = 123
+ goto f4
+ tr178:
+ m.cs = 124
+ goto _again
+ tr179:
+ m.cs = 124
+ goto f9
+ tr221:
+ m.cs = 125
+ goto f38
+ tr182:
+ m.cs = 126
+ goto _again
+ tr228:
+ m.cs = 127
+ goto _again
+ tr187:
+ m.cs = 127
+ goto f26
+ tr234:
+ m.cs = 127
+ goto f44
+ tr190:
+ m.cs = 128
+ goto _again
+ tr191:
+ m.cs = 128
+ goto f9
+ tr240:
+ m.cs = 129
+ goto _again
+ tr205:
+ m.cs = 129
+ goto f31
+ tr245:
+ m.cs = 129
+ goto f50
+ tr195:
+ m.cs = 130
+ goto _again
+ tr196:
+ m.cs = 130
+ goto f9
+ tr237:
+ m.cs = 131
+ goto f31
+ tr200:
+ m.cs = 132
+ goto _again
+ tr201:
+ m.cs = 132
+ goto f9
+ tr188:
+ m.cs = 133
+ goto f26
+ tr247:
+ m.cs = 134
+ goto f45
+ tr184:
+ m.cs = 135
+ goto _again
+ tr206:
+ m.cs = 136
+ goto f31
+ tr248:
+ m.cs = 136
+ goto f50
+ tr166:
+ m.cs = 137
+ goto _again
+ tr164:
+ m.cs = 138
+ goto _again
+ tr162:
+ m.cs = 139
+ goto _again
+ tr160:
+ m.cs = 140
+ goto _again
+ tr158:
+ m.cs = 141
+ goto _again
+ tr156:
+ m.cs = 142
+ goto _again
+ tr154:
+ m.cs = 143
+ goto _again
+ tr152:
+ m.cs = 144
+ goto _again
+ tr150:
+ m.cs = 145
+ goto _again
+ tr148:
+ m.cs = 146
+ goto _again
+ tr146:
+ m.cs = 147
+ goto _again
+ tr144:
+ m.cs = 148
+ goto _again
+ tr142:
+ m.cs = 149
+ goto _again
+ tr140:
+ m.cs = 150
+ goto _again
+ tr138:
+ m.cs = 151
+ goto _again
+ tr136:
+ m.cs = 152
+ goto _again
+ tr134:
+ m.cs = 153
+ goto _again
+ tr132:
+ m.cs = 154
+ goto _again
+ tr130:
+ m.cs = 155
+ goto _again
+ tr128:
+ m.cs = 156
+ goto _again
+ tr126:
+ m.cs = 157
+ goto _again
+ tr124:
+ m.cs = 158
+ goto _again
+ tr122:
+ m.cs = 159
+ goto _again
+ tr120:
+ m.cs = 160
+ goto _again
+ tr118:
+ m.cs = 161
+ goto _again
+ tr116:
+ m.cs = 162
+ goto _again
+ tr114:
+ m.cs = 163
+ goto _again
+ tr112:
+ m.cs = 164
+ goto _again
+ tr110:
+ m.cs = 165
+ goto _again
+ tr108:
+ m.cs = 166
+ goto _again
+ tr105:
+ m.cs = 167
+ goto f1
+ tr208:
+ m.cs = 168
+ goto _again
+ tr209:
+ m.cs = 169
+ goto _again
+ tr210:
+ m.cs = 170
+ goto f2
+ tr211:
+ m.cs = 171
+ goto _again
+ tr212:
+ m.cs = 172
+ goto _again
+ tr42:
+ m.cs = 172
+ goto f4
+ tr47:
+ m.cs = 173
+ goto _again
+ tr48:
+ m.cs = 173
+ goto f9
+ tr214:
+ m.cs = 174
+ goto _again
+ tr82:
+ m.cs = 174
+ goto f16
+ tr216:
+ m.cs = 175
+ goto _again
+ tr84:
+ m.cs = 175
+ goto f18
+ tr89:
+ m.cs = 176
+ goto _again
+ tr90:
+ m.cs = 176
+ goto f9
+ tr218:
+ m.cs = 177
+ goto _again
+ tr170:
+ m.cs = 177
+ goto f4
+ tr219:
+ m.cs = 178
+ goto f38
+ tr227:
+ m.cs = 178
+ goto f42
+ tr233:
+ m.cs = 178
+ goto f45
+ tr239:
+ m.cs = 178
+ goto f48
+ tr244:
+ m.cs = 178
+ goto f51
+ tr224:
+ m.cs = 179
+ goto _again
+ tr222:
+ m.cs = 179
+ goto f4
+ tr175:
+ m.cs = 180
+ goto _again
+ tr176:
+ m.cs = 180
+ goto f9
+ tr180:
+ m.cs = 181
+ goto _again
+ tr181:
+ m.cs = 181
+ goto f9
+ tr226:
+ m.cs = 182
+ goto _again
+ tr186:
+ m.cs = 182
+ goto f26
+ tr232:
+ m.cs = 182
+ goto f44
+ tr192:
+ m.cs = 183
+ goto _again
+ tr193:
+ m.cs = 183
+ goto f9
+ tr229:
+ m.cs = 184
+ goto f42
+ tr235:
+ m.cs = 184
+ goto f45
+ tr230:
+ m.cs = 185
+ goto _again
+ tr231:
+ m.cs = 186
+ goto _again
+ tr238:
+ m.cs = 187
+ goto _again
+ tr204:
+ m.cs = 187
+ goto f31
+ tr243:
+ m.cs = 187
+ goto f50
+ tr197:
+ m.cs = 188
+ goto _again
+ tr198:
+ m.cs = 188
+ goto f9
+ tr241:
+ m.cs = 189
+ goto _again
+ tr246:
+ m.cs = 189
+ goto f50
+ tr242:
+ m.cs = 190
+ goto _again
+ tr202:
+ m.cs = 191
+ goto _again
+ tr207:
+ m.cs = 192
+ goto _again
+ tr249:
+ m.cs = 193
+ goto _again
+
+ f4:
+
+ m.pb = m.p
+
+ goto _again
+ f9:
+
+ // List of positions in the buffer to later lowercase
+ output.tolower = append(output.tolower, m.p-m.pb)
+
+ goto _again
+ f2:
+
+ output.prefix = string(m.text())
+
+ goto _again
+ f6:
+
+ output.ID = string(m.text())
+
+ goto _again
+ f38:
+
+ output.SS = string(m.text())
+ // Iterate upper letters lowering them
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] + 32
+ }
+ output.norm = string(m.text())
+ // Revert the buffer to the original
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] - 32
+ }
+
+ goto _again
+ f0:
+
+ m.err = fmt.Errorf(errPrefix, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f5:
+
+ m.err = fmt.Errorf(errIdentifier, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f7:
+
+ m.err = fmt.Errorf(errSpecificString, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f23:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+
+ goto _again
+ f11:
+
+ m.err = fmt.Errorf(errSCIMNamespace, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f13:
+
+ m.err = fmt.Errorf(errSCIMType, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f15:
+
+ m.err = fmt.Errorf(errSCIMName, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f17:
+
+ if m.p == m.pe {
+ m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1)
+ } else {
+ m.err = fmt.Errorf(errSCIMOther, m.p)
+ }
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f14:
+
+ output.scim.Type = scimschema.TypeFromString(string(m.text()))
+
+ goto _again
+ f16:
+
+ output.scim.pos = m.p
+
+ goto _again
+ f35:
+
+ output.scim.Name = string(m.data[output.scim.pos:m.p])
+
+ goto _again
+ f18:
+
+ output.scim.pos = m.p
+
+ goto _again
+ f22:
+
+ m.err = fmt.Errorf(err8141SpecificString, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f21:
+
+ m.err = fmt.Errorf(err8141Identifier, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f42:
+
+ output.rComponent = string(m.text())
+
+ goto _again
+ f48:
+
+ output.qComponent = string(m.text())
+
+ goto _again
+ f44:
+
+ if output.rStart {
+ m.err = fmt.Errorf(err8141RComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+ output.rStart = true
+
+ goto _again
+ f50:
+
+ if output.qStart {
+ m.err = fmt.Errorf(err8141QComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+ output.qStart = true
+
+ goto _again
+ f25:
+
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f30:
+
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f1:
+
+ m.pb = m.p
+
+ if m.parsingMode != RFC8141Only {
+ // Throw an error when:
+ // - we are entering here matching the the prefix in the namespace identifier part
+ // - looking ahead (3 chars) we find a colon
+ if pos := m.p + 3; pos < m.pe && m.data[pos] == 58 && output.prefix != "" {
+ m.err = fmt.Errorf(errNoUrnWithinID, pos)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+ }
+
+ goto _again
+ f12:
+
+ output.ID = string(m.text())
+
+ output.scim = &SCIM{}
+
+ goto _again
+ f3:
+
+ m.err = fmt.Errorf(errIdentifier, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ m.err = fmt.Errorf(errPrefix, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f10:
+
+ m.err = fmt.Errorf(errIdentifier, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ m.err = fmt.Errorf(errNoUrnWithinID, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f8:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+
+ m.err = fmt.Errorf(errSpecificString, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f19:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+
+ if m.p == m.pe {
+ m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1)
+ } else {
+ m.err = fmt.Errorf(errSCIMOther, m.p)
+ }
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f24:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+
+ m.err = fmt.Errorf(err8141SpecificString, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f27:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f28:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f20:
+
+ m.err = fmt.Errorf(err8141Identifier, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ m.err = fmt.Errorf(errPrefix, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f26:
+
+ if output.rStart {
+ m.err = fmt.Errorf(err8141RComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+ output.rStart = true
m.pb = m.p
- goto st39
- st39:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof39
- }
- stCase39:
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st40
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st40
- }
- default:
- goto tr46
- }
- goto tr44
- tr46:
+ goto _again
+ f45:
- m.tolower = append(m.tolower, m.p-m.pb)
+ if output.rStart {
+ m.err = fmt.Errorf(err8141RComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
- goto st40
- st40:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof40
- }
- stCase40:
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st45
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st45
- }
- default:
- goto tr48
}
- goto tr44
- tr48:
+ output.rStart = true
- m.tolower = append(m.tolower, m.p-m.pb)
+ output.rComponent = string(m.text())
+
+ goto _again
+ f31:
+
+ if output.qStart {
+ m.err = fmt.Errorf(err8141QComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
- goto st45
- st45:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof45
- }
- stCase45:
- switch (m.data)[(m.p)] {
- case 33:
- goto st44
- case 36:
- goto st44
- case 37:
- goto st39
- case 61:
- goto st44
- case 95:
- goto st44
- }
- switch {
- case (m.data)[(m.p)] < 48:
- if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 {
- goto st44
- }
- case (m.data)[(m.p)] > 59:
- switch {
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st44
- }
- case (m.data)[(m.p)] >= 64:
- goto st44
- }
- default:
- goto st44
}
- goto tr44
- tr8:
+ output.qStart = true
m.pb = m.p
- goto st41
- st41:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof41
- }
- stCase41:
- switch (m.data)[(m.p)] {
- case 45:
- goto st7
- case 58:
- goto tr10
- case 82:
- goto st42
- case 114:
- goto st42
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st7
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st7
- }
- default:
- goto st7
- }
- goto tr6
- st42:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof42
- }
- stCase42:
- switch (m.data)[(m.p)] {
- case 45:
- goto st8
- case 58:
- goto tr10
- case 78:
- goto st43
- case 110:
- goto st43
- }
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st8
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st8
- }
- default:
- goto st8
- }
- goto tr50
- st43:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof43
+ goto _again
+ f51:
+
+ if output.qStart {
+ m.err = fmt.Errorf(err8141QComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- stCase43:
- if (m.data)[(m.p)] == 45 {
- goto st9
+ output.qStart = true
+
+ output.qComponent = string(m.text())
+
+ goto _again
+ f46:
+
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+ f29:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- switch {
- case (m.data)[(m.p)] < 65:
- if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 {
- goto st9
- }
- case (m.data)[(m.p)] > 90:
- if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 {
- goto st9
- }
- default:
- goto st9
+
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ goto _again
+
+ _again:
+ switch _toStateActions[m.cs] {
+ case 33:
+
+ (m.p)--
+
+ m.err = fmt.Errorf(err8141InformalID, m.p)
+ m.cs = 193
+ goto _again
}
- goto tr52
- st46:
- if (m.p)++; (m.p) == (m.pe) {
- goto _testEof46
+
+ if m.cs == 0 {
+ goto _out
}
- stCase46:
- switch (m.data)[(m.p)] {
- case 10:
- goto st0
- case 13:
- goto st0
+ if (m.p)++; (m.p) != (m.pe) {
+ goto _resume
}
- goto st46
- stOut:
- _testEof2:
- m.cs = 2
- goto _testEof
- _testEof3:
- m.cs = 3
- goto _testEof
- _testEof4:
- m.cs = 4
- goto _testEof
- _testEof5:
- m.cs = 5
- goto _testEof
- _testEof6:
- m.cs = 6
- goto _testEof
- _testEof7:
- m.cs = 7
- goto _testEof
- _testEof8:
- m.cs = 8
- goto _testEof
- _testEof9:
- m.cs = 9
- goto _testEof
- _testEof10:
- m.cs = 10
- goto _testEof
- _testEof11:
- m.cs = 11
- goto _testEof
- _testEof12:
- m.cs = 12
- goto _testEof
- _testEof13:
- m.cs = 13
- goto _testEof
- _testEof14:
- m.cs = 14
- goto _testEof
- _testEof15:
- m.cs = 15
- goto _testEof
- _testEof16:
- m.cs = 16
- goto _testEof
- _testEof17:
- m.cs = 17
- goto _testEof
- _testEof18:
- m.cs = 18
- goto _testEof
- _testEof19:
- m.cs = 19
- goto _testEof
- _testEof20:
- m.cs = 20
- goto _testEof
- _testEof21:
- m.cs = 21
- goto _testEof
- _testEof22:
- m.cs = 22
- goto _testEof
- _testEof23:
- m.cs = 23
- goto _testEof
- _testEof24:
- m.cs = 24
- goto _testEof
- _testEof25:
- m.cs = 25
- goto _testEof
- _testEof26:
- m.cs = 26
- goto _testEof
- _testEof27:
- m.cs = 27
- goto _testEof
- _testEof28:
- m.cs = 28
- goto _testEof
- _testEof29:
- m.cs = 29
- goto _testEof
- _testEof30:
- m.cs = 30
- goto _testEof
- _testEof31:
- m.cs = 31
- goto _testEof
- _testEof32:
- m.cs = 32
- goto _testEof
- _testEof33:
- m.cs = 33
- goto _testEof
- _testEof34:
- m.cs = 34
- goto _testEof
- _testEof35:
- m.cs = 35
- goto _testEof
- _testEof36:
- m.cs = 36
- goto _testEof
- _testEof37:
- m.cs = 37
- goto _testEof
- _testEof38:
- m.cs = 38
- goto _testEof
- _testEof44:
- m.cs = 44
- goto _testEof
- _testEof39:
- m.cs = 39
- goto _testEof
- _testEof40:
- m.cs = 40
- goto _testEof
- _testEof45:
- m.cs = 45
- goto _testEof
- _testEof41:
- m.cs = 41
- goto _testEof
- _testEof42:
- m.cs = 42
- goto _testEof
- _testEof43:
- m.cs = 43
- goto _testEof
- _testEof46:
- m.cs = 46
- goto _testEof
-
_testEof:
{
}
if (m.p) == (m.eof) {
- switch m.cs {
- case 44, 45:
+ switch _eofActions[m.cs] {
+ case 1:
- raw := m.text()
- output.SS = string(raw)
- // Iterate upper letters lowering them
- for _, i := range m.tolower {
- raw[i] = raw[i] + 32
- }
- output.norm = string(raw)
+ m.err = fmt.Errorf(errPrefix, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
- case 1, 2, 4:
+ case 6:
- m.err = fmt.Errorf(errParse, m.p)
+ m.err = fmt.Errorf(errIdentifier, m.p)
(m.p)--
- {
- goto st46
- }
+ m.cs = 193
+ goto _again
- case 3:
+ case 8:
- m.err = fmt.Errorf(errPrefix, m.p)
+ m.err = fmt.Errorf(errSpecificString, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 24:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- m.err = fmt.Errorf(errParse, m.p)
+ case 12:
+
+ m.err = fmt.Errorf(errSCIMNamespace, m.p)
(m.p)--
- {
- goto st46
- }
+ m.cs = 193
+ goto _again
- case 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 41:
+ case 14:
- m.err = fmt.Errorf(errIdentifier, m.p)
+ m.err = fmt.Errorf(errSCIMType, m.p)
(m.p)--
- {
- goto st46
- }
+ m.cs = 193
+ goto _again
- m.err = fmt.Errorf(errParse, m.p)
+ case 16:
+
+ m.err = fmt.Errorf(errSCIMName, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 18:
+
+ if m.p == m.pe {
+ m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1)
+ } else {
+ m.err = fmt.Errorf(errSCIMOther, m.p)
}
+ (m.p)--
- case 38:
+ m.cs = 193
+ goto _again
- m.err = fmt.Errorf(errSpecificString, m.p)
+ case 23:
+
+ m.err = fmt.Errorf(err8141SpecificString, m.p)
(m.p)--
- {
- goto st46
- }
+ m.cs = 193
+ goto _again
- m.err = fmt.Errorf(errParse, m.p)
+ case 22:
+
+ m.err = fmt.Errorf(err8141Identifier, m.p)
(m.p)--
- {
- goto st46
- }
+ m.cs = 193
+ goto _again
- case 42:
+ case 26:
- m.err = fmt.Errorf(errPrefix, m.p)
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ case 31:
+
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 34:
+
+ output.SS = string(m.text())
+ // Iterate upper letters lowering them
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] + 32
+ }
+ output.norm = string(m.text())
+ // Revert the buffer to the original
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] - 32
+ }
+
+ output.kind = RFC2141
+
+ case 38:
+
+ output.SS = string(m.text())
+ // Iterate upper letters lowering them
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] + 32
}
+ output.norm = string(m.text())
+ // Revert the buffer to the original
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] - 32
+ }
+
+ output.kind = RFC8141
+
+ case 4:
m.err = fmt.Errorf(errIdentifier, m.p)
(m.p)--
- {
- goto st46
- }
+ m.cs = 193
+ goto _again
- m.err = fmt.Errorf(errParse, m.p)
+ m.err = fmt.Errorf(errPrefix, m.p)
(m.p)--
- {
- goto st46
- }
+ m.cs = 193
+ goto _again
- case 43:
+ case 11:
+
+ m.err = fmt.Errorf(errIdentifier, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
m.err = fmt.Errorf(errNoUrnWithinID, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 9:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- m.err = fmt.Errorf(errIdentifier, m.p)
+ m.err = fmt.Errorf(errSpecificString, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 20:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- m.err = fmt.Errorf(errParse, m.p)
+ if m.p == m.pe {
+ m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1)
+ } else {
+ m.err = fmt.Errorf(errSCIMOther, m.p)
+ }
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 25:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- case 39, 40:
+ m.err = fmt.Errorf(err8141SpecificString, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ case 28:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
- m.err = fmt.Errorf(errHex, m.p)
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 29:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- m.err = fmt.Errorf(errSpecificString, m.p)
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ case 21:
+
+ m.err = fmt.Errorf(err8141Identifier, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ m.err = fmt.Errorf(errPrefix, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ case 42:
+
+ output.rComponent = string(m.text())
+
+ output.kind = RFC8141
+
+ case 48:
+
+ output.qComponent = string(m.text())
+
+ output.kind = RFC8141
+
+ case 41:
+
+ output.fComponent = string(m.text())
+
+ output.kind = RFC8141
+
+ case 40:
+
+ m.pb = m.p
+
+ output.fComponent = string(m.text())
+
+ output.kind = RFC8141
+
+ case 30:
+
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
- m.err = fmt.Errorf(errParse, m.p)
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
(m.p)--
- {
- goto st46
+ m.cs = 193
+ goto _again
+
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ case 35:
+
+ output.scim.Name = string(m.data[output.scim.pos:m.p])
+
+ output.SS = string(m.text())
+ // Iterate upper letters lowering them
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] + 32
+ }
+ output.norm = string(m.text())
+ // Revert the buffer to the original
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] - 32
+ }
+
+ output.kind = RFC7643
+
+ case 37:
+
+ output.scim.Other = string(m.data[output.scim.pos:m.p])
+
+ output.SS = string(m.text())
+ // Iterate upper letters lowering them
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] + 32
+ }
+ output.norm = string(m.text())
+ // Revert the buffer to the original
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] - 32
+ }
+
+ output.kind = RFC7643
+
+ case 44:
+
+ if output.rStart {
+ m.err = fmt.Errorf(err8141RComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
+ }
+ output.rStart = true
+
+ output.rComponent = string(m.text())
+
+ output.kind = RFC8141
+
+ case 50:
+
+ if output.qStart {
+ m.err = fmt.Errorf(err8141QComponentStart, m.p)
+ (m.p)--
+
+ m.cs = 193
+ goto _again
+
}
+ output.qStart = true
+ output.qComponent = string(m.text())
+
+ output.kind = RFC8141
}
}
@@ -1689,3 +5031,16 @@ func (m *machine) Parse(input []byte) (*URN, error) {
return output, nil
}
+
+func (m *machine) WithParsingMode(x ParsingMode) {
+ m.parsingMode = x
+ switch m.parsingMode {
+ case RFC2141Only:
+ m.startParsingAt = enMain
+ case RFC8141Only:
+ m.startParsingAt = enRfc8141Only
+ case RFC7643Only:
+ m.startParsingAt = enScimOnly
+ }
+ m.parsingModeSet = true
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go.rl b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go.rl
index 3bc05a651a5..0a17421998e 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go.rl
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/machine.go.rl
@@ -2,15 +2,28 @@ package urn
import (
"fmt"
+
+ scimschema "github.com/leodido/go-urn/scim/schema"
)
var (
- errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]"
- errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]"
- errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]"
- errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]"
- errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]"
- errParse = "parsing error [col %d]"
+ errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]"
+ errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its beginning) [col %d]"
+ errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]"
+ errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]"
+ errHex = "expecting the percent encoded chars to be well-formed (%%alnum{2}) [col %d]"
+ errSCIMNamespace = "expecing the SCIM namespace identifier (ietf:params:scim) [col %d]"
+ errSCIMType = "expecting a correct SCIM type (schemas, api, param) [col %d]"
+ errSCIMName = "expecting one or more alnum char in the SCIM name part [col %d]"
+ errSCIMOther = "expecting a well-formed other SCIM part [col %d]"
+ errSCIMOtherIncomplete = "expecting a not empty SCIM other part after colon [col %d]"
+ err8141InformalID = "informal URN namespace must be in the form urn-[1-9][0-9] [col %d]"
+ err8141SpecificString = "expecting the specific string to contain alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] not in first position) chars [col %d]"
+ err8141Identifier = "expecting the indentifier to be a string with (length 2 to 32 chars) containing alnum (or dashes) not starting or ending with a dash [col %d]"
+ err8141RComponentStart = "expecting only one r-component (starting with the ?+ sequence) [col %d]"
+ err8141QComponentStart = "expecting only one q-component (starting with the ?= sequence) [col %d]"
+ err8141MalformedRComp = "expecting a non-empty r-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]"
+ err8141MalformedQComp = "expecting a non-empty q-component containing alnum, hex, or others ([~&()+,-.:=@;$_!*'] or [/?] but not at its beginning) [col %d]"
)
%%{
@@ -24,25 +37,42 @@ action mark {
}
action tolower {
- m.tolower = append(m.tolower, m.p - m.pb)
+ // List of positions in the buffer to later lowercase
+ output.tolower = append(output.tolower, m.p - m.pb)
}
action set_pre {
output.prefix = string(m.text())
}
+action throw_pre_urn_err {
+ if m.parsingMode != RFC8141Only {
+ // Throw an error when:
+ // - we are entering here matching the the prefix in the namespace identifier part
+ // - looking ahead (3 chars) we find a colon
+ if pos := m.p + 3; pos < m.pe && m.data[pos] == 58 && output.prefix != "" {
+ m.err = fmt.Errorf(errNoUrnWithinID, pos)
+ fhold;
+ fgoto fail;
+ }
+ }
+}
+
action set_nid {
output.ID = string(m.text())
}
action set_nss {
- raw := m.text()
- output.SS = string(raw)
+ output.SS = string(m.text())
// Iterate upper letters lowering them
- for _, i := range m.tolower {
- raw[i] = raw[i] + 32
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] + 32
+ }
+ output.norm = string(m.text())
+ // Revert the buffer to the original
+ for _, i := range output.tolower {
+ m.data[m.pb+i] = m.data[m.pb+i] - 32
}
- output.norm = string(raw)
}
action err_pre {
@@ -70,30 +100,200 @@ action err_urn {
}
action err_hex {
- m.err = fmt.Errorf(errHex, m.p)
+ if m.parsingMode == RFC2141Only || m.parsingMode == RFC8141Only {
+ m.err = fmt.Errorf(errHex, m.p)
+ fhold;
+ fgoto fail;
+ }
+}
+
+action base_type {
+ output.kind = RFC2141;
+}
+
+pre = ([uU] @err(err_pre) [rR] @err(err_pre) [nN] @err(err_pre)) >mark >throw_pre_urn_err %set_pre;
+
+nid = (alnum >mark (alnum | '-'){0,31}) $err(err_nid) %set_nid;
+
+hex = '%' (digit | lower | upper >tolower){2} $err(err_hex);
+
+sss = (alnum | [()+,\-.:=@;$_!*']);
+
+nss = (sss | hex)+ $err(err_nss);
+
+nid_not_urn = (nid - pre %err(err_urn));
+
+urn = pre ':' @err(err_pre) (nid_not_urn ':' nss >mark %set_nss) %eof(base_type);
+
+### SCIM BEG
+
+action err_scim_nid {
+ m.err = fmt.Errorf(errSCIMNamespace, m.p)
fhold;
fgoto fail;
}
-action err_parse {
- m.err = fmt.Errorf(errParse, m.p)
+action err_scim_type {
+ m.err = fmt.Errorf(errSCIMType, m.p)
fhold;
fgoto fail;
}
-pre = ([uU][rR][nN] @err(err_pre)) >mark %set_pre;
+action err_scim_name {
+ m.err = fmt.Errorf(errSCIMName, m.p)
+ fhold;
+ fgoto fail;
+}
-nid = (alnum >mark (alnum | '-'){0,31}) %set_nid;
+action err_scim_other {
+ if m.p == m.pe {
+ m.err = fmt.Errorf(errSCIMOtherIncomplete, m.p-1)
+ } else {
+ m.err = fmt.Errorf(errSCIMOther, m.p)
+ }
+ fhold;
+ fgoto fail;
+}
-hex = '%' (digit | lower | upper >tolower){2} $err(err_hex);
+action scim_type {
+ output.kind = RFC7643;
+}
-sss = (alnum | [()+,\-.:=@;$_!*']);
+action create_scim {
+ output.scim = &SCIM{};
+}
-nss = (sss | hex)+ $err(err_nss);
+action set_scim_type {
+ output.scim.Type = scimschema.TypeFromString(string(m.text()))
+}
+
+action mark_scim_name {
+ output.scim.pos = m.p
+}
+
+action set_scim_name {
+ output.scim.Name = string(m.data[output.scim.pos:m.p])
+}
+
+action mark_scim_other {
+ output.scim.pos = m.p
+}
+
+action set_scim_other {
+ output.scim.Other = string(m.data[output.scim.pos:m.p])
+}
+
+scim_nid = 'ietf:params:scim' >mark %set_nid %create_scim $err(err_scim_nid);
+
+scim_other = ':' (sss | hex)+ >mark_scim_other %set_scim_other $err(err_scim_other);
+
+scim_name = (alnum)+ >mark_scim_name %set_scim_name $err(err_scim_name);
+
+scim_type = ('schemas' | 'api' | 'param') >mark %set_scim_type $err(err_scim_type);
+
+scim_only := pre ':' @err(err_pre) (scim_nid ':' scim_type ':' scim_name scim_other? %set_nss) %eof(scim_type);
+
+### SCIM END
+
+### 8141 BEG
+
+action err_nss_8141 {
+ m.err = fmt.Errorf(err8141SpecificString, m.p)
+ fhold;
+ fgoto fail;
+}
+
+action err_nid_8141 {
+ m.err = fmt.Errorf(err8141Identifier, m.p)
+ fhold;
+ fgoto fail;
+}
+
+action rfc8141_type {
+ output.kind = RFC8141;
+}
+
+action set_r_component {
+ output.rComponent = string(m.text())
+}
+
+action set_q_component {
+ output.qComponent = string(m.text())
+}
+
+action set_f_component {
+ output.fComponent = string(m.text())
+}
+
+action informal_nid_match {
+ fhold;
+ m.err = fmt.Errorf(err8141InformalID, m.p);
+ fgoto fail;
+}
+
+action mark_r_start {
+ if output.rStart {
+ m.err = fmt.Errorf(err8141RComponentStart, m.p)
+ fhold;
+ fgoto fail;
+ }
+ output.rStart = true
+}
+
+action mark_q_start {
+ if output.qStart {
+ m.err = fmt.Errorf(err8141QComponentStart, m.p)
+ fhold;
+ fgoto fail;
+ }
+ output.qStart = true
+}
+
+action err_malformed_r_component {
+ m.err = fmt.Errorf(err8141MalformedRComp, m.p)
+ fhold;
+ fgoto fail;
+}
+
+action err_malformed_q_component {
+ m.err = fmt.Errorf(err8141MalformedQComp, m.p)
+ fhold;
+ fgoto fail;
+}
+
+pchar = (sss | '~' | '&' | hex);
+
+component = pchar (pchar | '/' | '?')*;
+
+r_start = ('?+') %mark_r_start;
+
+r_component = r_start <: (r_start | component)+ $err(err_malformed_r_component) >mark %set_r_component;
+
+q_start = ('?=') %mark_q_start;
+
+q_component = q_start <: (q_start | component)+ $err(err_malformed_q_component) >mark %set_q_component;
+
+rq_components = (r_component :>> q_component? | q_component);
+
+fragment = (pchar | '/' | '?')*;
+
+f_component = '#' fragment >mark %set_f_component;
+
+nss_rfc8141 = (pchar >mark (pchar | '/')*) $err(err_nss_8141) %set_nss;
+
+nid_rfc8141 = (alnum >mark (alnum | '-'){0,30} alnum) $err(err_nid_8141) %set_nid;
+
+informal_id = pre ('-' [a-zA-z0] %to(informal_nid_match));
+
+nid_rfc8141_not_urn = (nid_rfc8141 - informal_id?);
+
+rfc8141_only := pre ':' @err(err_pre) nid_rfc8141_not_urn ':' nss_rfc8141 rq_components? f_component? %eof(rfc8141_type);
+
+### 8141 END
fail := (any - [\n\r])* @err{ fgoto main; };
-main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) $err(err_parse);
+main := urn;
}%%
@@ -103,6 +303,7 @@ main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss)
type Machine interface {
Error() error
Parse(input []byte) (*URN, error)
+ WithParsingMode(ParsingMode)
}
type machine struct {
@@ -110,12 +311,24 @@ type machine struct {
cs int
p, pe, eof, pb int
err error
- tolower []int
+ startParsingAt int
+ parsingMode ParsingMode
+ parsingModeSet bool
}
// NewMachine creates a new FSM able to parse RFC 2141 strings.
-func NewMachine() Machine {
- m := &machine{}
+func NewMachine(options ...Option) Machine {
+ m := &machine{
+ parsingModeSet: false,
+ }
+
+ for _, o := range options {
+ o(m)
+ }
+ // Set default parsing mode
+ if !m.parsingModeSet {
+ m.WithParsingMode(DefaultParsingMode)
+ }
%% access m.;
%% variable p m.p;
@@ -137,7 +350,7 @@ func (m *machine) text() []byte {
return m.data[m.pb:m.p]
}
-// Parse parses the input byte array as a RFC 2141 string.
+// Parse parses the input byte array as a RFC 2141 or RFC7643 string.
func (m *machine) Parse(input []byte) (*URN, error) {
m.data = input
m.p = 0
@@ -145,10 +358,11 @@ func (m *machine) Parse(input []byte) (*URN, error) {
m.pe = len(input)
m.eof = len(input)
m.err = nil
- m.tolower = []int{}
- output := &URN{}
+ m.cs = m.startParsingAt
+ output := &URN{
+ tolower: []int{},
+ }
- %% write init;
%% write exec;
if m.cs < first_final || m.cs == en_fail {
@@ -157,3 +371,16 @@ func (m *machine) Parse(input []byte) (*URN, error) {
return output, nil
}
+
+func (m *machine) WithParsingMode(x ParsingMode) {
+ m.parsingMode = x
+ switch m.parsingMode {
+ case RFC2141Only:
+ m.startParsingAt = en_main
+ case RFC8141Only:
+ m.startParsingAt = en_rfc8141_only
+ case RFC7643Only:
+ m.startParsingAt = en_scim_only
+ }
+ m.parsingModeSet = true
+}
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/makefile b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/makefile
index 47026d50999..68d5dd0f1b3 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/makefile
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/makefile
@@ -1,18 +1,43 @@
SHELL := /bin/bash
+RAGEL := ragel
+GOFMT := go fmt
+export GO_TEST=env GOTRACEBACK=all go test $(GO_ARGS)
+
+.PHONY: build
build: machine.go
+.PHONY: clean
+clean:
+ @rm -rf docs
+ @rm -f machine.go
+
+.PHONY: images
images: docs/urn.png
+.PHONY: snake2camel
+snake2camel:
+ @cd ./tools/snake2camel; go build -o ../../snake2camel .
+
+.PHONY: removecomments
+removecomments:
+ @cd ./tools/removecomments; go build -o ../../removecomments .
+
machine.go: machine.go.rl
- ragel -Z -G2 -e -o $@ $<
- @sed -i '/^\/\/line/d' $@
- @$(MAKE) -s file=$@ snake2camel
- @gofmt -w -s $@
+
+machine.go: snake2camel
+
+machine.go: removecomments
+
+machine.go:
+ $(RAGEL) -Z -G1 -e -o $@ $<
+ @./removecomments $@
+ @./snake2camel $@
+ $(GOFMT) $@
docs/urn.dot: machine.go.rl
@mkdir -p docs
- ragel -Z -e -Vp $< -o $@
+ $(RAGEL) -Z -e -Vp $< -o $@
docs/urn.png: docs/urn.dot
dot $< -Tpng -o $@
@@ -22,18 +47,5 @@ bench: *_test.go machine.go
go test -bench=. -benchmem -benchtime=5s ./...
.PHONY: tests
-tests: *_test.go machine.go
- go test -race -timeout 10s -coverprofile=coverage.out -covermode=atomic -v ./...
-
-.PHONY: clean
-clean:
- @rm -rf docs
- @rm -f machine.go
-
-.PHONY: snake2camel
-snake2camel:
- @awk -i inplace '{ \
- while ( match($$0, /(.*)([a-z]+[0-9]*)_([a-zA-Z0-9])(.*)/, cap) ) \
- $$0 = cap[1] cap[2] toupper(cap[3]) cap[4]; \
- print \
- }' $(file)
\ No newline at end of file
+tests: *_test.go
+ $(GO_TEST) ./...
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/options.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/options.go
new file mode 100644
index 00000000000..c543835a286
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/options.go
@@ -0,0 +1,9 @@
+package urn
+
+type Option func(Machine)
+
+func WithParsingMode(mode ParsingMode) Option {
+ return func(m Machine) {
+ m.WithParsingMode(mode)
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/parsing_mode.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/parsing_mode.go
new file mode 100644
index 00000000000..fce5aadc3c8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/parsing_mode.go
@@ -0,0 +1,12 @@
+package urn
+
+type ParsingMode int
+
+const (
+ Default ParsingMode = iota
+ RFC2141Only
+ RFC7643Only
+ RFC8141Only
+)
+
+const DefaultParsingMode = RFC2141Only
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/scim.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/scim.go
new file mode 100644
index 00000000000..f6b7aefbad3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/scim.go
@@ -0,0 +1,48 @@
+package urn
+
+import (
+ "encoding/json"
+ "fmt"
+
+ scimschema "github.com/leodido/go-urn/scim/schema"
+)
+
+const errInvalidSCIMURN = "invalid SCIM URN: %s"
+
+type SCIM struct {
+ Type scimschema.Type
+ Name string
+ Other string
+ pos int
+}
+
+func (s SCIM) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+func (s *SCIM) UnmarshalJSON(bytes []byte) error {
+ var str string
+ if err := json.Unmarshal(bytes, &str); err != nil {
+ return err
+ }
+ // Parse as SCIM
+ value, ok := Parse([]byte(str), WithParsingMode(RFC7643Only))
+ if !ok {
+ return fmt.Errorf(errInvalidSCIMURN, str)
+ }
+ if value.RFC() != RFC7643 {
+ return fmt.Errorf(errInvalidSCIMURN, str)
+ }
+ *s = *value.SCIM()
+
+ return nil
+}
+
+func (s *SCIM) String() string {
+ ret := fmt.Sprintf("urn:ietf:params:scim:%s:%s", s.Type.String(), s.Name)
+ if s.Other != "" {
+ ret += fmt.Sprintf(":%s", s.Other)
+ }
+
+ return ret
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/scim/schema/type.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/scim/schema/type.go
new file mode 100644
index 00000000000..134918230ff
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/scim/schema/type.go
@@ -0,0 +1,36 @@
+package scimschema
+
+type Type int
+
+const (
+ Unsupported Type = iota
+ Schemas
+ API
+ Param
+)
+
+func (t Type) String() string {
+ switch t {
+ case Schemas:
+ return "schemas"
+ case API:
+ return "api"
+ case Param:
+ return "param"
+ }
+
+ return ""
+}
+
+func TypeFromString(input string) Type {
+ switch input {
+ case "schemas":
+ return Schemas
+ case "api":
+ return API
+ case "param":
+ return Param
+ }
+
+ return Unsupported
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/urn.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/urn.go
index b903b7b3cd5..894d6258dce 100644
--- a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/urn.go
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/urn.go
@@ -1,9 +1,13 @@
package urn
import (
+ "encoding/json"
+ "fmt"
"strings"
)
+const errInvalidURN = "invalid URN: %s"
+
// URN represents an Uniform Resource Name.
//
// The general form represented is:
@@ -12,10 +16,18 @@ import (
//
// Details at https://tools.ietf.org/html/rfc2141.
type URN struct {
- prefix string // Static prefix. Equal to "urn" when empty.
- ID string // Namespace identifier
- SS string // Namespace specific string
- norm string // Normalized namespace specific string
+ prefix string // Static prefix. Equal to "urn" when empty.
+ ID string // Namespace identifier (NID)
+ SS string // Namespace specific string (NSS)
+ norm string // Normalized namespace specific string
+ kind Kind
+ scim *SCIM
+ rComponent string // RFC8141
+ qComponent string // RFC8141
+ fComponent string // RFC8141
+ rStart bool // RFC8141
+ qStart bool // RFC8141
+ tolower []int
}
// Normalize turns the receiving URN into its norm version.
@@ -26,12 +38,21 @@ func (u *URN) Normalize() *URN {
prefix: "urn",
ID: strings.ToLower(u.ID),
SS: u.norm,
+ // rComponent: u.rComponent,
+ // qComponent: u.qComponent,
+ // fComponent: u.fComponent,
}
}
// Equal checks the lexical equivalence of the current URN with another one.
func (u *URN) Equal(x *URN) bool {
- return *u.Normalize() == *x.Normalize()
+ if x == nil {
+ return false
+ }
+ nu := u.Normalize()
+ nx := x.Normalize()
+
+ return nu.prefix == nx.prefix && nu.ID == nx.ID && nu.SS == nx.SS
}
// String reassembles the URN into a valid URN string.
@@ -47,17 +68,74 @@ func (u *URN) String() string {
res += "urn"
}
res += u.prefix + ":" + u.ID + ":" + u.SS
+ if u.rComponent != "" {
+ res += "?+" + u.rComponent
+ }
+ if u.qComponent != "" {
+ res += "?=" + u.qComponent
+ }
+ if u.fComponent != "" {
+ res += "#" + u.fComponent
+ }
}
return res
}
-// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax.
-func Parse(u []byte) (*URN, bool) {
- urn, err := NewMachine().Parse(u)
+// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax (RFC 2141).
+func Parse(u []byte, options ...Option) (*URN, bool) {
+ urn, err := NewMachine(options...).Parse(u)
if err != nil {
return nil, false
}
return urn, true
}
+
+// MarshalJSON marshals the URN to JSON string form (e.g. `"urn:oid:1.2.3.4"`).
+func (u URN) MarshalJSON() ([]byte, error) {
+ return json.Marshal(u.String())
+}
+
+// UnmarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`).
+func (u *URN) UnmarshalJSON(bytes []byte) error {
+ var str string
+ if err := json.Unmarshal(bytes, &str); err != nil {
+ return err
+ }
+ if value, ok := Parse([]byte(str)); !ok {
+ return fmt.Errorf(errInvalidURN, str)
+ } else {
+ *u = *value
+ }
+
+ return nil
+}
+
+func (u *URN) IsSCIM() bool {
+ return u.kind == RFC7643
+}
+
+func (u *URN) SCIM() *SCIM {
+ if u.kind != RFC7643 {
+ return nil
+ }
+
+ return u.scim
+}
+
+func (u *URN) RFC() Kind {
+ return u.kind
+}
+
+func (u *URN) FComponent() string {
+ return u.fComponent
+}
+
+func (u *URN) QComponent() string {
+ return u.qComponent
+}
+
+func (u *URN) RComponent() string {
+ return u.rComponent
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/urn8141.go b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/urn8141.go
new file mode 100644
index 00000000000..da4dd062e3e
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/leodido/go-urn/urn8141.go
@@ -0,0 +1,30 @@
+package urn
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+const errInvalidURN8141 = "invalid URN per RFC 8141: %s"
+
+type URN8141 struct {
+ *URN
+}
+
+func (u URN8141) MarshalJSON() ([]byte, error) {
+ return json.Marshal(u.String())
+}
+
+func (u *URN8141) UnmarshalJSON(bytes []byte) error {
+ var str string
+ if err := json.Unmarshal(bytes, &str); err != nil {
+ return err
+ }
+ if value, ok := Parse([]byte(str), WithParsingMode(RFC8141Only)); !ok {
+ return fmt.Errorf(errInvalidURN8141, str)
+ } else {
+ *u = URN8141{value}
+ }
+
+ return nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/nu7hatch/gouuid/uuid.go b/integration/assets/hydrabroker/vendor/github.com/nu7hatch/gouuid/uuid.go
index ca960aa96a3..ac9623b729f 100644
--- a/integration/assets/hydrabroker/vendor/github.com/nu7hatch/gouuid/uuid.go
+++ b/integration/assets/hydrabroker/vendor/github.com/nu7hatch/gouuid/uuid.go
@@ -16,7 +16,7 @@ import (
"regexp"
)
-// The UUID reserved variants.
+// The UUID reserved variants.
const (
ReservedNCS byte = 0x80
ReservedRFC4122 byte = 0x40
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/.travis.yml b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/.travis.yml
deleted file mode 100644
index b454d643cbc..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-go:
- - 1.12.x
- - 1.13.x
- - tip
-
-# allow internal package imports, necessary for forked repositories
-go_import_path: github.com/onsi/ginkgo
-
-install:
- - go get -v -t ./...
- - go get golang.org/x/tools/cmd/cover
- - go get github.com/onsi/gomega
- - go install github.com/onsi/ginkgo/ginkgo
- - export PATH=$PATH:$HOME/gopath/bin
-
-script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace && go vet
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/CHANGELOG.md
deleted file mode 100644
index 96f03ad7c2b..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/CHANGELOG.md
+++ /dev/null
@@ -1,266 +0,0 @@
-## 1.11.0
-
-### Features
-- Add syscall for riscv64 architecture [f66e896]
-- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
-- teamcity reporter: output newline after every service message (#625) [3cfa02d]
-- Add support for go module when running `generate` command (#578) [9c89e3f]
-
-## 1.10.3
-
-### Fixes
-- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
-- Add integration test [d90e0dc]
-- Fix coverage files combining [e5dde8c]
-- A new CLI option: -ginkgo.reportFile (#601) [034fd25]
-
-## 1.10.2
-
-### Fixes
-- speed up table entry generateIt() (#609) [5049dc5]
-- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
-
-## 1.10.1
-
-### Fixes
-- stack backtrace: fix skipping (#600) [2a4c0bd]
-
-## 1.10.0
-
-### Fixes
-- stack backtrace: fix alignment and skipping [66915d6]
-- fix typo in documentation [8f97b93]
-
-## 1.9.0
-
-### Features
-- Option to print output into report, when tests have passed [0545415]
-
-### Fixes
-- Fixed typos in comments [0ecbc58]
-- gofmt code [a7f8bfb]
-- Simplify code [7454d00]
-- Simplify concatenation, incrementation and function assignment [4825557]
-- Avoid unnecessary conversions [9d9403c]
-- JUnit: include more detailed information about panic [19cca4b]
-- Print help to stdout when the user asks for help [4cb7441]
-
-
-## 1.8.0
-
-### New Features
-- allow config of the vet flag for `go test` (#562) [3cd45fa]
-- Support projects using go modules [d56ee76]
-
-### Fixes and Minor Improvements
-- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
-- Optimize focus to avoid allocations [f493786]
-- Ensure generated test file names are underscored [505cc35]
-
-## 1.7.0
-
-### New Features
-- Add JustAfterEach (#484) [0d4f080]
-
-### Fixes
-- Correctly round suite time in junit reporter [2445fc1]
-- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
-
-## 1.6.0
-
-### New Features
-- add --debug flag to emit node output to files (#499) [39febac]
-
-### Fixes
-- fix: for `go vet` to pass [69338ec]
-- docs: fix for contributing instructions [7004cb1]
-- consolidate and streamline contribution docs (#494) [d848015]
-- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6]
-- all: gofmt [000d317]
-- Increase eventually timeout to 30s [c73579c]
-- Clarify asynchronous test behaviour [294d8f4]
-- Travis badge should only show master [26d2143]
-
-## 1.5.0 5/10/2018
-
-### New Features
-- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
-- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
-- Re-add noisySkippings flag [652e15c]
-- Allow coverage to be displayed for focused specs (#367) [11459a8]
-- Handle -outputdir flag (#364) [228e3a8]
-- Handle -coverprofile flag (#355) [43392d5]
-
-### Fixes
-- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. (#365) [8382b23]
-- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
-- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
-- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
-- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c]
-- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
-- Add an extra new line after reporting spec run completion for test2json [874520d]
-- added name name field to junit reported testsuite [ae61c63]
-- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
-- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
-- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
-- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
-- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
-- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
-- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
-- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
-- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
-- Replace GOPATH in Environment [4b883f0]
-
-
-## 1.4.0 7/16/2017
-
-- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
-- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
-- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
-- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
-- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
-- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
-
-## 1.3.0 3/28/2017
-
-Improvements:
-
-- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
-- `Skip(message)` can be used to skip the current test.
-- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
-- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
-- Support for retrying flaky tests with `--flakeAttempts`
-- `ginkgo ./...` now recurses as you'd expect
-- Added `Specify` a synonym for `It`
-- Support colorise on Windows
-- Broader support for various go compilation flags in the `ginkgo` CLI
-
-Bug Fixes:
-
-- Ginkgo tests now fail when you `panic(nil)` (#167)
-
-## 1.2.0 5/31/2015
-
-Improvements
-
-- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
-- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
-- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
-
-## 1.2.0-beta
-
-Ginkgo now requires Go 1.4+
-
-Improvements:
-
-- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
-- Improved focus behavior. Now, this:
-
- ```golang
- FDescribe("Some describe", func() {
- It("A", func() {})
-
- FIt("B", func() {})
- })
- ```
-
- will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
-- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
-- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
-- Improved output when an error occurs in a setup or teardown block.
-- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
-- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
-- Add support for precompiled tests:
- - `ginkgo build ` will now compile the package, producing a file named `package.test`
- - The compiled `package.test` file can be run directly. This runs the tests in series.
- - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
-- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
-- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
-- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
-- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
-- `ginkgo -notify` now works on Linux
-
-Bug Fixes:
-
-- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
-- Fix tempfile leak when running in parallel
-- Fix incorrect failure message when a panic occurs during a parallel test run
-- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
-- Be more consistent about handling SIGTERM as well as SIGINT
-- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
-- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
-
-## 1.1.0 (8/2/2014)
-
-No changes, just dropping the beta.
-
-## 1.1.0-beta (7/22/2014)
-New Features:
-
-- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
-- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
-- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
-- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
-- `ginkgo --failFast` aborts the test suite after the first failure.
-- `ginkgo generate file_1 file_2` can take multiple file arguments.
-- Ginkgo now summarizes any spec failures that occured at the end of the test run.
-- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
-
-Improvements:
-
-- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
-- `ginkgo --untilItFails` no longer recompiles between attempts.
-- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
-
-Bug Fixes:
-
-- `ginkgo boostrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
-- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
-
-## 1.0.0 (5/24/2014)
-New Features:
-
-- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
-
-Improvements:
-
-- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm open the file in your text editor.
-- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
-
-Bug Fixes:
-
-- `-cover` now generates a correctly combined coverprofile when running with in parallel with multiple `-node`s.
-- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
-- Fix all remaining race conditions in Ginkgo's test suite.
-
-## 1.0.0-beta (4/14/2014)
-Breaking changes:
-
-- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
-- Modified the Reporter interface
-- `watch` is now a subcommand, not a flag.
-
-DSL changes:
-
-- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
-- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
-- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
-
-CLI changes:
-
-- `watch` is now a subcommand, not a flag
-- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
-- Additional arguments can be passed to specs. Pass them after the `--` separator
-- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
-- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
-
-Misc:
-
-- Start using semantic versioning
-- Start maintaining changelog
-
-Major refactor:
-
-- Pull out Ginkgo's internal to `internal`
-- Rename `example` everywhere to `spec`
-- Much more!
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
deleted file mode 100644
index 908b95c2c12..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# Contributing to Ginkgo
-
-Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
-
-- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
-- Ensure adequate test coverage:
- - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
- - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
-- Update the documentation. Ginko uses `godoc` comments and documentation on the `gh-pages` branch.
- If relevant, please submit a docs PR to that branch alongside your code PR.
-
-Thanks for supporting Ginkgo!
-
-## Setup
-
-Fork the repo, then:
-
-```
-go get github.com/onsi/ginkgo
-go get github.com/onsi/gomega/...
-cd $GOPATH/src/github.com/onsi/ginkgo
-git remote add fork git@github.com:/ginkgo.git
-
-ginkgo -r -p # ensure tests are green
-go vet ./... # ensure linter is happy
-```
-
-## Making the PR
- - go to a new branch `git checkout -b my-feature`
- - make your changes
- - run tests and linter again (see above)
- - `git push fork`
- - open PR 🎉
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/README.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/README.md
deleted file mode 100644
index cdf8d054a16..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/README.md
+++ /dev/null
@@ -1,121 +0,0 @@
-![Ginkgo: A Go BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
-
-[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)
-
-Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!
-
-If you have a question, comment, bug report, feature request, etc. please open a GitHub issue.
-
-## Feature List
-
-- Ginkgo uses Go's `testing` package and can live alongside your existing `testing` tests. It's easy to [bootstrap](http://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first tests](http://onsi.github.io/ginkgo/#adding-specs-to-a-suite)
-
-- Structure your BDD-style tests expressively:
- - Nestable [`Describe`, `Context` and `When` container blocks](http://onsi.github.io/ginkgo/#organizing-specs-with-containers-describe-and-context)
- - [`BeforeEach` and `AfterEach` blocks](http://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and teardown
- - [`It` and `Specify` blocks](http://onsi.github.io/ginkgo/#individual-specs-) that hold your assertions
- - [`JustBeforeEach` blocks](http://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach) that separate creation from configuration (also known as the subject action pattern).
- - [`BeforeSuite` and `AfterSuite` blocks](http://onsi.github.io/ginkgo/#global-setup-and-teardown-beforesuite-and-aftersuite) to prep for and cleanup after a suite.
-
-- A comprehensive test runner that lets you:
- - Mark specs as [pending](http://onsi.github.io/ginkgo/#pending-specs)
- - [Focus](http://onsi.github.io/ginkgo/#focused-specs) individual specs, and groups of specs, either programmatically or on the command line
- - Run your tests in [random order](http://onsi.github.io/ginkgo/#spec-permutation), and then reuse random seeds to replicate the same order.
- - Break up your test suite into parallel processes for straightforward [test parallelization](http://onsi.github.io/ginkgo/#parallel-specs)
-
-- `ginkgo`: a command line interface with plenty of handy command line arguments for [running your tests](http://onsi.github.io/ginkgo/#running-tests) and [generating](http://onsi.github.io/ginkgo/#generators) test files. Here are a few choice examples:
- - `ginkgo -nodes=N` runs your tests in `N` parallel processes and print out coherent output in realtime
- - `ginkgo -cover` runs your tests using Go's code coverage tool
- - `ginkgo convert` converts an XUnit-style `testing` package to a Ginkgo-style package
- - `ginkgo -focus="REGEXP"` and `ginkgo -skip="REGEXP"` allow you to specify a subset of tests to run via regular expression
- - `ginkgo -r` runs all tests suites under the current directory
- - `ginkgo -v` prints out identifying information for each tests just before it runs
-
- And much more: run `ginkgo help` for details!
-
- The `ginkgo` CLI is convenient, but purely optional -- Ginkgo works just fine with `go test`
-
-- `ginkgo watch` [watches](https://onsi.github.io/ginkgo/#watching-for-changes) packages *and their dependencies* for changes, then reruns tests. Run tests immediately as you develop!
-
-- Built-in support for testing [asynchronicity](http://onsi.github.io/ginkgo/#asynchronous-tests)
-
-- Built-in support for [benchmarking](http://onsi.github.io/ginkgo/#benchmark-tests) your code. Control the number of benchmark samples as you gather runtimes and other, arbitrary, bits of numerical information about your code.
-
-- [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`.
-
-- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`.
-
-- Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details.
-
-- A modular architecture that lets you easily:
- - Write [custom reporters](http://onsi.github.io/ginkgo/#writing-custom-reporters) (for example, Ginkgo comes with a [JUnit XML reporter](http://onsi.github.io/ginkgo/#generating-junit-xml-output) and a TeamCity reporter).
- - [Adapt an existing matcher library (or write your own!)](http://onsi.github.io/ginkgo/#using-other-matcher-libraries) to work with Ginkgo
-
-## [Gomega](http://github.com/onsi/gomega): Ginkgo's Preferred Matcher Library
-
-Ginkgo is best paired with Gomega. Learn more about Gomega [here](http://onsi.github.io/gomega/)
-
-## [Agouti](http://github.com/sclevine/agouti): A Go Acceptance Testing Framework
-
-Agouti allows you run WebDriver integration tests. Learn more about Agouti [here](http://agouti.org)
-
-## Set Me Up!
-
-You'll need the Go command-line tools. Ginkgo is tested with Go 1.6+, but preferably you should get the latest. Follow the [installation instructions](https://golang.org/doc/install) if you don't have it installed.
-
-```bash
-
-go get -u github.com/onsi/ginkgo/ginkgo # installs the ginkgo CLI
-go get -u github.com/onsi/gomega/... # fetches the matcher library
-
-cd path/to/package/you/want/to/test
-
-ginkgo bootstrap # set up a new ginkgo suite
-ginkgo generate # will create a sample test file. edit this file and add your tests then...
-
-go test # to run your tests
-
-ginkgo # also runs your tests
-
-```
-
-## I'm new to Go: What are my testing options?
-
-Of course, I heartily recommend [Ginkgo](https://github.com/onsi/ginkgo) and [Gomega](https://github.com/onsi/gomega). Both packages are seeing heavy, daily, production use on a number of projects and boast a mature and comprehensive feature-set.
-
-With that said, it's great to know what your options are :)
-
-### What Go gives you out of the box
-
-Testing is a first class citizen in Go, however Go's built-in testing primitives are somewhat limited: The [testing](http://golang.org/pkg/testing) package provides basic XUnit style tests and no assertion library.
-
-### Matcher libraries for Go's XUnit style tests
-
-A number of matcher libraries have been written to augment Go's built-in XUnit style tests. Here are two that have gained traction:
-
-- [testify](https://github.com/stretchr/testify)
-- [gocheck](http://labix.org/gocheck)
-
-You can also use Ginkgo's matcher library [Gomega](https://github.com/onsi/gomega) in [XUnit style tests](http://onsi.github.io/gomega/#using-gomega-with-golangs-xunitstyle-tests)
-
-### BDD style testing frameworks
-
-There are a handful of BDD-style testing frameworks written for Go. Here are a few:
-
-- [Ginkgo](https://github.com/onsi/ginkgo) ;)
-- [GoConvey](https://github.com/smartystreets/goconvey)
-- [Goblin](https://github.com/franela/goblin)
-- [Mao](https://github.com/azer/mao)
-- [Zen](https://github.com/pranavraja/zen)
-
-Finally, @shageman has [put together](https://github.com/shageman/gotestit) a comprehensive comparison of Go testing libraries.
-
-Go explore!
-
-## License
-
-Ginkgo is MIT-Licensed
-
-## Contributing
-
-See [CONTRIBUTING.md](CONTRIBUTING.md)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/RELEASING.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/RELEASING.md
deleted file mode 100644
index 1e298c2da71..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/RELEASING.md
+++ /dev/null
@@ -1,14 +0,0 @@
-A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
-
-1. Ensure CHANGELOG.md is up to date.
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
- - Categorize the changes into
- - Breaking Changes (requires a major version)
- - New Features (minor version)
- - Fixes (fix version)
- - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
-1. Update `VERSION` in `config/config.go`
-1. Create a commit with the version number as the commit message (e.g. `v1.3.0`)
-1. Tag the commit with the version number as the tag name (e.g. `v1.3.0`)
-1. Push the commit and tag to GitHub
-1. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/config/config.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/config/config.go
deleted file mode 100644
index 14c82ec3a71..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/config/config.go
+++ /dev/null
@@ -1,213 +0,0 @@
-/*
-Ginkgo accepts a number of configuration options.
-
-These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
-
-You can also learn more via
-
- ginkgo help
-
-or (I kid you not):
-
- go test -asdf
-*/
-package config
-
-import (
- "flag"
- "time"
-
- "fmt"
-)
-
-const VERSION = "1.11.0"
-
-type GinkgoConfigType struct {
- RandomSeed int64
- RandomizeAllSpecs bool
- RegexScansFilePath bool
- FocusString string
- SkipString string
- SkipMeasurements bool
- FailOnPending bool
- FailFast bool
- FlakeAttempts int
- EmitSpecProgress bool
- DryRun bool
- DebugParallel bool
-
- ParallelNode int
- ParallelTotal int
- SyncHost string
- StreamHost string
-}
-
-var GinkgoConfig = GinkgoConfigType{}
-
-type DefaultReporterConfigType struct {
- NoColor bool
- SlowSpecThreshold float64
- NoisyPendings bool
- NoisySkippings bool
- Succinct bool
- Verbose bool
- FullTrace bool
- ReportPassed bool
- ReportFile string
-}
-
-var DefaultReporterConfig = DefaultReporterConfigType{}
-
-func processPrefix(prefix string) string {
- if prefix != "" {
- prefix += "."
- }
- return prefix
-}
-
-func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
- prefix = processPrefix(prefix)
- flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
- flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
- flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
- flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
- flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
-
- flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
-
- flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
- flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
-
- flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")
-
- flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")
-
- flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
-
- flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
-
- if includeParallelFlags {
- flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
- flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
- flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
- flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
- }
-
- flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
- flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
- flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
- flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
- flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.")
- flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
- flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
- flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
- flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
-
-}
-
-func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
- prefix = processPrefix(prefix)
- result := make([]string, 0)
-
- if ginkgo.RandomSeed > 0 {
- result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
- }
-
- if ginkgo.RandomizeAllSpecs {
- result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
- }
-
- if ginkgo.SkipMeasurements {
- result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
- }
-
- if ginkgo.FailOnPending {
- result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
- }
-
- if ginkgo.FailFast {
- result = append(result, fmt.Sprintf("--%sfailFast", prefix))
- }
-
- if ginkgo.DryRun {
- result = append(result, fmt.Sprintf("--%sdryRun", prefix))
- }
-
- if ginkgo.FocusString != "" {
- result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
- }
-
- if ginkgo.SkipString != "" {
- result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
- }
-
- if ginkgo.FlakeAttempts > 1 {
- result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
- }
-
- if ginkgo.EmitSpecProgress {
- result = append(result, fmt.Sprintf("--%sprogress", prefix))
- }
-
- if ginkgo.DebugParallel {
- result = append(result, fmt.Sprintf("--%sdebug", prefix))
- }
-
- if ginkgo.ParallelNode != 0 {
- result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
- }
-
- if ginkgo.ParallelTotal != 0 {
- result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
- }
-
- if ginkgo.StreamHost != "" {
- result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
- }
-
- if ginkgo.SyncHost != "" {
- result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
- }
-
- if ginkgo.RegexScansFilePath {
- result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
- }
-
- if reporter.NoColor {
- result = append(result, fmt.Sprintf("--%snoColor", prefix))
- }
-
- if reporter.SlowSpecThreshold > 0 {
- result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
- }
-
- if !reporter.NoisyPendings {
- result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
- }
-
- if !reporter.NoisySkippings {
- result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
- }
-
- if reporter.Verbose {
- result = append(result, fmt.Sprintf("--%sv", prefix))
- }
-
- if reporter.Succinct {
- result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
- }
-
- if reporter.FullTrace {
- result = append(result, fmt.Sprintf("--%strace", prefix))
- }
-
- if reporter.ReportPassed {
- result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
- }
-
- if reporter.ReportFile != "" {
- result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
- }
-
- return result
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
deleted file mode 100644
index 3cbf89a35c7..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go
+++ /dev/null
@@ -1,624 +0,0 @@
-/*
-Ginkgo is a BDD-style testing framework for Golang
-
-The godoc documentation describes Ginkgo's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo/
-
-Ginkgo's preferred matcher library is [Gomega](http://github.com/onsi/gomega)
-
-Ginkgo on Github: http://github.com/onsi/ginkgo
-
-Ginkgo is MIT-Licensed
-*/
-package ginkgo
-
-import (
- "flag"
- "fmt"
- "io"
- "net/http"
- "os"
- "strings"
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/remote"
- "github.com/onsi/ginkgo/internal/suite"
- "github.com/onsi/ginkgo/internal/testingtproxy"
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/reporters/stenographer"
- colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
- "github.com/onsi/ginkgo/types"
-)
-
-const GINKGO_VERSION = config.VERSION
-const GINKGO_PANIC = `
-Your test failed.
-Ginkgo panics to prevent subsequent assertions from running.
-Normally Ginkgo rescues this panic so you shouldn't see it.
-
-But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
-To circumvent this, you should call
-
- defer GinkgoRecover()
-
-at the top of the goroutine that caused this panic.
-`
-const defaultTimeout = 1
-
-var globalSuite *suite.Suite
-var globalFailer *failer.Failer
-
-func init() {
- config.Flags(flag.CommandLine, "ginkgo", true)
- GinkgoWriter = writer.New(os.Stdout)
- globalFailer = failer.New()
- globalSuite = suite.New(globalFailer)
-}
-
-//GinkgoWriter implements an io.Writer
-//When running in verbose mode any writes to GinkgoWriter will be immediately printed
-//to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
-//only if the current test fails.
-var GinkgoWriter io.Writer
-
-//The interface by which Ginkgo receives *testing.T
-type GinkgoTestingT interface {
- Fail()
-}
-
-//GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
-//useful for seeding your own pseudorandom number generators (PRNGs) to ensure
-//consistent executions from run to run, where your tests contain variability (for
-//example, when selecting random test data).
-func GinkgoRandomSeed() int64 {
- return config.GinkgoConfig.RandomSeed
-}
-
-//GinkgoParallelNode returns the parallel node number for the current ginkgo process
-//The node number is 1-indexed
-func GinkgoParallelNode() int {
- return config.GinkgoConfig.ParallelNode
-}
-
-//Some matcher libraries or legacy codebases require a *testing.T
-//GinkgoT implements an interface analogous to *testing.T and can be used if
-//the library in question accepts *testing.T through an interface
-//
-// For example, with testify:
-// assert.Equal(GinkgoT(), 123, 123, "they should be equal")
-//
-// Or with gomock:
-// gomock.NewController(GinkgoT())
-//
-// GinkgoT() takes an optional offset argument that can be used to get the
-// correct line number associated with the failure.
-func GinkgoT(optionalOffset ...int) GinkgoTInterface {
- offset := 3
- if len(optionalOffset) > 0 {
- offset = optionalOffset[0]
- }
- return testingtproxy.New(GinkgoWriter, Fail, offset)
-}
-
-//The interface returned by GinkgoT(). This covers most of the methods
-//in the testing package's T.
-type GinkgoTInterface interface {
- Fail()
- Error(args ...interface{})
- Errorf(format string, args ...interface{})
- FailNow()
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Log(args ...interface{})
- Logf(format string, args ...interface{})
- Failed() bool
- Parallel()
- Skip(args ...interface{})
- Skipf(format string, args ...interface{})
- SkipNow()
- Skipped() bool
-}
-
-//Custom Ginkgo test reporters must implement the Reporter interface.
-//
-//The custom reporter is passed in a SuiteSummary when the suite begins and ends,
-//and a SpecSummary just before a spec begins and just after a spec ends
-type Reporter reporters.Reporter
-
-//Asynchronous specs are given a channel of the Done type. You must close or write to the channel
-//to tell Ginkgo that your async test is done.
-type Done chan<- interface{}
-
-//GinkgoTestDescription represents the information about the current running test returned by CurrentGinkgoTestDescription
-// FullTestText: a concatenation of ComponentTexts and the TestText
-// ComponentTexts: a list of all texts for the Describes & Contexts leading up to the current test
-// TestText: the text in the actual It or Measure node
-// IsMeasurement: true if the current test is a measurement
-// FileName: the name of the file containing the current test
-// LineNumber: the line number for the current test
-// Failed: if the current test has failed, this will be true (useful in an AfterEach)
-type GinkgoTestDescription struct {
- FullTestText string
- ComponentTexts []string
- TestText string
-
- IsMeasurement bool
-
- FileName string
- LineNumber int
-
- Failed bool
- Duration time.Duration
-}
-
-//CurrentGinkgoTestDescripton returns information about the current running test.
-func CurrentGinkgoTestDescription() GinkgoTestDescription {
- summary, ok := globalSuite.CurrentRunningSpecSummary()
- if !ok {
- return GinkgoTestDescription{}
- }
-
- subjectCodeLocation := summary.ComponentCodeLocations[len(summary.ComponentCodeLocations)-1]
-
- return GinkgoTestDescription{
- ComponentTexts: summary.ComponentTexts[1:],
- FullTestText: strings.Join(summary.ComponentTexts[1:], " "),
- TestText: summary.ComponentTexts[len(summary.ComponentTexts)-1],
- IsMeasurement: summary.IsMeasurement,
- FileName: subjectCodeLocation.FileName,
- LineNumber: subjectCodeLocation.LineNumber,
- Failed: summary.HasFailureState(),
- Duration: summary.RunTime,
- }
-}
-
-//Measurement tests receive a Benchmarker.
-//
-//You use the Time() function to time how long the passed in body function takes to run
-//You use the RecordValue() function to track arbitrary numerical measurements.
-//The RecordValueWithPrecision() function can be used alternatively to provide the unit
-//and resolution of the numeric measurement.
-//The optional info argument is passed to the test reporter and can be used to
-// provide the measurement data to a custom reporter with context.
-//
-//See http://onsi.github.io/ginkgo/#benchmark_tests for more details
-type Benchmarker interface {
- Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
- RecordValue(name string, value float64, info ...interface{})
- RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
-}
-
-//RunSpecs is the entry point for the Ginkgo test runner.
-//You must call this within a Golang testing TestX(t *testing.T) function.
-//
-//To bootstrap a test suite you can use the Ginkgo CLI:
-//
-// ginkgo bootstrap
-func RunSpecs(t GinkgoTestingT, description string) bool {
- specReporters := []Reporter{buildDefaultReporter()}
- if config.DefaultReporterConfig.ReportFile != "" {
- reportFile := config.DefaultReporterConfig.ReportFile
- specReporters[0] = reporters.NewJUnitReporter(reportFile)
- return RunSpecsWithDefaultAndCustomReporters(t, description, specReporters)
- }
- return RunSpecsWithCustomReporters(t, description, specReporters)
-}
-
-//To run your tests with Ginkgo's default reporter and your custom reporter(s), replace
-//RunSpecs() with this method.
-func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
- specReporters = append(specReporters, buildDefaultReporter())
- return RunSpecsWithCustomReporters(t, description, specReporters)
-}
-
-//To run your tests with your custom reporter(s) (and *not* Ginkgo's default reporter), replace
-//RunSpecs() with this method. Note that parallel tests will not work correctly without the default reporter
-func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool {
- writer := GinkgoWriter.(*writer.Writer)
- writer.SetStream(config.DefaultReporterConfig.Verbose)
- reporters := make([]reporters.Reporter, len(specReporters))
- for i, reporter := range specReporters {
- reporters[i] = reporter
- }
- passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig)
- if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
- fmt.Println("PASS | FOCUSED")
- os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
- }
- return passed
-}
-
-func buildDefaultReporter() Reporter {
- remoteReportingServer := config.GinkgoConfig.StreamHost
- if remoteReportingServer == "" {
- stenographer := stenographer.New(!config.DefaultReporterConfig.NoColor, config.GinkgoConfig.FlakeAttempts > 1, colorable.NewColorableStdout())
- return reporters.NewDefaultReporter(config.DefaultReporterConfig, stenographer)
- } else {
- debugFile := ""
- if config.GinkgoConfig.DebugParallel {
- debugFile = fmt.Sprintf("ginkgo-node-%d.log", config.GinkgoConfig.ParallelNode)
- }
- return remote.NewForwardingReporter(config.DefaultReporterConfig, remoteReportingServer, &http.Client{}, remote.NewOutputInterceptor(), GinkgoWriter.(*writer.Writer), debugFile)
- }
-}
-
-//Skip notifies Ginkgo that the current spec was skipped.
-func Skip(message string, callerSkip ...int) {
- skip := 0
- if len(callerSkip) > 0 {
- skip = callerSkip[0]
- }
-
- globalFailer.Skip(message, codelocation.New(skip+1))
- panic(GINKGO_PANIC)
-}
-
-//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
-func Fail(message string, callerSkip ...int) {
- skip := 0
- if len(callerSkip) > 0 {
- skip = callerSkip[0]
- }
-
- globalFailer.Fail(message, codelocation.New(skip+1))
- panic(GINKGO_PANIC)
-}
-
-//GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
-//Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
-//calls out to Gomega
-//
-//Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
-//further assertions from running. This panic must be recovered. Ginkgo does this for you
-//if the panic originates in a Ginkgo node (an It, BeforeEach, etc...)
-//
-//Unfortunately, if a panic originates on a goroutine *launched* from one of these nodes there's no
-//way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
-func GinkgoRecover() {
- e := recover()
- if e != nil {
- globalFailer.Panic(codelocation.New(1), e)
- }
-}
-
-//Describe blocks allow you to organize your specs. A Describe block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typically Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func Describe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FDescribe
-func FDescribe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PDescribe
-func PDescribe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XDescribe
-func XDescribe(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//Context blocks allow you to organize your specs. A Context block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func Context(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FContext
-func FContext(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PContext
-func PContext(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XContext
-func XContext(text string, body func()) bool {
- globalSuite.PushContainerNode(text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//When blocks allow you to organize your specs. A When block can contain any number of
-//BeforeEach, AfterEach, JustBeforeEach, It, and Measurement blocks.
-//
-//In addition you can nest Describe, Context and When blocks. Describe, Context and When blocks are functionally
-//equivalent. The difference is purely semantic -- you typical Describe the behavior of an object
-//or method and, within that Describe, outline a number of Contexts and Whens.
-func When(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypeNone, codelocation.New(1))
- return true
-}
-
-//You can focus the tests within a describe block using FWhen
-func FWhen(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypeFocused, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using PWhen
-func PWhen(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//You can mark the tests within a describe block as pending using XWhen
-func XWhen(text string, body func()) bool {
- globalSuite.PushContainerNode("when "+text, body, types.FlagTypePending, codelocation.New(1))
- return true
-}
-
-//It blocks contain your test code and assertions. You cannot nest any other Ginkgo blocks
-//within an It block.
-//
-//Ginkgo will normally run It blocks synchronously. To perform asynchronous tests, pass a
-//function that accepts a Done channel. When you do this, you can also provide an optional timeout.
-func It(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can focus individual Its using FIt
-func FIt(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can mark Its as pending using PIt
-func PIt(text string, _ ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Its as pending using XIt
-func XIt(text string, _ ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//Specify blocks are aliases for It blocks and allow for more natural wording in situations
-//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks
-//which apply to It blocks.
-func Specify(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeNone, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can focus individual Specifys using FSpecify
-func FSpecify(text string, body interface{}, timeout ...float64) bool {
- globalSuite.PushItNode(text, body, types.FlagTypeFocused, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//You can mark Specifys as pending using PSpecify
-func PSpecify(text string, is ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Specifys as pending using XSpecify
-func XSpecify(text string, is ...interface{}) bool {
- globalSuite.PushItNode(text, func() {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//By allows you to better document large Its.
-//
-//Generally you should try to keep your Its short and to the point. This is not always possible, however,
-//especially in the context of integration tests that capture a particular workflow.
-//
-//By allows you to document such flows. By must be called within a runnable node (It, BeforeEach, Measure, etc...)
-//By will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
-func By(text string, callbacks ...func()) {
- preamble := "\x1b[1mSTEP\x1b[0m"
- if config.DefaultReporterConfig.NoColor {
- preamble = "STEP"
- }
- fmt.Fprintln(GinkgoWriter, preamble+": "+text)
- if len(callbacks) == 1 {
- callbacks[0]()
- }
- if len(callbacks) > 1 {
- panic("just one callback per By, please")
- }
-}
-
-//Measure blocks run the passed in body function repeatedly (determined by the samples argument)
-//and accumulate metrics provided to the Benchmarker by the body function.
-//
-//The body function must have the signature:
-// func(b Benchmarker)
-func Measure(text string, body interface{}, samples int) bool {
- globalSuite.PushMeasureNode(text, body, types.FlagTypeNone, codelocation.New(1), samples)
- return true
-}
-
-//You can focus individual Measures using FMeasure
-func FMeasure(text string, body interface{}, samples int) bool {
- globalSuite.PushMeasureNode(text, body, types.FlagTypeFocused, codelocation.New(1), samples)
- return true
-}
-
-//You can mark Measurements as pending using PMeasure
-func PMeasure(text string, _ ...interface{}) bool {
- globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//You can mark Measurements as pending using XMeasure
-func XMeasure(text string, _ ...interface{}) bool {
- globalSuite.PushMeasureNode(text, func(b Benchmarker) {}, types.FlagTypePending, codelocation.New(1), 0)
- return true
-}
-
-//BeforeSuite blocks are run just once before any specs are run. When running in parallel, each
-//parallel node process will call BeforeSuite.
-//
-//BeforeSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
-//
-//You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
-func BeforeSuite(body interface{}, timeout ...float64) bool {
- globalSuite.SetBeforeSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//AfterSuite blocks are *always* run after all the specs regardless of whether specs have passed or failed.
-//Moreover, if Ginkgo receives an interrupt signal (^C) it will attempt to run the AfterSuite before exiting.
-//
-//When running in parallel, each parallel node process will call AfterSuite.
-//
-//AfterSuite blocks can be made asynchronous by providing a body function that accepts a Done channel
-//
-//You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
-func AfterSuite(body interface{}, timeout ...float64) bool {
- globalSuite.SetAfterSuiteNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//SynchronizedBeforeSuite blocks are primarily meant to solve the problem of setting up singleton external resources shared across
-//nodes when running tests in parallel. For example, say you have a shared database that you can only start one instance of that
-//must be used in your tests. When running in parallel, only one node should set up the database and all other nodes should wait
-//until that node is done before running.
-//
-//SynchronizedBeforeSuite accomplishes this by taking *two* function arguments. The first is only run on parallel node #1. The second is
-//run on all nodes, but *only* after the first function completes successfully. Ginkgo also makes it possible to send data from the first function (on Node 1)
-//to the second function (on all the other nodes).
-//
-//The functions have the following signatures. The first function (which only runs on node 1) has the signature:
-//
-// func() []byte
-//
-//or, to run asynchronously:
-//
-// func(done Done) []byte
-//
-//The byte array returned by the first function is then passed to the second function, which has the signature:
-//
-// func(data []byte)
-//
-//or, to run asynchronously:
-//
-// func(data []byte, done Done)
-//
-//Here's a simple pseudo-code example that starts a shared database on Node 1 and shares the database's address with the other nodes:
-//
-// var dbClient db.Client
-// var dbRunner db.Runner
-//
-// var _ = SynchronizedBeforeSuite(func() []byte {
-// dbRunner = db.NewRunner()
-// err := dbRunner.Start()
-// Ω(err).ShouldNot(HaveOccurred())
-// return []byte(dbRunner.URL)
-// }, func(data []byte) {
-// dbClient = db.NewClient()
-// err := dbClient.Connect(string(data))
-// Ω(err).ShouldNot(HaveOccurred())
-// })
-func SynchronizedBeforeSuite(node1Body interface{}, allNodesBody interface{}, timeout ...float64) bool {
- globalSuite.SetSynchronizedBeforeSuiteNode(
- node1Body,
- allNodesBody,
- codelocation.New(1),
- parseTimeout(timeout...),
- )
- return true
-}
-
-//SynchronizedAfterSuite blocks complement the SynchronizedBeforeSuite blocks in solving the problem of setting up
-//external singleton resources shared across nodes when running tests in parallel.
-//
-//SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all nodes. The second runs only on parallel node #1
-//and *only* after all other nodes have finished and exited. This ensures that node 1, and any resources it is running, remain alive until
-//all other nodes are finished.
-//
-//Both functions have the same signature: either func() or func(done Done) to run asynchronously.
-//
-//Here's a pseudo-code example that complements that given in SynchronizedBeforeSuite. Here, SynchronizedAfterSuite is used to tear down the shared database
-//only after all nodes have finished:
-//
-// var _ = SynchronizedAfterSuite(func() {
-// dbClient.Cleanup()
-// }, func() {
-// dbRunner.Stop()
-// })
-func SynchronizedAfterSuite(allNodesBody interface{}, node1Body interface{}, timeout ...float64) bool {
- globalSuite.SetSynchronizedAfterSuiteNode(
- allNodesBody,
- node1Body,
- codelocation.New(1),
- parseTimeout(timeout...),
- )
- return true
-}
-
-//BeforeEach blocks are run before It blocks. When multiple BeforeEach blocks are defined in nested
-//Describe and Context blocks the outermost BeforeEach blocks are run first.
-//
-//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func BeforeEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//JustBeforeEach blocks are run before It blocks but *after* all BeforeEach blocks. For more details,
-//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
-//
-//Like It blocks, BeforeEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func JustBeforeEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushJustBeforeEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//JustAfterEach blocks are run after It blocks but *before* all AfterEach blocks. For more details,
-//read the [documentation](http://onsi.github.io/ginkgo/#separating_creation_and_configuration_)
-//
-//Like It blocks, JustAfterEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func JustAfterEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushJustAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-//AfterEach blocks are run after It blocks. When multiple AfterEach blocks are defined in nested
-//Describe and Context blocks the innermost AfterEach blocks are run first.
-//
-//Like It blocks, AfterEach blocks can be made asynchronous by providing a body function that accepts
-//a Done channel
-func AfterEach(body interface{}, timeout ...float64) bool {
- globalSuite.PushAfterEachNode(body, codelocation.New(1), parseTimeout(timeout...))
- return true
-}
-
-func parseTimeout(timeout ...float64) time.Duration {
- if len(timeout) == 0 {
- return time.Duration(defaultTimeout * int64(time.Second))
- } else {
- return time.Duration(timeout[0] * float64(time.Second))
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
deleted file mode 100644
index aa89d6cba8a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/codelocation/code_location.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package codelocation
-
-import (
- "regexp"
- "runtime"
- "runtime/debug"
- "strings"
-
- "github.com/onsi/ginkgo/types"
-)
-
-func New(skip int) types.CodeLocation {
- _, file, line, _ := runtime.Caller(skip + 1)
- stackTrace := PruneStack(string(debug.Stack()), skip+1)
- return types.CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace}
-}
-
-// PruneStack removes references to functions that are internal to Ginkgo
-// and the Go runtime from a stack string and a certain number of stack entries
-// at the beginning of the stack. The stack string has the format
-// as returned by runtime/debug.Stack. The leading goroutine information is
-// optional and always removed if present. Beware that runtime/debug.Stack
-// adds itself as first entry, so typically skip must be >= 1 to remove that
-// entry.
-func PruneStack(fullStackTrace string, skip int) string {
- stack := strings.Split(fullStackTrace, "\n")
- // Ensure that the even entries are the method names and the
- // the odd entries the source code information.
- if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") {
- // Ignore "goroutine 29 [running]:" line.
- stack = stack[1:]
- }
- // The "+1" is for skipping over the initial entry, which is
- // runtime/debug.Stack() itself.
- if len(stack) > 2*(skip+1) {
- stack = stack[2*(skip+1):]
- }
- prunedStack := []string{}
- re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
- for i := 0; i < len(stack)/2; i++ {
- // We filter out based on the source code file name.
- if !re.Match([]byte(stack[i*2+1])) {
- prunedStack = append(prunedStack, stack[i*2])
- prunedStack = append(prunedStack, stack[i*2+1])
- }
- }
- return strings.Join(prunedStack, "\n")
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
deleted file mode 100644
index 0737746dcfe..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/containernode/container_node.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package containernode
-
-import (
- "math/rand"
- "sort"
-
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-type subjectOrContainerNode struct {
- containerNode *ContainerNode
- subjectNode leafnodes.SubjectNode
-}
-
-func (n subjectOrContainerNode) text() string {
- if n.containerNode != nil {
- return n.containerNode.Text()
- } else {
- return n.subjectNode.Text()
- }
-}
-
-type CollatedNodes struct {
- Containers []*ContainerNode
- Subject leafnodes.SubjectNode
-}
-
-type ContainerNode struct {
- text string
- flag types.FlagType
- codeLocation types.CodeLocation
-
- setupNodes []leafnodes.BasicNode
- subjectAndContainerNodes []subjectOrContainerNode
-}
-
-func New(text string, flag types.FlagType, codeLocation types.CodeLocation) *ContainerNode {
- return &ContainerNode{
- text: text,
- flag: flag,
- codeLocation: codeLocation,
- }
-}
-
-func (container *ContainerNode) Shuffle(r *rand.Rand) {
- sort.Sort(container)
- permutation := r.Perm(len(container.subjectAndContainerNodes))
- shuffledNodes := make([]subjectOrContainerNode, len(container.subjectAndContainerNodes))
- for i, j := range permutation {
- shuffledNodes[i] = container.subjectAndContainerNodes[j]
- }
- container.subjectAndContainerNodes = shuffledNodes
-}
-
-func (node *ContainerNode) BackPropagateProgrammaticFocus() bool {
- if node.flag == types.FlagTypePending {
- return false
- }
-
- shouldUnfocus := false
- for _, subjectOrContainerNode := range node.subjectAndContainerNodes {
- if subjectOrContainerNode.containerNode != nil {
- shouldUnfocus = subjectOrContainerNode.containerNode.BackPropagateProgrammaticFocus() || shouldUnfocus
- } else {
- shouldUnfocus = (subjectOrContainerNode.subjectNode.Flag() == types.FlagTypeFocused) || shouldUnfocus
- }
- }
-
- if shouldUnfocus {
- if node.flag == types.FlagTypeFocused {
- node.flag = types.FlagTypeNone
- }
- return true
- }
-
- return node.flag == types.FlagTypeFocused
-}
-
-func (node *ContainerNode) Collate() []CollatedNodes {
- return node.collate([]*ContainerNode{})
-}
-
-func (node *ContainerNode) collate(enclosingContainers []*ContainerNode) []CollatedNodes {
- collated := make([]CollatedNodes, 0)
-
- containers := make([]*ContainerNode, len(enclosingContainers))
- copy(containers, enclosingContainers)
- containers = append(containers, node)
-
- for _, subjectOrContainer := range node.subjectAndContainerNodes {
- if subjectOrContainer.containerNode != nil {
- collated = append(collated, subjectOrContainer.containerNode.collate(containers)...)
- } else {
- collated = append(collated, CollatedNodes{
- Containers: containers,
- Subject: subjectOrContainer.subjectNode,
- })
- }
- }
-
- return collated
-}
-
-func (node *ContainerNode) PushContainerNode(container *ContainerNode) {
- node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{containerNode: container})
-}
-
-func (node *ContainerNode) PushSubjectNode(subject leafnodes.SubjectNode) {
- node.subjectAndContainerNodes = append(node.subjectAndContainerNodes, subjectOrContainerNode{subjectNode: subject})
-}
-
-func (node *ContainerNode) PushSetupNode(setupNode leafnodes.BasicNode) {
- node.setupNodes = append(node.setupNodes, setupNode)
-}
-
-func (node *ContainerNode) SetupNodesOfType(nodeType types.SpecComponentType) []leafnodes.BasicNode {
- nodes := []leafnodes.BasicNode{}
- for _, setupNode := range node.setupNodes {
- if setupNode.Type() == nodeType {
- nodes = append(nodes, setupNode)
- }
- }
- return nodes
-}
-
-func (node *ContainerNode) Text() string {
- return node.text
-}
-
-func (node *ContainerNode) CodeLocation() types.CodeLocation {
- return node.codeLocation
-}
-
-func (node *ContainerNode) Flag() types.FlagType {
- return node.flag
-}
-
-//sort.Interface
-
-func (node *ContainerNode) Len() int {
- return len(node.subjectAndContainerNodes)
-}
-
-func (node *ContainerNode) Less(i, j int) bool {
- return node.subjectAndContainerNodes[i].text() < node.subjectAndContainerNodes[j].text()
-}
-
-func (node *ContainerNode) Swap(i, j int) {
- node.subjectAndContainerNodes[i], node.subjectAndContainerNodes[j] = node.subjectAndContainerNodes[j], node.subjectAndContainerNodes[i]
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
deleted file mode 100644
index 393901e11c3..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/benchmarker.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package leafnodes
-
-import (
- "math"
- "time"
-
- "sync"
-
- "github.com/onsi/ginkgo/types"
-)
-
-type benchmarker struct {
- mu sync.Mutex
- measurements map[string]*types.SpecMeasurement
- orderCounter int
-}
-
-func newBenchmarker() *benchmarker {
- return &benchmarker{
- measurements: make(map[string]*types.SpecMeasurement),
- }
-}
-
-func (b *benchmarker) Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration) {
- t := time.Now()
- body()
- elapsedTime = time.Since(t)
-
- b.mu.Lock()
- defer b.mu.Unlock()
- measurement := b.getMeasurement(name, "Fastest Time", "Slowest Time", "Average Time", "s", 3, info...)
- measurement.Results = append(measurement.Results, elapsedTime.Seconds())
-
- return
-}
-
-func (b *benchmarker) RecordValue(name string, value float64, info ...interface{}) {
- b.mu.Lock()
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", "", 3, info...)
- defer b.mu.Unlock()
- measurement.Results = append(measurement.Results, value)
-}
-
-func (b *benchmarker) RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{}) {
- b.mu.Lock()
- measurement := b.getMeasurement(name, "Smallest", " Largest", " Average", units, precision, info...)
- defer b.mu.Unlock()
- measurement.Results = append(measurement.Results, value)
-}
-
-func (b *benchmarker) getMeasurement(name string, smallestLabel string, largestLabel string, averageLabel string, units string, precision int, info ...interface{}) *types.SpecMeasurement {
- measurement, ok := b.measurements[name]
- if !ok {
- var computedInfo interface{}
- computedInfo = nil
- if len(info) > 0 {
- computedInfo = info[0]
- }
- measurement = &types.SpecMeasurement{
- Name: name,
- Info: computedInfo,
- Order: b.orderCounter,
- SmallestLabel: smallestLabel,
- LargestLabel: largestLabel,
- AverageLabel: averageLabel,
- Units: units,
- Precision: precision,
- Results: make([]float64, 0),
- }
- b.measurements[name] = measurement
- b.orderCounter++
- }
-
- return measurement
-}
-
-func (b *benchmarker) measurementsReport() map[string]*types.SpecMeasurement {
- b.mu.Lock()
- defer b.mu.Unlock()
- for _, measurement := range b.measurements {
- measurement.Smallest = math.MaxFloat64
- measurement.Largest = -math.MaxFloat64
- sum := float64(0)
- sumOfSquares := float64(0)
-
- for _, result := range measurement.Results {
- if result > measurement.Largest {
- measurement.Largest = result
- }
- if result < measurement.Smallest {
- measurement.Smallest = result
- }
- sum += result
- sumOfSquares += result * result
- }
-
- n := float64(len(measurement.Results))
- measurement.Average = sum / n
- measurement.StdDeviation = math.Sqrt(sumOfSquares/n - (sum/n)*(sum/n))
- }
-
- return b.measurements
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
deleted file mode 100644
index 8c3902d601c..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/interfaces.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package leafnodes
-
-import (
- "github.com/onsi/ginkgo/types"
-)
-
-type BasicNode interface {
- Type() types.SpecComponentType
- Run() (types.SpecState, types.SpecFailure)
- CodeLocation() types.CodeLocation
-}
-
-type SubjectNode interface {
- BasicNode
-
- Text() string
- Flag() types.FlagType
- Samples() int
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
deleted file mode 100644
index 6eded7b763e..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/it_node.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type ItNode struct {
- runner *runner
-
- flag types.FlagType
- text string
-}
-
-func NewItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *ItNode {
- return &ItNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeIt, componentIndex),
- flag: flag,
- text: text,
- }
-}
-
-func (node *ItNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *ItNode) Type() types.SpecComponentType {
- return types.SpecComponentTypeIt
-}
-
-func (node *ItNode) Text() string {
- return node.text
-}
-
-func (node *ItNode) Flag() types.FlagType {
- return node.flag
-}
-
-func (node *ItNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func (node *ItNode) Samples() int {
- return 1
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
deleted file mode 100644
index 3ab9a6d5524..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/measure_node.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package leafnodes
-
-import (
- "reflect"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type MeasureNode struct {
- runner *runner
-
- text string
- flag types.FlagType
- samples int
- benchmarker *benchmarker
-}
-
-func NewMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int, failer *failer.Failer, componentIndex int) *MeasureNode {
- benchmarker := newBenchmarker()
-
- wrappedBody := func() {
- reflect.ValueOf(body).Call([]reflect.Value{reflect.ValueOf(benchmarker)})
- }
-
- return &MeasureNode{
- runner: newRunner(wrappedBody, codeLocation, 0, failer, types.SpecComponentTypeMeasure, componentIndex),
-
- text: text,
- flag: flag,
- samples: samples,
- benchmarker: benchmarker,
- }
-}
-
-func (node *MeasureNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *MeasureNode) MeasurementsReport() map[string]*types.SpecMeasurement {
- return node.benchmarker.measurementsReport()
-}
-
-func (node *MeasureNode) Type() types.SpecComponentType {
- return types.SpecComponentTypeMeasure
-}
-
-func (node *MeasureNode) Text() string {
- return node.text
-}
-
-func (node *MeasureNode) Flag() types.FlagType {
- return node.flag
-}
-
-func (node *MeasureNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func (node *MeasureNode) Samples() int {
- return node.samples
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
deleted file mode 100644
index 16cb66c3e49..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/runner.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package leafnodes
-
-import (
- "fmt"
- "reflect"
- "time"
-
- "github.com/onsi/ginkgo/internal/codelocation"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type runner struct {
- isAsync bool
- asyncFunc func(chan<- interface{})
- syncFunc func()
- codeLocation types.CodeLocation
- timeoutThreshold time.Duration
- nodeType types.SpecComponentType
- componentIndex int
- failer *failer.Failer
-}
-
-func newRunner(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, nodeType types.SpecComponentType, componentIndex int) *runner {
- bodyType := reflect.TypeOf(body)
- if bodyType.Kind() != reflect.Func {
- panic(fmt.Sprintf("Expected a function but got something else at %v", codeLocation))
- }
-
- runner := &runner{
- codeLocation: codeLocation,
- timeoutThreshold: timeout,
- failer: failer,
- nodeType: nodeType,
- componentIndex: componentIndex,
- }
-
- switch bodyType.NumIn() {
- case 0:
- runner.syncFunc = body.(func())
- return runner
- case 1:
- if !(bodyType.In(0).Kind() == reflect.Chan && bodyType.In(0).Elem().Kind() == reflect.Interface) {
- panic(fmt.Sprintf("Must pass a Done channel to function at %v", codeLocation))
- }
-
- wrappedBody := func(done chan<- interface{}) {
- bodyValue := reflect.ValueOf(body)
- bodyValue.Call([]reflect.Value{reflect.ValueOf(done)})
- }
-
- runner.isAsync = true
- runner.asyncFunc = wrappedBody
- return runner
- }
-
- panic(fmt.Sprintf("Too many arguments to function at %v", codeLocation))
-}
-
-func (r *runner) run() (outcome types.SpecState, failure types.SpecFailure) {
- if r.isAsync {
- return r.runAsync()
- } else {
- return r.runSync()
- }
-}
-
-func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) {
- done := make(chan interface{}, 1)
-
- go func() {
- finished := false
-
- defer func() {
- if e := recover(); e != nil || !finished {
- r.failer.Panic(codelocation.New(2), e)
- select {
- case <-done:
- break
- default:
- close(done)
- }
- }
- }()
-
- r.asyncFunc(done)
- finished = true
- }()
-
- // If this goroutine gets no CPU time before the select block,
- // the <-done case may complete even if the test took longer than the timeoutThreshold.
- // This can cause flaky behaviour, but we haven't seen it in the wild.
- select {
- case <-done:
- case <-time.After(r.timeoutThreshold):
- r.failer.Timeout(r.codeLocation)
- }
-
- failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
- return
-}
-func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
- finished := false
-
- defer func() {
- if e := recover(); e != nil || !finished {
- r.failer.Panic(codelocation.New(2), e)
- }
-
- failure, outcome = r.failer.Drain(r.nodeType, r.componentIndex, r.codeLocation)
- }()
-
- r.syncFunc()
- finished = true
-
- return
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
deleted file mode 100644
index e3e9cb7c588..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/setup_nodes.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type SetupNode struct {
- runner *runner
-}
-
-func (node *SetupNode) Run() (outcome types.SpecState, failure types.SpecFailure) {
- return node.runner.run()
-}
-
-func (node *SetupNode) Type() types.SpecComponentType {
- return node.runner.nodeType
-}
-
-func (node *SetupNode) CodeLocation() types.CodeLocation {
- return node.runner.codeLocation
-}
-
-func NewBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeEach, componentIndex),
- }
-}
-
-func NewAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterEach, componentIndex),
- }
-}
-
-func NewJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustBeforeEach, componentIndex),
- }
-}
-
-func NewJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer, componentIndex int) *SetupNode {
- return &SetupNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeJustAfterEach, componentIndex),
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
deleted file mode 100644
index 80f16ed7861..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/suite_nodes.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package leafnodes
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type SuiteNode interface {
- Run(parallelNode int, parallelTotal int, syncHost string) bool
- Passed() bool
- Summary() *types.SetupSummary
-}
-
-type simpleSuiteNode struct {
- runner *runner
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func (node *simpleSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- t := time.Now()
- node.outcome, node.failure = node.runner.run()
- node.runTime = time.Since(t)
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *simpleSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *simpleSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runner.nodeType,
- CodeLocation: node.runner.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func NewBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &simpleSuiteNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0),
- }
-}
-
-func NewAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &simpleSuiteNode{
- runner: newRunner(body, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
deleted file mode 100644
index a721d0cf7f2..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_after_suite_node.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package leafnodes
-
-import (
- "encoding/json"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type synchronizedAfterSuiteNode struct {
- runnerA *runner
- runnerB *runner
-
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func NewSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- return &synchronizedAfterSuiteNode{
- runnerA: newRunner(bodyA, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- runnerB: newRunner(bodyB, codeLocation, timeout, failer, types.SpecComponentTypeAfterSuite, 0),
- }
-}
-
-func (node *synchronizedAfterSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- node.outcome, node.failure = node.runnerA.run()
-
- if parallelNode == 1 {
- if parallelTotal > 1 {
- node.waitUntilOtherNodesAreDone(syncHost)
- }
-
- outcome, failure := node.runnerB.run()
-
- if node.outcome == types.SpecStatePassed {
- node.outcome, node.failure = outcome, failure
- }
- }
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedAfterSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedAfterSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runnerA.nodeType,
- CodeLocation: node.runnerA.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func (node *synchronizedAfterSuiteNode) waitUntilOtherNodesAreDone(syncHost string) {
- for {
- if node.canRun(syncHost) {
- return
- }
-
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-func (node *synchronizedAfterSuiteNode) canRun(syncHost string) bool {
- resp, err := http.Get(syncHost + "/RemoteAfterSuiteData")
- if err != nil || resp.StatusCode != http.StatusOK {
- return false
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return false
- }
- resp.Body.Close()
-
- afterSuiteData := types.RemoteAfterSuiteData{}
- err = json.Unmarshal(body, &afterSuiteData)
- if err != nil {
- return false
- }
-
- return afterSuiteData.CanRun
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
deleted file mode 100644
index d5c88931940..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/leafnodes/synchronized_before_suite_node.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package leafnodes
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
- "reflect"
- "time"
-
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/types"
-)
-
-type synchronizedBeforeSuiteNode struct {
- runnerA *runner
- runnerB *runner
-
- data []byte
-
- outcome types.SpecState
- failure types.SpecFailure
- runTime time.Duration
-}
-
-func NewSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration, failer *failer.Failer) SuiteNode {
- node := &synchronizedBeforeSuiteNode{}
-
- node.runnerA = newRunner(node.wrapA(bodyA), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
- node.runnerB = newRunner(node.wrapB(bodyB), codeLocation, timeout, failer, types.SpecComponentTypeBeforeSuite, 0)
-
- return node
-}
-
-func (node *synchronizedBeforeSuiteNode) Run(parallelNode int, parallelTotal int, syncHost string) bool {
- t := time.Now()
- defer func() {
- node.runTime = time.Since(t)
- }()
-
- if parallelNode == 1 {
- node.outcome, node.failure = node.runA(parallelTotal, syncHost)
- } else {
- node.outcome, node.failure = node.waitForA(syncHost)
- }
-
- if node.outcome != types.SpecStatePassed {
- return false
- }
- node.outcome, node.failure = node.runnerB.run()
-
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedBeforeSuiteNode) runA(parallelTotal int, syncHost string) (types.SpecState, types.SpecFailure) {
- outcome, failure := node.runnerA.run()
-
- if parallelTotal > 1 {
- state := types.RemoteBeforeSuiteStatePassed
- if outcome != types.SpecStatePassed {
- state = types.RemoteBeforeSuiteStateFailed
- }
- json := (types.RemoteBeforeSuiteData{
- Data: node.data,
- State: state,
- }).ToJSON()
- http.Post(syncHost+"/BeforeSuiteState", "application/json", bytes.NewBuffer(json))
- }
-
- return outcome, failure
-}
-
-func (node *synchronizedBeforeSuiteNode) waitForA(syncHost string) (types.SpecState, types.SpecFailure) {
- failure := func(message string) types.SpecFailure {
- return types.SpecFailure{
- Message: message,
- Location: node.runnerA.codeLocation,
- ComponentType: node.runnerA.nodeType,
- ComponentIndex: node.runnerA.componentIndex,
- ComponentCodeLocation: node.runnerA.codeLocation,
- }
- }
- for {
- resp, err := http.Get(syncHost + "/BeforeSuiteState")
- if err != nil || resp.StatusCode != http.StatusOK {
- return types.SpecStateFailed, failure("Failed to fetch BeforeSuite state")
- }
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return types.SpecStateFailed, failure("Failed to read BeforeSuite state")
- }
- resp.Body.Close()
-
- beforeSuiteData := types.RemoteBeforeSuiteData{}
- err = json.Unmarshal(body, &beforeSuiteData)
- if err != nil {
- return types.SpecStateFailed, failure("Failed to decode BeforeSuite state")
- }
-
- switch beforeSuiteData.State {
- case types.RemoteBeforeSuiteStatePassed:
- node.data = beforeSuiteData.Data
- return types.SpecStatePassed, types.SpecFailure{}
- case types.RemoteBeforeSuiteStateFailed:
- return types.SpecStateFailed, failure("BeforeSuite on Node 1 failed")
- case types.RemoteBeforeSuiteStateDisappeared:
- return types.SpecStateFailed, failure("Node 1 disappeared before completing BeforeSuite")
- }
-
- time.Sleep(50 * time.Millisecond)
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) Passed() bool {
- return node.outcome == types.SpecStatePassed
-}
-
-func (node *synchronizedBeforeSuiteNode) Summary() *types.SetupSummary {
- return &types.SetupSummary{
- ComponentType: node.runnerA.nodeType,
- CodeLocation: node.runnerA.codeLocation,
- State: node.outcome,
- RunTime: node.runTime,
- Failure: node.failure,
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) wrapA(bodyA interface{}) interface{} {
- typeA := reflect.TypeOf(bodyA)
- if typeA.Kind() != reflect.Func {
- panic("SynchronizedBeforeSuite expects a function as its first argument")
- }
-
- takesNothing := typeA.NumIn() == 0
- takesADoneChannel := typeA.NumIn() == 1 && typeA.In(0).Kind() == reflect.Chan && typeA.In(0).Elem().Kind() == reflect.Interface
- returnsBytes := typeA.NumOut() == 1 && typeA.Out(0).Kind() == reflect.Slice && typeA.Out(0).Elem().Kind() == reflect.Uint8
-
- if !((takesNothing || takesADoneChannel) && returnsBytes) {
- panic("SynchronizedBeforeSuite's first argument should be a function that returns []byte and either takes no arguments or takes a Done channel.")
- }
-
- if takesADoneChannel {
- return func(done chan<- interface{}) {
- out := reflect.ValueOf(bodyA).Call([]reflect.Value{reflect.ValueOf(done)})
- node.data = out[0].Interface().([]byte)
- }
- }
-
- return func() {
- out := reflect.ValueOf(bodyA).Call([]reflect.Value{})
- node.data = out[0].Interface().([]byte)
- }
-}
-
-func (node *synchronizedBeforeSuiteNode) wrapB(bodyB interface{}) interface{} {
- typeB := reflect.TypeOf(bodyB)
- if typeB.Kind() != reflect.Func {
- panic("SynchronizedBeforeSuite expects a function as its second argument")
- }
-
- returnsNothing := typeB.NumOut() == 0
- takesBytesOnly := typeB.NumIn() == 1 && typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8
- takesBytesAndDone := typeB.NumIn() == 2 &&
- typeB.In(0).Kind() == reflect.Slice && typeB.In(0).Elem().Kind() == reflect.Uint8 &&
- typeB.In(1).Kind() == reflect.Chan && typeB.In(1).Elem().Kind() == reflect.Interface
-
- if !((takesBytesOnly || takesBytesAndDone) && returnsNothing) {
- panic("SynchronizedBeforeSuite's second argument should be a function that returns nothing and either takes []byte or ([]byte, Done)")
- }
-
- if takesBytesAndDone {
- return func(done chan<- interface{}) {
- reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data), reflect.ValueOf(done)})
- }
- }
-
- return func() {
- reflect.ValueOf(bodyB).Call([]reflect.Value{reflect.ValueOf(node.data)})
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
deleted file mode 100644
index f9ab3006758..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
-
-Aggregator is a reporter used by the Ginkgo CLI to aggregate and present parallel test output
-coherently as tests complete. You shouldn't need to use this in your code. To run tests in parallel:
-
- ginkgo -nodes=N
-
-where N is the number of nodes you desire.
-*/
-package remote
-
-import (
- "time"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
-)
-
-type configAndSuite struct {
- config config.GinkgoConfigType
- summary *types.SuiteSummary
-}
-
-type Aggregator struct {
- nodeCount int
- config config.DefaultReporterConfigType
- stenographer stenographer.Stenographer
- result chan bool
-
- suiteBeginnings chan configAndSuite
- aggregatedSuiteBeginnings []configAndSuite
-
- beforeSuites chan *types.SetupSummary
- aggregatedBeforeSuites []*types.SetupSummary
-
- afterSuites chan *types.SetupSummary
- aggregatedAfterSuites []*types.SetupSummary
-
- specCompletions chan *types.SpecSummary
- completedSpecs []*types.SpecSummary
-
- suiteEndings chan *types.SuiteSummary
- aggregatedSuiteEndings []*types.SuiteSummary
- specs []*types.SpecSummary
-
- startTime time.Time
-}
-
-func NewAggregator(nodeCount int, result chan bool, config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *Aggregator {
- aggregator := &Aggregator{
- nodeCount: nodeCount,
- result: result,
- config: config,
- stenographer: stenographer,
-
- suiteBeginnings: make(chan configAndSuite),
- beforeSuites: make(chan *types.SetupSummary),
- afterSuites: make(chan *types.SetupSummary),
- specCompletions: make(chan *types.SpecSummary),
- suiteEndings: make(chan *types.SuiteSummary),
- }
-
- go aggregator.mux()
-
- return aggregator
-}
-
-func (aggregator *Aggregator) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- aggregator.suiteBeginnings <- configAndSuite{config, summary}
-}
-
-func (aggregator *Aggregator) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- aggregator.beforeSuites <- setupSummary
-}
-
-func (aggregator *Aggregator) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- aggregator.afterSuites <- setupSummary
-}
-
-func (aggregator *Aggregator) SpecWillRun(specSummary *types.SpecSummary) {
- //noop
-}
-
-func (aggregator *Aggregator) SpecDidComplete(specSummary *types.SpecSummary) {
- aggregator.specCompletions <- specSummary
-}
-
-func (aggregator *Aggregator) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- aggregator.suiteEndings <- summary
-}
-
-func (aggregator *Aggregator) mux() {
-loop:
- for {
- select {
- case configAndSuite := <-aggregator.suiteBeginnings:
- aggregator.registerSuiteBeginning(configAndSuite)
- case setupSummary := <-aggregator.beforeSuites:
- aggregator.registerBeforeSuite(setupSummary)
- case setupSummary := <-aggregator.afterSuites:
- aggregator.registerAfterSuite(setupSummary)
- case specSummary := <-aggregator.specCompletions:
- aggregator.registerSpecCompletion(specSummary)
- case suite := <-aggregator.suiteEndings:
- finished, passed := aggregator.registerSuiteEnding(suite)
- if finished {
- aggregator.result <- passed
- break loop
- }
- }
- }
-}
-
-func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSuite) {
- aggregator.aggregatedSuiteBeginnings = append(aggregator.aggregatedSuiteBeginnings, configAndSuite)
-
- if len(aggregator.aggregatedSuiteBeginnings) == 1 {
- aggregator.startTime = time.Now()
- }
-
- if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
- return
- }
-
- aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
-
- totalNumberOfSpecs := 0
- if len(aggregator.aggregatedSuiteBeginnings) > 0 {
- totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
- }
-
- aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
- aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerBeforeSuite(setupSummary *types.SetupSummary) {
- aggregator.aggregatedBeforeSuites = append(aggregator.aggregatedBeforeSuites, setupSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerAfterSuite(setupSummary *types.SetupSummary) {
- aggregator.aggregatedAfterSuites = append(aggregator.aggregatedAfterSuites, setupSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) registerSpecCompletion(specSummary *types.SpecSummary) {
- aggregator.completedSpecs = append(aggregator.completedSpecs, specSummary)
- aggregator.specs = append(aggregator.specs, specSummary)
- aggregator.flushCompletedSpecs()
-}
-
-func (aggregator *Aggregator) flushCompletedSpecs() {
- if len(aggregator.aggregatedSuiteBeginnings) != aggregator.nodeCount {
- return
- }
-
- for _, setupSummary := range aggregator.aggregatedBeforeSuites {
- aggregator.announceBeforeSuite(setupSummary)
- }
-
- for _, specSummary := range aggregator.completedSpecs {
- aggregator.announceSpec(specSummary)
- }
-
- for _, setupSummary := range aggregator.aggregatedAfterSuites {
- aggregator.announceAfterSuite(setupSummary)
- }
-
- aggregator.aggregatedBeforeSuites = []*types.SetupSummary{}
- aggregator.completedSpecs = []*types.SpecSummary{}
- aggregator.aggregatedAfterSuites = []*types.SetupSummary{}
-}
-
-func (aggregator *Aggregator) announceBeforeSuite(setupSummary *types.SetupSummary) {
- aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
- if setupSummary.State != types.SpecStatePassed {
- aggregator.stenographer.AnnounceBeforeSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) announceAfterSuite(setupSummary *types.SetupSummary) {
- aggregator.stenographer.AnnounceCapturedOutput(setupSummary.CapturedOutput)
- if setupSummary.State != types.SpecStatePassed {
- aggregator.stenographer.AnnounceAfterSuiteFailure(setupSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
- if aggregator.config.Verbose && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
- aggregator.stenographer.AnnounceSpecWillRun(specSummary)
- }
-
- aggregator.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
-
- switch specSummary.State {
- case types.SpecStatePassed:
- if specSummary.IsMeasurement {
- aggregator.stenographer.AnnounceSuccesfulMeasurement(specSummary, aggregator.config.Succinct)
- } else if specSummary.RunTime.Seconds() >= aggregator.config.SlowSpecThreshold {
- aggregator.stenographer.AnnounceSuccesfulSlowSpec(specSummary, aggregator.config.Succinct)
- } else {
- aggregator.stenographer.AnnounceSuccesfulSpec(specSummary)
- }
-
- case types.SpecStatePending:
- aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
- case types.SpecStateSkipped:
- aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct || !aggregator.config.NoisySkippings, aggregator.config.FullTrace)
- case types.SpecStateTimedOut:
- aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- case types.SpecStatePanicked:
- aggregator.stenographer.AnnounceSpecPanicked(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- case types.SpecStateFailed:
- aggregator.stenographer.AnnounceSpecFailed(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
- }
-}
-
-func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (finished bool, passed bool) {
- aggregator.aggregatedSuiteEndings = append(aggregator.aggregatedSuiteEndings, suite)
- if len(aggregator.aggregatedSuiteEndings) < aggregator.nodeCount {
- return false, false
- }
-
- aggregatedSuiteSummary := &types.SuiteSummary{}
- aggregatedSuiteSummary.SuiteSucceeded = true
-
- for _, suiteSummary := range aggregator.aggregatedSuiteEndings {
- if !suiteSummary.SuiteSucceeded {
- aggregatedSuiteSummary.SuiteSucceeded = false
- }
-
- aggregatedSuiteSummary.NumberOfSpecsThatWillBeRun += suiteSummary.NumberOfSpecsThatWillBeRun
- aggregatedSuiteSummary.NumberOfTotalSpecs += suiteSummary.NumberOfTotalSpecs
- aggregatedSuiteSummary.NumberOfPassedSpecs += suiteSummary.NumberOfPassedSpecs
- aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs
- aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs
- aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs
- aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs
- }
-
- aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime)
-
- aggregator.stenographer.SummarizeFailures(aggregator.specs)
- aggregator.stenographer.AnnounceSpecRunCompletion(aggregatedSuiteSummary, aggregator.config.Succinct)
-
- return true, aggregatedSuiteSummary.SuiteSucceeded
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
deleted file mode 100644
index 284bc62e5e8..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/forwarding_reporter.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package remote
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "os"
-
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/reporters/stenographer"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-//An interface to net/http's client to allow the injection of fakes under test
-type Poster interface {
- Post(url string, bodyType string, body io.Reader) (resp *http.Response, err error)
-}
-
-/*
-The ForwardingReporter is a Ginkgo reporter that forwards information to
-a Ginkgo remote server.
-
-When streaming parallel test output, this repoter is automatically installed by Ginkgo.
-
-This is accomplished by passing in the GINKGO_REMOTE_REPORTING_SERVER environment variable to `go test`, the Ginkgo test runner
-detects this environment variable (which should contain the host of the server) and automatically installs a ForwardingReporter
-in place of Ginkgo's DefaultReporter.
-*/
-
-type ForwardingReporter struct {
- serverHost string
- poster Poster
- outputInterceptor OutputInterceptor
- debugMode bool
- debugFile *os.File
- nestedReporter *reporters.DefaultReporter
-}
-
-func NewForwardingReporter(config config.DefaultReporterConfigType, serverHost string, poster Poster, outputInterceptor OutputInterceptor, ginkgoWriter *writer.Writer, debugFile string) *ForwardingReporter {
- reporter := &ForwardingReporter{
- serverHost: serverHost,
- poster: poster,
- outputInterceptor: outputInterceptor,
- }
-
- if debugFile != "" {
- var err error
- reporter.debugMode = true
- reporter.debugFile, err = os.Create(debugFile)
- if err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
-
- if !config.Verbose {
- //if verbose is true then the GinkgoWriter emits to stdout. Don't _also_ redirect GinkgoWriter output as that will result in duplication.
- ginkgoWriter.AndRedirectTo(reporter.debugFile)
- }
- outputInterceptor.StreamTo(reporter.debugFile) //This is not working
-
- stenographer := stenographer.New(false, true, reporter.debugFile)
- config.Succinct = false
- config.Verbose = true
- config.FullTrace = true
- reporter.nestedReporter = reporters.NewDefaultReporter(config, stenographer)
- }
-
- return reporter
-}
-
-func (reporter *ForwardingReporter) post(path string, data interface{}) {
- encoded, _ := json.Marshal(data)
- buffer := bytes.NewBuffer(encoded)
- reporter.poster.Post(reporter.serverHost+path, "application/json", buffer)
-}
-
-func (reporter *ForwardingReporter) SpecSuiteWillBegin(conf config.GinkgoConfigType, summary *types.SuiteSummary) {
- data := struct {
- Config config.GinkgoConfigType `json:"config"`
- Summary *types.SuiteSummary `json:"suite-summary"`
- }{
- conf,
- summary,
- }
-
- reporter.outputInterceptor.StartInterceptingOutput()
- if reporter.debugMode {
- reporter.nestedReporter.SpecSuiteWillBegin(conf, summary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecSuiteWillBegin", data)
-}
-
-func (reporter *ForwardingReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- setupSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.BeforeSuiteDidRun(setupSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/BeforeSuiteDidRun", setupSummary)
-}
-
-func (reporter *ForwardingReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if reporter.debugMode {
- reporter.nestedReporter.SpecWillRun(specSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecWillRun", specSummary)
-}
-
-func (reporter *ForwardingReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- specSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.SpecDidComplete(specSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecDidComplete", specSummary)
-}
-
-func (reporter *ForwardingReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- output, _ := reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- reporter.outputInterceptor.StartInterceptingOutput()
- setupSummary.CapturedOutput = output
- if reporter.debugMode {
- reporter.nestedReporter.AfterSuiteDidRun(setupSummary)
- reporter.debugFile.Sync()
- }
- reporter.post("/AfterSuiteDidRun", setupSummary)
-}
-
-func (reporter *ForwardingReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.outputInterceptor.StopInterceptingAndReturnOutput()
- if reporter.debugMode {
- reporter.nestedReporter.SpecSuiteDidEnd(summary)
- reporter.debugFile.Sync()
- }
- reporter.post("/SpecSuiteDidEnd", summary)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
deleted file mode 100644
index 5154abe87d5..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package remote
-
-import "os"
-
-/*
-The OutputInterceptor is used by the ForwardingReporter to
-intercept and capture all stdin and stderr output during a test run.
-*/
-type OutputInterceptor interface {
- StartInterceptingOutput() error
- StopInterceptingAndReturnOutput() (string, error)
- StreamTo(*os.File)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
deleted file mode 100644
index b5cf3ded84e..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go
+++ /dev/null
@@ -1,84 +0,0 @@
-//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
-// +build freebsd openbsd netbsd dragonfly darwin linux solaris
-
-package remote
-
-import (
- "errors"
- "io/ioutil"
- "os"
-
- "github.com/hpcloud/tail"
-)
-
-func NewOutputInterceptor() OutputInterceptor {
- return &outputInterceptor{}
-}
-
-type outputInterceptor struct {
- redirectFile *os.File
- streamTarget *os.File
- intercepting bool
- tailer *tail.Tail
- doneTailing chan bool
-}
-
-func (interceptor *outputInterceptor) StartInterceptingOutput() error {
- if interceptor.intercepting {
- return errors.New("Already intercepting output!")
- }
- interceptor.intercepting = true
-
- var err error
-
- interceptor.redirectFile, err = ioutil.TempFile("", "ginkgo-output")
- if err != nil {
- return err
- }
-
- // Call a function in ./syscall_dup_*.go
- // If building for everything other than linux_arm64,
- // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2)
- // call syscall.Dup3(oldfd, newfd, 0). They are nearly identical, see: http://linux.die.net/man/2/dup3
- syscallDup(int(interceptor.redirectFile.Fd()), 1)
- syscallDup(int(interceptor.redirectFile.Fd()), 2)
-
- if interceptor.streamTarget != nil {
- interceptor.tailer, _ = tail.TailFile(interceptor.redirectFile.Name(), tail.Config{Follow: true})
- interceptor.doneTailing = make(chan bool)
-
- go func() {
- for line := range interceptor.tailer.Lines {
- interceptor.streamTarget.Write([]byte(line.Text + "\n"))
- }
- close(interceptor.doneTailing)
- }()
- }
-
- return nil
-}
-
-func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
- if !interceptor.intercepting {
- return "", errors.New("Not intercepting output!")
- }
-
- interceptor.redirectFile.Close()
- output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
- os.Remove(interceptor.redirectFile.Name())
-
- interceptor.intercepting = false
-
- if interceptor.streamTarget != nil {
- interceptor.tailer.Stop()
- interceptor.tailer.Cleanup()
- <-interceptor.doneTailing
- interceptor.streamTarget.Sync()
- }
-
- return string(output), err
-}
-
-func (interceptor *outputInterceptor) StreamTo(out *os.File) {
- interceptor.streamTarget = out
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
deleted file mode 100644
index 5d088101e10..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/output_interceptor_win.go
+++ /dev/null
@@ -1,37 +0,0 @@
-//go:build windows
-// +build windows
-
-package remote
-
-import (
- "errors"
- "os"
-)
-
-func NewOutputInterceptor() OutputInterceptor {
- return &outputInterceptor{}
-}
-
-type outputInterceptor struct {
- intercepting bool
-}
-
-func (interceptor *outputInterceptor) StartInterceptingOutput() error {
- if interceptor.intercepting {
- return errors.New("Already intercepting output!")
- }
- interceptor.intercepting = true
-
- // not working on windows...
-
- return nil
-}
-
-func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string, error) {
- // not working on windows...
- interceptor.intercepting = false
-
- return "", nil
-}
-
-func (interceptor *outputInterceptor) StreamTo(*os.File) {}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/server.go
deleted file mode 100644
index 93e9dac057d..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/server.go
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
-
-The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
-This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser).
-
-*/
-
-package remote
-
-import (
- "encoding/json"
- "io/ioutil"
- "net"
- "net/http"
- "sync"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-)
-
-/*
-Server spins up on an automatically selected port and listens for communication from the forwarding reporter.
-It then forwards that communication to attached reporters.
-*/
-type Server struct {
- listener net.Listener
- reporters []reporters.Reporter
- alives []func() bool
- lock *sync.Mutex
- beforeSuiteData types.RemoteBeforeSuiteData
- parallelTotal int
- counter int
-}
-
-//Create a new server, automatically selecting a port
-func NewServer(parallelTotal int) (*Server, error) {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- return nil, err
- }
- return &Server{
- listener: listener,
- lock: &sync.Mutex{},
- alives: make([]func() bool, parallelTotal),
- beforeSuiteData: types.RemoteBeforeSuiteData{Data: nil, State: types.RemoteBeforeSuiteStatePending},
- parallelTotal: parallelTotal,
- }, nil
-}
-
-//Start the server. You don't need to `go s.Start()`, just `s.Start()`
-func (server *Server) Start() {
- httpServer := &http.Server{}
- mux := http.NewServeMux()
- httpServer.Handler = mux
-
- //streaming endpoints
- mux.HandleFunc("/SpecSuiteWillBegin", server.specSuiteWillBegin)
- mux.HandleFunc("/BeforeSuiteDidRun", server.beforeSuiteDidRun)
- mux.HandleFunc("/AfterSuiteDidRun", server.afterSuiteDidRun)
- mux.HandleFunc("/SpecWillRun", server.specWillRun)
- mux.HandleFunc("/SpecDidComplete", server.specDidComplete)
- mux.HandleFunc("/SpecSuiteDidEnd", server.specSuiteDidEnd)
-
- //synchronization endpoints
- mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
- mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
- mux.HandleFunc("/counter", server.handleCounter)
- mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
-
- go httpServer.Serve(server.listener)
-}
-
-//Stop the server
-func (server *Server) Close() {
- server.listener.Close()
-}
-
-//The address the server can be reached it. Pass this into the `ForwardingReporter`.
-func (server *Server) Address() string {
- return "http://" + server.listener.Addr().String()
-}
-
-//
-// Streaming Endpoints
-//
-
-//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
-func (server *Server) readAll(request *http.Request) []byte {
- defer request.Body.Close()
- body, _ := ioutil.ReadAll(request.Body)
- return body
-}
-
-func (server *Server) RegisterReporters(reporters ...reporters.Reporter) {
- server.reporters = reporters
-}
-
-func (server *Server) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
-
- var data struct {
- Config config.GinkgoConfigType `json:"config"`
- Summary *types.SuiteSummary `json:"suite-summary"`
- }
-
- json.Unmarshal(body, &data)
-
- for _, reporter := range server.reporters {
- reporter.SpecSuiteWillBegin(data.Config, data.Summary)
- }
-}
-
-func (server *Server) beforeSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var setupSummary *types.SetupSummary
- json.Unmarshal(body, &setupSummary)
-
- for _, reporter := range server.reporters {
- reporter.BeforeSuiteDidRun(setupSummary)
- }
-}
-
-func (server *Server) afterSuiteDidRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var setupSummary *types.SetupSummary
- json.Unmarshal(body, &setupSummary)
-
- for _, reporter := range server.reporters {
- reporter.AfterSuiteDidRun(setupSummary)
- }
-}
-
-func (server *Server) specWillRun(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var specSummary *types.SpecSummary
- json.Unmarshal(body, &specSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecWillRun(specSummary)
- }
-}
-
-func (server *Server) specDidComplete(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var specSummary *types.SpecSummary
- json.Unmarshal(body, &specSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecDidComplete(specSummary)
- }
-}
-
-func (server *Server) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
- body := server.readAll(request)
- var suiteSummary *types.SuiteSummary
- json.Unmarshal(body, &suiteSummary)
-
- for _, reporter := range server.reporters {
- reporter.SpecSuiteDidEnd(suiteSummary)
- }
-}
-
-//
-// Synchronization Endpoints
-//
-
-func (server *Server) RegisterAlive(node int, alive func() bool) {
- server.lock.Lock()
- defer server.lock.Unlock()
- server.alives[node-1] = alive
-}
-
-func (server *Server) nodeIsAlive(node int) bool {
- server.lock.Lock()
- defer server.lock.Unlock()
- alive := server.alives[node-1]
- if alive == nil {
- return true
- }
- return alive()
-}
-
-func (server *Server) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
- if request.Method == "POST" {
- dec := json.NewDecoder(request.Body)
- dec.Decode(&(server.beforeSuiteData))
- } else {
- beforeSuiteData := server.beforeSuiteData
- if beforeSuiteData.State == types.RemoteBeforeSuiteStatePending && !server.nodeIsAlive(1) {
- beforeSuiteData.State = types.RemoteBeforeSuiteStateDisappeared
- }
- enc := json.NewEncoder(writer)
- enc.Encode(beforeSuiteData)
- }
-}
-
-func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, request *http.Request) {
- afterSuiteData := types.RemoteAfterSuiteData{
- CanRun: true,
- }
- for i := 2; i <= server.parallelTotal; i++ {
- afterSuiteData.CanRun = afterSuiteData.CanRun && !server.nodeIsAlive(i)
- }
-
- enc := json.NewEncoder(writer)
- enc.Encode(afterSuiteData)
-}
-
-func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
- c := spec_iterator.Counter{}
- server.lock.Lock()
- c.Index = server.counter
- server.counter++
- server.lock.Unlock()
-
- json.NewEncoder(writer).Encode(c)
-}
-
-func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
- writer.Write([]byte(""))
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
deleted file mode 100644
index 45ff7c1ca47..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build linux && arm64
-// +build linux,arm64
-
-package remote
-
-import "syscall"
-
-// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so
-// use the nearly identical syscall.Dup3 instead
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup3(oldfd, newfd, 0)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go
deleted file mode 100644
index dc0bc2e3d68..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_riscv64.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build linux && riscv64
-// +build linux,riscv64
-
-package remote
-
-import "syscall"
-
-// linux_riscv64 doesn't have syscall.Dup2 which ginkgo uses, so
-// use the nearly identical syscall.Dup3 instead
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup3(oldfd, newfd, 0)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
deleted file mode 100644
index dd32861567f..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_solaris.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build solaris
-// +build solaris
-
-package remote
-
-import "golang.org/x/sys/unix"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return unix.Dup2(oldfd, newfd)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
deleted file mode 100644
index 8dd7f33213a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build (!linux || !arm64) && (!linux || !riscv64) && !windows && !solaris
-// +build !linux !arm64
-// +build !linux !riscv64
-// +build !windows
-// +build !solaris
-
-package remote
-
-import "syscall"
-
-func syscallDup(oldfd int, newfd int) (err error) {
- return syscall.Dup2(oldfd, newfd)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec/spec.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
deleted file mode 100644
index 6eef40a0e0c..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec/spec.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package spec
-
-import (
- "fmt"
- "io"
- "time"
-
- "sync"
-
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/types"
-)
-
-type Spec struct {
- subject leafnodes.SubjectNode
- focused bool
- announceProgress bool
-
- containers []*containernode.ContainerNode
-
- state types.SpecState
- runTime time.Duration
- startTime time.Time
- failure types.SpecFailure
- previousFailures bool
-
- stateMutex *sync.Mutex
-}
-
-func New(subject leafnodes.SubjectNode, containers []*containernode.ContainerNode, announceProgress bool) *Spec {
- spec := &Spec{
- subject: subject,
- containers: containers,
- focused: subject.Flag() == types.FlagTypeFocused,
- announceProgress: announceProgress,
- stateMutex: &sync.Mutex{},
- }
-
- spec.processFlag(subject.Flag())
- for i := len(containers) - 1; i >= 0; i-- {
- spec.processFlag(containers[i].Flag())
- }
-
- return spec
-}
-
-func (spec *Spec) processFlag(flag types.FlagType) {
- if flag == types.FlagTypeFocused {
- spec.focused = true
- } else if flag == types.FlagTypePending {
- spec.setState(types.SpecStatePending)
- }
-}
-
-func (spec *Spec) Skip() {
- spec.setState(types.SpecStateSkipped)
-}
-
-func (spec *Spec) Failed() bool {
- return spec.getState() == types.SpecStateFailed || spec.getState() == types.SpecStatePanicked || spec.getState() == types.SpecStateTimedOut
-}
-
-func (spec *Spec) Passed() bool {
- return spec.getState() == types.SpecStatePassed
-}
-
-func (spec *Spec) Flaked() bool {
- return spec.getState() == types.SpecStatePassed && spec.previousFailures
-}
-
-func (spec *Spec) Pending() bool {
- return spec.getState() == types.SpecStatePending
-}
-
-func (spec *Spec) Skipped() bool {
- return spec.getState() == types.SpecStateSkipped
-}
-
-func (spec *Spec) Focused() bool {
- return spec.focused
-}
-
-func (spec *Spec) IsMeasurement() bool {
- return spec.subject.Type() == types.SpecComponentTypeMeasure
-}
-
-func (spec *Spec) Summary(suiteID string) *types.SpecSummary {
- componentTexts := make([]string, len(spec.containers)+1)
- componentCodeLocations := make([]types.CodeLocation, len(spec.containers)+1)
-
- for i, container := range spec.containers {
- componentTexts[i] = container.Text()
- componentCodeLocations[i] = container.CodeLocation()
- }
-
- componentTexts[len(spec.containers)] = spec.subject.Text()
- componentCodeLocations[len(spec.containers)] = spec.subject.CodeLocation()
-
- runTime := spec.runTime
- if runTime == 0 && !spec.startTime.IsZero() {
- runTime = time.Since(spec.startTime)
- }
-
- return &types.SpecSummary{
- IsMeasurement: spec.IsMeasurement(),
- NumberOfSamples: spec.subject.Samples(),
- ComponentTexts: componentTexts,
- ComponentCodeLocations: componentCodeLocations,
- State: spec.getState(),
- RunTime: runTime,
- Failure: spec.failure,
- Measurements: spec.measurementsReport(),
- SuiteID: suiteID,
- }
-}
-
-func (spec *Spec) ConcatenatedString() string {
- s := ""
- for _, container := range spec.containers {
- s += container.Text() + " "
- }
-
- return s + spec.subject.Text()
-}
-
-func (spec *Spec) Run(writer io.Writer) {
- if spec.getState() == types.SpecStateFailed {
- spec.previousFailures = true
- }
-
- spec.startTime = time.Now()
- defer func() {
- spec.runTime = time.Since(spec.startTime)
- }()
-
- for sample := 0; sample < spec.subject.Samples(); sample++ {
- spec.runSample(sample, writer)
-
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
-}
-
-func (spec *Spec) getState() types.SpecState {
- spec.stateMutex.Lock()
- defer spec.stateMutex.Unlock()
- return spec.state
-}
-
-func (spec *Spec) setState(state types.SpecState) {
- spec.stateMutex.Lock()
- defer spec.stateMutex.Unlock()
- spec.state = state
-}
-
-func (spec *Spec) runSample(sample int, writer io.Writer) {
- spec.setState(types.SpecStatePassed)
- spec.failure = types.SpecFailure{}
- innerMostContainerIndexToUnwind := -1
-
- defer func() {
- for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
- container := spec.containers[i]
- for _, justAfterEach := range container.SetupNodesOfType(types.SpecComponentTypeJustAfterEach) {
- spec.announceSetupNode(writer, "JustAfterEach", container, justAfterEach)
- justAfterEachState, justAfterEachFailure := justAfterEach.Run()
- if justAfterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
- spec.state = justAfterEachState
- spec.failure = justAfterEachFailure
- }
- }
- }
-
- for i := innerMostContainerIndexToUnwind; i >= 0; i-- {
- container := spec.containers[i]
- for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
- spec.announceSetupNode(writer, "AfterEach", container, afterEach)
- afterEachState, afterEachFailure := afterEach.Run()
- if afterEachState != types.SpecStatePassed && spec.getState() == types.SpecStatePassed {
- spec.setState(afterEachState)
- spec.failure = afterEachFailure
- }
- }
- }
- }()
-
- for i, container := range spec.containers {
- innerMostContainerIndexToUnwind = i
- for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
- spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
- s, f := beforeEach.Run()
- spec.failure = f
- spec.setState(s)
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
- }
-
- for _, container := range spec.containers {
- for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
- spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
- s, f := justBeforeEach.Run()
- spec.failure = f
- spec.setState(s)
- if spec.getState() != types.SpecStatePassed {
- return
- }
- }
- }
-
- spec.announceSubject(writer, spec.subject)
- s, f := spec.subject.Run()
- spec.failure = f
- spec.setState(s)
-}
-
-func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
- if spec.announceProgress {
- s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, container.Text(), setupNode.CodeLocation().String())
- writer.Write([]byte(s))
- }
-}
-
-func (spec *Spec) announceSubject(writer io.Writer, subject leafnodes.SubjectNode) {
- if spec.announceProgress {
- nodeType := ""
- switch subject.Type() {
- case types.SpecComponentTypeIt:
- nodeType = "It"
- case types.SpecComponentTypeMeasure:
- nodeType = "Measure"
- }
- s := fmt.Sprintf("[%s] %s\n %s\n", nodeType, subject.Text(), subject.CodeLocation().String())
- writer.Write([]byte(s))
- }
-}
-
-func (spec *Spec) measurementsReport() map[string]*types.SpecMeasurement {
- if !spec.IsMeasurement() || spec.Failed() {
- return map[string]*types.SpecMeasurement{}
- }
-
- return spec.subject.(*leafnodes.MeasureNode).MeasurementsReport()
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
deleted file mode 100644
index 8a20071375f..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec/specs.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package spec
-
-import (
- "math/rand"
- "regexp"
- "sort"
-)
-
-type Specs struct {
- specs []*Spec
- names []string
-
- hasProgrammaticFocus bool
- RegexScansFilePath bool
-}
-
-func NewSpecs(specs []*Spec) *Specs {
- names := make([]string, len(specs))
- for i, spec := range specs {
- names[i] = spec.ConcatenatedString()
- }
- return &Specs{
- specs: specs,
- names: names,
- }
-}
-
-func (e *Specs) Specs() []*Spec {
- return e.specs
-}
-
-func (e *Specs) HasProgrammaticFocus() bool {
- return e.hasProgrammaticFocus
-}
-
-func (e *Specs) Shuffle(r *rand.Rand) {
- sort.Sort(e)
- permutation := r.Perm(len(e.specs))
- shuffledSpecs := make([]*Spec, len(e.specs))
- names := make([]string, len(e.specs))
- for i, j := range permutation {
- shuffledSpecs[i] = e.specs[j]
- names[i] = e.names[j]
- }
- e.specs = shuffledSpecs
- e.names = names
-}
-
-func (e *Specs) ApplyFocus(description string, focusString string, skipString string) {
- if focusString == "" && skipString == "" {
- e.applyProgrammaticFocus()
- } else {
- e.applyRegExpFocusAndSkip(description, focusString, skipString)
- }
-}
-
-func (e *Specs) applyProgrammaticFocus() {
- e.hasProgrammaticFocus = false
- for _, spec := range e.specs {
- if spec.Focused() && !spec.Pending() {
- e.hasProgrammaticFocus = true
- break
- }
- }
-
- if e.hasProgrammaticFocus {
- for _, spec := range e.specs {
- if !spec.Focused() {
- spec.Skip()
- }
- }
- }
-}
-
-// toMatch returns a byte[] to be used by regex matchers. When adding new behaviours to the matching function,
-// this is the place which we append to.
-func (e *Specs) toMatch(description string, i int) []byte {
- if i > len(e.names) {
- return nil
- }
- if e.RegexScansFilePath {
- return []byte(
- description + " " +
- e.names[i] + " " +
- e.specs[i].subject.CodeLocation().FileName)
- } else {
- return []byte(
- description + " " +
- e.names[i])
- }
-}
-
-func (e *Specs) applyRegExpFocusAndSkip(description string, focusString string, skipString string) {
- var focusFilter *regexp.Regexp
- if focusString != "" {
- focusFilter = regexp.MustCompile(focusString)
- }
- var skipFilter *regexp.Regexp
- if skipString != "" {
- skipFilter = regexp.MustCompile(skipString)
- }
-
- for i, spec := range e.specs {
- matchesFocus := true
- matchesSkip := false
-
- toMatch := e.toMatch(description, i)
-
- if focusFilter != nil {
- matchesFocus = focusFilter.Match(toMatch)
- }
-
- if skipFilter != nil {
- matchesSkip = skipFilter.Match(toMatch)
- }
-
- if !matchesFocus || matchesSkip {
- spec.Skip()
- }
- }
-}
-
-func (e *Specs) SkipMeasurements() {
- for _, spec := range e.specs {
- if spec.IsMeasurement() {
- spec.Skip()
- }
- }
-}
-
-//sort.Interface
-
-func (e *Specs) Len() int {
- return len(e.specs)
-}
-
-func (e *Specs) Less(i, j int) bool {
- return e.names[i] < e.names[j]
-}
-
-func (e *Specs) Swap(i, j int) {
- e.names[i], e.names[j] = e.names[j], e.names[i]
- e.specs[i], e.specs[j] = e.specs[j], e.specs[i]
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
deleted file mode 100644
index 82272554aa8..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package spec_iterator
-
-func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
- if length == 0 {
- return 0, 0
- }
-
- // We have more nodes than tests. Trivial case.
- if parallelTotal >= length {
- if parallelNode > length {
- return 0, 0
- } else {
- return parallelNode - 1, 1
- }
- }
-
- // This is the minimum amount of tests that a node will be required to run
- minTestsPerNode := length / parallelTotal
-
- // This is the maximum amount of tests that a node will be required to run
- // The algorithm guarantees that this would be equal to at least the minimum amount
- // and at most one more
- maxTestsPerNode := minTestsPerNode
- if length%parallelTotal != 0 {
- maxTestsPerNode++
- }
-
- // Number of nodes that will have to run the maximum amount of tests per node
- numMaxLoadNodes := length % parallelTotal
-
- // Number of nodes that precede the current node and will have to run the maximum amount of tests per node
- var numPrecedingMaxLoadNodes int
- if parallelNode > numMaxLoadNodes {
- numPrecedingMaxLoadNodes = numMaxLoadNodes
- } else {
- numPrecedingMaxLoadNodes = parallelNode - 1
- }
-
- // Number of nodes that precede the current node and will have to run the minimum amount of tests per node
- var numPrecedingMinLoadNodes int
- if parallelNode <= numMaxLoadNodes {
- numPrecedingMinLoadNodes = 0
- } else {
- numPrecedingMinLoadNodes = parallelNode - numMaxLoadNodes - 1
- }
-
- // Evaluate the test start index and number of tests to run
- startIndex = numPrecedingMaxLoadNodes*maxTestsPerNode + numPrecedingMinLoadNodes*minTestsPerNode
- if parallelNode > numMaxLoadNodes {
- count = minTestsPerNode
- } else {
- count = maxTestsPerNode
- }
- return
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
deleted file mode 100644
index 99f548bca43..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package spec_iterator
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-type ParallelIterator struct {
- specs []*spec.Spec
- host string
- client *http.Client
-}
-
-func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
- return &ParallelIterator{
- specs: specs,
- host: host,
- client: &http.Client{},
- }
-}
-
-func (s *ParallelIterator) Next() (*spec.Spec, error) {
- resp, err := s.client.Get(s.host + "/counter")
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
- }
-
- var counter Counter
- err = json.NewDecoder(resp.Body).Decode(&counter)
- if err != nil {
- return nil, err
- }
-
- if counter.Index >= len(s.specs) {
- return nil, ErrClosed
- }
-
- return s.specs[counter.Index], nil
-}
-
-func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return -1, false
-}
-
-func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- return -1, false
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
deleted file mode 100644
index a51c93b8b61..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package spec_iterator
-
-import (
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-type SerialIterator struct {
- specs []*spec.Spec
- index int
-}
-
-func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
- return &SerialIterator{
- specs: specs,
- index: 0,
- }
-}
-
-func (s *SerialIterator) Next() (*spec.Spec, error) {
- if s.index >= len(s.specs) {
- return nil, ErrClosed
- }
-
- spec := s.specs[s.index]
- s.index += 1
- return spec, nil
-}
-
-func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return len(s.specs), true
-}
-
-func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- count := 0
- for _, s := range s.specs {
- if !s.Skipped() && !s.Pending() {
- count += 1
- }
- }
- return count, true
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
deleted file mode 100644
index ad4a3ea3c64..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package spec_iterator
-
-import "github.com/onsi/ginkgo/internal/spec"
-
-type ShardedParallelIterator struct {
- specs []*spec.Spec
- index int
- maxIndex int
-}
-
-func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
- startIndex, count := ParallelizedIndexRange(len(specs), total, node)
-
- return &ShardedParallelIterator{
- specs: specs,
- index: startIndex,
- maxIndex: startIndex + count,
- }
-}
-
-func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
- if s.index >= s.maxIndex {
- return nil, ErrClosed
- }
-
- spec := s.specs[s.index]
- s.index += 1
- return spec, nil
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
- return len(s.specs)
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
- return s.maxIndex - s.index, true
-}
-
-func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
- count := 0
- for i := s.index; i < s.maxIndex; i += 1 {
- if !s.specs[i].Skipped() && !s.specs[i].Pending() {
- count += 1
- }
- }
- return count, true
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
deleted file mode 100644
index 74bffad6464..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package spec_iterator
-
-import (
- "errors"
-
- "github.com/onsi/ginkgo/internal/spec"
-)
-
-var ErrClosed = errors.New("no more specs to run")
-
-type SpecIterator interface {
- Next() (*spec.Spec, error)
- NumberOfSpecsPriorToIteration() int
- NumberOfSpecsToProcessIfKnown() (int, bool)
- NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
-}
-
-type Counter struct {
- Index int `json:"index"`
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
deleted file mode 100644
index a0b8b62d525..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/specrunner/random_id.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package specrunner
-
-import (
- "crypto/rand"
- "fmt"
-)
-
-func randomID() string {
- b := make([]byte, 8)
- _, err := rand.Read(b)
- if err != nil {
- return ""
- }
- return fmt.Sprintf("%x-%x-%x-%x", b[0:2], b[2:4], b[4:6], b[6:8])
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
deleted file mode 100644
index c9a0a60d84c..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go
+++ /dev/null
@@ -1,411 +0,0 @@
-package specrunner
-
-import (
- "fmt"
- "os"
- "os/signal"
- "sync"
- "syscall"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- Writer "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-
- "time"
-)
-
-type SpecRunner struct {
- description string
- beforeSuiteNode leafnodes.SuiteNode
- iterator spec_iterator.SpecIterator
- afterSuiteNode leafnodes.SuiteNode
- reporters []reporters.Reporter
- startTime time.Time
- suiteID string
- runningSpec *spec.Spec
- writer Writer.WriterInterface
- config config.GinkgoConfigType
- interrupted bool
- processedSpecs []*spec.Spec
- lock *sync.Mutex
-}
-
-func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
- return &SpecRunner{
- description: description,
- beforeSuiteNode: beforeSuiteNode,
- iterator: iterator,
- afterSuiteNode: afterSuiteNode,
- reporters: reporters,
- writer: writer,
- config: config,
- suiteID: randomID(),
- lock: &sync.Mutex{},
- }
-}
-
-func (runner *SpecRunner) Run() bool {
- if runner.config.DryRun {
- runner.performDryRun()
- return true
- }
-
- runner.reportSuiteWillBegin()
- signalRegistered := make(chan struct{})
- go runner.registerForInterrupts(signalRegistered)
- <-signalRegistered
-
- suitePassed := runner.runBeforeSuite()
-
- if suitePassed {
- suitePassed = runner.runSpecs()
- }
-
- runner.blockForeverIfInterrupted()
-
- suitePassed = runner.runAfterSuite() && suitePassed
-
- runner.reportSuiteDidEnd(suitePassed)
-
- return suitePassed
-}
-
-func (runner *SpecRunner) performDryRun() {
- runner.reportSuiteWillBegin()
-
- if runner.beforeSuiteNode != nil {
- summary := runner.beforeSuiteNode.Summary()
- summary.State = types.SpecStatePassed
- runner.reportBeforeSuite(summary)
- }
-
- for {
- spec, err := runner.iterator.Next()
- if err == spec_iterator.ErrClosed {
- break
- }
- if err != nil {
- fmt.Println("failed to iterate over tests:\n" + err.Error())
- break
- }
-
- runner.processedSpecs = append(runner.processedSpecs, spec)
-
- summary := spec.Summary(runner.suiteID)
- runner.reportSpecWillRun(summary)
- if summary.State == types.SpecStateInvalid {
- summary.State = types.SpecStatePassed
- }
- runner.reportSpecDidComplete(summary, false)
- }
-
- if runner.afterSuiteNode != nil {
- summary := runner.afterSuiteNode.Summary()
- summary.State = types.SpecStatePassed
- runner.reportAfterSuite(summary)
- }
-
- runner.reportSuiteDidEnd(true)
-}
-
-func (runner *SpecRunner) runBeforeSuite() bool {
- if runner.beforeSuiteNode == nil || runner.wasInterrupted() {
- return true
- }
-
- runner.writer.Truncate()
- conf := runner.config
- passed := runner.beforeSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
- if !passed {
- runner.writer.DumpOut()
- }
- runner.reportBeforeSuite(runner.beforeSuiteNode.Summary())
- return passed
-}
-
-func (runner *SpecRunner) runAfterSuite() bool {
- if runner.afterSuiteNode == nil {
- return true
- }
-
- runner.writer.Truncate()
- conf := runner.config
- passed := runner.afterSuiteNode.Run(conf.ParallelNode, conf.ParallelTotal, conf.SyncHost)
- if !passed {
- runner.writer.DumpOut()
- }
- runner.reportAfterSuite(runner.afterSuiteNode.Summary())
- return passed
-}
-
-func (runner *SpecRunner) runSpecs() bool {
- suiteFailed := false
- skipRemainingSpecs := false
- for {
- spec, err := runner.iterator.Next()
- if err == spec_iterator.ErrClosed {
- break
- }
- if err != nil {
- fmt.Println("failed to iterate over tests:\n" + err.Error())
- suiteFailed = true
- break
- }
-
- runner.processedSpecs = append(runner.processedSpecs, spec)
-
- if runner.wasInterrupted() {
- break
- }
- if skipRemainingSpecs {
- spec.Skip()
- }
-
- if !spec.Skipped() && !spec.Pending() {
- if passed := runner.runSpec(spec); !passed {
- suiteFailed = true
- }
- } else if spec.Pending() && runner.config.FailOnPending {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- suiteFailed = true
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- } else {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- }
-
- if spec.Failed() && runner.config.FailFast {
- skipRemainingSpecs = true
- }
- }
-
- return !suiteFailed
-}
-
-func (runner *SpecRunner) runSpec(spec *spec.Spec) (passed bool) {
- maxAttempts := 1
- if runner.config.FlakeAttempts > 0 {
- // uninitialized configs count as 1
- maxAttempts = runner.config.FlakeAttempts
- }
-
- for i := 0; i < maxAttempts; i++ {
- runner.reportSpecWillRun(spec.Summary(runner.suiteID))
- runner.runningSpec = spec
- spec.Run(runner.writer)
- runner.runningSpec = nil
- runner.reportSpecDidComplete(spec.Summary(runner.suiteID), spec.Failed())
- if !spec.Failed() {
- return true
- }
- }
- return false
-}
-
-func (runner *SpecRunner) CurrentSpecSummary() (*types.SpecSummary, bool) {
- if runner.runningSpec == nil {
- return nil, false
- }
-
- return runner.runningSpec.Summary(runner.suiteID), true
-}
-
-func (runner *SpecRunner) registerForInterrupts(signalRegistered chan struct{}) {
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
- close(signalRegistered)
-
- <-c
- signal.Stop(c)
- runner.markInterrupted()
- go runner.registerForHardInterrupts()
- runner.writer.DumpOutWithHeader(`
-Received interrupt. Emitting contents of GinkgoWriter...
----------------------------------------------------------
-`)
- if runner.afterSuiteNode != nil {
- fmt.Fprint(os.Stderr, `
----------------------------------------------------------
-Received interrupt. Running AfterSuite...
-^C again to terminate immediately
-`)
- runner.runAfterSuite()
- }
- runner.reportSuiteDidEnd(false)
- os.Exit(1)
-}
-
-func (runner *SpecRunner) registerForHardInterrupts() {
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
-
- <-c
- fmt.Fprintln(os.Stderr, "\nReceived second interrupt. Shutting down.")
- os.Exit(1)
-}
-
-func (runner *SpecRunner) blockForeverIfInterrupted() {
- runner.lock.Lock()
- interrupted := runner.interrupted
- runner.lock.Unlock()
-
- if interrupted {
- select {}
- }
-}
-
-func (runner *SpecRunner) markInterrupted() {
- runner.lock.Lock()
- defer runner.lock.Unlock()
- runner.interrupted = true
-}
-
-func (runner *SpecRunner) wasInterrupted() bool {
- runner.lock.Lock()
- defer runner.lock.Unlock()
- return runner.interrupted
-}
-
-func (runner *SpecRunner) reportSuiteWillBegin() {
- runner.startTime = time.Now()
- summary := runner.suiteWillBeginSummary()
- for _, reporter := range runner.reporters {
- reporter.SpecSuiteWillBegin(runner.config, summary)
- }
-}
-
-func (runner *SpecRunner) reportBeforeSuite(summary *types.SetupSummary) {
- for _, reporter := range runner.reporters {
- reporter.BeforeSuiteDidRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportAfterSuite(summary *types.SetupSummary) {
- for _, reporter := range runner.reporters {
- reporter.AfterSuiteDidRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) {
- runner.writer.Truncate()
-
- for _, reporter := range runner.reporters {
- reporter.SpecWillRun(summary)
- }
-}
-
-func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) {
- if len(summary.CapturedOutput) == 0 {
- summary.CapturedOutput = string(runner.writer.Bytes())
- }
- for i := len(runner.reporters) - 1; i >= 1; i-- {
- runner.reporters[i].SpecDidComplete(summary)
- }
-
- if failed {
- runner.writer.DumpOut()
- }
-
- runner.reporters[0].SpecDidComplete(summary)
-}
-
-func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
- summary := runner.suiteDidEndSummary(success)
- summary.RunTime = time.Since(runner.startTime)
- for _, reporter := range runner.reporters {
- reporter.SpecSuiteDidEnd(summary)
- }
-}
-
-func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
- count = 0
-
- for _, spec := range runner.processedSpecs {
- if filter(spec) {
- count++
- }
- }
-
- return count
-}
-
-func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
- numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return !ex.Skipped() && !ex.Pending()
- })
-
- numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Pending()
- })
-
- numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Skipped()
- })
-
- numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Passed()
- })
-
- numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Flaked()
- })
-
- numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
- return ex.Failed()
- })
-
- if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
- var known bool
- numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
- if !known {
- numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
- }
- numberOfFailedSpecs = numberOfSpecsThatWillBeRun
- }
-
- return &types.SuiteSummary{
- SuiteDescription: runner.description,
- SuiteSucceeded: success,
- SuiteID: runner.suiteID,
-
- NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
- NumberOfTotalSpecs: len(runner.processedSpecs),
- NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
- NumberOfPendingSpecs: numberOfPendingSpecs,
- NumberOfSkippedSpecs: numberOfSkippedSpecs,
- NumberOfPassedSpecs: numberOfPassedSpecs,
- NumberOfFailedSpecs: numberOfFailedSpecs,
- NumberOfFlakedSpecs: numberOfFlakedSpecs,
- }
-}
-
-func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
- numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
- if !known {
- numTotal = -1
- }
-
- numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
- if !known {
- numToRun = -1
- }
-
- return &types.SuiteSummary{
- SuiteDescription: runner.description,
- SuiteID: runner.suiteID,
-
- NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
- NumberOfTotalSpecs: numTotal,
- NumberOfSpecsThatWillBeRun: numToRun,
- NumberOfPendingSpecs: -1,
- NumberOfSkippedSpecs: -1,
- NumberOfPassedSpecs: -1,
- NumberOfFailedSpecs: -1,
- NumberOfFlakedSpecs: -1,
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
deleted file mode 100644
index 3104bbc88c7..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/suite/suite.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package suite
-
-import (
- "math/rand"
- "net/http"
- "time"
-
- "github.com/onsi/ginkgo/internal/spec_iterator"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/internal/containernode"
- "github.com/onsi/ginkgo/internal/failer"
- "github.com/onsi/ginkgo/internal/leafnodes"
- "github.com/onsi/ginkgo/internal/spec"
- "github.com/onsi/ginkgo/internal/specrunner"
- "github.com/onsi/ginkgo/internal/writer"
- "github.com/onsi/ginkgo/reporters"
- "github.com/onsi/ginkgo/types"
-)
-
-type ginkgoTestingT interface {
- Fail()
-}
-
-type Suite struct {
- topLevelContainer *containernode.ContainerNode
- currentContainer *containernode.ContainerNode
- containerIndex int
- beforeSuiteNode leafnodes.SuiteNode
- afterSuiteNode leafnodes.SuiteNode
- runner *specrunner.SpecRunner
- failer *failer.Failer
- running bool
-}
-
-func New(failer *failer.Failer) *Suite {
- topLevelContainer := containernode.New("[Top Level]", types.FlagTypeNone, types.CodeLocation{})
-
- return &Suite{
- topLevelContainer: topLevelContainer,
- currentContainer: topLevelContainer,
- failer: failer,
- containerIndex: 1,
- }
-}
-
-func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []reporters.Reporter, writer writer.WriterInterface, config config.GinkgoConfigType) (bool, bool) {
- if config.ParallelTotal < 1 {
- panic("ginkgo.parallel.total must be >= 1")
- }
-
- if config.ParallelNode > config.ParallelTotal || config.ParallelNode < 1 {
- panic("ginkgo.parallel.node is one-indexed and must be <= ginkgo.parallel.total")
- }
-
- r := rand.New(rand.NewSource(config.RandomSeed))
- suite.topLevelContainer.Shuffle(r)
- iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
- suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)
-
- suite.running = true
- success := suite.runner.Run()
- if !success {
- t.Fail()
- }
- return success, hasProgrammaticFocus
-}
-
-func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
- specsSlice := []*spec.Spec{}
- suite.topLevelContainer.BackPropagateProgrammaticFocus()
- for _, collatedNodes := range suite.topLevelContainer.Collate() {
- specsSlice = append(specsSlice, spec.New(collatedNodes.Subject, collatedNodes.Containers, config.EmitSpecProgress))
- }
-
- specs := spec.NewSpecs(specsSlice)
- specs.RegexScansFilePath = config.RegexScansFilePath
-
- if config.RandomizeAllSpecs {
- specs.Shuffle(rand.New(rand.NewSource(config.RandomSeed)))
- }
-
- specs.ApplyFocus(description, config.FocusString, config.SkipString)
-
- if config.SkipMeasurements {
- specs.SkipMeasurements()
- }
-
- var iterator spec_iterator.SpecIterator
-
- if config.ParallelTotal > 1 {
- iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
- resp, err := http.Get(config.SyncHost + "/has-counter")
- if err != nil || resp.StatusCode != http.StatusOK {
- iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
- }
- } else {
- iterator = spec_iterator.NewSerialIterator(specs.Specs())
- }
-
- return iterator, specs.HasProgrammaticFocus()
-}
-
-func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {
- return suite.runner.CurrentSpecSummary()
-}
-
-func (suite *Suite) SetBeforeSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.beforeSuiteNode != nil {
- panic("You may only call BeforeSuite once!")
- }
- suite.beforeSuiteNode = leafnodes.NewBeforeSuiteNode(body, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetAfterSuiteNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.afterSuiteNode != nil {
- panic("You may only call AfterSuite once!")
- }
- suite.afterSuiteNode = leafnodes.NewAfterSuiteNode(body, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetSynchronizedBeforeSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.beforeSuiteNode != nil {
- panic("You may only call BeforeSuite once!")
- }
- suite.beforeSuiteNode = leafnodes.NewSynchronizedBeforeSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) SetSynchronizedAfterSuiteNode(bodyA interface{}, bodyB interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.afterSuiteNode != nil {
- panic("You may only call AfterSuite once!")
- }
- suite.afterSuiteNode = leafnodes.NewSynchronizedAfterSuiteNode(bodyA, bodyB, codeLocation, timeout, suite.failer)
-}
-
-func (suite *Suite) PushContainerNode(text string, body func(), flag types.FlagType, codeLocation types.CodeLocation) {
- container := containernode.New(text, flag, codeLocation)
- suite.currentContainer.PushContainerNode(container)
-
- previousContainer := suite.currentContainer
- suite.currentContainer = container
- suite.containerIndex++
-
- body()
-
- suite.containerIndex--
- suite.currentContainer = previousContainer
-}
-
-func (suite *Suite) PushItNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call It from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSubjectNode(leafnodes.NewItNode(text, body, flag, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushMeasureNode(text string, body interface{}, flag types.FlagType, codeLocation types.CodeLocation, samples int) {
- if suite.running {
- suite.failer.Fail("You may only call Measure from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSubjectNode(leafnodes.NewMeasureNode(text, body, flag, codeLocation, samples, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call BeforeEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushJustBeforeEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call JustBeforeEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewJustBeforeEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushJustAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call JustAfterEach from within a Describe or Context", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewJustAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
-
-func (suite *Suite) PushAfterEachNode(body interface{}, codeLocation types.CodeLocation, timeout time.Duration) {
- if suite.running {
- suite.failer.Fail("You may only call AfterEach from within a Describe, Context or When", codeLocation)
- }
- suite.currentContainer.PushSetupNode(leafnodes.NewAfterEachNode(body, codeLocation, timeout, suite.failer, suite.containerIndex))
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
deleted file mode 100644
index 090445d084b..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/testingtproxy/testing_t_proxy.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package testingtproxy
-
-import (
- "fmt"
- "io"
-)
-
-type failFunc func(message string, callerSkip ...int)
-
-func New(writer io.Writer, fail failFunc, offset int) *ginkgoTestingTProxy {
- return &ginkgoTestingTProxy{
- fail: fail,
- offset: offset,
- writer: writer,
- }
-}
-
-type ginkgoTestingTProxy struct {
- fail failFunc
- offset int
- writer io.Writer
-}
-
-func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
- t.fail(fmt.Sprintln(args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
- t.fail(fmt.Sprintf(format, args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fail() {
- t.fail("failed", t.offset)
-}
-
-func (t *ginkgoTestingTProxy) FailNow() {
- t.fail("failed", t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
- t.fail(fmt.Sprintln(args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
- t.fail(fmt.Sprintf(format, args...), t.offset)
-}
-
-func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
- fmt.Fprintln(t.writer, args...)
-}
-
-func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
- t.Log(fmt.Sprintf(format, args...))
-}
-
-func (t *ginkgoTestingTProxy) Failed() bool {
- return false
-}
-
-func (t *ginkgoTestingTProxy) Parallel() {
-}
-
-func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
- fmt.Println(args...)
-}
-
-func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
- t.Skip(fmt.Sprintf(format, args...))
-}
-
-func (t *ginkgoTestingTProxy) SkipNow() {
-}
-
-func (t *ginkgoTestingTProxy) Skipped() bool {
- return false
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
deleted file mode 100644
index 6739c3f605e..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package writer
-
-type FakeGinkgoWriter struct {
- EventStream []string
-}
-
-func NewFake() *FakeGinkgoWriter {
- return &FakeGinkgoWriter{
- EventStream: []string{},
- }
-}
-
-func (writer *FakeGinkgoWriter) AddEvent(event string) {
- writer.EventStream = append(writer.EventStream, event)
-}
-
-func (writer *FakeGinkgoWriter) Truncate() {
- writer.EventStream = append(writer.EventStream, "TRUNCATE")
-}
-
-func (writer *FakeGinkgoWriter) DumpOut() {
- writer.EventStream = append(writer.EventStream, "DUMP")
-}
-
-func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) {
- writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header)
-}
-
-func (writer *FakeGinkgoWriter) Bytes() []byte {
- writer.EventStream = append(writer.EventStream, "BYTES")
- return nil
-}
-
-func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) {
- return 0, nil
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
deleted file mode 100644
index 98eca3bdd25..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/writer/writer.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package writer
-
-import (
- "bytes"
- "io"
- "sync"
-)
-
-type WriterInterface interface {
- io.Writer
-
- Truncate()
- DumpOut()
- DumpOutWithHeader(header string)
- Bytes() []byte
-}
-
-type Writer struct {
- buffer *bytes.Buffer
- outWriter io.Writer
- lock *sync.Mutex
- stream bool
- redirector io.Writer
-}
-
-func New(outWriter io.Writer) *Writer {
- return &Writer{
- buffer: &bytes.Buffer{},
- lock: &sync.Mutex{},
- outWriter: outWriter,
- stream: true,
- }
-}
-
-func (w *Writer) AndRedirectTo(writer io.Writer) {
- w.redirector = writer
-}
-
-func (w *Writer) SetStream(stream bool) {
- w.lock.Lock()
- defer w.lock.Unlock()
- w.stream = stream
-}
-
-func (w *Writer) Write(b []byte) (n int, err error) {
- w.lock.Lock()
- defer w.lock.Unlock()
-
- n, err = w.buffer.Write(b)
- if w.redirector != nil {
- w.redirector.Write(b)
- }
- if w.stream {
- return w.outWriter.Write(b)
- }
- return n, err
-}
-
-func (w *Writer) Truncate() {
- w.lock.Lock()
- defer w.lock.Unlock()
- w.buffer.Reset()
-}
-
-func (w *Writer) DumpOut() {
- w.lock.Lock()
- defer w.lock.Unlock()
- if !w.stream {
- w.buffer.WriteTo(w.outWriter)
- }
-}
-
-func (w *Writer) Bytes() []byte {
- w.lock.Lock()
- defer w.lock.Unlock()
- b := w.buffer.Bytes()
- copied := make([]byte, len(b))
- copy(copied, b)
- return copied
-}
-
-func (w *Writer) DumpOutWithHeader(header string) {
- w.lock.Lock()
- defer w.lock.Unlock()
- if !w.stream && w.buffer.Len() > 0 {
- w.outWriter.Write([]byte(header))
- w.buffer.WriteTo(w.outWriter)
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
deleted file mode 100644
index c76283b46e9..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-Ginkgo's Default Reporter
-
-A number of command line flags are available to tweak Ginkgo's default output.
-
-These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
-*/
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/reporters/stenographer"
- "github.com/onsi/ginkgo/types"
-)
-
-type DefaultReporter struct {
- config config.DefaultReporterConfigType
- stenographer stenographer.Stenographer
- specSummaries []*types.SpecSummary
-}
-
-func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer stenographer.Stenographer) *DefaultReporter {
- return &DefaultReporter{
- config: config,
- stenographer: stenographer,
- }
-}
-
-func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
- if config.ParallelTotal > 1 {
- reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
- } else {
- reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
- }
-}
-
-func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- reporter.stenographer.AnnounceBeforeSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-}
-
-func (reporter *DefaultReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- reporter.stenographer.AnnounceAfterSuiteFailure(setupSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-}
-
-func (reporter *DefaultReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if reporter.config.Verbose && !reporter.config.Succinct && specSummary.State != types.SpecStatePending && specSummary.State != types.SpecStateSkipped {
- reporter.stenographer.AnnounceSpecWillRun(specSummary)
- }
-}
-
-func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- switch specSummary.State {
- case types.SpecStatePassed:
- if specSummary.IsMeasurement {
- reporter.stenographer.AnnounceSuccesfulMeasurement(specSummary, reporter.config.Succinct)
- } else if specSummary.RunTime.Seconds() >= reporter.config.SlowSpecThreshold {
- reporter.stenographer.AnnounceSuccesfulSlowSpec(specSummary, reporter.config.Succinct)
- } else {
- reporter.stenographer.AnnounceSuccesfulSpec(specSummary)
- if reporter.config.ReportPassed {
- reporter.stenographer.AnnounceCapturedOutput(specSummary.CapturedOutput)
- }
- }
- case types.SpecStatePending:
- reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
- case types.SpecStateSkipped:
- reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct || !reporter.config.NoisySkippings, reporter.config.FullTrace)
- case types.SpecStateTimedOut:
- reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- case types.SpecStatePanicked:
- reporter.stenographer.AnnounceSpecPanicked(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- case types.SpecStateFailed:
- reporter.stenographer.AnnounceSpecFailed(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
- }
-
- reporter.specSummaries = append(reporter.specSummaries, specSummary)
-}
-
-func (reporter *DefaultReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.stenographer.SummarizeFailures(reporter.specSummaries)
- reporter.stenographer.AnnounceSpecRunCompletion(summary, reporter.config.Succinct)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
deleted file mode 100644
index 27db4794908..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/fake_reporter.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-//FakeReporter is useful for testing purposes
-type FakeReporter struct {
- Config config.GinkgoConfigType
-
- BeginSummary *types.SuiteSummary
- BeforeSuiteSummary *types.SetupSummary
- SpecWillRunSummaries []*types.SpecSummary
- SpecSummaries []*types.SpecSummary
- AfterSuiteSummary *types.SetupSummary
- EndSummary *types.SuiteSummary
-
- SpecWillRunStub func(specSummary *types.SpecSummary)
- SpecDidCompleteStub func(specSummary *types.SpecSummary)
-}
-
-func NewFakeReporter() *FakeReporter {
- return &FakeReporter{
- SpecWillRunSummaries: make([]*types.SpecSummary, 0),
- SpecSummaries: make([]*types.SpecSummary, 0),
- }
-}
-
-func (fakeR *FakeReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- fakeR.Config = config
- fakeR.BeginSummary = summary
-}
-
-func (fakeR *FakeReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- fakeR.BeforeSuiteSummary = setupSummary
-}
-
-func (fakeR *FakeReporter) SpecWillRun(specSummary *types.SpecSummary) {
- if fakeR.SpecWillRunStub != nil {
- fakeR.SpecWillRunStub(specSummary)
- }
- fakeR.SpecWillRunSummaries = append(fakeR.SpecWillRunSummaries, specSummary)
-}
-
-func (fakeR *FakeReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- if fakeR.SpecDidCompleteStub != nil {
- fakeR.SpecDidCompleteStub(specSummary)
- }
- fakeR.SpecSummaries = append(fakeR.SpecSummaries, specSummary)
-}
-
-func (fakeR *FakeReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- fakeR.AfterSuiteSummary = setupSummary
-}
-
-func (fakeR *FakeReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- fakeR.EndSummary = summary
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
deleted file mode 100644
index fc987e77736..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
-
-JUnit XML Reporter for Ginkgo
-
-For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
-
-*/
-
-package reporters
-
-import (
- "encoding/xml"
- "fmt"
- "math"
- "os"
- "path/filepath"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-type JUnitTestSuite struct {
- XMLName xml.Name `xml:"testsuite"`
- TestCases []JUnitTestCase `xml:"testcase"`
- Name string `xml:"name,attr"`
- Tests int `xml:"tests,attr"`
- Failures int `xml:"failures,attr"`
- Errors int `xml:"errors,attr"`
- Time float64 `xml:"time,attr"`
-}
-
-type JUnitTestCase struct {
- Name string `xml:"name,attr"`
- ClassName string `xml:"classname,attr"`
- PassedMessage *JUnitPassedMessage `xml:"passed,omitempty"`
- FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"`
- Skipped *JUnitSkipped `xml:"skipped,omitempty"`
- Time float64 `xml:"time,attr"`
- SystemOut string `xml:"system-out,omitempty"`
-}
-
-type JUnitPassedMessage struct {
- Message string `xml:",chardata"`
-}
-
-type JUnitFailureMessage struct {
- Type string `xml:"type,attr"`
- Message string `xml:",chardata"`
-}
-
-type JUnitSkipped struct {
- XMLName xml.Name `xml:"skipped"`
-}
-
-type JUnitReporter struct {
- suite JUnitTestSuite
- filename string
- testSuiteName string
- ReporterConfig config.DefaultReporterConfigType
-}
-
-//NewJUnitReporter creates a new JUnit XML reporter. The XML will be stored in the passed in filename.
-func NewJUnitReporter(filename string) *JUnitReporter {
- return &JUnitReporter{
- filename: filename,
- }
-}
-
-func (reporter *JUnitReporter) SpecSuiteWillBegin(ginkgoConfig config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.suite = JUnitTestSuite{
- Name: summary.SuiteDescription,
- TestCases: []JUnitTestCase{},
- }
- reporter.testSuiteName = summary.SuiteDescription
- reporter.ReporterConfig = config.DefaultReporterConfig
-}
-
-func (reporter *JUnitReporter) SpecWillRun(specSummary *types.SpecSummary) {
-}
-
-func (reporter *JUnitReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("BeforeSuite", setupSummary)
-}
-
-func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("AfterSuite", setupSummary)
-}
-
-func failureMessage(failure types.SpecFailure) string {
- return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
-}
-
-func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- testCase := JUnitTestCase{
- Name: name,
- ClassName: reporter.testSuiteName,
- }
-
- testCase.FailureMessage = &JUnitFailureMessage{
- Type: reporter.failureTypeForState(setupSummary.State),
- Message: failureMessage(setupSummary.Failure),
- }
- testCase.SystemOut = setupSummary.CapturedOutput
- testCase.Time = setupSummary.RunTime.Seconds()
- reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
- }
-}
-
-func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- testCase := JUnitTestCase{
- Name: strings.Join(specSummary.ComponentTexts[1:], " "),
- ClassName: reporter.testSuiteName,
- }
- if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
- testCase.PassedMessage = &JUnitPassedMessage{
- Message: specSummary.CapturedOutput,
- }
- }
- if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
- testCase.FailureMessage = &JUnitFailureMessage{
- Type: reporter.failureTypeForState(specSummary.State),
- Message: failureMessage(specSummary.Failure),
- }
- if specSummary.State == types.SpecStatePanicked {
- testCase.FailureMessage.Message += fmt.Sprintf("\n\nPanic: %s\n\nFull stack:\n%s",
- specSummary.Failure.ForwardedPanic,
- specSummary.Failure.Location.FullStackTrace)
- }
- testCase.SystemOut = specSummary.CapturedOutput
- }
- if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
- testCase.Skipped = &JUnitSkipped{}
- }
- testCase.Time = specSummary.RunTime.Seconds()
- reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
-}
-
-func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
- reporter.suite.Time = math.Trunc(summary.RunTime.Seconds()*1000) / 1000
- reporter.suite.Failures = summary.NumberOfFailedSpecs
- reporter.suite.Errors = 0
- if reporter.ReporterConfig.ReportFile != "" {
- reporter.filename = reporter.ReporterConfig.ReportFile
- fmt.Printf("\nJUnit path was configured: %s\n", reporter.filename)
- }
- filePath, _ := filepath.Abs(reporter.filename)
- dirPath := filepath.Dir(filePath)
- err := os.MkdirAll(dirPath, os.ModePerm)
- if err != nil {
- fmt.Printf("\nFailed to create JUnit directory: %s\n\t%s", filePath, err.Error())
- }
- file, err := os.Create(filePath)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to create JUnit report file: %s\n\t%s", filePath, err.Error())
- }
- defer file.Close()
- file.WriteString(xml.Header)
- encoder := xml.NewEncoder(file)
- encoder.Indent(" ", " ")
- err = encoder.Encode(reporter.suite)
- if err == nil {
- fmt.Fprintf(os.Stdout, "\nJUnit report was created: %s\n", filePath)
- } else {
- fmt.Fprintf(os.Stderr, "\nFailed to generate JUnit report data:\n\t%s", err.Error())
- }
-}
-
-func (reporter *JUnitReporter) failureTypeForState(state types.SpecState) string {
- switch state {
- case types.SpecStateFailed:
- return "Failure"
- case types.SpecStateTimedOut:
- return "Timeout"
- case types.SpecStatePanicked:
- return "Panic"
- default:
- return ""
- }
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/reporter.go
deleted file mode 100644
index 348b9dfce1f..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/reporter.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package reporters
-
-import (
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-type Reporter interface {
- SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
- BeforeSuiteDidRun(setupSummary *types.SetupSummary)
- SpecWillRun(specSummary *types.SpecSummary)
- SpecDidComplete(specSummary *types.SpecSummary)
- AfterSuiteDidRun(setupSummary *types.SetupSummary)
- SpecSuiteDidEnd(summary *types.SuiteSummary)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
deleted file mode 100644
index 45b8f88690e..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package stenographer
-
-import (
- "fmt"
- "strings"
-)
-
-func (s *consoleStenographer) colorize(colorCode string, format string, args ...interface{}) string {
- var out string
-
- if len(args) > 0 {
- out = fmt.Sprintf(format, args...)
- } else {
- out = format
- }
-
- if s.color {
- return fmt.Sprintf("%s%s%s", colorCode, out, defaultStyle)
- } else {
- return out
- }
-}
-
-func (s *consoleStenographer) printBanner(text string, bannerCharacter string) {
- fmt.Fprintln(s.w, text)
- fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text)))
-}
-
-func (s *consoleStenographer) printNewLine() {
- fmt.Fprintln(s.w, "")
-}
-
-func (s *consoleStenographer) printDelimiter() {
- fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30)))
-}
-
-func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) {
- fmt.Fprint(s.w, s.indent(indentation, format, args...))
-}
-
-func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) {
- fmt.Fprintln(s.w, s.indent(indentation, format, args...))
-}
-
-func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string {
- var text string
-
- if len(args) > 0 {
- text = fmt.Sprintf(format, args...)
- } else {
- text = format
- }
-
- stringArray := strings.Split(text, "\n")
- padding := ""
- if indentation >= 0 {
- padding = strings.Repeat(" ", indentation)
- }
- for i, s := range stringArray {
- stringArray[i] = fmt.Sprintf("%s%s", padding, s)
- }
-
- return strings.Join(stringArray, "\n")
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
deleted file mode 100644
index 98854e7d9aa..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package stenographer
-
-import (
- "sync"
-
- "github.com/onsi/ginkgo/types"
-)
-
-func NewFakeStenographerCall(method string, args ...interface{}) FakeStenographerCall {
- return FakeStenographerCall{
- Method: method,
- Args: args,
- }
-}
-
-type FakeStenographer struct {
- calls []FakeStenographerCall
- lock *sync.Mutex
-}
-
-type FakeStenographerCall struct {
- Method string
- Args []interface{}
-}
-
-func NewFakeStenographer() *FakeStenographer {
- stenographer := &FakeStenographer{
- lock: &sync.Mutex{},
- }
- stenographer.Reset()
- return stenographer
-}
-
-func (stenographer *FakeStenographer) Calls() []FakeStenographerCall {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- return stenographer.calls
-}
-
-func (stenographer *FakeStenographer) Reset() {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- stenographer.calls = make([]FakeStenographerCall, 0)
-}
-
-func (stenographer *FakeStenographer) CallsTo(method string) []FakeStenographerCall {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- results := make([]FakeStenographerCall, 0)
- for _, call := range stenographer.calls {
- if call.Method == method {
- results = append(results, call)
- }
- }
-
- return results
-}
-
-func (stenographer *FakeStenographer) registerCall(method string, args ...interface{}) {
- stenographer.lock.Lock()
- defer stenographer.lock.Unlock()
-
- stenographer.calls = append(stenographer.calls, NewFakeStenographerCall(method, args...))
-}
-
-func (stenographer *FakeStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
- stenographer.registerCall("AnnounceSuite", description, randomSeed, randomizingAll, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
- stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
- stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
- stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
- stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
- stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSpecWillRun", spec)
-}
-
-func (stenographer *FakeStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceBeforeSuiteFailure", summary, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceAfterSuiteFailure", summary, succinct, fullTrace)
-}
-func (stenographer *FakeStenographer) AnnounceCapturedOutput(output string) {
- stenographer.registerCall("AnnounceCapturedOutput", output)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
- stenographer.registerCall("AnnounceSuccesfulSpec", spec)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulSlowSpec", spec, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
- stenographer.registerCall("AnnounceSuccesfulMeasurement", spec, succinct)
-}
-
-func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
- stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
-}
-
-func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecTimedOut", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecPanicked", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- stenographer.registerCall("AnnounceSpecFailed", spec, succinct, fullTrace)
-}
-
-func (stenographer *FakeStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
- stenographer.registerCall("SummarizeFailures", summaries)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
deleted file mode 100644
index 601c74d66eb..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
-The stenographer is used by Ginkgo's reporters to generate output.
-
-Move along, nothing to see here.
-*/
-
-package stenographer
-
-import (
- "fmt"
- "io"
- "runtime"
- "strings"
-
- "github.com/onsi/ginkgo/types"
-)
-
-const defaultStyle = "\x1b[0m"
-const boldStyle = "\x1b[1m"
-const redColor = "\x1b[91m"
-const greenColor = "\x1b[32m"
-const yellowColor = "\x1b[33m"
-const cyanColor = "\x1b[36m"
-const grayColor = "\x1b[90m"
-const lightGrayColor = "\x1b[37m"
-
-type cursorStateType int
-
-const (
- cursorStateTop cursorStateType = iota
- cursorStateStreaming
- cursorStateMidBlock
- cursorStateEndBlock
-)
-
-type Stenographer interface {
- AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
- AnnounceAggregatedParallelRun(nodes int, succinct bool)
- AnnounceParallelRun(node int, nodes int, succinct bool)
- AnnounceTotalNumberOfSpecs(total int, succinct bool)
- AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
- AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
-
- AnnounceSpecWillRun(spec *types.SpecSummary)
- AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
- AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool)
-
- AnnounceCapturedOutput(output string)
-
- AnnounceSuccesfulSpec(spec *types.SpecSummary)
- AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool)
- AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
-
- AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
- AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
-
- AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
- AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
- AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool)
-
- SummarizeFailures(summaries []*types.SpecSummary)
-}
-
-func New(color bool, enableFlakes bool, writer io.Writer) Stenographer {
- denoter := "•"
- if runtime.GOOS == "windows" {
- denoter = "+"
- }
- return &consoleStenographer{
- color: color,
- denoter: denoter,
- cursorState: cursorStateTop,
- enableFlakes: enableFlakes,
- w: writer,
- }
-}
-
-type consoleStenographer struct {
- color bool
- denoter string
- cursorState cursorStateType
- enableFlakes bool
- w io.Writer
-}
-
-var alternatingColors = []string{defaultStyle, grayColor}
-
-func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) {
- if succinct {
- s.print(0, "[%d] %s ", randomSeed, s.colorize(boldStyle, description))
- return
- }
- s.printBanner(fmt.Sprintf("Running Suite: %s", description), "=")
- s.print(0, "Random Seed: %s", s.colorize(boldStyle, "%d", randomSeed))
- if randomizingAll {
- s.print(0, " - Will randomize all specs")
- }
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
- if succinct {
- s.print(0, "- node #%d ", node)
- return
- }
- s.println(0,
- "Parallel test node %s/%s.",
- s.colorize(boldStyle, "%d", node),
- s.colorize(boldStyle, "%d", nodes),
- )
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceAggregatedParallelRun(nodes int, succinct bool) {
- if succinct {
- s.print(0, "- %d nodes ", nodes)
- return
- }
- s.println(0,
- "Running in parallel across %s nodes",
- s.colorize(boldStyle, "%d", nodes),
- )
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
- if succinct {
- s.print(0, "- %d/%d specs ", specsToRun, total)
- s.stream()
- return
- }
- s.println(0,
- "Will run %s of %s specs",
- s.colorize(boldStyle, "%d", specsToRun),
- s.colorize(boldStyle, "%d", total),
- )
-
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
- if succinct {
- s.print(0, "- %d specs ", total)
- s.stream()
- return
- }
- s.println(0,
- "Will run %s specs",
- s.colorize(boldStyle, "%d", total),
- )
-
- s.printNewLine()
-}
-
-func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
- if succinct && summary.SuiteSucceeded {
- s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)
- return
- }
- s.printNewLine()
- color := greenColor
- if !summary.SuiteSucceeded {
- color = redColor
- }
- s.println(0, s.colorize(boldStyle+color, "Ran %d of %d Specs in %.3f seconds", summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, summary.RunTime.Seconds()))
-
- status := ""
- if summary.SuiteSucceeded {
- status = s.colorize(boldStyle+greenColor, "SUCCESS!")
- } else {
- status = s.colorize(boldStyle+redColor, "FAIL!")
- }
-
- flakes := ""
- if s.enableFlakes {
- flakes = " | " + s.colorize(yellowColor+boldStyle, "%d Flaked", summary.NumberOfFlakedSpecs)
- }
-
- s.print(0,
- "%s -- %s | %s | %s | %s\n",
- status,
- s.colorize(greenColor+boldStyle, "%d Passed", summary.NumberOfPassedSpecs),
- s.colorize(redColor+boldStyle, "%d Failed", summary.NumberOfFailedSpecs)+flakes,
- s.colorize(yellowColor+boldStyle, "%d Pending", summary.NumberOfPendingSpecs),
- s.colorize(cyanColor+boldStyle, "%d Skipped", summary.NumberOfSkippedSpecs),
- )
-}
-
-func (s *consoleStenographer) AnnounceSpecWillRun(spec *types.SpecSummary) {
- s.startBlock()
- for i, text := range spec.ComponentTexts[1 : len(spec.ComponentTexts)-1] {
- s.print(0, s.colorize(alternatingColors[i%2], text)+" ")
- }
-
- indentation := 0
- if len(spec.ComponentTexts) > 2 {
- indentation = 1
- s.printNewLine()
- }
- index := len(spec.ComponentTexts) - 1
- s.print(indentation, s.colorize(boldStyle, spec.ComponentTexts[index]))
- s.printNewLine()
- s.print(indentation, s.colorize(lightGrayColor, spec.ComponentCodeLocations[index].String()))
- s.printNewLine()
- s.midBlock()
-}
-
-func (s *consoleStenographer) AnnounceBeforeSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.announceSetupFailure("BeforeSuite", summary, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceAfterSuiteFailure(summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.announceSetupFailure("AfterSuite", summary, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) announceSetupFailure(name string, summary *types.SetupSummary, succinct bool, fullTrace bool) {
- s.startBlock()
- var message string
- switch summary.State {
- case types.SpecStateFailed:
- message = "Failure"
- case types.SpecStatePanicked:
- message = "Panic"
- case types.SpecStateTimedOut:
- message = "Timeout"
- }
-
- s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true)
-
- s.printNewLine()
- s.printFailure(indentation, summary.State, summary.Failure, fullTrace)
-
- s.endBlock()
-}
-
-func (s *consoleStenographer) AnnounceCapturedOutput(output string) {
- if output == "" {
- return
- }
-
- s.startBlock()
- s.println(0, output)
- s.midBlock()
-}
-
-func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) {
- s.print(0, s.colorize(greenColor, s.denoter))
- s.stream()
-}
-
-func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) {
- s.printBlockWithMessage(
- s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()),
- "",
- spec,
- succinct,
- )
-}
-
-func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool) {
- s.printBlockWithMessage(
- s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter),
- s.measurementReport(spec, succinct),
- spec,
- succinct,
- )
-}
-
-func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy bool) {
- if noisy {
- s.printBlockWithMessage(
- s.colorize(yellowColor, "P [PENDING]"),
- "",
- spec,
- false,
- )
- } else {
- s.print(0, s.colorize(yellowColor, "P"))
- s.stream()
- }
-}
-
-func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- // Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
- if succinct || spec.Failure == (types.SpecFailure{}) {
- s.print(0, s.colorize(cyanColor, "S"))
- s.stream()
- } else {
- s.startBlock()
- s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
-
- s.printNewLine()
- s.printSkip(indentation, spec.Failure)
- s.endBlock()
- }
-}
-
-func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace)
-}
-
-func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) {
- failingSpecs := []*types.SpecSummary{}
-
- for _, summary := range summaries {
- if summary.HasFailureState() {
- failingSpecs = append(failingSpecs, summary)
- }
- }
-
- if len(failingSpecs) == 0 {
- return
- }
-
- s.printNewLine()
- s.printNewLine()
- plural := "s"
- if len(failingSpecs) == 1 {
- plural = ""
- }
- s.println(0, s.colorize(redColor+boldStyle, "Summarizing %d Failure%s:", len(failingSpecs), plural))
- for _, summary := range failingSpecs {
- s.printNewLine()
- if summary.HasFailureState() {
- if summary.TimedOut() {
- s.print(0, s.colorize(redColor+boldStyle, "[Timeout...] "))
- } else if summary.Panicked() {
- s.print(0, s.colorize(redColor+boldStyle, "[Panic!] "))
- } else if summary.Failed() {
- s.print(0, s.colorize(redColor+boldStyle, "[Fail] "))
- }
- s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true)
- s.printNewLine()
- s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String()))
- }
- }
-}
-
-func (s *consoleStenographer) startBlock() {
- if s.cursorState == cursorStateStreaming {
- s.printNewLine()
- s.printDelimiter()
- } else if s.cursorState == cursorStateMidBlock {
- s.printNewLine()
- }
-}
-
-func (s *consoleStenographer) midBlock() {
- s.cursorState = cursorStateMidBlock
-}
-
-func (s *consoleStenographer) endBlock() {
- s.printDelimiter()
- s.cursorState = cursorStateEndBlock
-}
-
-func (s *consoleStenographer) stream() {
- s.cursorState = cursorStateStreaming
-}
-
-func (s *consoleStenographer) printBlockWithMessage(header string, message string, spec *types.SpecSummary, succinct bool) {
- s.startBlock()
- s.println(0, header)
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct)
-
- if message != "" {
- s.printNewLine()
- s.println(indentation, message)
- }
-
- s.endBlock()
-}
-
-func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecSummary, succinct bool, fullTrace bool) {
- s.startBlock()
- s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
-
- indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct)
-
- s.printNewLine()
- s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
- s.endBlock()
-}
-
-func (s *consoleStenographer) failureContext(failedComponentType types.SpecComponentType) string {
- switch failedComponentType {
- case types.SpecComponentTypeBeforeSuite:
- return " in Suite Setup (BeforeSuite)"
- case types.SpecComponentTypeAfterSuite:
- return " in Suite Teardown (AfterSuite)"
- case types.SpecComponentTypeBeforeEach:
- return " in Spec Setup (BeforeEach)"
- case types.SpecComponentTypeJustBeforeEach:
- return " in Spec Setup (JustBeforeEach)"
- case types.SpecComponentTypeAfterEach:
- return " in Spec Teardown (AfterEach)"
- }
-
- return ""
-}
-
-func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) {
- s.println(indentation, s.colorize(cyanColor, spec.Message))
- s.printNewLine()
- s.println(indentation, spec.Location.String())
-}
-
-func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) {
- if state == types.SpecStatePanicked {
- s.println(indentation, s.colorize(redColor+boldStyle, failure.Message))
- s.println(indentation, s.colorize(redColor, failure.ForwardedPanic))
- s.println(indentation, failure.Location.String())
- s.printNewLine()
- s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
- s.println(indentation, failure.Location.FullStackTrace)
- } else {
- s.println(indentation, s.colorize(redColor, failure.Message))
- s.printNewLine()
- s.println(indentation, failure.Location.String())
- if fullTrace {
- s.printNewLine()
- s.println(indentation, s.colorize(redColor, "Full Stack Trace"))
- s.println(indentation, failure.Location.FullStackTrace)
- }
- }
-}
-
-func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
- startIndex := 1
- indentation := 0
-
- if len(componentTexts) == 1 {
- startIndex = 0
- }
-
- for i := startIndex; i < len(componentTexts); i++ {
- if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex {
- color := redColor
- if state == types.SpecStateSkipped {
- color = cyanColor
- }
- blockType := ""
- switch failedComponentType {
- case types.SpecComponentTypeBeforeSuite:
- blockType = "BeforeSuite"
- case types.SpecComponentTypeAfterSuite:
- blockType = "AfterSuite"
- case types.SpecComponentTypeBeforeEach:
- blockType = "BeforeEach"
- case types.SpecComponentTypeJustBeforeEach:
- blockType = "JustBeforeEach"
- case types.SpecComponentTypeAfterEach:
- blockType = "AfterEach"
- case types.SpecComponentTypeIt:
- blockType = "It"
- case types.SpecComponentTypeMeasure:
- blockType = "Measurement"
- }
- if succinct {
- s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i]))
- } else {
- s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType))
- s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
- }
- } else {
- if succinct {
- s.print(0, s.colorize(alternatingColors[i%2], "%s ", componentTexts[i]))
- } else {
- s.println(indentation, componentTexts[i])
- s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i]))
- }
- }
- indentation++
- }
-
- return indentation
-}
-
-func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int {
- indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct)
-
- if succinct {
- if len(componentTexts) > 0 {
- s.printNewLine()
- s.print(0, s.colorize(lightGrayColor, "%s", componentCodeLocations[len(componentCodeLocations)-1]))
- }
- s.printNewLine()
- indentation = 1
- } else {
- indentation--
- }
-
- return indentation
-}
-
-func (s *consoleStenographer) orderedMeasurementKeys(measurements map[string]*types.SpecMeasurement) []string {
- orderedKeys := make([]string, len(measurements))
- for key, measurement := range measurements {
- orderedKeys[measurement.Order] = key
- }
- return orderedKeys
-}
-
-func (s *consoleStenographer) measurementReport(spec *types.SpecSummary, succinct bool) string {
- if len(spec.Measurements) == 0 {
- return "Found no measurements"
- }
-
- message := []string{}
- orderedKeys := s.orderedMeasurementKeys(spec.Measurements)
-
- if succinct {
- message = append(message, fmt.Sprintf("%s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
- for _, key := range orderedKeys {
- measurement := spec.Measurements[key]
- message = append(message, fmt.Sprintf(" %s - %s: %s%s, %s: %s%s ± %s%s, %s: %s%s",
- s.colorize(boldStyle, "%s", measurement.Name),
- measurement.SmallestLabel,
- s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
- measurement.Units,
- measurement.AverageLabel,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
- measurement.Units,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
- measurement.Units,
- measurement.LargestLabel,
- s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
- measurement.Units,
- ))
- }
- } else {
- message = append(message, fmt.Sprintf("Ran %s samples:", s.colorize(boldStyle, "%d", spec.NumberOfSamples)))
- for _, key := range orderedKeys {
- measurement := spec.Measurements[key]
- info := ""
- if measurement.Info != nil {
- message = append(message, fmt.Sprintf("%v", measurement.Info))
- }
-
- message = append(message, fmt.Sprintf("%s:\n%s %s: %s%s\n %s: %s%s\n %s: %s%s ± %s%s",
- s.colorize(boldStyle, "%s", measurement.Name),
- info,
- measurement.SmallestLabel,
- s.colorize(greenColor, measurement.PrecisionFmt(), measurement.Smallest),
- measurement.Units,
- measurement.LargestLabel,
- s.colorize(redColor, measurement.PrecisionFmt(), measurement.Largest),
- measurement.Units,
- measurement.AverageLabel,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.Average),
- measurement.Units,
- s.colorize(cyanColor, measurement.PrecisionFmt(), measurement.StdDeviation),
- measurement.Units,
- ))
- }
- }
-
- return strings.Join(message, "\n")
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
deleted file mode 100644
index e84226a7358..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# go-colorable
-
-Colorable writer for windows.
-
-For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
-This package is possible to handle escape sequence for ansi color on windows.
-
-## Too Bad!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
-
-
-## So Good!
-
-![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
-
-## Usage
-
-```go
-logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
-logrus.SetOutput(colorable.NewColorableStdout())
-
-logrus.Info("succeeded")
-logrus.Warn("not correct")
-logrus.Error("something error")
-logrus.Fatal("panic")
-```
-
-You can compile above code on non-windows OSs.
-
-## Installation
-
-```
-$ go get github.com/mattn/go-colorable
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
deleted file mode 100644
index 3fb5bf15899..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go
+++ /dev/null
@@ -1,25 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package colorable
-
-import (
- "io"
- "os"
-)
-
-func NewColorable(file *os.File) io.Writer {
- if file == nil {
- panic("nil passed instead of *os.File to NewColorable()")
- }
-
- return file
-}
-
-func NewColorableStdout() io.Writer {
- return os.Stdout
-}
-
-func NewColorableStderr() io.Writer {
- return os.Stderr
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
deleted file mode 100644
index fb976dbd8b7..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package colorable
-
-import (
- "bytes"
- "fmt"
- "io"
-)
-
-type NonColorable struct {
- out io.Writer
- lastbuf bytes.Buffer
-}
-
-func NewNonColorable(w io.Writer) io.Writer {
- return &NonColorable{out: w}
-}
-
-func (w *NonColorable) Write(data []byte) (n int, err error) {
- er := bytes.NewBuffer(data)
-loop:
- for {
- c1, _, err := er.ReadRune()
- if err != nil {
- break loop
- }
- if c1 != 0x1b {
- fmt.Fprint(w.out, string(c1))
- continue
- }
- c2, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- break loop
- }
- if c2 != 0x5b {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- continue
- }
-
- var buf bytes.Buffer
- for {
- c, _, err := er.ReadRune()
- if err != nil {
- w.lastbuf.WriteRune(c1)
- w.lastbuf.WriteRune(c2)
- w.lastbuf.Write(buf.Bytes())
- break loop
- }
- if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
- break
- }
- buf.Write([]byte(string(c)))
- }
- }
- return len(data) - w.lastbuf.Len(), nil
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
deleted file mode 100644
index 65dc692b6b1..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) Yasuhiro MATSUMOTO
-
-MIT License (Expat)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
deleted file mode 100644
index 74845de4a24..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# go-isatty
-
-isatty for golang
-
-## Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/mattn/go-isatty"
- "os"
-)
-
-func main() {
- if isatty.IsTerminal(os.Stdout.Fd()) {
- fmt.Println("Is Terminal")
- } else {
- fmt.Println("Is Not Terminal")
- }
-}
-```
-
-## Installation
-
-```
-$ go get github.com/mattn/go-isatty
-```
-
-# License
-
-MIT
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
deleted file mode 100644
index 17d4f90ebcc..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// Package isatty implements interface to isatty
-package isatty
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
deleted file mode 100644
index 84eabda78f8..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build appengine
-// +build appengine
-
-package isatty
-
-// IsTerminal returns true if the file descriptor is terminal which
-// is always false on on appengine classic which is a sandboxed PaaS.
-func IsTerminal(fd uintptr) bool {
- return false
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
deleted file mode 100644
index 5cb68df6c77..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build (darwin || freebsd || openbsd || netbsd) && !appengine
-// +build darwin freebsd openbsd netbsd
-// +build !appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
deleted file mode 100644
index 873523f4293..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build linux && !appengine
-// +build linux,!appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-const ioctlReadTermios = syscall.TCGETS
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
deleted file mode 100644
index fb80b296692..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build solaris && !appengine
-// +build solaris,!appengine
-
-package isatty
-
-import (
- "golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
-func IsTerminal(fd uintptr) bool {
- var termio unix.Termio
- err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
- return err == nil
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
deleted file mode 100644
index 89fc3d02b22..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build windows && !appengine
-// +build windows,!appengine
-
-package isatty
-
-import (
- "syscall"
- "unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-
-// IsTerminal return true if the file descriptor is terminal.
-func IsTerminal(fd uintptr) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
deleted file mode 100644
index 84fd8aff878..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/teamcity_reporter.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
-
-TeamCity Reporter for Ginkgo
-
-Makes use of TeamCity's support for Service Messages
-http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
-*/
-
-package reporters
-
-import (
- "fmt"
- "io"
- "strings"
-
- "github.com/onsi/ginkgo/config"
- "github.com/onsi/ginkgo/types"
-)
-
-const (
- messageId = "##teamcity"
-)
-
-type TeamCityReporter struct {
- writer io.Writer
- testSuiteName string
- ReporterConfig config.DefaultReporterConfigType
-}
-
-func NewTeamCityReporter(writer io.Writer) *TeamCityReporter {
- return &TeamCityReporter{
- writer: writer,
- }
-}
-
-func (reporter *TeamCityReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
- reporter.testSuiteName = escape(summary.SuiteDescription)
- fmt.Fprintf(reporter.writer, "%s[testSuiteStarted name='%s']\n", messageId, reporter.testSuiteName)
-}
-
-func (reporter *TeamCityReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("BeforeSuite", setupSummary)
-}
-
-func (reporter *TeamCityReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary) {
- reporter.handleSetupSummary("AfterSuite", setupSummary)
-}
-
-func (reporter *TeamCityReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
- if setupSummary.State != types.SpecStatePassed {
- testName := escape(name)
- fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
- message := reporter.failureMessage(setupSummary.Failure)
- details := reporter.failureDetails(setupSummary.Failure)
- fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
- durationInMilliseconds := setupSummary.RunTime.Seconds() * 1000
- fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
- }
-}
-
-func (reporter *TeamCityReporter) SpecWillRun(specSummary *types.SpecSummary) {
- testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
- fmt.Fprintf(reporter.writer, "%s[testStarted name='%s']\n", messageId, testName)
-}
-
-func (reporter *TeamCityReporter) SpecDidComplete(specSummary *types.SpecSummary) {
- testName := escape(strings.Join(specSummary.ComponentTexts[1:], " "))
-
- if reporter.ReporterConfig.ReportPassed && specSummary.State == types.SpecStatePassed {
- details := escape(specSummary.CapturedOutput)
- fmt.Fprintf(reporter.writer, "%s[testPassed name='%s' details='%s']\n", messageId, testName, details)
- }
- if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
- message := reporter.failureMessage(specSummary.Failure)
- details := reporter.failureDetails(specSummary.Failure)
- fmt.Fprintf(reporter.writer, "%s[testFailed name='%s' message='%s' details='%s']\n", messageId, testName, message, details)
- }
- if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {
- fmt.Fprintf(reporter.writer, "%s[testIgnored name='%s']\n", messageId, testName)
- }
-
- durationInMilliseconds := specSummary.RunTime.Seconds() * 1000
- fmt.Fprintf(reporter.writer, "%s[testFinished name='%s' duration='%v']\n", messageId, testName, durationInMilliseconds)
-}
-
-func (reporter *TeamCityReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
- fmt.Fprintf(reporter.writer, "%s[testSuiteFinished name='%s']\n", messageId, reporter.testSuiteName)
-}
-
-func (reporter *TeamCityReporter) failureMessage(failure types.SpecFailure) string {
- return escape(failure.ComponentCodeLocation.String())
-}
-
-func (reporter *TeamCityReporter) failureDetails(failure types.SpecFailure) string {
- return escape(fmt.Sprintf("%s\n%s", failure.Message, failure.Location.String()))
-}
-
-func escape(output string) string {
- output = strings.Replace(output, "|", "||", -1)
- output = strings.Replace(output, "'", "|'", -1)
- output = strings.Replace(output, "\n", "|n", -1)
- output = strings.Replace(output, "\r", "|r", -1)
- output = strings.Replace(output, "[", "|[", -1)
- output = strings.Replace(output, "]", "|]", -1)
- return output
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/code_location.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/code_location.go
deleted file mode 100644
index 935a89e136a..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/code_location.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package types
-
-import (
- "fmt"
-)
-
-type CodeLocation struct {
- FileName string
- LineNumber int
- FullStackTrace string
-}
-
-func (codeLocation CodeLocation) String() string {
- return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/synchronization.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/synchronization.go
deleted file mode 100644
index fdd6ed5bdf8..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/synchronization.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package types
-
-import (
- "encoding/json"
-)
-
-type RemoteBeforeSuiteState int
-
-const (
- RemoteBeforeSuiteStateInvalid RemoteBeforeSuiteState = iota
-
- RemoteBeforeSuiteStatePending
- RemoteBeforeSuiteStatePassed
- RemoteBeforeSuiteStateFailed
- RemoteBeforeSuiteStateDisappeared
-)
-
-type RemoteBeforeSuiteData struct {
- Data []byte
- State RemoteBeforeSuiteState
-}
-
-func (r RemoteBeforeSuiteData) ToJSON() []byte {
- data, _ := json.Marshal(r)
- return data
-}
-
-type RemoteAfterSuiteData struct {
- CanRun bool
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/types.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/types.go
deleted file mode 100644
index c143e02d845..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/types/types.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package types
-
-import (
- "strconv"
- "time"
-)
-
-const GINKGO_FOCUS_EXIT_CODE = 197
-
-/*
-SuiteSummary represents the a summary of the test suite and is passed to both
-Reporter.SpecSuiteWillBegin
-Reporter.SpecSuiteDidEnd
-
-this is unfortunate as these two methods should receive different objects. When running in parallel
-each node does not deterministically know how many specs it will end up running.
-
-Unfortunately making such a change would break backward compatibility.
-
-Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unknown fields
-with -1.
-*/
-type SuiteSummary struct {
- SuiteDescription string
- SuiteSucceeded bool
- SuiteID string
-
- NumberOfSpecsBeforeParallelization int
- NumberOfTotalSpecs int
- NumberOfSpecsThatWillBeRun int
- NumberOfPendingSpecs int
- NumberOfSkippedSpecs int
- NumberOfPassedSpecs int
- NumberOfFailedSpecs int
- // Flaked specs are those that failed initially, but then passed on a
- // subsequent try.
- NumberOfFlakedSpecs int
- RunTime time.Duration
-}
-
-type SpecSummary struct {
- ComponentTexts []string
- ComponentCodeLocations []CodeLocation
-
- State SpecState
- RunTime time.Duration
- Failure SpecFailure
- IsMeasurement bool
- NumberOfSamples int
- Measurements map[string]*SpecMeasurement
-
- CapturedOutput string
- SuiteID string
-}
-
-func (s SpecSummary) HasFailureState() bool {
- return s.State.IsFailure()
-}
-
-func (s SpecSummary) TimedOut() bool {
- return s.State == SpecStateTimedOut
-}
-
-func (s SpecSummary) Panicked() bool {
- return s.State == SpecStatePanicked
-}
-
-func (s SpecSummary) Failed() bool {
- return s.State == SpecStateFailed
-}
-
-func (s SpecSummary) Passed() bool {
- return s.State == SpecStatePassed
-}
-
-func (s SpecSummary) Skipped() bool {
- return s.State == SpecStateSkipped
-}
-
-func (s SpecSummary) Pending() bool {
- return s.State == SpecStatePending
-}
-
-type SetupSummary struct {
- ComponentType SpecComponentType
- CodeLocation CodeLocation
-
- State SpecState
- RunTime time.Duration
- Failure SpecFailure
-
- CapturedOutput string
- SuiteID string
-}
-
-type SpecFailure struct {
- Message string
- Location CodeLocation
- ForwardedPanic string
-
- ComponentIndex int
- ComponentType SpecComponentType
- ComponentCodeLocation CodeLocation
-}
-
-type SpecMeasurement struct {
- Name string
- Info interface{}
- Order int
-
- Results []float64
-
- Smallest float64
- Largest float64
- Average float64
- StdDeviation float64
-
- SmallestLabel string
- LargestLabel string
- AverageLabel string
- Units string
- Precision int
-}
-
-func (s SpecMeasurement) PrecisionFmt() string {
- if s.Precision == 0 {
- return "%f"
- }
-
- str := strconv.Itoa(s.Precision)
-
- return "%." + str + "f"
-}
-
-type SpecState uint
-
-const (
- SpecStateInvalid SpecState = iota
-
- SpecStatePending
- SpecStateSkipped
- SpecStatePassed
- SpecStateFailed
- SpecStatePanicked
- SpecStateTimedOut
-)
-
-func (state SpecState) IsFailure() bool {
- return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed
-}
-
-type SpecComponentType uint
-
-const (
- SpecComponentTypeInvalid SpecComponentType = iota
-
- SpecComponentTypeContainer
- SpecComponentTypeBeforeSuite
- SpecComponentTypeAfterSuite
- SpecComponentTypeBeforeEach
- SpecComponentTypeJustBeforeEach
- SpecComponentTypeJustAfterEach
- SpecComponentTypeAfterEach
- SpecComponentTypeIt
- SpecComponentTypeMeasure
-)
-
-type FlagType uint
-
-const (
- FlagTypeNone FlagType = iota
- FlagTypeFocused
- FlagTypePending
-)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/.gitignore b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/.gitignore
similarity index 90%
rename from integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/.gitignore
rename to integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/.gitignore
index b9f9659d29a..18793c248aa 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/.gitignore
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/.gitignore
@@ -4,4 +4,4 @@ tmp/**/*
*.coverprofile
.vscode
.idea/
-*.log
+*.log
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
new file mode 100644
index 00000000000..76577dc78e5
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md
@@ -0,0 +1,1028 @@
+## 2.19.0
+
+### Features
+
+[Label Sets](https://onsi.github.io/ginkgo/#label-sets) allow for more expressive and flexible label filtering.
+
+## 2.18.0
+
+### Features
+- Add --slience-skips and --force-newlines [f010b65]
+- fail when no tests were run and --fail-on-empty was set [d80eebe]
+
+### Fixes
+- Fix table entry context edge case [42013d6]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.20.0 to 0.21.0 (#1406) [fcf1fd7]
+- Bump github.com/onsi/gomega from 1.33.0 to 1.33.1 (#1399) [8bb14fd]
+- Bump golang.org/x/net from 0.24.0 to 0.25.0 (#1407) [04bfad7]
+
+## 2.17.3
+
+### Fixes
+`ginkgo watch` now ignores hidden files [bde6e00]
+
+## 2.17.2
+
+### Fixes
+- fix: close files [32259c8]
+- fix github output log level for skipped specs [780e7a3]
+
+### Maintenance
+- Bump github.com/google/pprof [d91fe4e]
+- Bump github.com/go-task/slim-sprig to v3 [8cb662e]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422]
+- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4]
+- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8]
+- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4]
+- Fix test for gomega version bump [f2fcd97]
+- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2]
+- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26]
+- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170]
+- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a]
+
+## 2.17.1
+
+### Fixes
+- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d]
+
+## 2.17.0
+
+### Features
+
+- add `--github-output` for nicer output in github actions [e8a2056]
+
+### Maintenance
+
+- fix typo in core_dsl.go [977bc6f]
+- Fix typo in docs [e297e7b]
+
+## 2.16.0
+
+### Features
+- add SpecContext to reporting nodes
+
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
+
+## 2.15.0
+
+### Features
+
+- JUnit reports now interpret Label(owner:X) and set owner to X. [8f3bd70]
+- include cancellation reason when cancelling spec context [96e915c]
+
+### Fixes
+
+- emit output of failed go tool cover invocation so users can try to debug things for themselves [c245d09]
+- fix outline when using nodot in ginkgo v2 [dca77c8]
+- Document areas where GinkgoT() behaves differently from testing.T [dbaf18f]
+- bugfix(docs): use Unsetenv instead of Clearenv (#1337) [6f67a14]
+
+### Maintenance
+
+- Bump to go 1.20 [4fcd0b3]
+
+## 2.14.0
+
+### Features
+You can now use `GinkgoTB()` when you need an instance of `testing.TB` to pass to a library.
+
+Prior to this release table testing only supported generating individual `It`s for each test entry. `DescribeTableSubtree` extends table testing support to entire testing subtrees - under the hood `DescrieTableSubtree` generates a new container for each entry and invokes your function to fill our the container. See the [docs](https://onsi.github.io/ginkgo/#generating-subtree-tables) to learn more.
+
+- Introduce DescribeTableSubtree [65ec56d]
+- add GinkgoTB() to docs [4a2c832]
+- Add GinkgoTB() function (#1333) [92b6744]
+
+### Fixes
+- Fix typo in internal/suite.go (#1332) [beb9507]
+- Fix typo in docs/index.md (#1319) [4ac3a13]
+- allow wasm to compile with ginkgo present (#1311) [b2e5bc5]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.16.0 to 0.16.1 (#1316) [465a8ec]
+- Bump actions/setup-go from 4 to 5 (#1313) [eab0e40]
+- Bump github/codeql-action from 2 to 3 (#1317) [fbf9724]
+- Bump golang.org/x/crypto (#1318) [3ee80ee]
+- Bump golang.org/x/tools from 0.14.0 to 0.16.0 (#1306) [123e1d5]
+- Bump github.com/onsi/gomega from 1.29.0 to 1.30.0 (#1297) [558f6e0]
+- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#1307) [84ff7f3]
+
+## 2.13.2
+
+### Fixes
+- Fix file handler leak (#1309) [e2e81c8]
+- Avoid allocations with `(*regexp.Regexp).MatchString` (#1302) [3b2a2a7]
+
+## 2.13.1
+
+### Fixes
+- # 1296 fix(precompiled test guite): exec bit check omitted on Windows (#1301) [26eea01]
+
+### Maintenance
+- Bump github.com/go-logr/logr from 1.2.4 to 1.3.0 (#1291) [7161a9d]
+- Bump golang.org/x/sys from 0.13.0 to 0.14.0 (#1295) [7fc7b10]
+- Bump golang.org/x/tools from 0.12.0 to 0.14.0 (#1282) [74bbd65]
+- Bump github.com/onsi/gomega from 1.27.10 to 1.29.0 (#1290) [9373633]
+- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1286) [6e3cf65]
+
+## 2.13.0
+
+### Features
+
+Add PreviewSpect() to enable programmatic preview access to the suite report (fixes #1225)
+
+## 2.12.1
+
+### Fixes
+- Print logr prefix if it exists (#1275) [90d4846]
+
+### Maintenance
+- Bump actions/checkout from 3 to 4 (#1271) [555f543]
+- Bump golang.org/x/sys from 0.11.0 to 0.12.0 (#1270) [d867b7d]
+
+## 2.12.0
+
+### Features
+
+- feat: allow MustPassRepeatedly decorator to be set at suite level (#1266) [05de518]
+
+### Fixes
+
+- fix-errors-in-readme (#1244) [27c2f5d]
+
+### Maintenance
+
+Various chores/dependency bumps.
+
+## 2.11.0
+
+In prior versions of Ginkgo specs the CLI filter flags (e.g. `--focus`, `--label-filter`) would _override_ any programmatic focus. This behavior has proved surprising and confusing in at least the following ways:
+
+- users cannot combine programmatic filters and CLI filters to more efficiently select subsets of tests
+- CLI filters can override programmatic focus on CI systems resulting in an exit code of 0 despite the presence of (incorrectly!) committed focused specs.
+
+Going forward Ginkgo will AND all programmatic and CLI filters. Moreover, the presence of any programmatic focused tests will always result in a non-zero exit code.
+
+This change is technically a change in Ginkgo's external contract and may require some users to make changes to successfully adopt. Specifically: it's possible some users were intentionally using CLI filters to override programmatic focus. If this is you please open an issue so we can explore solutions to the underlying problem you are trying to solve.
+
+### Fixes
+- Programmatic focus is no longer overwrriten by CLI filters [d6bba86]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.7 to 1.27.8 (#1218) [4a70a38]
+- Bump golang.org/x/sys from 0.8.0 to 0.9.0 (#1219) [97eda4d]
+
+## 2.10.0
+
+### Features
+- feat(ginkgo/generators): add --tags flag (#1216) [a782a77]
+ adds a new --tags flag to ginkgo generate
+
+### Fixes
+- Fix broken link of MIGRATING_TO_V2.md (#1217) [548d78e]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.9.1 to 0.9.3 (#1215) [2b76a5e]
+
+## 2.9.7
+
+### Fixes
+- fix race when multiple defercleanups are called in goroutines [07fc3a0]
+
+## 2.9.6
+
+### Fixes
+- fix: create parent directory before report files (#1212) [0ac65de]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.27.6 to 1.27.7 (#1202) [3e39231]
+
+## 2.9.5
+
+### Fixes
+- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b]
+
+### Maintenance
+- fix generators link (#1200) [9f9d8b9]
+- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2]
+- fix spelling err in docs (#1199) [0013b1a]
+- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5]
+
+## 2.9.4
+
+### Fixes
+- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit.
+
+- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite.
+
+
+### Maintenance
+- Document run order when multiple setup nodes are at the same nesting level [903be81]
+
+## 2.9.3
+
+### Features
+- Add RenderTimeline to GinkgoT() [c0c77b6]
+
+### Fixes
+- update Measure deprecation message. fixes #1176 [227c662]
+- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c]
+
+### Maintenance
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4]
+- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793]
+- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b]
+- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64]
+- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557]
+- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5]
+
+## 2.9.2
+
+### Maintenance
+- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf]
+- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe]
+
+## 2.9.1
+
+### Fixes
+This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995)
+- Support -coverpkg=./... [26ca1b5]
+- document coverpkg a bit more clearly [fc44c3b]
+
+### Maintenance
+- bump various dependencies
+- Improve Documentation and fix typo (#1158) [93de676]
+
+## 2.9.0
+
+### Features
+- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe]
+
+- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b]
+
+ The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality.
+
+## 2.8.4
+
+### Features
+- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2]
+- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589]
+
+### Fixes
+- rename tools hack to see if it fixes things for downstream users [a8bb39a]
+
+### Maintenance
+- Bump golang.org/x/text (#1144) [41b2a8a]
+- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583]
+
+## 2.8.3
+
+Released to fix security issue in golang.org/x/net dependency
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e]
+- remove tools.go hack from documentation [0718693]
+
+## 2.8.2
+
+Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod.
+
+### Maintenance
+
+- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a]
+- Fix minor typos (#1138) [e1e9723]
+- Fix link in V2 Migration Guide (#1137) [a588f60]
+
+## 2.8.1
+
+### Fixes
+- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a]
+- don't run ReportEntries through sprintf [febbe38]
+
+### Maintenance
+- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860]
+- test: update matrix for Go 1.20 (#1130) [4890a62]
+- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638]
+- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd]
+- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649]
+- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042]
+- Update index.md with instructions on how to upgrade Ginkgo [833a75e]
+
+## 2.8.0
+
+### Features
+
+- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556]
+
+Modeled after `testing.T.Helper()`. Now, rather than write code like:
+
+```go
+func helper(model Model) {
+ Expect(model).WithOffset(1).To(BeValid())
+ Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+you can stop tracking offsets (which makes nesting composing helpers nearly impossible) and simply write:
+
+```go
+func helper(model Model) {
+ GinkgoHelper()
+ Expect(model).To(BeValid())
+ Expect(model.SerialNumber).To(MatchRegexp("[a-f0-9]*"))
+}
+```
+
+- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c]
+
+You can now write code like this:
+
+```go
+BeforeSuite(func() {
+ if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do slow setup
+ }
+
+ if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) {
+ // do fast setup
+ }
+})
+```
+
+to programmatically check whether a given set of labels will match the configured `--label-filter`.
+
+### Maintenance
+
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e]
+- cdeql: add ruby language (#1124) [9dd275b]
+- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd]
+
+## 2.7.1
+
+### Fixes
+- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6]
+- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2]
+- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa]
+- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480]
+
+## 2.7.0
+
+### Features
+- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails.
+- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242]
+- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98]
+- Color aliases for custom color support (#1101) [49fab7a]
+
+### Fixes
+- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20]
+- Fix a bug where timedout specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container.
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes it much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+
+In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
+reports can be configured and generated using
+`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
+
+Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
+was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, use Ginkgo's JSON format
+to build tooling on top of as it has stronger guarantees to be stable from version to version.
+
+### Features
+- Provide details about which timeout expired [0f2fa27]
+
+### Fixes
+- Add Support Policy to docs [c70867a]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2]
+
+## 2.4.0
+
+### Features
+
+- DeferCleanup supports functions with multiple-return values [5e33c75]
+- Add GinkgoLogr (#1067) [bf78c28]
+- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f]
+
+### Fixes
+- correcting some typos (#1064) [1403d3c]
+- fix flaky internal_integration interrupt specs [2105ba3]
+- Correct busted link in README [be6b5b9]
+
+### Maintenance
+- Bump actions/checkout from 2 to 3 (#1062) [8a2f483]
+- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8]
+- Bump github/codeql-action from 1 to 2 (#1061) [da09146]
+- Bump actions/setup-go from 2 to 3 (#1060) [918040d]
+- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122]
+- Add GHA to dependabot config [4442772]
+
+## 2.3.1
+
+## Fixes
+Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository).
+
+With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message.
+
+- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f]
+- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8]
+
+### Maintenance
+- bump gomega to v1.22.0 [822a937]
+
+## 2.3.0
+
+### Interruptible Nodes and Timeouts
+
+Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this:
+
+```go
+It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid.
+ // do things until `ctx.Done()` is closed, for example:
+ req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil)
+ Expect(err).NotTo(HaveOccurred())
+ _, err = http.DefaultClient.Do(req)
+ Expect(err).NotTo(HaveOccurred())
+
+ Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17))
+}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second))
+```
+
+and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`) then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline.
+
+### Features
+
+- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures.
+- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested.
+- DescribeTable now exits with an error if it is not passed any Entries [a4c9865]
+
+## Fixes
+- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5]
+- Make the outline command able to use the DSL import [1be2427]
+
+## Maintenance
+- chore(docs): delete no meaning d [57c373c]
+- chore(docs): Fix hyperlinks [30526d5]
+- chore(docs): fix code blocks without language settings [cf611c4]
+- fix intra-doc link [b541bcb]
+
+## 2.2.0
+
+### Generate real-time Progress Reports [f91377c]
+
+Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines.
+
+These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`.
+
+In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators.
+
+Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports.
+
+Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously.
+
+### Features
+- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
+
+ As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature.
+
+### Maintenance
+- Modernize the invocation of Ginkgo in github actions [0ffde58]
+- Update reocmmended CI settings in docs [896bbb9]
+- Speed up unnecessarily slow integration test [6d3a90e]
+
+## 2.1.6
+
+### Fixes
+- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a]
+- chore: remove duplicate word in comments [7373214]
+
+## 2.1.5
+
+### Fixes
+- drop -mod=mod instructions; fixes #1026 [6ad7138]
+- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b]
+- remove stale importmap gcflags flag test [3cd8b93]
+- Always emit spec summary [5cf23e2] - even when only one spec has failed
+- Fix ReportAfterSuite usage in docs [b1864ad]
+- fixed typo (#997) [219cc00]
+- TrimRight is not designed to trim Suffix [71ebb74]
+- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208]
+- fix syntax in examples (#975) [b69554f]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4]
+- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a]
+- test: add new Go 1.19 to test matrix (#1014) [bbefe12]
+- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906]
+- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96]
+- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa]
+
+## 2.1.4
+
+### Fixes
+- Numerous documentation typos
+- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903]
+- improve error message when a parallel process fails to report back [a7bd1fe]
+- guard against concurrent map writes in DeprecationTracker [0976569]
+- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480]
+- Fix ginkgo import circle [f779385]
+
+## 2.1.3
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Calling By in a container node now emits a useful error. [ff12cee]
+
+## 2.1.2
+
+### Fixes
+
+- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1]
+- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02]
+- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them!
+
+## 2.1.1
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+### Fixes
+- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17]
+
+## 2.1.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2.
+
+2.1.0 is a minor release with a few tweaks:
+
+- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo).
+- Add error check for invalid/nil parameters to DescribeTable [6f8577e]
+- Myriad docs typos fixed (thanks everyone!) [718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e]
+
+## 2.0.0
+
+See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)
+
+## 1.16.5
+
+Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC.
+1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess
+
+You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc`
+
+## 1.16.4
+
+### Fixes
+1.16.4 retracts 1.16.3. There are no code changes. The 1.16.3 tag was associated with the wrong commit and an attempt to change it after-the-fact has proven problematic. 1.16.4 retracts 1.16.3 in Ginkgo's go.mod and creates a new, correctly tagged, release.
+
+## 1.16.3
+
+### Features
+- Measure is now deprecated and emits a deprecation warning.
+
+## 1.16.2
+
+### Fixes
+- Deprecations can be suppressed by setting an `ACK_GINKGO_DEPRECATIONS=` environment variable.
+
+## 1.16.1
+
+### Fixes
+- Suppress --stream deprecation warning on windows (#793)
+
+## 1.16.0
+
+### Features
+- Advertise Ginkgo 2.0. Introduce deprecations. [9ef1913]
+ - Update README.md to advertise that Ginkgo 2.0 is coming.
+ - Backport the 2.0 DeprecationTracker and start alerting users
+ about upcoming deprecations.
+
+- Add slim-sprig template functions to bootstrap/generate (#775) [9162b86]
+
+- Fix accidental reference to 1488 (#784) [9fb7fe4]
+
+## 1.15.2
+
+### Fixes
+- ignore blank `-focus` and `-skip` flags (#780) [e90a4a0]
+
+## 1.15.1
+
+### Fixes
+- reporters/junit: Use `system-out` element instead of `passed` (#769) [9eda305]
+
+## 1.15.0
+
+### Features
+- Adds 'outline' command to print the outline of specs/containers in a file (#754) [071c369] [6803cc3] [935b538] [06744e8] [0c40583]
+- Add support for using template to generate tests (#752) [efb9e69]
+- Add a Chinese Doc #755 (#756) [5207632]
+- cli: allow multiple -focus and -skip flags (#736) [9a782fb]
+
+### Fixes
+- Add _internal to filename of tests created with internal flag (#751) [43c12da]
+
+## 1.14.2
+
+### Fixes
+- correct handling windows backslash in import path (#721) [97f3d51]
+- Add additional methods to GinkgoT() to improve compatibility with the testing.TB interface [b5fe44d]
+
+## 1.14.1
+
+### Fixes
+- Discard exported method declaration when running ginkgo bootstrap (#558) [f4b0240]
+
+## 1.14.0
+
+### Features
+- Defer running top-level container nodes until RunSpecs is called [d44dedf]
+- [Document Ginkgo lifecycle](http://onsi.github.io/ginkgo/#understanding-ginkgos-lifecycle)
+- Add `extensions/globals` package (#692) [3295c8f] - this can be helpful in contexts where you are test-driving your test-generation code (see [#692](https://github.com/onsi/ginkgo/pull/692))
+- Print Skip reason in JUnit reporter if one was provided [820dfab]
+
+## 1.13.0
+
+### Features
+- Add a version of table.Entry that allows dumping the entry parameters. (#689) [21eaef2]
+
+### Fixes
+- Ensure integration tests pass in an environment sans GOPATH [606fba2]
+- Add books package (#568) [fc0e44e]
+- doc(readme): installation via "tools package" (#677) [83bb20e]
+- Solve the undefined: unix.Dup2 compile error on mips64le (#680) [0624f75]
+- Import package without dot (#687) [6321024]
+- Fix integration tests to stop require GOPATH (#686) [a912ec5]
+
+## 1.12.3
+
+### Fixes
+- Print correct code location of failing table test (#666) [c6d7afb]
+
+## 1.12.2
+
+### Fixes
+- Update dependencies [ea4a036]
+
+## 1.12.1
+
+### Fixes
+- Make unfocus ("blur") much faster (#674) [8b18061]
+- Fix typo (#673) [7fdcbe8]
+- Test against 1.14 and remove 1.12 [d5c2ad6]
+- Test if a coverprofile content is empty before checking its latest character (#670) [14d9fa2]
+- replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4]
+- improve ginkgo performance - makes progress on #644 [a14f98e]
+- fix convert integration tests [1f8ba69]
+- fix typo successful -> successful (#663) [1ea49cf]
+- Fix invalid link (#658) [b886136]
+- convert utility : Include comments from source (#657) [1077c6d]
+- Explain what BDD means [d79e7fb]
+- skip race detector test on unsupported platform (#642) [f8ab89d]
+- Use Dup2 from golang.org/x/sys/unix instead of syscallDup (#638) [5d53c55]
+- Fix missing newline in combined coverage file (#641) [6a07ea2]
+- check if a spec is run before returning SpecSummary (#645) [8850000]
+
+## 1.12.0
+
+### Features
+- Add module definition (#630) [78916ab]
+
+## 1.11.0
+
+### Features
+- Add syscall for riscv64 architecture [f66e896]
+- teamcity reporter: output location of test failure as well as test definition (#626) [9869142]
+- teamcity reporter: output newline after every service message (#625) [3cfa02d]
+- Add support for go module when running `generate` command (#578) [9c89e3f]
+
+## 1.10.3
+
+### Fixes
+- Set go_import_path in travis.yml to allow internal packages in forks (#607) [3b721db]
+- Add integration test [d90e0dc]
+- Fix coverage files combining [e5dde8c]
+- A new CLI option: -ginkgo.reportFile (#601) [034fd25]
+
+## 1.10.2
+
+### Fixes
+- speed up table entry generateIt() (#609) [5049dc5]
+- Fix. Write errors to stderr instead of stdout (#610) [7bb3091]
+
+## 1.10.1
+
+### Fixes
+- stack backtrace: fix skipping (#600) [2a4c0bd]
+
+## 1.10.0
+
+### Fixes
+- stack backtrace: fix alignment and skipping [66915d6]
+- fix typo in documentation [8f97b93]
+
+## 1.9.0
+
+### Features
+- Option to print output into report, when tests have passed [0545415]
+
+### Fixes
+- Fixed typos in comments [0ecbc58]
+- gofmt code [a7f8bfb]
+- Simplify code [7454d00]
+- Simplify concatenation, incrementation and function assignment [4825557]
+- Avoid unnecessary conversions [9d9403c]
+- JUnit: include more detailed information about panic [19cca4b]
+- Print help to stdout when the user asks for help [4cb7441]
+
+
+## 1.8.0
+
+### New Features
+- allow config of the vet flag for `go test` (#562) [3cd45fa]
+- Support projects using go modules [d56ee76]
+
+### Fixes and Minor Improvements
+- chore(godoc): fixes typos in Measurement funcs [dbaca8e]
+- Optimize focus to avoid allocations [f493786]
+- Ensure generated test file names are underscored [505cc35]
+
+## 1.7.0
+
+### New Features
+- Add JustAfterEach (#484) [0d4f080]
+
+### Fixes
+- Correctly round suite time in junit reporter [2445fc1]
+- Avoid using -i argument to go test for Golang 1.10+ [46bbc26]
+
+## 1.6.0
+
+### New Features
+- add --debug flag to emit node output to files (#499) [39febac]
+
+### Fixes
+- fix: for `go vet` to pass [69338ec]
+- docs: fix for contributing instructions [7004cb1]
+- consolidate and streamline contribution docs (#494) [d848015]
+- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6]
+- all: gofmt [000d317]
+- Increase eventually timeout to 30s [c73579c]
+- Clarify asynchronous test behavior [294d8f4]
+- Travis badge should only show master [26d2143]
+
+## 1.5.0 5/10/2018
+
+### New Features
+- Supports go v1.10 (#443, #446, #451) [e873237, 468e89e, e37dbfe, a37f4c0, c0b857d, bca5260, 4177ca8]
+- Add a When() synonym for Context() (#386) [747514b, 7484dad, 7354a07, dd826c8]
+- Re-add noisySkippings flag [652e15c]
+- Allow coverage to be displayed for focused specs (#367) [11459a8]
+- Handle -outputdir flag (#364) [228e3a8]
+- Handle -coverprofile flag (#355) [43392d5]
+
+### Fixes
+- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their customer reporters. (#365) [8382b23]
+- When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0]
+- `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd]
+- Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98]
+- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c]
+- Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b]
+- Add an extra new line after reporting spec run completion for test2json [874520d]
+- added name name field to junit reported testsuite [ae61c63]
+- Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856]
+- Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe]
+- Synchronies the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d]
+- Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed]
+- Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8]
+- Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598]
+- Use fmt.Errorf instead of errors.New(fmt.Sprintf (#401) [a299f56, 44e2eaa]
+- Imports in generated code should follow conventions (#398) [0bec0b0, e8536d8]
+- Prevent data race error when Recording a benchmark value from multiple go routines (#390) [c0c4881, 7a241e9]
+- Replace GOPATH in Environment [4b883f0]
+
+
+## 1.4.0 7/16/2017
+
+- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345)
+- thanks to improvements in `go test -c` `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357]
+- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`) [#356]
+- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277]
+- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344]
+- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours) [#248]
+
+## 1.3.0 3/28/2017
+
+Improvements:
+
+- Significantly improved parallel test distribution. Now instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance) Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency.
+- `Skip(message)` can be used to skip the current test.
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests)
+- Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed`
+- Support for retrying flaky tests with `--flakeAttempts`
+- `ginkgo ./...` now recurses as you'd expect
+- Added `Specify` a synonym for `It`
+- Support colorise on Windows
+- Broader support for various go compilation flags in the `ginkgo` CLI
+
+Bug Fixes:
+
+- Ginkgo tests now fail when you `panic(nil)` (#167)
+
+## 1.2.0 5/31/2015
+
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
+- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
+Improvements:
+
+- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
+- Improved focus behavior. Now, this:
+
+ ```golang
+ FDescribe("Some describe", func() {
+ It("A", func() {})
+
+ FIt("B", func() {})
+ })
+ ```
+
+ will run `B` but *not* `A`. This tends to be a common usage pattern when in the thick of writing and debugging tests.
+- When `SIGINT` is received, Ginkgo will emit the contents of the `GinkgoWriter` before running the `AfterSuite`. Useful for debugging stuck tests.
+- When `--progress` is set, Ginkgo will write test progress (in particular, Ginkgo will say when it is about to run a BeforeEach, AfterEach, It, etc...) to the `GinkgoWriter`. This is useful for debugging stuck tests and tests that generate many logs.
+- Improved output when an error occurs in a setup or teardown block.
+- When `--dryRun` is set, Ginkgo will walk the spec tree and emit to its reporter *without* actually running anything. Best paired with `-v` to understand which specs will run in which order.
+- Add `By` to help document long `It`s. `By` simply writes to the `GinkgoWriter`.
+- Add support for precompiled tests:
+ - `ginkgo build ` will now compile the package, producing a file named `package.test`
+ - The compiled `package.test` file can be run directly. This runs the tests in series.
+ - To run precompiled tests in parallel, you can run: `ginkgo -p package.test`
+- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
+- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
+- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
+
+Bug Fixes:
+
+- If --skipPackages is used and all packages are skipped, Ginkgo should exit 0.
+- Fix tempfile leak when running in parallel
+- Fix incorrect failure message when a panic occurs during a parallel test run
+- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
+- Be more consistent about handling SIGTERM as well as SIGINT
+- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
+
+## 1.1.0 (8/2/2014)
+
+No changes, just dropping the beta.
+
+## 1.1.0-beta (7/22/2014)
+New Features:
+
+- `ginkgo watch` now monitors packages *and their dependencies* for changes. The depth of the dependency tree can be modified with the `-depth` flag.
+- Test suites with a programmatic focus (`FIt`, `FDescribe`, etc...) exit with non-zero status code, even when they pass. This allows CI systems to detect accidental commits of focused test suites.
+- `ginkgo -p` runs the testsuite in parallel with an auto-detected number of nodes.
+- `ginkgo -tags=TAG_LIST` passes a list of tags down to the `go build` command.
+- `ginkgo --failFast` aborts the test suite after the first failure.
+- `ginkgo generate file_1 file_2` can take multiple file arguments.
+- Ginkgo now summarizes any spec failures that occurred at the end of the test run.
+- `ginkgo --randomizeSuites` will run tests *suites* in random order using the generated/passed-in seed.
+
+Improvements:
+
+- `ginkgo -skipPackage` now takes a comma-separated list of strings. If the *relative path* to a package matches one of the entries in the comma-separated list, that package is skipped.
+- `ginkgo --untilItFails` no longer recompiles between attempts.
+- Ginkgo now panics when a runnable node (`It`, `BeforeEach`, `JustBeforeEach`, `AfterEach`, `Measure`) is nested within another runnable node. This is always a mistake. Any test suites that panic because of this change should be fixed.
+
+Bug Fixes:
+
+- `ginkgo bootstrap` and `ginkgo generate` no longer fail when dealing with `hyphen-separated-packages`.
+- parallel specs are now better distributed across nodes - fixed a crashing bug where (for example) distributing 11 tests across 7 nodes would panic
+
+## 1.0.0 (5/24/2014)
+New Features:
+
+- Add `GinkgoParallelNode()` - shorthand for `config.GinkgoConfig.ParallelNode`
+
+Improvements:
+
+- When compilation fails, the compilation output is rewritten to present a correct *relative* path. Allows ⌘-clicking in iTerm to open the file in your text editor.
+- `--untilItFails` and `ginkgo watch` now generate new random seeds between test runs, unless a particular random seed is specified.
+
+Bug Fixes:
+
+- `-cover` now generates a correctly combined coverprofile when running in parallel with multiple `-node`s.
+- Print out the contents of the `GinkgoWriter` when `BeforeSuite` or `AfterSuite` fail.
+- Fix all remaining race conditions in Ginkgo's test suite.
+
+## 1.0.0-beta (4/14/2014)
+Breaking changes:
+
+- `thirdparty/gomocktestreporter` is gone. Use `GinkgoT()` instead
+- Modified the Reporter interface
+- `watch` is now a subcommand, not a flag.
+
+DSL changes:
+
+- `BeforeSuite` and `AfterSuite` for setting up and tearing down test suites.
+- `AfterSuite` is triggered on interrupt (`^C`) as well as exit.
+- `SynchronizedBeforeSuite` and `SynchronizedAfterSuite` for setting up and tearing down singleton resources across parallel nodes.
+
+CLI changes:
+
+- `watch` is now a subcommand, not a flag
+- `--nodot` flag can be passed to `ginkgo generate` and `ginkgo bootstrap` to avoid dot imports. This explicitly imports all exported identifiers in Ginkgo and Gomega. Refreshing this list can be done by running `ginkgo nodot`
+- Additional arguments can be passed to specs. Pass them after the `--` separator
+- `--skipPackage` flag takes a regexp and ignores any packages with package names passing said regexp.
+- `--trace` flag prints out full stack traces when errors occur, not just the line at which the error occurs.
+
+Misc:
+
+- Start using semantic versioning
+- Start maintaining changelog
+
+Major refactor:
+
+- Pull out Ginkgo's internal to `internal`
+- Rename `example` everywhere to `spec`
+- Much more!
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
new file mode 100644
index 00000000000..80de566a522
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md
@@ -0,0 +1,15 @@
+# Contributing to Ginkgo
+
+Your contributions to Ginkgo are essential for its long-term maintenance and improvement.
+
+- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code!
+- Ensure adequate test coverage:
+ - When adding to the Ginkgo library, add unit and/or integration tests (under the `integration` folder).
+ - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
+- Run `make` or:
+ - Install ginkgo locally via `go install ./...`
+ - Make sure all the tests succeed via `ginkgo -r -p`
+ - Vet your changes via `go vet ./...`
+- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle && bundle exec jekyll serve` in the `docs` directory to preview your changes.
+
+Thanks for supporting Ginkgo!
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/LICENSE b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/LICENSE
similarity index 100%
rename from integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/LICENSE
rename to integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/LICENSE
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/Makefile b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/Makefile
new file mode 100644
index 00000000000..cb099aff950
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/Makefile
@@ -0,0 +1,11 @@
+# default task since it's first
+.PHONY: all
+all: vet test
+
+.PHONY: test
+test:
+ go run github.com/onsi/ginkgo/v2/ginkgo -r -p
+
+.PHONY: vet
+vet:
+ go vet ./...
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/README.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/README.md
new file mode 100644
index 00000000000..cb23ffdf6a8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/README.md
@@ -0,0 +1,115 @@
+![Ginkgo](https://onsi.github.io/ginkgo/images/ginkgo.png)
+
+[![test](https://github.com/onsi/ginkgo/workflows/test/badge.svg?branch=master)](https://github.com/onsi/ginkgo/actions?query=workflow%3Atest+branch%3Amaster) | [Ginkgo Docs](https://onsi.github.io/ginkgo/)
+
+---
+
+# Ginkgo
+
+Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly:
+
+```go
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ ...
+)
+
+var _ = Describe("Checking books out of the library", Label("library"), func() {
+ var library *libraries.Library
+ var book *books.Book
+ var valjean *users.User
+ BeforeEach(func() {
+ library = libraries.NewClient()
+ book = &books.Book{
+ Title: "Les Miserables",
+ Author: "Victor Hugo",
+ }
+ valjean = users.NewUser("Jean Valjean")
+ })
+
+ When("the library has the book in question", func() {
+ BeforeEach(func(ctx SpecContext) {
+ Expect(library.Store(ctx, book)).To(Succeed())
+ })
+
+ Context("and the book is available", func() {
+ It("lends it to the reader", func(ctx SpecContext) {
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books()).To(ContainElement(book))
+ Expect(library.UserWithBook(ctx, book)).To(Equal(valjean))
+ }, SpecTimeout(time.Second * 5))
+ })
+
+ Context("but the book has already been checked out", func() {
+ var javert *users.User
+ BeforeEach(func(ctx SpecContext) {
+ javert = users.NewUser("Javert")
+ Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ })
+
+ It("tells the user", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is currently checked out"))
+ }, SpecTimeout(time.Second * 5))
+
+ It("lets the user place a hold and get notified later", func(ctx SpecContext) {
+ Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Holds(ctx)).To(ContainElement(book))
+
+ By("when Javert returns the book")
+ Expect(javert.Return(ctx, library, book)).To(Succeed())
+
+ By("it eventually informs Valjean")
+ notification := "Les Miserables is ready for pick up"
+ Eventually(ctx, valjean.Notifications).Should(ContainElement(notification))
+
+ Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed())
+ Expect(valjean.Books(ctx)).To(ContainElement(book))
+ Expect(valjean.Holds(ctx)).To(BeEmpty())
+ }, SpecTimeout(time.Second * 10))
+ })
+ })
+
+ When("the library does not have the book in question", func() {
+ It("tells the reader the book is unavailable", func(ctx SpecContext) {
+ err := valjean.Checkout(ctx, library, "Les Miserables")
+ Expect(err).To(MatchError("Les Miserables is not in the library catalog"))
+ }, SpecTimeout(time.Second * 5))
+ })
+})
+```
+
+Jump to the [docs](https://onsi.github.io/ginkgo/) to learn more. It's easy to [bootstrap](https://onsi.github.io/ginkgo/#bootstrapping-a-suite) and start writing your [first specs](https://onsi.github.io/ginkgo/#adding-specs-to-a-suite).
+
+If you have a question, comment, bug report, feature request, etc. please open a [GitHub issue](https://github.com/onsi/ginkgo/issues/new), or visit the [Ginkgo Slack channel](https://app.slack.com/client/T029RQSE6/CQQ50BBNW).
+
+## Capabilities
+
+Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing.
+
+With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs).
+
+At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as
+
+```bash
+ginkgo -p
+```
+
+By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up.
+
+As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically).
+
+Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development.
+
+And that's just Ginkgo! [Gomega](https://onsi.github.io/gomega/) brings a rich, mature, family of [assertions and matchers](https://onsi.github.io/gomega/#provided-matchers) to your suites. With Gomega you can easily mix [synchronous and asynchronous assertions](https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing) in your specs. You can even build your own set of expressive domain-specific matchers quickly and easily by composing Gomega's [existing building blocks](https://onsi.github.io/ginkgo/#building-custom-matchers).
+
+Happy Testing!
+
+## License
+
+Ginkgo is MIT-Licensed
+
+## Contributing
+
+See [CONTRIBUTING.md](CONTRIBUTING.md)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/RELEASING.md b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
new file mode 100644
index 00000000000..363815d7c7f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/RELEASING.md
@@ -0,0 +1,23 @@
+A Ginkgo release is a tagged git sha and a GitHub release. To cut a release:
+
+1. Ensure CHANGELOG.md is up to date.
+ - Use
+ ```bash
+ LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+ CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+ echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+ ```
+ to update the changelog
+ - Categorize the changes into
+ - Breaking Changes (requires a major version)
+ - New Features (minor version)
+ - Fixes (fix version)
+ - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
+1. Update `VERSION` in `types/version.go`
+1. Commit, push, and release:
+ ```
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ```
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
new file mode 100644
index 00000000000..a61021d0889
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
@@ -0,0 +1,69 @@
+package config
+
+// GinkgoConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type GinkgoConfigType = DeprecatedGinkgoConfigType
+type DeprecatedGinkgoConfigType struct {
+ RandomSeed int64
+ RandomizeAllSpecs bool
+ RegexScansFilePath bool
+ FocusStrings []string
+ SkipStrings []string
+ SkipMeasurements bool
+ FailOnPending bool
+ FailFast bool
+ FlakeAttempts int
+ EmitSpecProgress bool
+ DryRun bool
+ DebugParallel bool
+
+ ParallelNode int
+ ParallelTotal int
+ SyncHost string
+ StreamHost string
+}
+
+// DefaultReporterConfigType has been deprecated and its equivalent now lives in
+// the types package. You can no longer access Ginkgo configuration from the config
+// package. Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType
+type DeprecatedDefaultReporterConfigType struct {
+ NoColor bool
+ SlowSpecThreshold float64
+ NoisyPendings bool
+ NoisySkippings bool
+ Succinct bool
+ Verbose bool
+ FullTrace bool
+ ReportPassed bool
+ ReportFile string
+}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
new file mode 100644
index 00000000000..a3e8237e938
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/core_dsl.go
@@ -0,0 +1,847 @@
+/*
+Ginkgo is a testing framework for Go designed to help you write expressive tests.
+https://github.com/onsi/ginkgo
+MIT-Licensed
+
+The godoc documentation outlines Ginkgo's API. Since Ginkgo is a Domain-Specific Language it is important to
+build a mental model for Ginkgo - the narrative documentation at https://onsi.github.io/ginkgo/ is designed to help you do that.
+You should start there - even a brief skim will be helpful. At minimum you should skim through the https://onsi.github.io/ginkgo/#getting-started chapter.
+
+Ginkgo is best paired with the Gomega matcher library: https://github.com/onsi/gomega
+
+You can run Ginkgo specs with go test - however we recommend using the ginkgo cli. It enables functionality
+that go test does not (especially running suites in parallel). You can learn more at https://onsi.github.io/ginkgo/#ginkgo-cli-overview
+or by running 'ginkgo help'.
+*/
+package ginkgo
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/go-logr/logr"
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+const GINKGO_VERSION = types.VERSION
+
+var flagSet types.GinkgoFlagSet
+var deprecationTracker = types.NewDeprecationTracker()
+var suiteConfig = types.NewDefaultSuiteConfig()
+var reporterConfig = types.NewDefaultReporterConfig()
+var suiteDidRun = false
+var outputInterceptor internal.OutputInterceptor
+var client parallel_support.Client
+
+func init() {
+ var err error
+ flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig)
+ exitIfErr(err)
+ writer := internal.NewWriter(os.Stdout)
+ GinkgoWriter = writer
+ GinkgoLogr = internal.GinkgoLogrFunc(writer)
+}
+
+func exitIfErr(err error) {
+ if err != nil {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ os.Exit(1)
+ }
+}
+
+func exitIfErrors(errors []error) {
+ if len(errors) > 0 {
+ if outputInterceptor != nil {
+ outputInterceptor.Shutdown()
+ }
+ if client != nil {
+ client.Close()
+ }
+ for _, err := range errors {
+ fmt.Fprintln(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+}
+
+// The interface implemented by GinkgoWriter
+type GinkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+
+ TeeTo(writer io.Writer)
+ ClearTeeWriters()
+}
+
+/*
+SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo).
+
+You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods.
+
+Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation.
+*/
+type SpecContext = internal.SpecContext
+
+/*
+GinkgoWriter implements a GinkgoWriterInterface and io.Writer
+
+When running in verbose mode (ginkgo -v) any writes to GinkgoWriter will be immediately printed
+to stdout. Otherwise, GinkgoWriter will buffer any writes produced during the current test and flush them to screen
+only if the current test fails.
+
+GinkgoWriter also provides convenience Print, Printf and Println methods and allows you to tee to a custom writer via GinkgoWriter.TeeTo(writer).
+Writes to GinkgoWriter are immediately sent to any registered TeeTo() writers. You can unregister all TeeTo() Writers with GinkgoWriter.ClearTeeWriters()
+
+You can learn more at https://onsi.github.io/ginkgo/#logging-output
+*/
+var GinkgoWriter GinkgoWriterInterface
+
+/*
+GinkgoLogr is a logr.Logger that writes to GinkgoWriter
+*/
+var GinkgoLogr logr.Logger
+
+// The interface by which Ginkgo receives *testing.T
+type GinkgoTestingT interface {
+ Fail()
+}
+
+/*
+GinkgoConfiguration returns the configuration of the current suite.
+
+The first return value is the SuiteConfig which controls aspects of how the suite runs,
+the second return value is the ReporterConfig which controls aspects of how Ginkgo's default
+reporter emits output.
+
+Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need
+to pass in your mutated copies into RunSpecs().
+
+You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite
+*/
+func GinkgoConfiguration() (types.SuiteConfig, types.ReporterConfig) {
+ return suiteConfig, reporterConfig
+}
+
+/*
+GinkgoRandomSeed returns the seed used to randomize spec execution order. It is
+useful for seeding your own pseudorandom number generators to ensure
+consistent executions from run to run, where your tests contain variability (for
+example, when selecting random spec data).
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-randomization
+*/
+func GinkgoRandomSeed() int64 {
+ return suiteConfig.RandomSeed
+}
+
+/*
+GinkgoParallelProcess returns the parallel process number for the current ginkgo process
+The process number is 1-indexed. You can use GinkgoParallelProcess() to shard access to shared
+resources across your suites. You can learn more about patterns for sharding at https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs
+
+For more on how specs are parallelized in Ginkgo, see http://onsi.github.io/ginkgo/#spec-parallelization
+*/
+func GinkgoParallelProcess() int {
+ return suiteConfig.ParallelProcess
+}
+
+/*
+GinkgoHelper marks the function it's called in as a test helper. When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.
+
+This is an alternative, simpler, mechanism to passing in a skip offset when calling Fail or using Gomega.
+*/
+func GinkgoHelper() {
+ types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+ if (Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter())) {
+ //...
+ }
+*/
+func GinkgoLabelFilter() string {
+ suiteConfig, _ := GinkgoConfiguration()
+ return suiteConfig.LabelFilter
+}
+
+/*
+PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
+when running in parallel and output to stdout/stderr is being intercepted. You generally
+don't need to call this function - however there are cases when Ginkgo's output interception
+mechanisms can interfere with external processes launched by the test process.
+
+In particular, if an external process is launched that has cmd.Stdout/cmd.Stderr set to os.Stdout/os.Stderr
+then Ginkgo's output interceptor will hang. To circumvent this, set cmd.Stdout/cmd.Stderr to GinkgoWriter.
+If, for some reason, you aren't able to do that, you can PauseOutputInterception() before starting the process
+then ResumeOutputInterception() after starting it.
+
+Note that PauseOutputInterception() does not cause stdout writes to print to the console -
+this simply stops intercepting and storing stdout writes to an internal buffer.
+*/
+func PauseOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.PauseIntercepting()
+}
+
+// ResumeOutputInterception() - see docs for PauseOutputInterception()
+func ResumeOutputInterception() {
+ if outputInterceptor == nil {
+ return
+ }
+ outputInterceptor.ResumeIntercepting()
+}
+
+/*
+RunSpecs is the entry point for the Ginkgo spec runner.
+
+You must call this within a Golang testing TestX(t *testing.T) function.
+If you bootstrapped your suite with "ginkgo bootstrap" this is already
+done for you.
+
+Ginkgo is typically configured via command-line flags. This configuration
+can be overridden, however, and passed into RunSpecs as optional arguments:
+
+ func TestMySuite(t *testing.T) {
+ RegisterFailHandler(gomega.Fail)
+ // fetch the current config
+ suiteConfig, reporterConfig := GinkgoConfiguration()
+ // adjust it
+ suiteConfig.SkipStrings = []string{"NEVER-RUN"}
+ reporterConfig.FullTrace = true
+ // pass it in to RunSpecs
+ RunSpecs(t, "My Suite", suiteConfig, reporterConfig)
+ }
+
+Note that some configuration changes can lead to undefined behavior. For example,
+you should not change ParallelProcess or ParallelTotal as the Ginkgo CLI is responsible
+for setting these and orchestrating parallel specs across the parallel processes. See http://onsi.github.io/ginkgo/#spec-parallelization
+for more on how specs are parallelized in Ginkgo.
+
+You can also pass suite-level Label() decorators to RunSpecs. The passed-in labels will apply to all specs in the suite.
+*/
+func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
+ if suiteDidRun {
+ exitIfErr(types.GinkgoErrors.RerunningSuite())
+ }
+ suiteDidRun = true
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
+ }
+ defer global.PopClone()
+
+ suiteLabels := extractSuiteConfiguration(args)
+
+ var reporter reporters.Reporter
+ if suiteConfig.ParallelTotal == 1 {
+ reporter = reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ } else {
+ reporter = reporters.NoopReporter{}
+ switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
+ case "swap":
+ outputInterceptor = internal.NewOSGlobalReassigningOutputInterceptor()
+ case "none":
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ default:
+ outputInterceptor = internal.NewOutputInterceptor()
+ }
+ client = parallel_support.NewClient(suiteConfig.ParallelHost)
+ if !client.Connect() {
+ client = nil
+ exitIfErr(types.GinkgoErrors.UnreachableParallelHost(suiteConfig.ParallelHost))
+ }
+ defer client.Close()
+ }
+
+ writer := GinkgoWriter.(*internal.Writer)
+ if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
+ writer.SetMode(internal.WriterModeStreamAndBuffer)
+ } else {
+ writer.SetMode(internal.WriterModeBufferOnly)
+ }
+
+ if reporterConfig.WillGenerateReport() {
+ registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig)
+ }
+
+ err = global.Suite.BuildTree()
+ exitIfErr(err)
+ suitePath, err := getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+ outputInterceptor.Shutdown()
+
+ flagSet.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(formatter.ColorableStdErr, deprecationTracker.DeprecationsReport())
+ }
+
+ if !passed {
+ t.Fail()
+ }
+
+ if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Println("PASS | FOCUSED")
+ os.Exit(types.GINKGO_FOCUS_EXIT_CODE)
+ }
+ return passed
+}
+
+func extractSuiteConfiguration(args []interface{}) Labels {
+ suiteLabels := Labels{}
+ configErrors := []error{}
+ for _, arg := range args {
+ switch arg := arg.(type) {
+ case types.SuiteConfig:
+ suiteConfig = arg
+ case types.ReporterConfig:
+ reporterConfig = arg
+ case Labels:
+ suiteLabels = append(suiteLabels, arg...)
+ default:
+ configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg))
+ }
+ }
+ exitIfErrors(configErrors)
+
+ configErrors = types.VetConfig(flagSet, suiteConfig, reporterConfig)
+ if len(configErrors) > 0 {
+ fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{red}}Ginkgo detected configuration issues:{{/}}\n"))
+ for _, err := range configErrors {
+ fmt.Fprintf(formatter.ColorableStdErr, err.Error())
+ }
+ os.Exit(1)
+ }
+
+ return suiteLabels
+}
+
+func getwd() (string, error) {
+ if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+ // Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+ // is shared between two different directories with the same test code.
+ return os.Getwd()
+ }
+ return "", nil
+}
+
+/*
+PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
+See http://onsi.github.io/ginkgo/#previewing-specs for more information.
+*/
+func PreviewSpecs(description string, args ...any) Report {
+ err := global.PushClone()
+ if err != nil {
+ exitIfErr(err)
+ }
+ defer global.PopClone()
+
+ suiteLabels := extractSuiteConfiguration(args)
+ priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1
+ defer func() {
+ suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = priorDryRun, priorParallelTotal, priorParallelProcess
+ }()
+ reporter := reporters.NoopReporter{}
+ outputInterceptor = internal.NoopOutputInterceptor{}
+ client = nil
+ writer := GinkgoWriter.(*internal.Writer)
+
+ err = global.Suite.BuildTree()
+ exitIfErr(err)
+ suitePath, err := getwd()
+ exitIfErr(err)
+ suitePath, err = filepath.Abs(suitePath)
+ exitIfErr(err)
+
+ global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig)
+
+ return global.Suite.GetPreviewReport()
+}
+
+/*
+Skip instructs Ginkgo to skip the current spec
+
+You can call Skip in any Setup or Subject node closure.
+
+For more on how to filter specs in Ginkgo see https://onsi.github.io/ginkgo/#filtering-specs
+*/
+func Skip(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Skip(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
+
+Under the hood, Fail panics to end execution of the current spec. Ginkgo will catch this panic and proceed with
+the subsequent spec. If you call Fail, or make an assertion, within a goroutine launched by your spec you must
+add defer GinkgoRecover() to the goroutine to catch the panic emitted by Fail.
+
+You can call Fail in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
+*/
+func Fail(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.Fail(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+AbortSuite instructs Ginkgo to fail the current spec and skip all subsequent specs, thereby aborting the suite.
+
+You can call AbortSuite in any Setup or Subject node closure.
+
+You can learn more about how Ginkgo handles suite interruptions here: https://onsi.github.io/ginkgo/#interrupting-aborting-and-timing-out-suites
+*/
+func AbortSuite(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+
+ cl := types.NewCodeLocationWithStackTrace(skip + 1)
+ global.Failer.AbortSuite(message, cl)
+ panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
+}
+
+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Goemga is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggy back on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/
+type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }
+
+/*
+GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
+Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
+calls out to Gomega
+
+Here's why: Ginkgo's `Fail` method records the failure and then panics to prevent
+further assertions from running. This panic must be recovered. Normally, Ginkgo recovers the panic for you,
+however if a panic originates on a goroutine *launched* from one of your specs there's no
+way for Ginkgo to rescue the panic. To do this, you must remember to `defer GinkgoRecover()` at the top of such a goroutine.
+
+You can learn more about how Ginkgo manages failures here: https://onsi.github.io/ginkgo/#mental-model-how-ginkgo-handles-failure
+*/
+func GinkgoRecover() {
+ e := recover()
+ if e != nil {
+ if _, ok := e.(ignorablePanic); ok {
+ return
+ }
+ global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ }
+}
+
+// pushNode is used by the various test construction DSL methods to push nodes onto the suite
+// it handles returned errors, emits a detailed error message to help the user learn what they may have done wrong, then exits
+func pushNode(node internal.Node, errors []error) bool {
+ exitIfErrors(errors)
+ exitIfErr(global.Suite.PushNode(node))
+ return true
+}
+
+/*
+Describe nodes are Container nodes that allow you to organize your specs. A Describe node's closure can contain any number of
+Setup nodes (e.g. BeforeEach, AfterEach, JustBeforeEach), and Subject nodes (i.e. It).
+
+Context and When nodes are aliases for Describe - use whichever gives your suite a better narrative flow. It is idiomatic
+to Describe the behavior of an object or function and, within that Describe, outline a number of Contexts and Whens.
+
+You can learn more at https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes
+In addition, container nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func Describe(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+FDescribe focuses specs within the Describe block.
+*/
+func FDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+PDescribe marks specs within the Describe block as pending.
+*/
+func PDescribe(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, text, args...))
+}
+
+/*
+XDescribe marks specs within the Describe block as pending.
+
+XDescribe is an alias for PDescribe
+*/
+var XDescribe = PDescribe
+
+/* Context is an alias for Describe - it generates the exact same kind of Container node */
+var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe
+
+/* When is an alias for Describe - it generates the exact same kind of Container node */
+func When(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* FWhen focuses specs within the When block - it generates the exact same kind of Container node as FDescribe */
+func FWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+/* PWhen marks specs within the When block as pending - it generates the exact same kind of Container node as PDescribe */
+func PWhen(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...))
+}
+
+var XWhen = PWhen
+
+/*
+It nodes are Subject nodes that contain your spec code and assertions.
+
+Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure.
+
+You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func (ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or of an interrupt signal.
+
+You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it
+In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func It(text string, args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+FIt allows you to focus an individual It.
+*/
+func FIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Focus)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+PIt allows you to mark an individual It as pending.
+*/
+func PIt(text string, args ...interface{}) bool {
+ args = append(args, internal.Pending)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, text, args...))
+}
+
+/*
+XIt allows you to mark an individual It as pending.
+
+XIt is an alias for PIt
+*/
+var XIt = PIt
+
+/*
+Specify is an alias for It - it can allow for more natural wording in some contexts.
+*/
+var Specify, FSpecify, PSpecify, XSpecify = It, FIt, PIt, XIt
+
+/*
+By allows you to better document complex Specs.
+
+Generally you should try to keep your Its short and to the point. This is not always possible, however,
+especially in the context of integration tests that capture complex or lengthy workflows.
+
+By allows you to document such flows. By may be called within a Setup or Subject node (It, BeforeEach, etc...)
+and will simply log the passed in text to the GinkgoWriter. If By is handed a function it will immediately run the function.
+
+By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
+
+Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry
+You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by
+*/
+func By(text string, callback ...func()) {
+ exitIfErr(global.Suite.By(text, callback...))
+}
+
+/*
+BeforeSuite nodes are suite-level Setup nodes that run just once before any specs are run.
+When running in parallel, each parallel process will call BeforeSuite.
+
+You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func BeforeSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...))
+}
+
+/*
+AfterSuite nodes are suite-level Setup nodes run after all specs have finished - regardless of whether specs have passed or failed.
+AfterSuite node closures always run, even if Ginkgo receives an interrupt signal (^C), in order to ensure cleanup occurs.
+
+When running in parallel, each parallel process will call AfterSuite.
+
+You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level.
+
+AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterSuite node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite
+*/
+func AfterSuite(body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...))
+}
+
+/*
+SynchronizedBeforeSuite nodes allow you to perform some of the suite setup just once - on parallel process #1 - and then pass information
+from that setup to the rest of the suite setup on all processes. This is useful for performing expensive or singleton setup once, then passing
+information from that setup to all parallel processes.
+
+SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them.
+The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. The functions have the following signatures:
+
+The first function (which only runs on process #1) can have any of the following the signatures:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func() []byte
+ func(ctx context.Context) []byte
+ func(ctx SpecContext) []byte
+
+The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signature:
+
+ func()
+ func(ctx context.Context)
+ func(ctx SpecContext)
+ func(data []byte)
+ func(ctx context.Context, data []byte)
+ func(ctx SpecContext, data []byte)
+
+If either function receives a context.Context/SpecContext it is considered interruptible.
+
+You cannot nest any other Ginkgo nodes within a SynchronizedBeforeSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{process1Body, allProcessBody}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...))
+}
+
+/*
+SynchronizedAfterSuite nodes complement the SynchronizedBeforeSuite nodes in solving the problem of splitting clean up into a piece that runs on all processes
+and a piece that must only run once - on process #1.
+
+SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1
+and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until
+all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext))
+
+Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results.
+
+You cannot nest any other Ginkgo nodes within a SynchronizedAfterSuite node's closure.
+You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite
+*/
+func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool {
+ combinedArgs := []interface{}{allProcessBody, process1Body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...))
+}
+
+/*
+BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes
+are defined in nested Container nodes the outermost BeforeEach node closures are run first.
+
+BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach
+*/
+func BeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeEach, "", args...))
+}
+
+/*
+JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure.
+This can allow you to separate configuration from creation of resources for a spec.
+
+JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach
+*/
+func JustBeforeEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustBeforeEach, "", args...))
+}
+
+/*
+AfterEach nodes are Setup nodes whose closures run after It node closures. When multiple AfterEach nodes
+are defined in nested Container nodes the innermost AfterEach node closures are run first.
+
+Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results.
+
+AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterEach node's closure.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup
+*/
+func AfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterEach, "", args...))
+}
+
+/*
+JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec.
+
+JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure.
+You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach
+*/
+func JustAfterEach(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeJustAfterEach, "", args...))
+}
+
+/*
+BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run.
+
+Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within a BeforeAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func BeforeAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeAll, "", args...))
+}
+
+/*
+AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run.
+
+Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container.
+
+Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior.
+
+AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body.
+
+You cannot nest any other Ginkgo nodes within an AfterAll node's closure.
+You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers
+And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall
+*/
+func AfterAll(args ...interface{}) bool {
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterAll, "", args...))
+}
+
+/*
+DeferCleanup can be called within any Setup or Subject node to register a cleanup callback that Ginkgo will call at the appropriate time to cleanup after the spec.
+
+DeferCleanup can be passed:
+1. A function that takes no arguments and returns no values.
+2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error `DeferCleanup` will fail the spec).
+3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt.
+4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function.
+
+For example:
+
+ BeforeEach(func() {
+ DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO"))
+ os.Setenv("FOO", "BAR")
+ })
+
+will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec.
+
+Similarly:
+
+ BeforeEach(func() {
+ DeferCleanup(func(ctx SpecContext, path string) {
+ req, err := http.NewRequestWithContext(ctx, "POST", path, nil)
+ Expect(err).NotTo(HaveOccurred())
+ _, err = http.DefaultClient.Do(req)
+ Expect(err).NotTo(HaveOccurred())
+ }, "example.com/cleanup", NodeTimeout(time.Second*3))
+ })
+
+will register a cleanup handler that will have three seconds to successfully complete a request to the specified path. Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g. func(ctx SpecContext, otherCtx context.Context)).
+
+When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node)
+When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node)
+When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. it will behave like an AfterSuite node)
+
+Note that DeferCleanup does not represent a node but rather dynamically generates the appropriate type of cleanup node based on the context in which it is called. As such you must call DeferCleanup within a Setup or Subject node, and not within a Container node.
+You can learn more about DeferCleanup here: https://onsi.github.io/ginkgo/#cleaning-up-our-cleanup-code-defercleanup
+*/
+func DeferCleanup(args ...interface{}) {
+ fail := func(message string, cl types.CodeLocation) {
+ global.Failer.Fail(message, cl)
+ }
+ pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...))
+}
+
+/*
+AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report.
+
+**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo**
+
+Progress Reports are generated:
+- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`)
+- on nodes decorated with PollProgressAfter
+- on suites run with --poll-progress-after
+- whenever a test times out
+
+Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information.
+
+# AttachProgressReporter returns a function that can be called to detach the progress reporter
+
+You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports
+*/
+func AttachProgressReporter(reporter func() string) func() {
+ return global.Suite.AttachProgressReporter(reporter)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
new file mode 100644
index 00000000000..c65af4ce1cf
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go
@@ -0,0 +1,143 @@
+package ginkgo
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+/*
+Offset(uint) is a decorator that allows you to change the stack-frame offset used when computing the line number of the node in question.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-offset-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type Offset = internal.Offset
+
+/*
+FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type FlakeAttempts = internal.FlakeAttempts
+
+/*
+MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail.
+
+You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+type MustPassRepeatedly = internal.MustPassRepeatedly
+
+/*
+Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Focus = internal.Focus
+
+/*
+Pending is a decorator that allows you to mark a spec or container as pending. Identical to PIt and PDescribe.
+
+You can learn more here: https://onsi.github.io/ginkgo/#filtering-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Pending = internal.Pending
+
+/*
+Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs.
+Specs in ordered containers cannot be marked as serial - mark the ordered container instead.
+
+You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Serial = internal.Serial
+
+/*
+Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
+They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const Ordered = internal.Ordered
+
+/*
+ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run, as the precondition for the Ordered container will be considered failed.
+
+ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const ContinueOnFailure = internal.ContinueOnFailure
+
+/*
+OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once
+per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
+The behavior for non-Ordered containers/specs is unchanged.
+
+You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const OncePerOrdered = internal.OncePerOrdered
+
+/*
+Label decorates specs with Labels. Multiple labels can be passed to Label and these can be arbitrary strings but must not include the following characters: "&|!,()/".
+Labels can be applied to container and subject nodes, but not setup nodes. You can provide multiple Labels to a given node and a spec's labels is the union of all labels in its node hierarchy.
+
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+func Label(labels ...string) Labels {
+ return Labels(labels)
+}
+
+/*
+Labels are the type for spec Label decorators. Use Label(...) to construct Labels.
+You can learn more here: https://onsi.github.io/ginkgo/#spec-labels
+*/
+type Labels = internal.Labels
+
+/*
+PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node.
+
+Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec.
+*/
+type PollProgressAfter = internal.PollProgressAfter
+
+/*
+PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node.
+
+Once a node has been running for longer than PollProgressAfter Ginkgo will emit node progress periodically at an interval of PollProgressInterval.
+*/
+type PollProgressInterval = internal.PollProgressInterval
+
+/*
+NodeTimeout allows you to specify a timeout for an individual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context).
+
+If the node does not exit within the specified NodeTimeout its context will be cancelled. The node will then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type NodeTimeout = internal.NodeTimeout
+
+/*
+SpecTimeout allows you to specify a timeout for an individual spec. SpecTimeout can only decorate interruptible It nodes.
+
+All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout.
+
+If the spec does not complete within the specified SpecTimeout the currently running node will have its context cancelled. The node will then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec.
+*/
+type SpecTimeout = internal.SpecTimeout
+
+/*
+GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period cli flag and a GracePeriod decorator are specified the value in the decorator will take precedence.
+
+Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user.
+*/
+type GracePeriod = internal.GracePeriod
+
+/*
+SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly
+if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports.
+*/
+const SuppressProgressReporting = internal.SuppressProgressReporting
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
new file mode 100644
index 00000000000..f912bbec65a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go
@@ -0,0 +1,135 @@
+package ginkgo
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Deprecated: Done Channel for asynchronous testing
+
+The Done channel pattern is no longer supported in Ginkgo 2.0.
+See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing
+
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing
+*/
+type Done = internal.Done
+
+/*
+Deprecated: Custom Ginkgo test reporters are deprecated in Ginkgo 2.0.
+
+Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+type Reporter = reporters.DeprecatedReporter
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithDefaultAndCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: Custom Reporters have been removed in Ginkgo 2.0. RunSpecsWithCustomReporters will simply call RunSpecs()
+
+Use Ginkgo's reporting nodes and 2.0 reporting infrastructure instead. You can learn more here: https://onsi.github.io/ginkgo/#reporting-infrastructure
+For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
+*/
+func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, _ []Reporter) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.CustomReporter())
+ return RunSpecs(t, description)
+}
+
+/*
+Deprecated: GinkgoTestDescription has been replaced with SpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type DeprecatedGinkgoTestDescription struct {
+ FullTestText string
+ ComponentTexts []string
+ TestText string
+
+ FileName string
+ LineNumber int
+
+ Failed bool
+ Duration time.Duration
+}
+type GinkgoTestDescription = DeprecatedGinkgoTestDescription
+
+/*
+Deprecated: CurrentGinkgoTestDescription has been replaced with CurrentSpecReport.
+
+Use CurrentSpecReport() instead.
+You can learn more here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+The SpecReport type is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+func CurrentGinkgoTestDescription() DeprecatedGinkgoTestDescription {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.CurrentGinkgoTestDescription(),
+ types.NewCodeLocation(1),
+ )
+ report := global.Suite.CurrentSpecReport()
+ if report.State == types.SpecStateInvalid {
+ return GinkgoTestDescription{}
+ }
+ componentTexts := []string{}
+ componentTexts = append(componentTexts, report.ContainerHierarchyTexts...)
+ componentTexts = append(componentTexts, report.LeafNodeText)
+
+ return DeprecatedGinkgoTestDescription{
+ ComponentTexts: componentTexts,
+ FullTestText: report.FullText(),
+ TestText: report.LeafNodeText,
+ FileName: report.LeafNodeLocation.FileName,
+ LineNumber: report.LeafNodeLocation.LineNumber,
+ Failed: report.State.Is(types.SpecStateFailureStates),
+ Duration: report.RunTime,
+ }
+}
+
+/*
+Deprecated: GinkgoParallelNode() has been renamed to GinkgoParallelProcess()
+*/
+func GinkgoParallelNode() int {
+ deprecationTracker.TrackDeprecation(
+ types.Deprecations.ParallelNode(),
+ types.NewCodeLocation(1),
+ )
+ return GinkgoParallelProcess()
+}
+
+/*
+Deprecated: Benchmarker has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+type Benchmarker interface {
+ Time(name string, body func(), info ...interface{}) (elapsedTime time.Duration)
+ RecordValue(name string, value float64, info ...interface{})
+ RecordValueWithPrecision(name string, value float64, units string, precision int, info ...interface{})
+}
+
+/*
+Deprecated: Measure() has been removed from Ginkgo 2.0
+
+Use Gomega's gmeasure package instead.
+You can learn more here: https://onsi.github.io/ginkgo/#benchmarking-code
+*/
+func Measure(_ ...interface{}) bool {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Measure(), types.NewCodeLocation(1))
+ return true
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
new file mode 100644
index 00000000000..778bfd7c7ca
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
@@ -0,0 +1,41 @@
+// +build !windows
+
+/*
+These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
+
+ * go-colorable:
+ * go-isatty:
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
+
+import (
+ "io"
+ "os"
+)
+
+func newColorable(file *os.File) io.Writer {
+ return file
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
similarity index 90%
rename from integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
rename to integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
index 1088009230b..dd1d143cc20 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
@@ -1,4 +1,33 @@
-package colorable
+/*
+These packages are used for colorize on Windows and contributed by mattn.jp@gmail.com
+
+ * go-colorable:
+ * go-isatty:
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
import (
"bytes"
@@ -10,10 +39,24 @@ import (
"strings"
"syscall"
"unsafe"
+)
- "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty"
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32.dll")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+ procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
+ procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+ procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
+func isTerminal(fd uintptr) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
const (
foregroundBlue = 0x1
foregroundGreen = 0x2
@@ -52,45 +95,28 @@ type consoleScreenBufferInfo struct {
maximumWindowSize coord
}
-var (
- kernel32 = syscall.NewLazyDLL("kernel32.dll")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
- procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
- procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
- procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
- procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
-)
-
-type Writer struct {
+type writer struct {
out io.Writer
handle syscall.Handle
lastbuf bytes.Buffer
oldattr word
}
-func NewColorable(file *os.File) io.Writer {
+func newColorable(file *os.File) io.Writer {
if file == nil {
panic("nil passed instead of *os.File to NewColorable()")
}
- if isatty.IsTerminal(file.Fd()) {
+ if isTerminal(file.Fd()) {
var csbi consoleScreenBufferInfo
handle := syscall.Handle(file.Fd())
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
- return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
+ return &writer{out: file, handle: handle, oldattr: csbi.attributes}
} else {
return file
}
}
-func NewColorableStdout() io.Writer {
- return NewColorable(os.Stdout)
-}
-
-func NewColorableStderr() io.Writer {
- return NewColorable(os.Stderr)
-}
-
var color256 = map[int]int{
0: 0x000000,
1: 0x800000,
@@ -350,7 +376,7 @@ var color256 = map[int]int{
255: 0xeeeeee,
}
-func (w *Writer) Write(data []byte) (n int, err error) {
+func (w *writer) Write(data []byte) (n int, err error) {
var csbi consoleScreenBufferInfo
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
new file mode 100644
index 00000000000..743555ddea5
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
@@ -0,0 +1,230 @@
+package formatter
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// ColorableStdOut and ColorableStdErr enable color output support on Windows
+var ColorableStdOut = newColorable(os.Stdout)
+var ColorableStdErr = newColorable(os.Stderr)
+
+const COLS = 80
+
+type ColorMode uint8
+
+const (
+ ColorModeNone ColorMode = iota
+ ColorModeTerminal
+ ColorModePassthrough
+)
+
+var SingletonFormatter = New(ColorModeTerminal)
+
+func F(format string, args ...interface{}) string {
+ return SingletonFormatter.F(format, args...)
+}
+
+func Fi(indentation uint, format string, args ...interface{}) string {
+ return SingletonFormatter.Fi(indentation, format, args...)
+}
+
+func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+ return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
+}
+
+type Formatter struct {
+ ColorMode ColorMode
+ colors map[string]string
+ styleRe *regexp.Regexp
+ preserveColorStylingTags bool
+}
+
+func NewWithNoColorBool(noColor bool) Formatter {
+ if noColor {
+ return New(ColorModeNone)
+ }
+ return New(ColorModeTerminal)
+}
+
+func New(colorMode ColorMode) Formatter {
+ colorAliases := map[string]int{
+ "black": 0,
+ "red": 1,
+ "green": 2,
+ "yellow": 3,
+ "blue": 4,
+ "magenta": 5,
+ "cyan": 6,
+ "white": 7,
+ }
+ for colorAlias, n := range colorAliases {
+ colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8
+ }
+
+ getColor := func(color, defaultEscapeCode string) string {
+ color = strings.ToUpper(strings.ReplaceAll(color, "-", "_"))
+ envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color)
+ envVarColor := os.Getenv(envVar)
+ if envVarColor == "" {
+ return defaultEscapeCode
+ }
+ if colorCode, ok := colorAliases[envVarColor]; ok {
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+ colorCode, err := strconv.Atoi(envVarColor)
+ if err != nil || colorCode < 0 || colorCode > 255 {
+ return defaultEscapeCode
+ }
+ return fmt.Sprintf("\x1b[38;5;%dm", colorCode)
+ }
+
+ f := Formatter{
+ ColorMode: colorMode,
+ colors: map[string]string{
+ "/": "\x1b[0m",
+ "bold": "\x1b[1m",
+ "underline": "\x1b[4m",
+
+ "red": getColor("red", "\x1b[38;5;9m"),
+ "orange": getColor("orange", "\x1b[38;5;214m"),
+ "coral": getColor("coral", "\x1b[38;5;204m"),
+ "magenta": getColor("magenta", "\x1b[38;5;13m"),
+ "green": getColor("green", "\x1b[38;5;10m"),
+ "dark-green": getColor("dark-green", "\x1b[38;5;28m"),
+ "yellow": getColor("yellow", "\x1b[38;5;11m"),
+ "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"),
+ "cyan": getColor("cyan", "\x1b[38;5;14m"),
+ "gray": getColor("gray", "\x1b[38;5;243m"),
+ "light-gray": getColor("light-gray", "\x1b[38;5;246m"),
+ "blue": getColor("blue", "\x1b[38;5;12m"),
+ },
+ }
+ colors := []string{}
+ for color := range f.colors {
+ colors = append(colors, color)
+ }
+ f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}")
+ return f
+}
+
+func (f Formatter) F(format string, args ...interface{}) string {
+ return f.Fi(0, format, args...)
+}
+
+func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
+ return f.Fiw(indentation, 0, format, args...)
+}
+
+func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
+ out := f.style(format)
+ if len(args) > 0 {
+ out = fmt.Sprintf(out, args...)
+ }
+
+ if indentation == 0 && maxWidth == 0 {
+ return out
+ }
+
+ lines := strings.Split(out, "\n")
+
+ if maxWidth != 0 {
+ outLines := []string{}
+
+ maxWidth = maxWidth - indentation*2
+ for _, line := range lines {
+ if f.length(line) <= maxWidth {
+ outLines = append(outLines, line)
+ continue
+ }
+ words := strings.Split(line, " ")
+ outWords := []string{words[0]}
+ length := uint(f.length(words[0]))
+ for _, word := range words[1:] {
+ wordLength := f.length(word)
+ if length+wordLength+1 <= maxWidth {
+ length += wordLength + 1
+ outWords = append(outWords, word)
+ continue
+ }
+ outLines = append(outLines, strings.Join(outWords, " "))
+ outWords = []string{word}
+ length = wordLength
+ }
+ if len(outWords) > 0 {
+ outLines = append(outLines, strings.Join(outWords, " "))
+ }
+ }
+
+ lines = outLines
+ }
+
+ if indentation == 0 {
+ return strings.Join(lines, "\n")
+ }
+
+ padding := strings.Repeat(" ", int(indentation))
+ for i := range lines {
+ if lines[i] != "" {
+ lines[i] = padding + lines[i]
+ }
+ }
+
+ return strings.Join(lines, "\n")
+}
+
+func (f Formatter) length(styled string) uint {
+ n := uint(0)
+ inStyle := false
+ for _, b := range styled {
+ if inStyle {
+ if b == 'm' {
+ inStyle = false
+ }
+ continue
+ }
+ if b == '\x1b' {
+ inStyle = true
+ continue
+ }
+ n += 1
+ }
+ return n
+}
+
+func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string {
+ if len(elements) == 0 {
+ return ""
+ }
+ n := len(cycle)
+ out := ""
+ for i, text := range elements {
+ out += cycle[i%n] + text
+ if i < len(elements)-1 {
+ out += joiner
+ }
+ }
+ out += "{{/}}"
+ return f.style(out)
+}
+
+func (f Formatter) style(s string) string {
+ switch f.ColorMode {
+ case ColorModeNone:
+ return f.styleRe.ReplaceAllString(s, "")
+ case ColorModePassthrough:
+ return s
+ case ColorModeTerminal:
+ return f.styleRe.ReplaceAllStringFunc(s, func(match string) string {
+ if out, ok := f.colors[strings.Trim(match, "{}")]; ok {
+ return out
+ }
+ return match
+ })
+ }
+
+ return ""
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
new file mode 100644
index 00000000000..5db5d1a7bfd
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go
@@ -0,0 +1,63 @@
+package build
+
+import (
+ "fmt"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBuildCommand() command.Command {
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "build",
+ Flags: flags,
+ Usage: "ginkgo build ",
+ ShortDoc: "Build the passed in (or the package in the current directory if left blank).",
+ DocLink: "precompiling-suites",
+ Command: func(args []string, _ []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ buildSpecs(args, cliConfig, goFlagsConfig)
+ },
+ }
+}
+
+func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) {
+ suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, goFlagsConfig)
+
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break
+ }
+ suites[suiteIdx] = suite
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ } else {
+ fmt.Printf("Compiled %s.test\n", suite.PackageName)
+ }
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 {
+ command.AbortWith("Failed to compile all tests")
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
new file mode 100644
index 00000000000..2efd2860886
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
@@ -0,0 +1,61 @@
+package command
+
+import "fmt"
+
+type AbortDetails struct {
+ ExitCode int
+ Error error
+ EmitUsage bool
+}
+
+func Abort(details AbortDetails) {
+ panic(details)
+}
+
+func AbortGracefullyWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 0,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWith(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: false,
+ })
+}
+
+func AbortWithUsage(format string, args ...interface{}) {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf(format, args...),
+ EmitUsage: true,
+ })
+}
+
+func AbortIfError(preamble string, err error) {
+ if err != nil {
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, err.Error()),
+ EmitUsage: false,
+ })
+ }
+}
+
+func AbortIfErrors(preamble string, errors []error) {
+ if len(errors) > 0 {
+ out := ""
+ for _, err := range errors {
+ out += err.Error()
+ }
+ Abort(AbortDetails{
+ ExitCode: 1,
+ Error: fmt.Errorf("%s\n%s", preamble, out),
+ EmitUsage: false,
+ })
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
new file mode 100644
index 00000000000..12e0e565914
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
@@ -0,0 +1,50 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Command struct {
+ Name string
+ Flags types.GinkgoFlagSet
+ Usage string
+ ShortDoc string
+ Documentation string
+ DocLink string
+ Command func(args []string, additionalArgs []string)
+}
+
+func (c Command) Run(args []string, additionalArgs []string) {
+ args, err := c.Flags.Parse(args)
+ if err != nil {
+ AbortWithUsage(err.Error())
+ }
+
+ c.Command(args, additionalArgs)
+}
+
+func (c Command) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
+ if c.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
+ fmt.Fprintln(writer, "")
+ }
+ if c.Documentation != "" {
+ fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
+ fmt.Fprintln(writer, "")
+ }
+ if c.DocLink != "" {
+ fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
+ fmt.Fprintln(writer, "")
+ }
+ flagUsage := c.Flags.Usage()
+ if flagUsage != "" {
+ fmt.Fprintf(writer, formatter.F(flagUsage))
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
new file mode 100644
index 00000000000..88dd8d6b07e
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
@@ -0,0 +1,182 @@
+package command
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Program struct {
+ Name string
+ Heading string
+ Commands []Command
+ DefaultCommand Command
+ DeprecatedCommands []DeprecatedCommand
+
+ //For testing - leave as nil in production
+ OutWriter io.Writer
+ ErrWriter io.Writer
+ Exiter func(code int)
+}
+
+type DeprecatedCommand struct {
+ Name string
+ Deprecation types.Deprecation
+}
+
+func (p Program) RunAndExit(osArgs []string) {
+ var command Command
+ deprecationTracker := types.NewDeprecationTracker()
+ if p.Exiter == nil {
+ p.Exiter = os.Exit
+ }
+ if p.OutWriter == nil {
+ p.OutWriter = formatter.ColorableStdOut
+ }
+ if p.ErrWriter == nil {
+ p.ErrWriter = formatter.ColorableStdErr
+ }
+
+ defer func() {
+ exitCode := 0
+
+ if r := recover(); r != nil {
+ details, ok := r.(AbortDetails)
+ if !ok {
+ panic(r)
+ }
+
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
+ fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
+ }
+ if details.EmitUsage {
+ if details.Error != nil {
+ fmt.Fprintln(p.ErrWriter, "")
+ }
+ command.EmitUsage(p.ErrWriter)
+ }
+ exitCode = details.ExitCode
+ }
+
+ command.Flags.ValidateDeprecations(deprecationTracker)
+ if deprecationTracker.DidTrackDeprecations() {
+ fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
+ }
+ p.Exiter(exitCode)
+ return
+ }()
+
+ args, additionalArgs := []string{}, []string{}
+
+ foundDelimiter := false
+ for _, arg := range osArgs[1:] {
+ if !foundDelimiter {
+ if arg == "--" {
+ foundDelimiter = true
+ continue
+ }
+ }
+
+ if foundDelimiter {
+ additionalArgs = append(additionalArgs, arg)
+ } else {
+ args = append(args, arg)
+ }
+ }
+
+ command = p.DefaultCommand
+ if len(args) > 0 {
+ p.handleHelpRequestsAndExit(p.OutWriter, args)
+ if command.Name == args[0] {
+ args = args[1:]
+ } else {
+ for _, deprecatedCommand := range p.DeprecatedCommands {
+ if deprecatedCommand.Name == args[0] {
+ deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
+ return
+ }
+ }
+ for _, tryCommand := range p.Commands {
+ if tryCommand.Name == args[0] {
+ command, args = tryCommand, args[1:]
+ break
+ }
+ }
+ }
+ }
+
+ command.Run(args, additionalArgs)
+}
+
+func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
+ if len(args) == 0 {
+ return
+ }
+
+ matchesHelpFlag := func(args ...string) bool {
+ for _, arg := range args {
+ if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
+ return true
+ }
+ }
+ return false
+ }
+ if len(args) == 1 {
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ p.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ } else {
+ var name string
+ if args[0] == "help" || matchesHelpFlag(args[0]) {
+ name = args[1]
+ } else if matchesHelpFlag(args[1:]...) {
+ name = args[0]
+ } else {
+ return
+ }
+
+ if p.DefaultCommand.Name == name || p.Name == name {
+ p.DefaultCommand.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ for _, command := range p.Commands {
+ if command.Name == name {
+ command.EmitUsage(writer)
+ Abort(AbortDetails{})
+ }
+ }
+
+ fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
+ fmt.Fprintln(writer, "")
+ p.EmitUsage(writer)
+ Abort(AbortDetails{ExitCode: 1})
+ }
+ return
+}
+
+func (p Program) EmitUsage(writer io.Writer) {
+ fmt.Fprintln(writer, formatter.F(p.Heading))
+ fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
+ fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
+ fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
+ fmt.Fprintln(writer, "")
+ fmt.Fprintln(writer, formatter.F("The following commands are available:"))
+
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
+ if p.DefaultCommand.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
+ }
+
+ for _, command := range p.Commands {
+ fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
+ if command.ShortDoc != "" {
+ fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
+ }
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
new file mode 100644
index 00000000000..a367a1fc977
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
@@ -0,0 +1,48 @@
+package generators
+
+var bootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+`
+
+var agoutiBootstrapText = `package {{.Package}}
+
+import (
+ "testing"
+
+ {{.GinkgoImport}}
+ {{.GomegaImport}}
+ "github.com/sclevine/agouti"
+)
+
+func Test{{.FormattedName}}(t *testing.T) {
+ {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
+ {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
+}
+
+var agoutiDriver *agouti.WebDriver
+
+var _ = {{.GinkgoPackage}}BeforeSuite(func() {
+ // Choose a WebDriver:
+
+ agoutiDriver = agouti.PhantomJS()
+ // agoutiDriver = agouti.Selenium()
+ // agoutiDriver = agouti.ChromeDriver()
+
+ {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
+})
+
+var _ = {{.GinkgoPackage}}AfterSuite(func() {
+ {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
+})
+`
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
new file mode 100644
index 00000000000..b2dc59be66f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
@@ -0,0 +1,133 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig/v3"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildBootstrapCommand() command.Command {
+ conf := GeneratorsConfig{}
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "agouti", KeyPath: "Agouti",
+ Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
+ {Name: "nodot", KeyPath: "NoDot",
+ Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
+ {Name: "internal", KeyPath: "Internal",
+ Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+ {Name: "template", KeyPath: "CustomTemplate",
+ UsageArgument: "template-file",
+ Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"},
+ {Name: "template-data", KeyPath: "CustomTemplateData",
+ UsageArgument: "template-data-file",
+ Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"},
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "bootstrap",
+ Usage: "ginkgo bootstrap",
+ ShortDoc: "Bootstrap a test suite for the current package",
+ Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.
+
+{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
+ DocLink: "generators",
+ Flags: flags,
+ Command: func(_ []string, _ []string) {
+ generateBootstrap(conf)
+ },
+ }
+}
+
+type bootstrapData struct {
+ Package string
+ FormattedName string
+
+ GinkgoImport string
+ GomegaImport string
+ GinkgoPackage string
+ GomegaPackage string
+ CustomData map[string]any
+}
+
+func generateBootstrap(conf GeneratorsConfig) {
+ packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
+
+ data := bootstrapData{
+ Package: determinePackageName(packageName, conf.Internal),
+ FormattedName: formattedName,
+
+ GinkgoImport: `. "github.com/onsi/ginkgo/v2"`,
+ GomegaImport: `. "github.com/onsi/gomega"`,
+ GinkgoPackage: "",
+ GomegaPackage: "",
+ }
+
+ if conf.NoDot {
+ data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+ data.GomegaImport = `"github.com/onsi/gomega"`
+ data.GinkgoPackage = `ginkgo.`
+ data.GomegaPackage = `gomega.`
+ }
+
+ targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
+ if internal.FileExists(targetFile) {
+ command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+ } else {
+ fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
+ }
+
+ f, err := os.Create(targetFile)
+ command.AbortIfError("Failed to create file:", err)
+ defer f.Close()
+
+ var templateText string
+ if conf.CustomTemplate != "" {
+ tpl, err := os.ReadFile(conf.CustomTemplate)
+ command.AbortIfError("Failed to read custom bootstrap file:", err)
+ templateText = string(tpl)
+ if conf.CustomTemplateData != "" {
+ var tplCustomDataMap map[string]any
+ tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+ command.AbortIfError("Failed to read custom boostrap data file:", err)
+ if !json.Valid([]byte(tplCustomData)) {
+ command.AbortWith("Invalid JSON object in custom data file.")
+ }
+ //create map from the custom template data
+ json.Unmarshal(tplCustomData, &tplCustomDataMap)
+ data.CustomData = tplCustomDataMap
+ }
+ } else if conf.Agouti {
+ templateText = agoutiBootstrapText
+ } else {
+ templateText = bootstrapText
+ }
+
+ //Setting the option to explicitly fail if template is rendered trying to access missing key
+ bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+ command.AbortIfError("Failed to parse bootstrap template:", err)
+
+ buf := &bytes.Buffer{}
+ //Being explicit about failing sooner during template rendering
+ //when accessing custom data rather than during the go fmt command
+ err = bootstrapTemplate.Execute(buf, data)
+ command.AbortIfError("Failed to render bootstrap template:", err)
+
+ buf.WriteTo(f)
+
+ internal.GoFmt(targetFile)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
new file mode 100644
index 00000000000..cf3b7cb6d6d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go
@@ -0,0 +1,265 @@
+package generators
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "text/template"
+
+ sprig "github.com/go-task/slim-sprig/v3"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// BuildGenerateCommand returns the `ginkgo generate` CLI command.  It wires
+// the generator flags (agouti, nodot, internal, template, template-data,
+// tags) into a GeneratorsConfig and delegates to generateTestFiles when the
+// command is invoked.
+// NOTE(review): the Usage/Documentation strings appear to have lost their
+// <filename> placeholders (e.g. "ginkgo generate ") — verify against the
+// upstream ginkgo source; left byte-identical here since this is vendored code.
+func BuildGenerateCommand() command.Command {
+	conf := GeneratorsConfig{}
+	flags, err := types.NewGinkgoFlagSet(
+		types.GinkgoFlags{
+			{Name: "agouti", KeyPath: "Agouti",
+				Usage: "If set, generate will create a test file for writing Agouti tests"},
+			{Name: "nodot", KeyPath: "NoDot",
+				Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
+			{Name: "internal", KeyPath: "Internal",
+				Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
+			{Name: "template", KeyPath: "CustomTemplate",
+				UsageArgument: "template-file",
+				Usage:         "If specified, generate will use the contents of the file passed as the test file template"},
+			{Name: "template-data", KeyPath: "CustomTemplateData",
+				UsageArgument: "template-data-file",
+				Usage:         "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"},
+			{Name: "tags", KeyPath: "Tags",
+				UsageArgument: "build-tags",
+				Usage:         "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"},
+		},
+		&conf,
+		types.GinkgoFlagSections{},
+	)
+
+	if err != nil {
+		panic(err)
+	}
+
+	return command.Command{
+		Name:     "generate",
+		Usage:    "ginkgo generate ",
+		ShortDoc: "Generate a test file named _test.go",
+		Documentation: `If the optional  argument is omitted, a file named after the package in the current directory will be created.
+
+You can pass multiple  to generate multiple files simultaneously.  The resulting files are named _test.go.
+
+You can also pass a  of the form "file.go" and generate will emit "file_test.go".`,
+		DocLink: "generators",
+		Command: func(args []string, _ []string) {
+			generateTestFiles(conf, args)
+		},
+	}
+}
+
+// specData holds the values interpolated into the generated test file
+// template: build tags, package/import names, ginkgo/gomega import forms,
+// and any user-supplied custom JSON data.
+type specData struct {
+	BuildTags         string
+	Package           string
+	Subject           string
+	PackageImportPath string
+	ImportPackage     bool
+
+	GinkgoImport  string
+	GomegaImport  string
+	GinkgoPackage string
+	GomegaPackage string
+	CustomData    map[string]any
+}
+
+// generateTestFiles generates one spec file per subject argument; with no
+// arguments it generates a single file named after the current package.
+func generateTestFiles(conf GeneratorsConfig, args []string) {
+	subjects := args
+	if len(subjects) == 0 {
+		subjects = []string{""}
+	}
+	for _, subject := range subjects {
+		generateTestFileForSubject(subject, conf)
+	}
+}
+
+// generateTestFileForSubject renders a single <subject>_test.go file in the
+// current directory using the default, Agouti, or user-supplied template,
+// then runs go fmt on the result.  It aborts (via the command package) if
+// the target file already exists or any I/O/template step fails.
+func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
+	packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
+	if subject != "" {
+		specFilePrefix = formatSubject(subject)
+		formattedName = prettifyName(specFilePrefix)
+	}
+
+	if conf.Internal {
+		specFilePrefix = specFilePrefix + "_internal"
+	}
+
+	data := specData{
+		BuildTags:         getBuildTags(conf.Tags),
+		Package:           determinePackageName(packageName, conf.Internal),
+		Subject:           formattedName,
+		PackageImportPath: getPackageImportPath(),
+		ImportPackage:     !conf.Internal,
+
+		GinkgoImport:  `. "github.com/onsi/ginkgo/v2"`,
+		GomegaImport:  `. "github.com/onsi/gomega"`,
+		GinkgoPackage: "",
+		GomegaPackage: "",
+	}
+
+	// --nodot switches from dot-imports to qualified ginkgo./gomega. usage.
+	if conf.NoDot {
+		data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+		data.GomegaImport = `"github.com/onsi/gomega"`
+		data.GinkgoPackage = `ginkgo.`
+		data.GomegaPackage = `gomega.`
+	}
+
+	targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
+	if internal.FileExists(targetFile) {
+		command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+	} else {
+		fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile)
+	}
+
+	f, err := os.Create(targetFile)
+	command.AbortIfError("Failed to create test file:", err)
+	defer f.Close()
+
+	var templateText string
+	if conf.CustomTemplate != "" {
+		tpl, err := os.ReadFile(conf.CustomTemplate)
+		command.AbortIfError("Failed to read custom template file:", err)
+		templateText = string(tpl)
+		if conf.CustomTemplateData != "" {
+			var tplCustomDataMap map[string]any
+			tplCustomData, err := os.ReadFile(conf.CustomTemplateData)
+			command.AbortIfError("Failed to read custom template data file:", err)
+			if !json.Valid([]byte(tplCustomData)) {
+				command.AbortWith("Invalid JSON object in custom data file.")
+			}
+			//create map from the custom template data
+			// NOTE(review): Unmarshal's error is discarded; json.Valid above is
+			// the only guard (upstream behavior, left untouched in vendored code).
+			json.Unmarshal(tplCustomData, &tplCustomDataMap)
+			data.CustomData = tplCustomDataMap
+		}
+	} else if conf.Agouti {
+		templateText = agoutiSpecText
+	} else {
+		templateText = specText
+	}
+
+	//Setting the option to explicitly fail if template is rendered trying to access missing key
+	specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText)
+	command.AbortIfError("Failed to read parse test template:", err)
+
+	//Being explicit about failing sooner during template rendering
+	//when accessing custom data rather than during the go fmt command
+	// NOTE(review): the message below says "bootstrap template" but this
+	// renders the spec template — upstream wording, left untouched.
+	err = specTemplate.Execute(f, data)
+	command.AbortIfError("Failed to render bootstrap template:", err)
+	internal.GoFmt(targetFile)
+}
+
+// formatSubject normalizes a user-supplied subject into a spec-file prefix:
+// dashes and spaces become underscores, and anything from the first ".go" or
+// "_test" onward is stripped.
+func formatSubject(name string) string {
+	name = strings.ReplaceAll(name, "-", "_")
+	name = strings.ReplaceAll(name, " ", "_")
+	name = strings.Split(name, ".go")[0]
+	name = strings.Split(name, "_test")[0]
+	return name
+}
+
+// moduleName returns module name from go.mod from given module root directory.
+// It returns "" when go.mod is missing, unreadable, or has no module directive.
+func moduleName(modRoot string) string {
+	modFile, err := os.Open(filepath.Join(modRoot, "go.mod"))
+	if err != nil {
+		return ""
+	}
+	defer modFile.Close()
+
+	// Only the first 128 bytes are examined; the module directive is assumed
+	// to appear near the top of go.mod.
+	mod := make([]byte, 128)
+	_, err = modFile.Read(mod)
+	if err != nil {
+		return ""
+	}
+
+	slashSlash := []byte("//")
+	moduleStr := []byte("module")
+
+	// Scan line by line, dropping comments, until a "module <path>" line is found.
+	for len(mod) > 0 {
+		line := mod
+		mod = nil
+		if i := bytes.IndexByte(line, '\n'); i >= 0 {
+			line, mod = line[:i], line[i+1:]
+		}
+		if i := bytes.Index(line, slashSlash); i >= 0 {
+			line = line[:i]
+		}
+		line = bytes.TrimSpace(line)
+		if !bytes.HasPrefix(line, moduleStr) {
+			continue
+		}
+		line = line[len(moduleStr):]
+		n := len(line)
+		line = bytes.TrimSpace(line)
+		// Require whitespace after "module" (so "modulefoo" does not match)
+		// and a non-empty path.
+		if len(line) == n || len(line) == 0 {
+			continue
+		}
+
+		if line[0] == '"' || line[0] == '`' {
+			p, err := strconv.Unquote(string(line))
+			if err != nil {
+				return "" // malformed quoted string or multiline module path
+			}
+			return p
+		}
+
+		return string(line)
+	}
+
+	return "" // missing module path
+}
+
+// findModuleRoot walks up from dir looking for a go.mod file and returns the
+// directory containing it, or "" when no enclosing module is found.
+func findModuleRoot(dir string) (root string) {
+	dir = filepath.Clean(dir)
+
+	// Look for enclosing go.mod.
+	for {
+		if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() {
+			return dir
+		}
+		d := filepath.Dir(dir)
+		if d == dir {
+			break
+		}
+		dir = d
+	}
+	return ""
+}
+
+// getPackageImportPath derives the import path of the current working
+// directory, preferring the enclosing go.mod's module path and falling back
+// to the $GOPATH/src layout.  When neither applies it warns and returns the
+// sentinel "UNKNOWN_PACKAGE_PATH" for the user to fix by hand.
+func getPackageImportPath() string {
+	workingDir, err := os.Getwd()
+	if err != nil {
+		panic(err.Error())
+	}
+
+	sep := string(filepath.Separator)
+
+	// Try go.mod file first
+	modRoot := findModuleRoot(workingDir)
+	if modRoot != "" {
+		modName := moduleName(modRoot)
+		if modName != "" {
+			// Import path = module path + slash-separated path below the module root.
+			cd := strings.ReplaceAll(workingDir, modRoot, "")
+			cd = strings.ReplaceAll(cd, sep, "/")
+			return modName + cd
+		}
+	}
+
+	// Fallback to GOPATH structure
+	paths := strings.Split(workingDir, sep+"src"+sep)
+	if len(paths) == 1 {
+		fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
+		return "UNKNOWN_PACKAGE_PATH"
+	}
+	return filepath.ToSlash(paths[len(paths)-1])
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
new file mode 100644
index 00000000000..4dab07d0369
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go
@@ -0,0 +1,43 @@
+package generators
+
+// specText is the default template for generated (non-Agouti) spec files.
+// The template text itself must stay byte-identical (it is rendered output).
+var specText = `{{.BuildTags}}
+package {{.Package}}
+
+import (
+	{{.GinkgoImport}}
+	{{.GomegaImport}}
+
+	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+
+})
+`
+
+// agoutiSpecText is the default template for generated Agouti spec files,
+// adding per-spec agouti.Page setup/teardown.
+var agoutiSpecText = `{{.BuildTags}}
+package {{.Package}}
+
+import (
+	{{.GinkgoImport}}
+	{{.GomegaImport}}
+	"github.com/sclevine/agouti"
+	. "github.com/sclevine/agouti/matchers"
+
+	{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
+)
+
+var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() {
+	var page *agouti.Page
+
+	{{.GinkgoPackage}}BeforeEach(func() {
+		var err error
+		page, err = agoutiDriver.NewPage()
+		{{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred())
+	})
+
+	{{.GinkgoPackage}}AfterEach(func() {
+		{{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed())
+	})
+})
+`
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
new file mode 100644
index 00000000000..28c7aa6f434
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go
@@ -0,0 +1,76 @@
+package generators
+
+import (
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+// GeneratorsConfig captures the command-line flags shared by the ginkgo
+// bootstrap and generate commands.
+type GeneratorsConfig struct {
+	Agouti, NoDot, Internal bool
+	CustomTemplate          string
+	CustomTemplateData      string
+	Tags                    string
+}
+
+// getPackageAndFormattedName derives, from the current working directory:
+// the Go package name (from build.ImportDir, falling back to a legalized
+// directory name), the spec-file prefix, and the prettified subject name.
+func getPackageAndFormattedName() (string, string, string) {
+	path, err := os.Getwd()
+	command.AbortIfError("Could not get current working directory:", err)
+
+	dirName := strings.ReplaceAll(filepath.Base(path), "-", "_")
+	dirName = strings.ReplaceAll(dirName, " ", "_")
+
+	// Fall back to the sanitized directory name if the directory does not
+	// contain an importable Go package yet.
+	pkg, err := build.ImportDir(path, 0)
+	packageName := pkg.Name
+	if err != nil {
+		packageName = ensureLegalPackageName(dirName)
+	}
+
+	formattedName := prettifyName(filepath.Base(path))
+	return packageName, dirName, formattedName
+}
+
+// ensureLegalPackageName maps names that are not legal Go package names
+// ("_", the empty string, or a name starting with a digit) onto legal
+// substitutes; all other names pass through unchanged.
+func ensureLegalPackageName(name string) string {
+	if name == "_" {
+		return "underscore"
+	}
+	if len(name) == 0 {
+		return "empty"
+	}
+	// Spell out a leading digit ("3d" -> "threed") since identifiers cannot
+	// start with a digit.
+	n, isDigitErr := strconv.Atoi(string(name[0]))
+	if isDigitErr == nil {
+		return []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:]
+	}
+	return name
+}
+
+// prettifyName converts a dashed/underscored name into a CamelCase subject
+// name (e.g. "my-pkg_name" -> "MyPkgName").
+// NOTE(review): strings.Title is deprecated since Go 1.18; left as-is to
+// match the vendored upstream source.
+func prettifyName(name string) string {
+	name = strings.ReplaceAll(name, "-", " ")
+	name = strings.ReplaceAll(name, "_", " ")
+	name = strings.Title(name)
+	name = strings.ReplaceAll(name, " ", "")
+	return name
+}
+
+// determinePackageName returns the package name to use in a generated file:
+// the package itself for internal tests, otherwise the external name+"_test".
+func determinePackageName(name string, internal bool) string {
+	if internal {
+		return name
+	}
+
+	return name + "_test"
+}
+
+// getBuildTags returns the build-constraint line to prepend to a generated
+// file: "//go:build <tags>\n" when tags is non-empty, otherwise "".
+func getBuildTags(tags string) string {
+	if tags != "" {
+		return fmt.Sprintf("//go:build %s\n", tags)
+	}
+	return ""
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
new file mode 100644
index 00000000000..86da7340d17
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go
@@ -0,0 +1,161 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// CompileSuite compiles the suite's test package into a <package>.test
+// binary alongside the suite, recording any failure in suite.State and
+// suite.CompilationError.  Suites that already carry a compiled binary are
+// returned unchanged; an empty compilation ("[no test files]") marks the
+// suite skipped rather than failed.
+func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite {
+	if suite.PathToCompiledTest != "" {
+		return suite
+	}
+
+	suite.CompilationError = nil
+
+	path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test"))
+	if err != nil {
+		suite.State = TestSuiteStateFailedToCompile
+		suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error())
+		return suite
+	}
+
+	ginkgoInvocationPath, _ := os.Getwd()
+	ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath)
+	packagePath := suite.AbsPath()
+	pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath)
+	if err != nil {
+		suite.State = TestSuiteStateFailedToCompile
+		suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error())
+		return suite
+	}
+	args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath)
+	if err != nil {
+		suite.State = TestSuiteStateFailedToCompile
+		suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error())
+		return suite
+	}
+
+	// Run `go <args>` (a go test compile invocation) from the suite directory.
+	cmd := exec.Command("go", args...)
+	cmd.Dir = suite.Path
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		if len(output) > 0 {
+			suite.State = TestSuiteStateFailedToCompile
+			suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output)
+		} else {
+			suite.State = TestSuiteStateFailedToCompile
+			suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error())
+		}
+		return suite
+	}
+
+	if strings.Contains(string(output), "[no test files]") {
+		suite.State = TestSuiteStateSkippedDueToEmptyCompilation
+		return suite
+	}
+
+	// Surface any non-error compiler output (e.g. vet warnings) to the user.
+	if len(output) > 0 {
+		fmt.Println(string(output))
+	}
+
+	if !FileExists(path) {
+		suite.State = TestSuiteStateFailedToCompile
+		suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path)
+		return suite
+	}
+
+	suite.State = TestSuiteStateCompiled
+	suite.PathToCompiledTest = path
+	return suite
+}
+
+// Cleanup deletes the compiled test binaries for the given suites, unless
+// the go flags require the binary to be preserved or a suite was handed to
+// ginkgo precompiled (in which case the binary is not ours to delete).
+func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) {
+	if goFlagsConfig.BinaryMustBePreserved() {
+		return
+	}
+	for _, suite := range suites {
+		if !suite.Precompiled {
+			os.Remove(suite.PathToCompiledTest)
+		}
+	}
+}
+
+// parallelSuiteBundle pairs a suite with the channel its compiled result
+// will be delivered on.
+type parallelSuiteBundle struct {
+	suite    TestSuite
+	compiled chan TestSuite
+}
+
+// OrderedParallelCompiler compiles suites concurrently but yields results in
+// their original order via Next.
+type OrderedParallelCompiler struct {
+	mutex        *sync.Mutex
+	stopped      bool
+	numCompilers int
+
+	idx                int
+	numSuites          int
+	completionChannels []chan TestSuite
+}
+
+// NewOrderedParallelCompiler returns an OrderedParallelCompiler that uses up
+// to numCompilers concurrent compilation workers.
+func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler {
+	return &OrderedParallelCompiler{
+		mutex:        &sync.Mutex{},
+		numCompilers: numCompilers,
+	}
+}
+
+// StartCompiling launches opc.numCompilers worker goroutines that compile
+// the given suites concurrently.  Each suite's result is delivered on a
+// dedicated buffered channel so that Next can hand results back in the
+// original suite order.  If StopAndDrain has been called, remaining suites
+// pass through uncompiled.
+func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) {
+	opc.stopped = false
+	opc.idx = 0
+	opc.numSuites = len(suites)
+	opc.completionChannels = make([]chan TestSuite, opc.numSuites)
+
+	toCompile := make(chan parallelSuiteBundle, opc.numCompilers)
+	for compiler := 0; compiler < opc.numCompilers; compiler++ {
+		go func() {
+			for bundle := range toCompile {
+				c, suite := bundle.compiled, bundle.suite
+				opc.mutex.Lock()
+				stopped := opc.stopped
+				opc.mutex.Unlock()
+				if !stopped {
+					suite = CompileSuite(suite, goFlagsConfig)
+				}
+				c <- suite
+			}
+		}()
+	}
+
+	for idx, suite := range suites {
+		opc.completionChannels[idx] = make(chan TestSuite, 1)
+		toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]}
+		if idx == 0 { //compile first suite serially
+			// Wait for the first suite, then put it back for Next to consume.
+			suite = <-opc.completionChannels[0]
+			opc.completionChannels[0] <- suite
+		}
+	}
+
+	close(toCompile)
+}
+
+// Next blocks until the next suite (in the original submission order) has
+// finished compiling and returns its index and result.  A returned index
+// equal to the number of suites signals that all suites have been consumed.
+func (opc *OrderedParallelCompiler) Next() (int, TestSuite) {
+	if opc.idx >= opc.numSuites {
+		return opc.numSuites, TestSuite{}
+	}
+
+	idx := opc.idx
+	suite := <-opc.completionChannels[idx]
+	opc.idx = opc.idx + 1
+
+	return idx, suite
+}
+
+// StopAndDrain prevents any not-yet-started compilations from running;
+// queued suites still flow through their completion channels uncompiled.
+func (opc *OrderedParallelCompiler) StopAndDrain() {
+	opc.mutex.Lock()
+	opc.stopped = true
+	opc.mutex.Unlock()
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
new file mode 100644
index 00000000000..3c5079ff4c7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/gocovmerge.go
@@ -0,0 +1,129 @@
+// Copyright (c) 2015, Wade Simmons
+// All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package gocovmerge takes the results from multiple `go test -coverprofile`
+// runs and merges them into one profile
+
+// this file was originally taken from the gocovmerge project
+// see also: https://go.shabbyrobe.org/gocovmerge
+package internal
+
+import (
+ "fmt"
+ "io"
+ "sort"
+
+ "golang.org/x/tools/cover"
+)
+
+// AddCoverProfile inserts p into profiles, which is kept sorted by FileName;
+// if a profile for the same file already exists the two are merged in place.
+// NOTE(review): the error returned by MergeCoverProfiles is discarded here —
+// upstream behavior, left untouched in vendored code.
+func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
+	i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
+	if i < len(profiles) && profiles[i].FileName == p.FileName {
+		MergeCoverProfiles(profiles[i], p)
+	} else {
+		// Shift the tail right and insert p at its sorted position.
+		profiles = append(profiles, nil)
+		copy(profiles[i+1:], profiles[i:])
+		profiles[i] = p
+	}
+	return profiles
+}
+
+// DumpCoverProfiles writes the profiles to out in the standard go cover
+// profile text format (a single "mode:" header followed by one line per
+// block).  A nil/empty slice writes nothing and returns nil.
+func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
+	if len(profiles) == 0 {
+		return nil
+	}
+	if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
+		return err
+	}
+	for _, p := range profiles {
+		for _, b := range p.Blocks {
+			if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// MergeCoverProfiles merges the blocks of merge into into; both profiles
+// must use the same cover mode.
+func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
+	if into.Mode != merge.Mode {
+		return fmt.Errorf("cannot merge profiles with different modes")
+	}
+	// Since the blocks are sorted, we can keep track of where the last block
+	// was inserted and only look at the blocks after that as targets for merge
+	startIndex := 0
+	for _, b := range merge.Blocks {
+		var err error
+		startIndex, err = mergeProfileBlock(into, b, startIndex)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// mergeProfileBlock merges a single block pb into profile p, scanning only
+// blocks at or after startIndex (both block lists are sorted by position).
+// It returns the index to resume the scan from, and errors on overlapping
+// blocks or an unsupported cover mode.
+func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
+	// sortFunc reports whether the block at i+startIndex starts at or after pb.
+	sortFunc := func(i int) bool {
+		pi := p.Blocks[i+startIndex]
+		return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
+	}
+
+	// NOTE(review): `!= true` is upstream style for !sortFunc(i); the fast
+	// path skips the binary search when the very next block already matches.
+	i := 0
+	if sortFunc(i) != true {
+		i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
+	}
+
+	i += startIndex
+	if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
+		// Same start position: blocks must be identical in extent, then counts
+		// combine according to the cover mode.
+		if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
+			return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
+		}
+		switch p.Mode {
+		case "set":
+			p.Blocks[i].Count |= pb.Count
+		case "count", "atomic":
+			p.Blocks[i].Count += pb.Count
+		default:
+			return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
+		}
+
+	} else {
+		// New block: verify it does not overlap its neighbors, then insert it
+		// at position i.
+		if i > 0 {
+			pa := p.Blocks[i-1]
+			if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
+				return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
+			}
+		}
+		if i < len(p.Blocks)-1 {
+			pa := p.Blocks[i+1]
+			if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
+				return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
+			}
+		}
+		p.Blocks = append(p.Blocks, cover.ProfileBlock{})
+		copy(p.Blocks[i+1:], p.Blocks[i:])
+		p.Blocks[i] = pb
+	}
+
+	return i + 1, nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
new file mode 100644
index 00000000000..8e16d2bb034
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go
@@ -0,0 +1,227 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+
+ "github.com/google/pprof/profile"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/cover"
+)
+
+// AbsPathForGeneratedAsset returns the absolute path at which a generated
+// asset (report, profile, binary) for suite should be written.  A non-zero
+// process number is appended as a ".N" suffix; when an output directory is
+// configured, the asset name is namespaced by the suite to avoid collisions.
+func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
+	suffix := ""
+	if process != 0 {
+		suffix = fmt.Sprintf(".%d", process)
+	}
+	if cliConfig.OutputDir == "" {
+		return filepath.Join(suite.AbsPath(), assetName+suffix)
+	}
+	outputDir, _ := filepath.Abs(cliConfig.OutputDir)
+	return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix)
+}
+
+// FinalizeProfilesAndReportsForSuites post-processes the artifacts of a run:
+// it merges per-suite cover profiles (unless kept separate), relocates
+// preserved test binaries into the output directory, generates reports for
+// suites that never ran (compile failure, timeout, skipped), and merges
+// per-suite JSON/JUnit/Teamcity reports.  It returns human-readable summary
+// messages plus the first error encountered.
+func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) {
+	messages := []string{}
+	suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile
+
+	// merge cover profiles if need be
+	if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles {
+		// Suites with programmatic focus are excluded: their coverage is partial.
+		coverProfiles := []string{}
+		for _, suite := range suitesWithProfiles {
+			if !suite.HasProgrammaticFocus {
+				coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0))
+			}
+		}
+
+		if len(coverProfiles) > 0 {
+			dst := goFlagsConfig.CoverProfile
+			if cliConfig.OutputDir != "" {
+				dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile)
+			}
+			err := MergeAndCleanupCoverProfiles(coverProfiles, dst)
+			if err != nil {
+				return messages, err
+			}
+			coverage, err := GetCoverageFromCoverProfile(dst)
+			if err != nil {
+				return messages, err
+			}
+			if coverage == 0 {
+				messages = append(messages, "composite coverage: [no statements]")
+			} else if suitesWithProfiles.AnyHaveProgrammaticFocus() {
+				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage))
+			} else {
+				messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage))
+			}
+		} else {
+			messages = append(messages, "no composite coverage computed: all suites included programatically focused specs")
+		}
+	}
+
+	// copy binaries if need be
+	for _, suite := range suitesWithProfiles {
+		if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" {
+			src := suite.PathToCompiledTest
+			dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test")
+			// Precompiled binaries are copied (they aren't ours to move);
+			// binaries we built can simply be renamed into place.
+			if suite.Precompiled {
+				if err := CopyFile(src, dst); err != nil {
+					return messages, err
+				}
+			} else {
+				if err := os.Rename(src, dst); err != nil {
+					return messages, err
+				}
+			}
+		}
+	}
+
+	type reportFormat struct {
+		ReportName   string
+		GenerateFunc func(types.Report, string) error
+		MergeFunc    func([]string, string) ([]string, error)
+	}
+	reportFormats := []reportFormat{}
+	if reporterConfig.JSONReport != "" {
+		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports})
+	}
+	if reporterConfig.JUnitReport != "" {
+		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports})
+	}
+	if reporterConfig.TeamcityReport != "" {
+		reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports})
+	}
+
+	// Generate reports for suites that failed to run
+	reportableSuites := suites.ThatAreGinkgoSuites()
+	for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) {
+		report := types.Report{
+			SuitePath:      suite.AbsPath(),
+			SuiteConfig:    suiteConfig,
+			SuiteSucceeded: false,
+		}
+		switch suite.State {
+		case TestSuiteStateFailedToCompile:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error())
+		case TestSuiteStateFailedDueToTimeout:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON)
+		case TestSuiteStateSkippedDueToPriorFailures:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON)
+		case TestSuiteStateSkippedDueToEmptyCompilation:
+			report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON)
+			report.SuiteSucceeded = true
+		}
+
+		// NOTE(review): GenerateFunc errors are discarded for these synthetic
+		// reports — upstream behavior, left untouched in vendored code.
+		for _, format := range reportFormats {
+			format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+		}
+	}
+
+	// Merge reports unless we've been asked to keep them separate
+	if !cliConfig.KeepSeparateReports {
+		for _, format := range reportFormats {
+			reports := []string{}
+			for _, suite := range reportableSuites {
+				reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0))
+			}
+			dst := format.ReportName
+			if cliConfig.OutputDir != "" {
+				dst = filepath.Join(cliConfig.OutputDir, format.ReportName)
+			}
+			mergeMessages, err := format.MergeFunc(reports, dst)
+			messages = append(messages, mergeMessages...)
+			if err != nil {
+				return messages, err
+			}
+		}
+	}
+
+	return messages, nil
+}
+
+// MergeAndCleanupCoverProfiles loads each cover profile in profiles, merges
+// them into a single profile set, deletes the source files, and writes the
+// merged result to destination.
+func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
+	var merged []*cover.Profile
+	for _, file := range profiles {
+		parsedProfiles, err := cover.ParseProfiles(file)
+		if err != nil {
+			return err
+		}
+		os.Remove(file)
+		for _, p := range parsedProfiles {
+			merged = AddCoverProfile(merged, p)
+		}
+	}
+	dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+	if err != nil {
+		return err
+	}
+	defer dst.Close()
+	err = DumpCoverProfiles(merged, dst)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetCoverageFromCoverProfile shells out to `go tool cover -func` and parses
+// the "total: (statements) N.N%" line to return the overall statement
+// coverage percentage for the given profile file.
+func GetCoverageFromCoverProfile(profile string) (float64, error) {
+	cmd := exec.Command("go", "tool", "cover", "-func", profile)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return 0, fmt.Errorf("Could not process Coverprofile %s: %s - %s", profile, err.Error(), string(output))
+	}
+	re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`)
+	matches := re.FindStringSubmatch(string(output))
+	if matches == nil {
+		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage")
+	}
+	coverageString := matches[1]
+	coverage, err := strconv.ParseFloat(coverageString, 64)
+	if err != nil {
+		return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error())
+	}
+
+	return coverage, nil
+}
+
+// MergeProfiles merges the pprof profiles (cpu, mem, block, etc.) found at
+// profilePaths into a single profile written to destination, removing each
+// source file after it is parsed.
+func MergeProfiles(profilePaths []string, destination string) error {
+	profiles := []*profile.Profile{}
+	for _, profilePath := range profilePaths {
+		proFile, err := os.Open(profilePath)
+		if err != nil {
+			return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error())
+		}
+		prof, err := profile.Parse(proFile)
+		// Close immediately (not deferred) since we're inside a loop.
+		_ = proFile.Close()
+		if err != nil {
+			return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error())
+		}
+		profiles = append(profiles, prof)
+		os.Remove(profilePath)
+	}
+
+	mergedProfile, err := profile.Merge(profiles)
+	if err != nil {
+		return fmt.Errorf("Could not merge profiles:\n%s", err.Error())
+	}
+
+	outFile, err := os.Create(destination)
+	if err != nil {
+		return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error())
+	}
+	err = mergedProfile.Write(outFile)
+	if err != nil {
+		return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error())
+	}
+	err = outFile.Close()
+	if err != nil {
+		return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error())
+	}
+
+	return nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
new file mode 100644
index 00000000000..41052ea19db
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go
@@ -0,0 +1,355 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// RunCompiledSuite executes an already-compiled test suite, dispatching to the
+// parallel, serial, or plain `go test` runner as appropriate, and returns the
+// suite with its State (and HasProgrammaticFocus) updated.
+func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+	// Assume failure until a runner reports otherwise.
+	suite.State = TestSuiteStateFailed
+	suite.HasProgrammaticFocus = false
+
+	// Nothing to run if compilation produced no binary.
+	if suite.PathToCompiledTest == "" {
+		return suite
+	}
+
+	if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 {
+		suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+	} else if suite.IsGinkgo {
+		suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs)
+	} else {
+		// Non-Ginkgo packages fall back to the standard `go test` flow.
+		suite = runGoTest(suite, cliConfig, goFlagsConfig)
+	}
+	runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite)
+	return suite
+}
+
+// buildAndStartCommand starts the suite's compiled test binary with args,
+// running in the suite directory.  Output is always captured in the returned
+// buffer; when pipeToStdout is true it is additionally streamed to the
+// process's stdout/stderr.  Aborts the CLI if the process cannot be started.
+func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) {
+	buf := &bytes.Buffer{}
+	cmd := exec.Command(suite.PathToCompiledTest, args...)
+	cmd.Dir = suite.Path
+	if pipeToStdout {
+		cmd.Stderr = io.MultiWriter(os.Stdout, buf)
+		cmd.Stdout = os.Stdout
+	} else {
+		cmd.Stderr = buf
+		cmd.Stdout = buf
+	}
+	err := cmd.Start()
+	command.AbortIfError("Failed to start test suite", err)
+
+	return cmd, buf
+}
+
+// checkForNoTestsWarning reports whether the captured test output contains the
+// go toolchain's "no tests to run" warning, printing a bootstrap hint to
+// stderr when it does.
+func checkForNoTestsWarning(buf *bytes.Buffer) bool {
+	if strings.Contains(buf.String(), "warning: no tests to run") {
+		fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`)
+		return true
+	}
+	return false
+}
+
+// runGoTest runs a non-Ginkgo (plain `go test`) compiled suite and records
+// pass/fail in suite.State.
+func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite {
+	// As we run the go test from the suite directory, make sure the cover profile is absolute
+	// and placed into the expected output directory when one is configured.
+	if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) {
+		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+	}
+
+	args, err := types.GenerateGoTestRunArgs(goFlagsConfig)
+	command.AbortIfError("Failed to generate test run arguments", err)
+	cmd, buf := buildAndStartCommand(suite, args, true)
+
+	// Wait's error is intentionally ignored: the exit status is read from
+	// ProcessState below and a non-zero exit is handled as a failure.
+	cmd.Wait()
+
+	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+	// The focus exit code still counts as a pass for a plain go test run.
+	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+	// With --require-suite, an empty package demotes an otherwise passing run.
+	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+	if passed {
+		suite.State = TestSuiteStatePassed
+	} else {
+		suite.State = TestSuiteStateFailed
+	}
+
+	return suite
+}
+
+// runSerial runs a Ginkgo suite in a single process.  Profile and report
+// destinations are rewritten to absolute, suite-specific paths (proc index 0)
+// before the run; afterwards pass/fail and programmatic focus are recorded on
+// the returned suite.
+func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+	// Anchor every generated asset next to the suite so relative paths survive
+	// the chdir into the suite directory.
+	if goFlagsConfig.Cover {
+		goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.BlockProfile != "" {
+		goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.CPUProfile != "" {
+		goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.MemProfile != "" {
+		goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+	}
+	if goFlagsConfig.MutexProfile != "" {
+		goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+	}
+	if reporterConfig.JSONReport != "" {
+		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.JUnitReport != "" {
+		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.TeamcityReport != "" {
+		reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+	}
+
+	args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig)
+	command.AbortIfError("Failed to generate test run arguments", err)
+	// Disable the go test binary's own timeout; Ginkgo manages timeouts itself.
+	args = append([]string{"--test.timeout=0"}, args...)
+	args = append(args, additionalArgs...)
+
+	cmd, buf := buildAndStartCommand(suite, args, true)
+
+	// Wait's error is intentionally ignored; the exit status below decides.
+	cmd.Wait()
+
+	exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+	suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+	passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE)
+	passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed
+	if passed {
+		suite.State = TestSuiteStatePassed
+	} else {
+		suite.State = TestSuiteStateFailed
+	}
+
+	// With programmatic focus, the test binary skips asset generation; tell the
+	// user why each requested artifact is missing.
+	if suite.HasProgrammaticFocus {
+		if goFlagsConfig.Cover {
+			fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.BlockProfile != "" {
+			fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.CPUProfile != "" {
+			fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.MemProfile != "" {
+			fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+		}
+		if goFlagsConfig.MutexProfile != "" {
+			fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+		}
+	}
+
+	return suite
+}
+
+// runParallel runs a Ginkgo suite across cliConfig.ComputedProcs() processes.
+// It starts a coordination server, launches one test process per proc with
+// per-proc profile paths, aggregates pass/fail and focus results, and finally
+// merges the per-proc cover/block/cpu/mem/mutex profiles into single files.
+func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite {
+	type procResult struct {
+		passed               bool
+		hasProgrammaticFocus bool
+	}
+
+	numProcs := cliConfig.ComputedProcs()
+	procOutput := make([]*bytes.Buffer, numProcs)
+	coverProfiles := []string{}
+
+	blockProfiles := []string{}
+	cpuProfiles := []string{}
+	memProfiles := []string{}
+	mutexProfiles := []string{}
+
+	procResults := make(chan procResult)
+
+	// The parallel-support server collects spec reports from all procs and
+	// feeds them to a single reporter on stdout.
+	server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut))
+	command.AbortIfError("Failed to start parallel spec server", err)
+	server.Start()
+	defer server.Close()
+
+	// Reports are shared across procs, so they use asset index 0.
+	if reporterConfig.JSONReport != "" {
+		reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.JUnitReport != "" {
+		reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0)
+	}
+	if reporterConfig.TeamcityReport != "" {
+		reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0)
+	}
+
+	// Launch one test process per proc (procs are 1-indexed).
+	for proc := 1; proc <= numProcs; proc++ {
+		procGinkgoConfig := ginkgoConfig
+		procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address()
+
+		// Each proc writes its profiles to a proc-specific path; they are
+		// merged after all procs finish.
+		procGoFlagsConfig := goFlagsConfig
+		if goFlagsConfig.Cover {
+			procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc)
+			coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile)
+		}
+		if goFlagsConfig.BlockProfile != "" {
+			procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc)
+			blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile)
+		}
+		if goFlagsConfig.CPUProfile != "" {
+			procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc)
+			cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile)
+		}
+		if goFlagsConfig.MemProfile != "" {
+			procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, proc)
+			memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile)
+		}
+		if goFlagsConfig.MutexProfile != "" {
+			procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc)
+			mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile)
+		}
+
+		args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig)
+		command.AbortIfError("Failed to generate test run arguments", err)
+		// Disable the go test binary's own timeout; Ginkgo manages timeouts.
+		args = append([]string{"--test.timeout=0"}, args...)
+		args = append(args, additionalArgs...)
+
+		cmd, buf := buildAndStartCommand(suite, args, false)
+		procOutput[proc-1] = buf
+		// Let the server detect procs that die before reporting back.
+		server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() })
+
+		go func() {
+			cmd.Wait()
+			exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
+			procResults <- procResult{
+				passed:               (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE),
+				hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE,
+			}
+		}()
+	}
+
+	// Gather one result per proc; the suite passes only if every proc passed.
+	passed := true
+	for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+		result := <-procResults
+		passed = passed && result.passed
+		suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus
+	}
+	if passed {
+		suite.State = TestSuiteStatePassed
+	} else {
+		suite.State = TestSuiteStateFailed
+	}
+
+	select {
+	case <-server.GetSuiteDone():
+		fmt.Println("")
+	case <-time.After(time.Second):
+		//one of the nodes never finished reporting to the server. Something must have gone wrong.
+		fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n"))
+		fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path))
+		fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n"))
+		fmt.Fprintln(formatter.ColorableStdErr, " ")
+		for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+			fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc))
+			fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String()))
+		}
+		fmt.Fprintf(os.Stderr, "** End **")
+	}
+
+	// Surface per-proc diagnostics: empty-suite detection (proc 1 only) and
+	// deprecation notices.
+	for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ {
+		output := procOutput[proc-1].String()
+		if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite {
+			suite.State = TestSuiteStateFailed
+		}
+		if strings.Contains(output, "deprecated Ginkgo functionality") {
+			fmt.Fprintln(os.Stderr, output)
+		}
+	}
+
+	// Merge per-proc profiles into single, index-0 output files (skipped when
+	// programmatic focus suppressed profile generation).
+	if len(coverProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused")
+		} else {
+			coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)
+			err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile)
+			command.AbortIfError("Failed to combine cover profiles", err)
+
+			coverage, err := GetCoverageFromCoverProfile(coverProfile)
+			command.AbortIfError("Failed to compute coverage", err)
+			if coverage == 0 {
+				fmt.Fprintln(os.Stdout, "coverage: [no statements]")
+			} else {
+				fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage)
+			}
+		}
+	}
+	if len(blockProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused")
+		} else {
+			blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0)
+			err := MergeProfiles(blockProfiles, blockProfile)
+			command.AbortIfError("Failed to combine blockprofiles", err)
+		}
+	}
+	if len(cpuProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused")
+		} else {
+			cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0)
+			err := MergeProfiles(cpuProfiles, cpuProfile)
+			command.AbortIfError("Failed to combine cpuprofiles", err)
+		}
+	}
+	if len(memProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused")
+		} else {
+			memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0)
+			err := MergeProfiles(memProfiles, memProfile)
+			command.AbortIfError("Failed to combine memprofiles", err)
+		}
+	}
+	if len(mutexProfiles) > 0 {
+		if suite.HasProgrammaticFocus {
+			fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused")
+		} else {
+			mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0)
+			err := MergeProfiles(mutexProfiles, mutexProfile)
+			command.AbortIfError("Failed to combine mutexprofiles", err)
+		}
+	}
+
+	return suite
+}
+
+// runAfterRunHook runs the user-configured after-run-hook command (if any),
+// substituting (ginkgo-suite-passed) and (ginkgo-suite-name) placeholders, and
+// prints the hook's combined output in green (success) or red (failure).
+func runAfterRunHook(command string, noColor bool, suite TestSuite) {
+	if command == "" {
+		return
+	}
+	f := formatter.NewWithNoColorBool(noColor)
+
+	// Allow for string replacement to pass input to the command
+	passed := "[FAIL]"
+	if suite.State.Is(TestSuiteStatePassed) {
+		passed = "[PASS]"
+	}
+	command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed)
+	command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName)
+
+	// Must break command into parts: quoted segments stay together, everything
+	// else splits on whitespace.
+	splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`)
+	parts := splitArgs.FindAllString(command, -1)
+
+	output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
+	if err != nil {
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}"))
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output))
+	} else {
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}"))
+		fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output))
+	}
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
new file mode 100644
index 00000000000..df99875be20
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go
@@ -0,0 +1,284 @@
+package internal
+
+import (
+ "errors"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// Failure reasons attached to suites that never actually ran.
+const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed"
+const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set"
+const EMPTY_SKIP_FAILURE_REASON = "Suite did not run go test reported that no test files were found"
+
+// TestSuiteState tracks where a suite is in the compile/run lifecycle.
+type TestSuiteState uint
+
+const (
+	TestSuiteStateInvalid TestSuiteState = iota
+
+	// Pre-run states.
+	TestSuiteStateUncompiled
+	TestSuiteStateCompiled
+
+	TestSuiteStatePassed
+
+	// Skipped states.
+	TestSuiteStateSkippedDueToEmptyCompilation
+	TestSuiteStateSkippedByFilter
+	TestSuiteStateSkippedDueToPriorFailures
+
+	// Failure states (see TestSuiteStateFailureStates).
+	TestSuiteStateFailed
+	TestSuiteStateFailedDueToTimeout
+	TestSuiteStateFailedToCompile
+)
+
+// TestSuiteStateFailureStates lists every state that counts as a failure.
+var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile}
+
+// Is reports whether state equals any of the given states.
+func (state TestSuiteState) Is(states ...TestSuiteState) bool {
+	for _, suiteState := range states {
+		if suiteState == state {
+			return true
+		}
+	}
+
+	return false
+}
+
+// TestSuite describes a single test package: where it lives, whether it uses
+// Ginkgo, how (and whether) it was compiled, and the outcome of its run.
+type TestSuite struct {
+	Path        string // directory containing the suite (relative form)
+	PackageName string
+	IsGinkgo    bool // true if the package imports Ginkgo (see filesHaveGinkgoSuite)
+
+	Precompiled        bool   // true when the user supplied a prebuilt .test/.exe binary
+	PathToCompiledTest string // absolute path to the compiled test binary
+	CompilationError   error
+
+	HasProgrammaticFocus bool
+	State                TestSuiteState
+}
+
+// AbsPath returns the absolute form of the suite's path (errors from Abs are
+// ignored; the unresolved path is returned in that case).
+func (ts TestSuite) AbsPath() string {
+	path, _ := filepath.Abs(ts.Path)
+	return path
+}
+
+// NamespacedName derives a flat, filesystem-safe name for the suite from its
+// relative path (separators and spaces become underscores), falling back to
+// the package name when the path reduces to nothing.
+func (ts TestSuite) NamespacedName() string {
+	name := relPath(ts.Path)
+	name = strings.TrimLeft(name, "."+string(filepath.Separator))
+	name = strings.ReplaceAll(name, string(filepath.Separator), "_")
+	name = strings.ReplaceAll(name, " ", "_")
+	if name == "" {
+		return ts.PackageName
+	}
+	return name
+}
+
+// TestSuites is a collection of suites with filtering/query helpers.
+type TestSuites []TestSuite
+
+// AnyHaveProgrammaticFocus reports whether any suite in the collection ran
+// with programmatically focused specs.
+func (ts TestSuites) AnyHaveProgrammaticFocus() bool {
+	for _, suite := range ts {
+		if suite.HasProgrammaticFocus {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ThatAreGinkgoSuites returns the subset of suites that use Ginkgo.
+func (ts TestSuites) ThatAreGinkgoSuites() TestSuites {
+	out := TestSuites{}
+	for _, suite := range ts {
+		if suite.IsGinkgo {
+			out = append(out, suite)
+		}
+	}
+	return out
+}
+
+// CountWithState returns how many suites are in any of the given states.
+func (ts TestSuites) CountWithState(states ...TestSuiteState) int {
+	n := 0
+	for _, suite := range ts {
+		if suite.State.Is(states...) {
+			n += 1
+		}
+	}
+
+	return n
+}
+
+// WithState returns the subset of suites in any of the given states.
+func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites {
+	out := TestSuites{}
+	for _, suite := range ts {
+		if suite.State.Is(states...) {
+			out = append(out, suite)
+		}
+	}
+
+	return out
+}
+
+// WithoutState returns the subset of suites in none of the given states.
+func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites {
+	out := TestSuites{}
+	for _, suite := range ts {
+		if !suite.State.Is(states...) {
+			out = append(out, suite)
+		}
+	}
+
+	return out
+}
+
+// ShuffledCopy returns a new slice containing the suites permuted by a
+// deterministic, seed-driven shuffle (the original slice is untouched).
+func (ts TestSuites) ShuffledCopy(seed int64) TestSuites {
+	out := make(TestSuites, len(ts))
+	permutation := rand.New(rand.NewSource(seed)).Perm(len(ts))
+	for i, j := range permutation {
+		out[i] = ts[j]
+	}
+	return out
+}
+
+// FindSuites resolves the command-line args into test suites.  Each arg may be
+// a precompiled binary (when allowPrecompiled), a directory, or a "dir/..."
+// recursive pattern; with no args the current directory is searched.  Suites
+// matching any --skip-package filter are marked TestSuiteStateSkippedByFilter
+// rather than removed.
+func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites {
+	suites := TestSuites{}
+
+	if len(args) > 0 {
+		for _, arg := range args {
+			if allowPrecompiled {
+				// Try the arg as a prebuilt test binary first; fall through to
+				// directory scanning if it isn't one.
+				suite, err := precompiledTestSuite(arg)
+				if err == nil {
+					suites = append(suites, suite)
+					continue
+				}
+			}
+			recurseForSuite := cliConfig.Recurse
+			// A trailing "/..." forces recursion for this arg only.
+			if strings.HasSuffix(arg, "/...") && arg != "/..." {
+				arg = arg[:len(arg)-4]
+				recurseForSuite = true
+			}
+			suites = append(suites, suitesInDir(arg, recurseForSuite)...)
+		}
+	} else {
+		suites = suitesInDir(".", cliConfig.Recurse)
+	}
+
+	if cliConfig.SkipPackage != "" {
+		// Comma-separated substring filters against each suite path.
+		skipFilters := strings.Split(cliConfig.SkipPackage, ",")
+		for idx := range suites {
+			for _, skipFilter := range skipFilters {
+				if strings.Contains(suites[idx].Path, skipFilter) {
+					suites[idx].State = TestSuiteStateSkippedByFilter
+					break
+				}
+			}
+		}
+	}
+
+	return suites
+}
+
+// precompiledTestSuite validates that path points at a prebuilt test binary
+// (a .test or .exe file, executable on non-Windows) and wraps it in a
+// TestSuite already in the Compiled state.
+func precompiledTestSuite(path string) (TestSuite, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		return TestSuite{}, err
+	}
+
+	if info.IsDir() {
+		return TestSuite{}, errors.New("this is a directory, not a file")
+	}
+
+	if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" {
+		return TestSuite{}, errors.New("this is not a .test binary")
+	}
+
+	// 0111 = any execute bit; Windows has no execute-bit semantics.
+	if filepath.Ext(path) == ".test" && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
+		return TestSuite{}, errors.New("this is not executable")
+	}
+
+	dir := relPath(filepath.Dir(path))
+	// Strip either extension to recover the package name.
+	packageName := strings.TrimSuffix(filepath.Base(path), ".exe")
+	packageName = strings.TrimSuffix(packageName, ".test")
+
+	path, err = filepath.Abs(path)
+	if err != nil {
+		return TestSuite{}, err
+	}
+
+	return TestSuite{
+		Path:               dir,
+		PackageName:        packageName,
+		IsGinkgo:           true,
+		Precompiled:        true,
+		PathToCompiledTest: path,
+		State:              TestSuiteStateCompiled,
+	}, nil
+}
+
+// suitesInDir scans dir for a test suite (any visible *_test.go file) and, when
+// recurse is set, descends into non-hidden subdirectories.  "vendor"
+// directories are always skipped.  At most one suite is emitted per directory.
+func suitesInDir(dir string, recurse bool) TestSuites {
+	suites := TestSuites{}
+
+	if path.Base(dir) == "vendor" {
+		return suites
+	}
+
+	// ReadDir error deliberately ignored: an unreadable dir simply yields no suites.
+	files, _ := os.ReadDir(dir)
+	// Test files not starting with "." or "_" (the go tool ignores those).
+	re := regexp.MustCompile(`^[^._].*_test\.go$`)
+	for _, file := range files {
+		if !file.IsDir() && re.MatchString(file.Name()) {
+			suite := TestSuite{
+				Path:        relPath(dir),
+				PackageName: packageNameForSuite(dir),
+				IsGinkgo:    filesHaveGinkgoSuite(dir, files),
+				State:       TestSuiteStateUncompiled,
+			}
+			suites = append(suites, suite)
+			break
+		}
+	}
+
+	if recurse {
+		// Skip hidden and underscore-prefixed directories.
+		re = regexp.MustCompile(`^[._]`)
+		for _, file := range files {
+			if file.IsDir() && !re.MatchString(file.Name()) {
+				suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...)
+			}
+		}
+	}
+
+	return suites
+}
+
+// relPath rewrites dir as a path relative to the current working directory,
+// always prefixed with "./" (or the platform separator equivalent).
+// NOTE(review): errors from Abs/Getwd/Rel are ignored; if Rel ever failed and
+// returned an empty string, dir[0] below would panic — confirm upstream intent.
+func relPath(dir string) string {
+	dir, _ = filepath.Abs(dir)
+	cwd, _ := os.Getwd()
+	dir, _ = filepath.Rel(cwd, filepath.Clean(dir))
+
+	if string(dir[0]) != "." {
+		dir = "." + string(filepath.Separator) + dir
+	}
+
+	return dir
+}
+
+// packageNameForSuite uses the directory's base name as the suite's package
+// name (errors from Abs ignored).
+func packageNameForSuite(dir string) string {
+	path, _ := filepath.Abs(dir)
+	return filepath.Base(path)
+}
+
+// filesHaveGinkgoSuite reports whether any *_test.go file in dir references
+// Ginkgo (by package name, import path of v1/v2, or a v2 dsl subpackage).
+// Read errors are ignored: an unreadable file just never matches.
+func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool {
+	reTestFile := regexp.MustCompile(`_test\.go$`)
+	reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`)
+
+	for _, file := range files {
+		if !file.IsDir() && reTestFile.MatchString(file.Name()) {
+			contents, _ := os.ReadFile(dir + "/" + file.Name())
+			if reGinkgo.Match(contents) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
new file mode 100644
index 00000000000..bd9ca7d51e8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go
@@ -0,0 +1,86 @@
+package internal
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
+// FileExists reports whether path can be stat'd (any Stat error — including
+// permission errors — is treated as "does not exist").
+func FileExists(path string) bool {
+	_, err := os.Stat(path)
+	return err == nil
+}
+
+// CopyFile copies src to dest, preserving src's file mode.  An existing dest
+// is removed first so the mode is applied fresh.
+// NOTE(review): srcFile (and destFile on the io.Copy error path) are not
+// closed when an intermediate step fails — a handle leak on error paths.
+func CopyFile(src string, dest string) error {
+	srcFile, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+
+	srcStat, err := srcFile.Stat()
+	if err != nil {
+		return err
+	}
+
+	// Remove any existing destination so OpenFile applies src's mode.
+	if _, err := os.Stat(dest); err == nil {
+		os.Remove(dest)
+	}
+
+	destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode())
+	if err != nil {
+		return err
+	}
+
+	_, err = io.Copy(destFile, srcFile)
+	if err != nil {
+		return err
+	}
+
+	if err := srcFile.Close(); err != nil {
+		return err
+	}
+	return destFile.Close()
+}
+
+// GoFmt runs `go fmt` on path, aborting the CLI (with the tool's output) on
+// failure.
+func GoFmt(path string) {
+	out, err := exec.Command("go", "fmt", path).CombinedOutput()
+	if err != nil {
+		command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err)
+	}
+}
+
+// PluralizedWord returns singular when count is exactly 1, plural otherwise.
+func PluralizedWord(singular, plural string, count int) string {
+	if count == 1 {
+		return singular
+	}
+	return plural
+}
+
+// FailedSuitesReport renders a summary of every failed suite, with package
+// names right-aligned to the longest failing package name and the failure
+// flavor (compilation failure / timeout) annotated per suite.
+func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string {
+	out := ""
+	out += "There were failures detected in the following suites:\n"
+
+	// Width of the widest failing package name, for column alignment.
+	maxPackageNameLength := 0
+	for _, suite := range suites.WithState(TestSuiteStateFailureStates...) {
+		if len(suite.PackageName) > maxPackageNameLength {
+			maxPackageNameLength = len(suite.PackageName)
+		}
+	}
+
+	packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength)
+	for _, suite := range suites {
+		switch suite.State {
+		case TestSuiteStateFailed:
+			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path)
+		case TestSuiteStateFailedToCompile:
+			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path)
+		case TestSuiteStateFailedDueToTimeout:
+			out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON)
+		}
+	}
+	return out
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
new file mode 100644
index 00000000000..9da1bab3db7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go
@@ -0,0 +1,54 @@
+package internal
+
+import (
+ "fmt"
+ "os/exec"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// versiorRe extracts a semver triple from a `go list -m` version string.
+// (Name typo is upstream's; kept as-is in this vendored copy.)
+var versiorRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`)
+
+// VerifyCLIAndFrameworkVersion compares the CLI's own version against the
+// ginkgo/v2 module version each suite resolves via `go list -m`, and prints a
+// prominent warning (with remediation steps) when they differ.  Suites whose
+// version cannot be determined are silently skipped; execution continues
+// regardless.
+func VerifyCLIAndFrameworkVersion(suites TestSuites) {
+	cliVersion := types.VERSION
+	// library version -> package names that import it
+	mismatches := map[string][]string{}
+
+	for _, suite := range suites {
+		cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2")
+		cmd.Dir = suite.Path
+		output, err := cmd.CombinedOutput()
+		if err != nil {
+			continue
+		}
+		// Expected output shape: "github.com/onsi/ginkgo/v2 vX.Y.Z"
+		components := strings.Split(string(output), " ")
+		if len(components) != 2 {
+			continue
+		}
+		matches := versiorRe.FindStringSubmatch(components[1])
+		if matches == nil || len(matches) != 2 {
+			continue
+		}
+		libraryVersion := matches[1]
+		if cliVersion != libraryVersion {
+			mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName)
+		}
+	}
+
+	if len(mismatches) == 0 {
+		return
+	}
+
+	fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}"))
+
+	fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:"))
+	fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion))
+	fmt.Println(formatter.Fi(1, "Mismatched package versions found:"))
+	for version, packages := range mismatches {
+		fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", ")))
+	}
+	fmt.Println("")
+	fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n    {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file.  Alternatively you can use\n    {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI that is currently unsupported.\n{{/}}"))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
new file mode 100644
index 00000000000..6c61f09d1b5
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go
@@ -0,0 +1,123 @@
+package labels
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+// BuildLabelsCommand constructs the `ginkgo labels` CLI subcommand, which
+// lists the spec labels found in the given packages.
+func BuildLabelsCommand() command.Command {
+	var cliConfig = types.NewDefaultCLIConfig()
+
+	flags, err := types.BuildLabelsCommandFlagSet(&cliConfig)
+	if err != nil {
+		// Flag-set construction failing is a programmer error, not user input.
+		panic(err)
+	}
+
+	return command.Command{
+		Name:     "labels",
+		Usage:    "ginkgo labels ",
+		Flags:    flags,
+		ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).",
+		DocLink:  "spec-labels",
+		Command: func(args []string, _ []string) {
+			ListLabels(args, cliConfig)
+		},
+	}
+}
+
+// ListLabels prints, for each discovered (non-filtered) suite, the labels used
+// in its source, one line per suite.  Aborts when no suites are found.
+func ListLabels(args []string, cliConfig types.CLIConfig) {
+	suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+	if len(suites) == 0 {
+		command.AbortWith("Found no test suites")
+	}
+	for _, suite := range suites {
+		labels := fetchLabelsFromPackage(suite.Path)
+		if len(labels) == 0 {
+			fmt.Printf("%s: No labels found\n", suite.PackageName)
+		} else {
+			fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", "))
+		}
+	}
+}
+
+// fetchLabelsFromPackage parses the Go sources in packagePath and returns the
+// sorted, de-duplicated, quoted set of Label(...) values found in its test
+// files.  External _test packages are preferred; when none exists every parsed
+// package is scanned.
+func fetchLabelsFromPackage(packagePath string) []string {
+	fset := token.NewFileSet()
+	parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0)
+	command.AbortIfError("Failed to parse package source:", err)
+
+	files := []*ast.File{}
+	hasTestPackage := false
+	for key, pkg := range parsedPackages {
+		if strings.HasSuffix(key, "_test") {
+			hasTestPackage = true
+			for _, file := range pkg.Files {
+				files = append(files, file)
+			}
+		}
+	}
+	// No external test package: fall back to scanning everything parsed.
+	if !hasTestPackage {
+		for _, pkg := range parsedPackages {
+			for _, file := range pkg.Files {
+				files = append(files, file)
+			}
+		}
+	}
+
+	seen := map[string]bool{}
+	labels := []string{}
+	// Walk only call expressions; fetchLabels filters for Label(...) calls.
+	ispr := inspector.New(files)
+	ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) {
+		potentialLabels := fetchLabels(n.(*ast.CallExpr))
+		for _, label := range potentialLabels {
+			if !seen[label] {
+				seen[label] = true
+				labels = append(labels, strconv.Quote(label))
+			}
+		}
+	})
+
+	sort.Strings(labels)
+	return labels
+}
+
+// fetchLabels extracts validated label strings from a call expression,
+// returning an empty slice unless the callee is named `Label` (bare or
+// selector form, e.g. ginkgo.Label).  Only string-literal arguments that pass
+// ValidateAndCleanupLabel are returned.
+func fetchLabels(callExpr *ast.CallExpr) []string {
+	out := []string{}
+	switch expr := callExpr.Fun.(type) {
+	case *ast.Ident:
+		if expr.Name != "Label" {
+			return out
+		}
+	case *ast.SelectorExpr:
+		if expr.Sel.Name != "Label" {
+			return out
+		}
+	default:
+		return out
+	}
+	for _, arg := range callExpr.Args {
+		switch expr := arg.(type) {
+		case *ast.BasicLit:
+			if expr.Kind == token.STRING {
+				unquoted, err := strconv.Unquote(expr.Value)
+				// Unquote can fail on exotic literals; fall back to the raw text.
+				if err != nil {
+					unquoted = expr.Value
+				}
+				validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
+				if err == nil {
+					out = append(out, validated)
+				}
+			}
+		}
+	}
+	return out
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
new file mode 100644
index 00000000000..e9abb27d8bf
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/build"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/generators"
+ "github.com/onsi/ginkgo/v2/ginkgo/labels"
+ "github.com/onsi/ginkgo/v2/ginkgo/outline"
+ "github.com/onsi/ginkgo/v2/ginkgo/run"
+ "github.com/onsi/ginkgo/v2/ginkgo/unfocus"
+ "github.com/onsi/ginkgo/v2/ginkgo/watch"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// program is the top-level CLI program, assembled in main.
+var program command.Program
+
+// GenerateCommands returns the full set of ginkgo subcommands (the default
+// `run` command is registered separately as the program's DefaultCommand).
+func GenerateCommands() []command.Command {
+	return []command.Command{
+		watch.BuildWatchCommand(),
+		build.BuildBuildCommand(),
+		generators.BuildBootstrapCommand(),
+		generators.BuildGenerateCommand(),
+		labels.BuildLabelsCommand(),
+		outline.BuildOutlineCommand(),
+		unfocus.BuildUnfocusCommand(),
+		BuildVersionCommand(),
+	}
+}
+
+// main wires up the ginkgo CLI: subcommands, the default `run` command, and
+// deprecation notices for removed v1 commands, then hands control to the
+// program runner (which exits the process).
+func main() {
+	program = command.Program{
+		Name:           "ginkgo",
+		Heading:        fmt.Sprintf("Ginkgo Version %s", types.VERSION),
+		Commands:       GenerateCommands(),
+		DefaultCommand: run.BuildRunCommand(),
+		DeprecatedCommands: []command.DeprecatedCommand{
+			{Name: "convert", Deprecation: types.Deprecations.Convert()},
+			{Name: "blur", Deprecation: types.Deprecations.Blur()},
+			{Name: "nodot", Deprecation: types.Deprecations.Nodot()},
+		},
+	}
+
+	program.RunAndExit(os.Args)
+}
+
+// BuildVersionCommand constructs the `ginkgo version` subcommand, which simply
+// prints the CLI's version.
+func BuildVersionCommand() command.Command {
+	return command.Command{
+		Name:     "version",
+		Usage:    "ginkgo version",
+		ShortDoc: "Print Ginkgo's version",
+		Command: func(_ []string, _ []string) {
+			fmt.Printf("Ginkgo Version %s\n", types.VERSION)
+		},
+	}
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
new file mode 100644
index 00000000000..5d8d00bb17f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go
@@ -0,0 +1,301 @@
+package outline
+
+import (
+ "go/ast"
+ "go/token"
+ "strconv"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
const (
	// undefinedTextAlt is used if the spec/container text cannot be derived
	undefinedTextAlt = "undefined"
)

// ginkgoMetadata holds useful bits of information for every entry in the outline
type ginkgoMetadata struct {
	// Name is the spec or container function name, e.g. `Describe` or `It`
	Name string `json:"name"`

	// Text is the `text` argument passed to specs, and some containers
	Text string `json:"text"`

	// Start is the position of first character of the spec or container block
	Start int `json:"start"`

	// End is the position of first character immediately after the spec or container block
	End int `json:"end"`

	// Spec is true for leaf spec entries (It/Specify/Entry and variants).
	Spec bool `json:"spec"`
	// Focused is true for programmatically focused entries (F-prefixed).
	Focused bool `json:"focused"`
	// Pending is true for pending entries (P-/X-prefixed or marked Pending).
	Pending bool `json:"pending"`
	// Labels holds labels attached via Label(...) decorator arguments.
	Labels []string `json:"labels"`
}

// ginkgoNode is used to construct the outline as a tree
type ginkgoNode struct {
	ginkgoMetadata
	Nodes []*ginkgoNode `json:"nodes"`
}

// walkFunc is the visitor callback used by the traversal helpers below.
type walkFunc func(n *ginkgoNode)
+
+func (n *ginkgoNode) PreOrder(f walkFunc) {
+ f(n)
+ for _, m := range n.Nodes {
+ m.PreOrder(f)
+ }
+}
+
+func (n *ginkgoNode) PostOrder(f walkFunc) {
+ for _, m := range n.Nodes {
+ m.PostOrder(f)
+ }
+ f(n)
+}
+
+func (n *ginkgoNode) Walk(pre, post walkFunc) {
+ pre(n)
+ for _, m := range n.Nodes {
+ m.Walk(pre, post)
+ }
+ post(n)
+}
+
+// PropagateInheritedProperties propagates the Pending and Focused properties
+// through the subtree rooted at n.
+func (n *ginkgoNode) PropagateInheritedProperties() {
+ n.PreOrder(func(thisNode *ginkgoNode) {
+ for _, descendantNode := range thisNode.Nodes {
+ if thisNode.Pending {
+ descendantNode.Pending = true
+ descendantNode.Focused = false
+ }
+ if thisNode.Focused && !descendantNode.Pending {
+ descendantNode.Focused = true
+ }
+ }
+ })
+}
+
// BackpropagateUnfocus propagates the Focused property through the subtree
// rooted at n. It applies the rule described in the Ginkgo docs:
// > Nested programmatically focused specs follow a simple rule: if a
// > leaf-node is marked focused, any of its ancestor nodes that are marked
// > focus will be unfocused.
//
// The post-order traversal maintains a stack with one boolean per fully
// visited subtree: "does this subtree contain a focused spec?".
func (n *ginkgoNode) BackpropagateUnfocus() {
	focusedSpecInSubtreeStack := []bool{}
	n.PostOrder(func(thisNode *ginkgoNode) {
		if thisNode.Spec {
			// Leaf spec: its subtree result is just its own Focused flag.
			focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused)
			return
		}
		// Container: pop one result per child (they were pushed in order by
		// the post-order traversal) and OR them together.
		focusedSpecInSubtree := false
		for range thisNode.Nodes {
			focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1]
			focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1]
		}
		focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree)
		// A container with a focused descendant loses its own focus.
		if focusedSpecInSubtree {
			thisNode.Focused = false
		}
	})

}
+
// packageAndIdentNamesFromCallExpr extracts the package name and identifier
// name of a call's callee. A bare call like `It(...)` yields package "",
// while a qualified call like `ginkgo.It(...)` yields package "ginkgo". The
// third return value is false when the callee has any other shape.
func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) {
	switch fun := ce.Fun.(type) {
	case *ast.Ident:
		return "", fun.Name, true
	case *ast.SelectorExpr:
		pkgIdent, isIdent := fun.X.(*ast.Ident)
		// A package identifier is top-level, so its Obj must be nil; a
		// non-nil Obj means the identifier resolves to some local object.
		if !isIdent || pkgIdent.Obj != nil || fun.Sel == nil {
			return "", "", false
		}
		return pkgIdent.Name, fun.Sel.Name, true
	}
	return "", "", false
}
+
// absoluteOffsetsForNode derives the absolute character offsets of the node start and
// end positions.
func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) {
	// PositionFor with adjusted=false ignores //line directives, so the
	// offsets always refer to the actual file contents.
	return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset
}
+
+// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree
+// corresponding to a Ginkgo container or spec.
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) {
+ packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce)
+ if !ok {
+ return nil, false
+ }
+
+ n := ginkgoNode{}
+ n.Name = identName
+ n.Start, n.End = absoluteOffsetsForNode(fset, ce)
+ n.Nodes = make([]*ginkgoNode, 0)
+ switch identName {
+ case "It", "Specify", "Entry":
+ n.Spec = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "FIt", "FSpecify", "FEntry":
+ n.Spec = true
+ n.Focused = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry":
+ n.Spec = true
+ n.Pending = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "Context", "Describe", "When", "DescribeTable":
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ n.Pending = pendingFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "FContext", "FDescribe", "FWhen", "FDescribeTable":
+ n.Focused = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable":
+ n.Pending = true
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ n.Labels = labelFromCallExpr(ce)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "By":
+ n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt)
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "AfterEach", "BeforeEach":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "JustAfterEach", "JustBeforeEach":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "AfterSuite", "BeforeSuite":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ case "SynchronizedAfterSuite", "SynchronizedBeforeSuite":
+ return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName
+ default:
+ return nil, false
+ }
+}
+
+// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or
+// container. If it cannot derive it, it returns the alt text.
+func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string {
+ text, defined := textFromCallExpr(ce)
+ if !defined {
+ return alt
+ }
+ return text
+}
+
// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container
// from the call's first argument. The second return value is false when the
// call has no arguments or the first argument is not a basic literal.
func textFromCallExpr(ce *ast.CallExpr) (string, bool) {
	if len(ce.Args) == 0 {
		return "", false
	}
	lit, isLit := ce.Args[0].(*ast.BasicLit)
	if !isLit {
		return "", false
	}
	if lit.Kind == token.CHAR || lit.Kind == token.STRING {
		// CHAR and STRING literal values carry their quotes; strip them.
		if unquoted, err := strconv.Unquote(lit.Value); err == nil {
			return unquoted, true
		}
		// Unquoting failed; fall through to the raw value.
	}
	return lit.Value, true
}
+
// labelFromCallExpr collects the labels attached to a spec or container via
// Label(...) decorator calls among the arguments after the text argument.
func labelFromCallExpr(ce *ast.CallExpr) []string {

	labels := []string{}
	if len(ce.Args) < 2 {
		return labels
	}

	for _, arg := range ce.Args[1:] {
		switch expr := arg.(type) {
		case *ast.CallExpr:
			id, ok := expr.Fun.(*ast.Ident)
			if !ok {
				// to skip over cases where the expr.Fun. is actually *ast.SelectorExpr
				continue
			}
			if id.Name == "Label" {
				ls := extractLabels(expr)
				labels = append(labels, ls...)
			}
		}
	}
	return labels
}
+
// extractLabels pulls the string-literal arguments out of a Label(...) call,
// validating and cleaning each one; invalid labels are silently dropped.
func extractLabels(expr *ast.CallExpr) []string {
	out := []string{}
	for _, arg := range expr.Args {
		switch expr := arg.(type) {
		case *ast.BasicLit:
			if expr.Kind == token.STRING {
				unquoted, err := strconv.Unquote(expr.Value)
				if err != nil {
					// If unquoting fails, fall back to the raw (quoted) value.
					unquoted = expr.Value
				}
				validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{})
				if err == nil {
					out = append(out, validated)
				}
			}
		}
	}

	return out
}
+
// pendingFromCallExpr reports whether any decorator argument after the text
// marks the spec or container as pending, either as the bare identifier
// `Pending` or as a call whose callee is named `Pending`.
func pendingFromCallExpr(ce *ast.CallExpr) bool {
	if len(ce.Args) < 2 {
		return false
	}
	for _, arg := range ce.Args[1:] {
		switch decorator := arg.(type) {
		case *ast.CallExpr:
			// Skip callees that are not plain identifiers (e.g. *ast.SelectorExpr).
			if fn, ok := decorator.Fun.(*ast.Ident); ok && fn.Name == "Pending" {
				return true
			}
		case *ast.Ident:
			if decorator.Name == "Pending" {
				return true
			}
		}
	}
	return false
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
new file mode 100644
index 00000000000..f0a6b5d26cd
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go
@@ -0,0 +1,58 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Most of the required functions were available in the
+// "golang.org/x/tools/go/ast/astutil" package, but not exported.
+// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go
+
+package outline
+
+import (
+ "go/ast"
+ "strconv"
+ "strings"
+)
+
// packageNameForImport returns the package name under which the import at
// path is available in f, or nil if f does not import path. "Package name"
// refers to `pkgname` in the call expression `pkgname.ExportedIdentifier`.
// Examples:
// (import path not found)                    -> nil
// "import example.com/pkg/foo"               -> "ginkgo" (educated guess)
// "import fooalias example.com/pkg/foo"      -> "fooalias"
// "import . example.com/pkg/foo"             -> ""
func packageNameForImport(f *ast.File, path string) *string {
	spec := importSpec(f, path)
	if spec == nil {
		return nil
	}
	name := spec.Name.String()
	// If the import has no explicit name, spec.Name is nil and
	// (*ast.Ident).String() returns the literal "<nil>" — not "". The
	// previous check compared against "" and therefore never fired, leaking
	// "<nil>" as the package name. Make an educated guess instead; this is
	// not guaranteed to match the package's declared name.
	if name == "<nil>" {
		name = "ginkgo"
	}
	// A dot-import puts the package's identifiers directly in file scope, so
	// calls are unqualified: the effective package name is "".
	if name == "." {
		name = ""
	}
	return &name
}

// importSpec returns the import spec if f imports path,
// or nil otherwise.
func importSpec(f *ast.File, path string) *ast.ImportSpec {
	for _, s := range f.Imports {
		if strings.HasPrefix(importPath(s), path) {
			return s
		}
	}
	return nil
}

// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
	t, err := strconv.Unquote(s.Path.Value)
	if err != nil {
		return ""
	}
	return t
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
new file mode 100644
index 00000000000..c2327cda8cf
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go
@@ -0,0 +1,110 @@
+package outline
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "strings"
+
+ "golang.org/x/tools/go/ast/inspector"
+)
+
const (
	// ginkgoImportPath is the well-known ginkgo import path
	ginkgoImportPath = "github.com/onsi/ginkgo/v2"
)

// FromASTFile returns an outline for a Ginkgo test source file. It fails if
// the file does not import Ginkgo at all.
func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) {
	ginkgoPackageName := packageNameForImport(src, ginkgoImportPath)
	if ginkgoPackageName == nil {
		return nil, fmt.Errorf("file does not import %q", ginkgoImportPath)
	}

	// Build the outline tree with an explicit stack: the top of the stack is
	// the Ginkgo container the traversal is currently inside.
	root := ginkgoNode{}
	stack := []*ginkgoNode{&root}
	ispr := inspector.New([]*ast.File{src})
	ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool {
		if push {
			// Pre-order traversal
			ce, ok := node.(*ast.CallExpr)
			if !ok {
				// Because `Nodes` calls this function only when the node is an
				// ast.CallExpr, this should never happen
				panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End()))
			}
			gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName)
			if !ok {
				// Node is not a Ginkgo spec or container, continue
				return true
			}
			parent := stack[len(stack)-1]
			parent.Nodes = append(parent.Nodes, gn)
			stack = append(stack, gn)
			return true
		}
		// Post-order traversal
		start, end := absoluteOffsetsForNode(fset, node)
		lastVisitedGinkgoNode := stack[len(stack)-1]
		if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End {
			// Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue
			return true
		}
		// Leaving the node on top of the stack: pop it.
		stack = stack[0 : len(stack)-1]
		return true
	})
	if len(root.Nodes) == 0 {
		return &outline{[]*ginkgoNode{}}, nil
	}

	// Derive the final focused property for all nodes. This must be done
	// _before_ propagating the inherited focused property.
	root.BackpropagateUnfocus()
	// Now, propagate inherited properties, including focused and pending.
	root.PropagateInheritedProperties()

	return &outline{root.Nodes}, nil
}
+
// outline is the tree of Ginkgo entries found in a single source file.
type outline struct {
	Nodes []*ginkgoNode `json:"nodes"`
}

// MarshalJSON renders the outline as the JSON array of its root nodes rather
// than as an object wrapping them.
func (o *outline) MarshalJSON() ([]byte, error) {
	return json.Marshal(o.Nodes)
}
+
// String returns a CSV-formatted outline. Spec or container are output in
// depth-first order.
func (o *outline) String() string {
	return o.StringIndent(0)
}
+
// StringIndent returns a CSV-formatted outline, but every line is indented by
// one 'width' of spaces for every level of nesting.
func (o *outline) StringIndent(width int) string {
	var b strings.Builder
	b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n")

	currentIndent := 0
	pre := func(n *ginkgoNode) {
		b.WriteString(fmt.Sprintf("%*s", currentIndent, ""))
		var labels string
		if len(n.Labels) == 1 {
			labels = n.Labels[0]
		} else {
			labels = strings.Join(n.Labels, ", ")
		}
		// Enclose labels in double quotes so that, when imported into a CSV
		// app, the Labels column reads as one field of comma-separated strings.
		b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels))
		currentIndent += width
	}
	post := func(n *ginkgoNode) {
		currentIndent -= width
	}
	for _, n := range o.Nodes {
		n.Walk(pre, post)
	}
	return b.String()
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
new file mode 100644
index 00000000000..36698d46a4b
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go
@@ -0,0 +1,98 @@
+package outline
+
+import (
+ "encoding/json"
+ "fmt"
+ "go/parser"
+ "go/token"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
const (
	// indentWidth is the width used by the 'indent' output
	indentWidth = 4
	// stdinAlias is a portable alias for stdin. This convention is used in
	// other CLIs, e.g., kubectl.
	stdinAlias = "-"
	// usageCommand is the one-line usage string for the outline command.
	// (The "<filename>" placeholder had been truncated, leaving a dangling
	// trailing space; restore it.)
	usageCommand = "ginkgo outline <filename>"
)

// outlineConfig holds the command-line configuration for `ginkgo outline`.
type outlineConfig struct {
	// Format selects the output format: 'csv', 'indent', or 'json'.
	Format string
}
+
+func BuildOutlineCommand() command.Command {
+ conf := outlineConfig{
+ Format: "csv",
+ }
+ flags, err := types.NewGinkgoFlagSet(
+ types.GinkgoFlags{
+ {Name: "format", KeyPath: "Format",
+ Usage: "Format of outline",
+ UsageArgument: "one of 'csv', 'indent', or 'json'",
+ UsageDefaultValue: conf.Format,
+ },
+ },
+ &conf,
+ types.GinkgoFlagSections{},
+ )
+ if err != nil {
+ panic(err)
+ }
+
+ return command.Command{
+ Name: "outline",
+ Usage: "ginkgo outline ",
+ ShortDoc: "Create an outline of Ginkgo symbols for a file",
+ Documentation: "To read from stdin, use: `ginkgo outline -`",
+ DocLink: "creating-an-outline-of-specs",
+ Flags: flags,
+ Command: func(args []string, _ []string) {
+ outlineFile(args, conf.Format)
+ },
+ }
+}
+
+func outlineFile(args []string, format string) {
+ if len(args) != 1 {
+ command.AbortWithUsage("outline expects exactly one argument")
+ }
+
+ filename := args[0]
+ var src *os.File
+ if filename == stdinAlias {
+ src = os.Stdin
+ } else {
+ var err error
+ src, err = os.Open(filename)
+ command.AbortIfError("Failed to open file:", err)
+ }
+
+ fset := token.NewFileSet()
+
+ parsedSrc, err := parser.ParseFile(fset, filename, src, 0)
+ command.AbortIfError("Failed to parse source:", err)
+
+ o, err := FromASTFile(fset, parsedSrc)
+ command.AbortIfError("Failed to create outline:", err)
+
+ var oerr error
+ switch format {
+ case "csv":
+ _, oerr = fmt.Print(o)
+ case "indent":
+ _, oerr = fmt.Print(o.StringIndent(indentWidth))
+ case "json":
+ b, err := json.Marshal(o)
+ if err != nil {
+ println(fmt.Sprintf("error marshalling to json: %s", err))
+ }
+ _, oerr = fmt.Println(string(b))
+ default:
+ command.AbortWith("Format %s not accepted", format)
+ }
+ command.AbortIfError("Failed to write outline:", oerr)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
new file mode 100644
index 00000000000..aaed4d570e9
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go
@@ -0,0 +1,232 @@
+package run
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildRunCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "run",
+ Flags: flags,
+ Usage: "ginkgo run -- ",
+ ShortDoc: "Run the tests in the passed in (or the package in the current directory if left blank)",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "running-tests",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ runner := &SpecRunner{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ runner.RunSpecs(args, additionalArgs)
+ },
+ }
+}
+
// SpecRunner bundles the configuration needed to find, compile, and run a
// set of Ginkgo test suites on behalf of the `run` command.
type SpecRunner struct {
	suiteConfig    types.SuiteConfig
	reporterConfig types.ReporterConfig
	cliConfig      types.CLIConfig
	goFlagsConfig  types.GoFlagsConfig
	flags          types.GinkgoFlagSet

	// interruptHandler lets an in-flight run react to interrupts between suites.
	interruptHandler *interrupt_handler.InterruptHandler
}
+
+func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, r.cliConfig, true)
+ skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter)
+ suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(skippedSuites) > 0 {
+ fmt.Println("Will skip:")
+ for _, skippedSuite := range skippedSuites {
+ fmt.Println(" " + skippedSuite.Path)
+ }
+ }
+
+ if len(skippedSuites) > 0 && len(suites) == 0 {
+ command.AbortGracefullyWith("All tests skipped! Exiting...")
+ }
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) {
+ r.reporterConfig.Succinct = true
+ }
+
+ t := time.Now()
+ var endTime time.Time
+ if r.suiteConfig.Timeout > 0 {
+ endTime = t.Add(r.suiteConfig.Timeout)
+ }
+
+ iteration := 0
+OUTER_LOOP:
+ for {
+ if !r.flags.WasSet("seed") {
+ r.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+ if r.cliConfig.RandomizeSuites && len(suites) > 1 {
+ suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed)
+ }
+
+ opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers())
+ opc.StartCompiling(suites, r.goFlagsConfig)
+
+ SUITE_LOOP:
+ for {
+ suiteIdx, suite := opc.Next()
+ if suiteIdx >= len(suites) {
+ break SUITE_LOOP
+ }
+ suites[suiteIdx] = suite
+
+ if r.interruptHandler.Status().Interrupted() {
+ opc.StopAndDrain()
+ break OUTER_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) {
+ fmt.Printf("Skipping %s (no test files)\n", suite.Path)
+ continue SUITE_LOOP
+ }
+
+ if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suites[suiteIdx].CompilationError.Error())
+ if !r.cliConfig.KeepGoing {
+ opc.StopAndDrain()
+ }
+ continue SUITE_LOOP
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing {
+ suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+
+ if !endTime.IsZero() {
+ r.suiteConfig.Timeout = endTime.Sub(time.Now())
+ if r.suiteConfig.Timeout <= 0 {
+ suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout
+ opc.StopAndDrain()
+ continue SUITE_LOOP
+ }
+ }
+
+ suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs)
+ }
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ if iteration > 0 {
+ fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1)
+ }
+ break OUTER_LOOP
+ }
+
+ if r.cliConfig.UntilItFails {
+ fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1))
+ } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat {
+ fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1)
+ } else {
+ break OUTER_LOOP
+ }
+ iteration += 1
+ }
+
+ internal.Cleanup(r.goFlagsConfig, suites...)
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+
+ fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t))
+
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) == 0 {
+ if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" {
+ fmt.Printf("Test Suite Passed\n")
+ fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE)
+ command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE})
+ } else {
+ fmt.Printf("Test Suite Passed\n")
+ command.Abort(command.AbortDetails{})
+ }
+ } else {
+ fmt.Fprintln(formatter.ColorableStdOut, "")
+ if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ fmt.Fprintln(formatter.ColorableStdOut,
+ internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor)))
+ }
+ fmt.Printf("Test Suite Failed\n")
+ command.Abort(command.AbortDetails{ExitCode: 1})
+ }
+}
+
// orcMessage returns an increasingly exasperated status line for the
// until-it-fails loop: nothing for the first ten attempts, a rotating quip
// for attempts 10-29, and a final plea thereafter.
func orcMessage(iteration int) string {
	quips := []string{
		"If at first you succeed...",
		"...try, try again.",
		"Looking good!",
		"Still good...",
		"I think your tests are fine....",
		"Yep, still passing",
		"Oh boy, here I go testin' again!",
		"Even the gophers are getting bored",
		"Did you try -race?",
		"Maybe you should stop now?",
		"I'm getting tired...",
		"What if I just made you a sandwich?",
		"Hit ^C, hit ^C, please hit ^C",
		"Make it stop. Please!",
		"Come on! Enough is enough!",
		"Dave, this conversation can serve no purpose anymore. Goodbye.",
		"Just what do you think you're doing, Dave? ",
		"I, Sisyphus",
		"Insanity: doing the same thing over and over again and expecting different results. -Einstein",
		"I guess Einstein never tried to churn butter",
	}
	switch {
	case iteration < 10:
		return ""
	case iteration < 30:
		return quips[iteration-10] + "\n"
	default:
		return "No, seriously... you can probably stop now.\n"
	}
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
new file mode 100644
index 00000000000..7dd2943948a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go
@@ -0,0 +1,186 @@
+package unfocus
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+)
+
// BuildUnfocusCommand returns the `ginkgo unfocus` subcommand, which strips
// F-prefixed containers/specs and Focus decorators from all Go files under
// the current directory.
func BuildUnfocusCommand() command.Command {
	return command.Command{
		Name:     "unfocus",
		Usage:    "ginkgo unfocus",
		ShortDoc: "Recursively unfocus any focused tests under the current directory",
		DocLink:  "filtering-specs",
		Command: func(_ []string, _ []string) {
			unfocusSpecs()
		},
	}
}
+
// unfocusSpecs walks the current directory tree for Go files and rewrites any
// that contain focus markers. A single producer goroutine feeds file paths to
// a fixed pool of worker goroutines.
func unfocusSpecs() {
	fmt.Println("Scanning for focus...")

	goFiles := make(chan string)
	go func() {
		unfocusDir(goFiles, ".")
		// Closing the channel lets the workers' range loops terminate.
		close(goFiles)
	}()

	const workers = 10
	wg := sync.WaitGroup{}
	wg.Add(workers)

	for i := 0; i < workers; i++ {
		go func() {
			for path := range goFiles {
				unfocusFile(path)
			}
			wg.Done()
		}()
	}

	wg.Wait()
}
+
+func unfocusDir(goFiles chan string, path string) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ fmt.Println(err.Error())
+ return
+ }
+
+ for _, f := range files {
+ switch {
+ case f.IsDir() && shouldProcessDir(f.Name()):
+ unfocusDir(goFiles, filepath.Join(path, f.Name()))
+ case !f.IsDir() && shouldProcessFile(f.Name()):
+ goFiles <- filepath.Join(path, f.Name())
+ }
+ }
+}
+
// shouldProcessDir reports whether a directory should be descended into:
// vendor trees and hidden (dot-prefixed) directories are skipped.
func shouldProcessDir(basename string) bool {
	return !strings.HasPrefix(basename, ".") && basename != "vendor"
}

// shouldProcessFile reports whether the named file is a Go source file.
func shouldProcessFile(basename string) bool {
	return strings.HasSuffix(basename, ".go")
}
+
+func unfocusFile(path string) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ fmt.Printf("error reading file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments)
+ if err != nil {
+ fmt.Printf("error parsing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ eliminations := scanForFocus(ast)
+ if len(eliminations) == 0 {
+ return
+ }
+
+ fmt.Printf("...updating %s\n", path)
+ backup, err := writeBackup(path, data)
+ if err != nil {
+ fmt.Printf("error creating backup file: %s\n", err.Error())
+ return
+ }
+
+ if err := updateFile(path, data, eliminations); err != nil {
+ fmt.Printf("error writing file '%s': %s\n", path, err.Error())
+ return
+ }
+
+ os.Remove(backup)
+}
+
// writeBackup copies data into a fresh temporary file created in the same
// directory as path and returns the temporary file's name.
func writeBackup(path string, data []byte) (string, error) {
	tmp, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path))
	if err != nil {
		return "", fmt.Errorf("error creating temporary file: %w", err)
	}
	defer tmp.Close()

	if _, err := io.Copy(tmp, bytes.NewReader(data)); err != nil {
		return "", fmt.Errorf("error writing to temporary file: %w", err)
	}

	return tmp.Name(), nil
}
+
// updateFile rewrites path in place, copying data through while skipping each
// [position, length] range in eliminations. Ranges come from scanForFocus in
// source order, so they are ascending and non-overlapping.
func updateFile(path string, data []byte, eliminations [][]int64) error {
	to, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("error opening file for writing '%s': %w\n", path, err)
	}
	defer to.Close()

	from := bytes.NewReader(data)
	var cursor int64
	for _, eliminationRange := range eliminations {
		// token.Pos values are 1-based; subtract 1 to get a byte offset.
		positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1]
		// Copy everything between the previous elimination and this one...
		if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil {
			return fmt.Errorf("error copying data: %w", err)
		}

		cursor = positionToEliminate + lengthToEliminate

		// ...then skip the eliminated bytes in the source reader.
		if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil {
			return fmt.Errorf("error seeking to position in buffer: %w", err)
		}
	}

	// Copy whatever remains after the final elimination.
	if _, err := io.Copy(to, from); err != nil {
		return fmt.Errorf("error copying end data: %w", err)
	}

	return nil
}
+
// scanForFocus walks the AST and records the byte ranges that must be removed
// to unfocus the file: the leading 'F' of focused container/spec calls
// (length 1), and standalone `Focus` decorator identifiers (length 6, which
// also consumes the character after the identifier, e.g. a trailing comma).
// Positions are 1-based token.Pos values; updateFile converts them to offsets.
func scanForFocus(file *ast.File) (eliminations [][]int64) {
	ast.Inspect(file, func(n ast.Node) bool {
		if c, ok := n.(*ast.CallExpr); ok {
			if i, ok := c.Fun.(*ast.Ident); ok {
				if isFocus(i.Name) {
					eliminations = append(eliminations, []int64{int64(i.Pos()), 1})
				}
			}
		}

		if i, ok := n.(*ast.Ident); ok {
			if i.Name == "Focus" {
				eliminations = append(eliminations, []int64{int64(i.Pos()), 6})
			}
		}

		return true
	})

	return eliminations
}
+
// isFocus reports whether name is one of Ginkgo's focused container/spec
// identifiers (the F-prefixed variants).
func isFocus(name string) bool {
	for _, focused := range []string{"FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen"} {
		if name == focused {
			return true
		}
	}
	return false
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
new file mode 100644
index 00000000000..6c485c5b1af
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go
@@ -0,0 +1,22 @@
+package watch
+
+import "sort"
+
// Delta describes what changed between two polls of the watched packages and
// suites.
type Delta struct {
	// ModifiedPackages lists the package directories whose hashes changed.
	ModifiedPackages []string

	// NewSuites are suites seen for the first time this poll.
	NewSuites []*Suite
	// RemovedSuites are previously tracked suites that have disappeared.
	RemovedSuites []*Suite
	modifiedSuites []*Suite
}

// DescendingByDelta orders suites by how many of their dependencies changed,
// most-affected first.
type DescendingByDelta []*Suite

func (a DescendingByDelta) Len() int           { return len(a) }
func (a DescendingByDelta) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() }

// ModifiedSuites returns the changed suites, most-affected first. Note that
// it sorts the underlying slice in place.
func (d Delta) ModifiedSuites() []*Suite {
	sort.Sort(DescendingByDelta(d.modifiedSuites))
	return d.modifiedSuites
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
new file mode 100644
index 00000000000..26418ac62ed
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go
@@ -0,0 +1,75 @@
+package watch
+
+import (
+ "fmt"
+
+ "regexp"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+)
+
// SuiteErrors maps a test suite to the error encountered while tracking it.
type SuiteErrors map[internal.TestSuite]error

// DeltaTracker watches a set of suites and their package dependencies and
// reports which suites are affected when source files change.
type DeltaTracker struct {
	maxDepth      int
	watchRegExp   *regexp.Regexp
	suites        map[string]*Suite
	packageHashes *PackageHashes
}

// NewDeltaTracker returns a tracker that follows suite dependencies up to
// maxDepth levels deep and only hashes files matching watchRegExp.
func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker {
	return &DeltaTracker{
		maxDepth:      maxDepth,
		watchRegExp:   watchRegExp,
		packageHashes: NewPackageHashes(watchRegExp),
		suites:        map[string]*Suite{},
	}
}
+
// Delta compares the tracked state against the provided suites and current
// package hashes. It reports which packages changed and which suites are
// new, removed, or modified, plus any per-suite errors encountered while
// building dependency information.
func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) {
	errors = SuiteErrors{}
	delta.ModifiedPackages = d.packageHashes.CheckForChanges()

	providedSuitePaths := map[string]bool{}
	for _, suite := range suites {
		providedSuitePaths[suite.Path] = true
	}

	d.packageHashes.StartTrackingUsage()

	// Classify already-tracked suites as modified or removed.
	for _, suite := range d.suites {
		if providedSuitePaths[suite.Suite.Path] {
			if suite.Delta() > 0 {
				delta.modifiedSuites = append(delta.modifiedSuites, suite)
			}
		} else {
			delta.RemovedSuites = append(delta.RemovedSuites, suite)
		}
	}

	// Drop hashes for packages no tracked suite referenced above.
	d.packageHashes.StopTrackingUsageAndPrune()

	// Register suites we have not seen before.
	for _, suite := range suites {
		_, ok := d.suites[suite.Path]
		if !ok {
			s, err := NewSuite(suite, d.maxDepth, d.packageHashes)
			if err != nil {
				errors[suite] = err
				continue
			}
			d.suites[suite.Path] = s
			delta.NewSuites = append(delta.NewSuites, s)
		}
	}

	return delta, errors
}
+
// WillRun records that suite is about to run: its dependency set is
// recomputed so subsequent deltas are measured against fresh state. It
// returns an error for suites the tracker has never seen.
func (d *DeltaTracker) WillRun(suite internal.TestSuite) error {
	s, ok := d.suites[suite.Path]
	if !ok {
		return fmt.Errorf("unknown suite %s", suite.Path)
	}

	return s.MarkAsRunAndRecomputedDependencies(d.maxDepth)
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
new file mode 100644
index 00000000000..a34d94354d9
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go
@@ -0,0 +1,92 @@
+package watch
+
+import (
+ "go/build"
+ "regexp"
+)
+
+var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`)
+var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing
+
+type Dependencies struct {
+ deps map[string]int
+}
+
+func NewDependencies(path string, maxDepth int) (Dependencies, error) {
+ d := Dependencies{
+ deps: map[string]int{},
+ }
+
+ if maxDepth == 0 {
+ return d, nil
+ }
+
+ err := d.seedWithDepsForPackageAtPath(path)
+ if err != nil {
+ return d, err
+ }
+
+ for depth := 1; depth < maxDepth; depth++ {
+ n := len(d.deps)
+ d.addDepsForDepth(depth)
+ if n == len(d.deps) {
+ break
+ }
+ }
+
+ return d, nil
+}
+
+func (d Dependencies) Dependencies() map[string]int {
+ return d.deps
+}
+
+func (d Dependencies) seedWithDepsForPackageAtPath(path string) error {
+ pkg, err := build.ImportDir(path, 0)
+ if err != nil {
+ return err
+ }
+
+ d.resolveAndAdd(pkg.Imports, 1)
+ d.resolveAndAdd(pkg.TestImports, 1)
+ d.resolveAndAdd(pkg.XTestImports, 1)
+
+ delete(d.deps, pkg.Dir)
+ return nil
+}
+
+func (d Dependencies) addDepsForDepth(depth int) {
+ for dep, depDepth := range d.deps {
+ if depDepth == depth {
+ d.addDepsForDep(dep, depth+1)
+ }
+ }
+}
+
+func (d Dependencies) addDepsForDep(dep string, depth int) {
+ pkg, err := build.ImportDir(dep, 0)
+ if err != nil {
+ println(err.Error())
+ return
+ }
+ d.resolveAndAdd(pkg.Imports, depth)
+}
+
+func (d Dependencies) resolveAndAdd(deps []string, depth int) {
+ for _, dep := range deps {
+ pkg, err := build.Import(dep, ".", 0)
+ if err != nil {
+ continue
+ }
+ if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) {
+ d.addDepIfNotPresent(pkg.Dir, depth)
+ }
+ }
+}
+
+func (d Dependencies) addDepIfNotPresent(dep string, depth int) {
+ _, ok := d.deps[dep]
+ if !ok {
+ d.deps[dep] = depth
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
new file mode 100644
index 00000000000..0e6ae1f2903
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go
@@ -0,0 +1,117 @@
+package watch
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "strings"
+ "time"
+)
+
+var goTestRegExp = regexp.MustCompile(`_test\.go$`)
+
+type PackageHash struct {
+ CodeModifiedTime time.Time
+ TestModifiedTime time.Time
+ Deleted bool
+
+ path string
+ codeHash string
+ testHash string
+ watchRegExp *regexp.Regexp
+}
+
+func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash {
+ p := &PackageHash{
+ path: path,
+ watchRegExp: watchRegExp,
+ }
+
+ p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes()
+
+ return p
+}
+
+func (p *PackageHash) CheckForChanges() bool {
+ codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes()
+
+ if deleted {
+ if !p.Deleted {
+ t := time.Now()
+ p.CodeModifiedTime = t
+ p.TestModifiedTime = t
+ }
+ p.Deleted = true
+ return true
+ }
+
+ modified := false
+ p.Deleted = false
+
+ if p.codeHash != codeHash {
+ p.CodeModifiedTime = codeModifiedTime
+ modified = true
+ }
+ if p.testHash != testHash {
+ p.TestModifiedTime = testModifiedTime
+ modified = true
+ }
+
+ p.codeHash = codeHash
+ p.testHash = testHash
+ return modified
+}
+
+func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) {
+ entries, err := os.ReadDir(p.path)
+
+ if err != nil {
+ deleted = true
+ return
+ }
+
+ for _, entry := range entries {
+ if entry.IsDir() {
+ continue
+ }
+
+ info, err := entry.Info()
+ if err != nil {
+ continue
+ }
+
+ if isHiddenFile(info) {
+ continue
+ }
+
+ if goTestRegExp.MatchString(info.Name()) {
+ testHash += p.hashForFileInfo(info)
+ if info.ModTime().After(testModifiedTime) {
+ testModifiedTime = info.ModTime()
+ }
+ continue
+ }
+
+ if p.watchRegExp.MatchString(info.Name()) {
+ codeHash += p.hashForFileInfo(info)
+ if info.ModTime().After(codeModifiedTime) {
+ codeModifiedTime = info.ModTime()
+ }
+ }
+ }
+
+ testHash += codeHash
+ if codeModifiedTime.After(testModifiedTime) {
+ testModifiedTime = codeModifiedTime
+ }
+
+ return
+}
+
+func isHiddenFile(info os.FileInfo) bool {
+ return strings.HasPrefix(info.Name(), ".") || strings.HasPrefix(info.Name(), "_")
+}
+
+func (p *PackageHash) hashForFileInfo(info os.FileInfo) string {
+ return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
new file mode 100644
index 00000000000..b4892bebf26
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go
@@ -0,0 +1,85 @@
+package watch
+
+import (
+ "path/filepath"
+ "regexp"
+ "sync"
+)
+
+type PackageHashes struct {
+ PackageHashes map[string]*PackageHash
+ usedPaths map[string]bool
+ watchRegExp *regexp.Regexp
+ lock *sync.Mutex
+}
+
+func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes {
+ return &PackageHashes{
+ PackageHashes: map[string]*PackageHash{},
+ usedPaths: nil,
+ watchRegExp: watchRegExp,
+ lock: &sync.Mutex{},
+ }
+}
+
+func (p *PackageHashes) CheckForChanges() []string {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ modified := []string{}
+
+ for _, packageHash := range p.PackageHashes {
+ if packageHash.CheckForChanges() {
+ modified = append(modified, packageHash.path)
+ }
+ }
+
+ return modified
+}
+
+func (p *PackageHashes) Add(path string) *PackageHash {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ path, _ = filepath.Abs(path)
+ _, ok := p.PackageHashes[path]
+ if !ok {
+ p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp)
+ }
+
+ if p.usedPaths != nil {
+ p.usedPaths[path] = true
+ }
+ return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) Get(path string) *PackageHash {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ path, _ = filepath.Abs(path)
+ if p.usedPaths != nil {
+ p.usedPaths[path] = true
+ }
+ return p.PackageHashes[path]
+}
+
+func (p *PackageHashes) StartTrackingUsage() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ p.usedPaths = map[string]bool{}
+}
+
+func (p *PackageHashes) StopTrackingUsageAndPrune() {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ for path := range p.PackageHashes {
+ if !p.usedPaths[path] {
+ delete(p.PackageHashes, path)
+ }
+ }
+
+ p.usedPaths = nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
new file mode 100644
index 00000000000..53272df7e5a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go
@@ -0,0 +1,87 @@
+package watch
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+)
+
+type Suite struct {
+ Suite internal.TestSuite
+ RunTime time.Time
+ Dependencies Dependencies
+
+ sharedPackageHashes *PackageHashes
+}
+
+func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) {
+ deps, err := NewDependencies(suite.Path, maxDepth)
+ if err != nil {
+ return nil, err
+ }
+
+ sharedPackageHashes.Add(suite.Path)
+ for dep := range deps.Dependencies() {
+ sharedPackageHashes.Add(dep)
+ }
+
+ return &Suite{
+ Suite: suite,
+ Dependencies: deps,
+
+ sharedPackageHashes: sharedPackageHashes,
+ }, nil
+}
+
+func (s *Suite) Delta() float64 {
+ delta := s.delta(s.Suite.Path, true, 0) * 1000
+ for dep, depth := range s.Dependencies.Dependencies() {
+ delta += s.delta(dep, false, depth)
+ }
+ return delta
+}
+
+func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error {
+ s.RunTime = time.Now()
+
+ deps, err := NewDependencies(s.Suite.Path, maxDepth)
+ if err != nil {
+ return err
+ }
+
+ s.sharedPackageHashes.Add(s.Suite.Path)
+ for dep := range deps.Dependencies() {
+ s.sharedPackageHashes.Add(dep)
+ }
+
+ s.Dependencies = deps
+
+ return nil
+}
+
+func (s *Suite) Description() string {
+ numDeps := len(s.Dependencies.Dependencies())
+ pluralizer := "ies"
+ if numDeps == 1 {
+ pluralizer = "y"
+ }
+ return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer)
+}
+
+func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 {
+ return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1)
+}
+
+func (s *Suite) dt(packagePath string, includeTests bool) time.Duration {
+ packageHash := s.sharedPackageHashes.Get(packagePath)
+ var modifiedTime time.Time
+ if includeTests {
+ modifiedTime = packageHash.TestModifiedTime
+ } else {
+ modifiedTime = packageHash.CodeModifiedTime
+ }
+
+ return modifiedTime.Sub(s.RunTime)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
new file mode 100644
index 00000000000..bde4193ce71
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go
@@ -0,0 +1,192 @@
+package watch
+
+import (
+ "fmt"
+ "regexp"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/ginkgo/command"
+ "github.com/onsi/ginkgo/v2/ginkgo/internal"
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func BuildWatchCommand() command.Command {
+ var suiteConfig = types.NewDefaultSuiteConfig()
+ var reporterConfig = types.NewDefaultReporterConfig()
+ var cliConfig = types.NewDefaultCLIConfig()
+ var goFlagsConfig = types.NewDefaultGoFlagsConfig()
+
+ flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig)
+ if err != nil {
+ panic(err)
+ }
+ interruptHandler := interrupt_handler.NewInterruptHandler(nil)
+ interrupt_handler.SwallowSigQuit()
+
+ return command.Command{
+ Name: "watch",
+ Flags: flags,
+ Usage: "ginkgo watch -- ",
+ ShortDoc: "Watch the passed in and runs their tests whenever changes occur.",
+ Documentation: "Any arguments after -- will be passed to the test.",
+ DocLink: "watching-for-changes",
+ Command: func(args []string, additionalArgs []string) {
+ var errors []error
+ cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig)
+ command.AbortIfErrors("Ginkgo detected configuration issues:", errors)
+
+ watcher := &SpecWatcher{
+ cliConfig: cliConfig,
+ goFlagsConfig: goFlagsConfig,
+ suiteConfig: suiteConfig,
+ reporterConfig: reporterConfig,
+ flags: flags,
+
+ interruptHandler: interruptHandler,
+ }
+
+ watcher.WatchSpecs(args, additionalArgs)
+ },
+ }
+}
+
+type SpecWatcher struct {
+ suiteConfig types.SuiteConfig
+ reporterConfig types.ReporterConfig
+ cliConfig types.CLIConfig
+ goFlagsConfig types.GoFlagsConfig
+ flags types.GinkgoFlagSet
+
+ interruptHandler *interrupt_handler.InterruptHandler
+}
+
+func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) {
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+
+ internal.VerifyCLIAndFrameworkVersion(suites)
+
+ if len(suites) == 0 {
+ command.AbortWith("Found no test suites")
+ }
+
+ fmt.Printf("Identified %d test %s. Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth)
+ deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp))
+ delta, errors := deltaTracker.Delta(suites)
+
+ fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))
+ for _, suite := range delta.NewSuites {
+ fmt.Println(" " + suite.Description())
+ }
+
+ for suite, err := range errors {
+ fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err)
+ }
+
+ if len(suites) == 1 {
+ w.updateSeed()
+ w.compileAndRun(suites[0], additionalArgs)
+ }
+
+ ticker := time.NewTicker(time.Second)
+
+ for {
+ select {
+ case <-ticker.C:
+ suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter)
+ delta, _ := deltaTracker.Delta(suites)
+ coloredStream := formatter.ColorableStdOut
+
+ suites = internal.TestSuites{}
+
+ if len(delta.NewSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))))
+ for _, suite := range delta.NewSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ }
+
+ modifiedSuites := delta.ModifiedSuites()
+ if len(modifiedSuites) > 0 {
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}"))
+ for _, pkg := range delta.ModifiedPackages {
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg))
+ }
+ fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites))))
+ for _, suite := range modifiedSuites {
+ suites = append(suites, suite.Suite)
+ fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description()))
+ }
+ fmt.Fprintln(coloredStream, "")
+ }
+
+ if len(suites) == 0 {
+ break
+ }
+
+ w.updateSeed()
+ w.computeSuccinctMode(len(suites))
+ for idx := range suites {
+ if w.interruptHandler.Status().Interrupted() {
+ return
+ }
+ deltaTracker.WillRun(suites[idx])
+ suites[idx] = w.compileAndRun(suites[idx], additionalArgs)
+ }
+ color := "{{green}}"
+ if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 {
+ color = "{{red}}"
+ }
+ fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. Resuming watch...{{/}}"))
+
+ messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig)
+ command.AbortIfError("could not finalize profiles:", err)
+ for _, message := range messages {
+ fmt.Println(message)
+ }
+ case <-w.interruptHandler.Status().Channel:
+ return
+ }
+ }
+}
+
+func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite {
+ suite = internal.CompileSuite(suite, w.goFlagsConfig)
+ if suite.State.Is(internal.TestSuiteStateFailedToCompile) {
+ fmt.Println(suite.CompilationError.Error())
+ return suite
+ }
+ if w.interruptHandler.Status().Interrupted() {
+ return suite
+ }
+ suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs)
+ internal.Cleanup(w.goFlagsConfig, suite)
+ return suite
+}
+
+func (w *SpecWatcher) computeSuccinctMode(numSuites int) {
+ if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) {
+ w.reporterConfig.Succinct = false
+ return
+ }
+
+ if w.flags.WasSet("succinct") {
+ return
+ }
+
+ if numSuites == 1 {
+ w.reporterConfig.Succinct = false
+ }
+
+ if numSuites > 1 {
+ w.reporterConfig.Succinct = true
+ }
+}
+
+func (w *SpecWatcher) updateSeed() {
+ if !w.flags.WasSet("seed") {
+ w.suiteConfig.RandomSeed = time.Now().Unix()
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
new file mode 100644
index 00000000000..85162720f94
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go
@@ -0,0 +1,8 @@
+//go:build ginkgoclidependencies
+// +build ginkgoclidependencies
+
+package ginkgo
+
+import (
+ _ "github.com/onsi/ginkgo/v2/ginkgo"
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
new file mode 100644
index 00000000000..02c6739e5be
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go
@@ -0,0 +1,180 @@
+package ginkgo
+
+import (
+ "testing"
+
+ "github.com/onsi/ginkgo/v2/internal/testingtproxy"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo.
+
+GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can be typically be used a a drop-in replacement with third-party libraries that accept *testing.T through an interface.
+
+GinkgoT() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+
+GinkgoT() attempts to mimic the behavior of `testing.T` with the exception of the following:
+
+- Error/Errorf: failures in Ginkgo always immediately stop execution and there is no mechanism to log a failure without aborting the test. As such Error/Errorf are equivalent to Fatal/Fatalf.
+- Parallel() is a no-op as Ginkgo's multi-process parallelism model is substantially different from go test's in-process model.
+
+You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries
+*/
+func GinkgoT(optionalOffset ...int) FullGinkgoTInterface {
+ offset := 1
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return testingtproxy.New(
+ GinkgoWriter,
+ Fail,
+ Skip,
+ DeferCleanup,
+ CurrentSpecReport,
+ AddReportEntry,
+ GinkgoRecover,
+ AttachProgressReporter,
+ suiteConfig.RandomSeed,
+ suiteConfig.ParallelProcess,
+ suiteConfig.ParallelTotal,
+ reporterConfig.NoColor,
+ offset)
+}
+
+/*
+The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T.
+*/
+type GinkgoTInterface interface {
+ Cleanup(func())
+ Setenv(kev, value string)
+ Error(args ...any)
+ Errorf(format string, args ...any)
+ Fail()
+ FailNow()
+ Failed() bool
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Helper()
+ Log(args ...any)
+ Logf(format string, args ...any)
+ Name() string
+ Parallel()
+ Skip(args ...any)
+ SkipNow()
+ Skipf(format string, args ...any)
+ Skipped() bool
+ TempDir() string
+}
+
+/*
+Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo
+*/
+type FullGinkgoTInterface interface {
+ GinkgoTInterface
+
+ AddReportEntryVisibilityAlways(name string, args ...any)
+ AddReportEntryVisibilityFailureOrVerbose(name string, args ...any)
+ AddReportEntryVisibilityNever(name string, args ...any)
+
+ //Prints to the GinkgoWriter
+ Print(a ...any)
+ Printf(format string, a ...any)
+ Println(a ...any)
+
+ //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo
+ F(format string, args ...any) string
+ Fi(indentation uint, format string, args ...any) string
+ Fiw(indentation uint, maxWidth uint, format string, args ...any) string
+
+ //Generates a formatted string version of the current spec's timeline
+ RenderTimeline() string
+
+ GinkgoRecover()
+ DeferCleanup(args ...any)
+
+ RandomSeed() int64
+ ParallelProcess() int
+ ParallelTotal() int
+
+ AttachProgressReporter(func() string) func()
+}
+
+/*
+GinkgoTB() implements a wrapper that exactly matches the testing.TB interface.
+
+In go 1.18 a new private() function was added to the testing.TB interface. Any function which accepts testing.TB as input needs to be passed in something that directly implements testing.TB.
+
+This wrapper satisfies the testing.TB interface and intended to be used as a drop-in replacement with third party libraries that accept testing.TB.
+
+Similar to GinkgoT(), GinkgoTB() takes an optional offset argument that can be used to get the
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately
+*/
+func GinkgoTB(optionalOffset ...int) *GinkgoTBWrapper {
+ offset := 2
+ if len(optionalOffset) > 0 {
+ offset = optionalOffset[0]
+ }
+ return &GinkgoTBWrapper{GinkgoT: GinkgoT(offset)}
+}
+
+type GinkgoTBWrapper struct {
+ testing.TB
+ GinkgoT FullGinkgoTInterface
+}
+
+func (g *GinkgoTBWrapper) Cleanup(f func()) {
+ g.GinkgoT.Cleanup(f)
+}
+func (g *GinkgoTBWrapper) Error(args ...any) {
+ g.GinkgoT.Error(args...)
+}
+func (g *GinkgoTBWrapper) Errorf(format string, args ...any) {
+ g.GinkgoT.Errorf(format, args...)
+}
+func (g *GinkgoTBWrapper) Fail() {
+ g.GinkgoT.Fail()
+}
+func (g *GinkgoTBWrapper) FailNow() {
+ g.GinkgoT.FailNow()
+}
+func (g *GinkgoTBWrapper) Failed() bool {
+ return g.GinkgoT.Failed()
+}
+func (g *GinkgoTBWrapper) Fatal(args ...any) {
+ g.GinkgoT.Fatal(args...)
+}
+func (g *GinkgoTBWrapper) Fatalf(format string, args ...any) {
+ g.GinkgoT.Fatalf(format, args...)
+}
+func (g *GinkgoTBWrapper) Helper() {
+ types.MarkAsHelper(1)
+}
+func (g *GinkgoTBWrapper) Log(args ...any) {
+ g.GinkgoT.Log(args...)
+}
+func (g *GinkgoTBWrapper) Logf(format string, args ...any) {
+ g.GinkgoT.Logf(format, args...)
+}
+func (g *GinkgoTBWrapper) Name() string {
+ return g.GinkgoT.Name()
+}
+func (g *GinkgoTBWrapper) Setenv(key, value string) {
+ g.GinkgoT.Setenv(key, value)
+}
+func (g *GinkgoTBWrapper) Skip(args ...any) {
+ g.GinkgoT.Skip(args...)
+}
+func (g *GinkgoTBWrapper) SkipNow() {
+ g.GinkgoT.SkipNow()
+}
+func (g *GinkgoTBWrapper) Skipf(format string, args ...any) {
+ g.GinkgoT.Skipf(format, args...)
+}
+func (g *GinkgoTBWrapper) Skipped() bool {
+ return g.GinkgoT.Skipped()
+}
+func (g *GinkgoTBWrapper) TempDir() string {
+ return g.GinkgoT.TempDir()
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/go.mod b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/go.mod
new file mode 100644
index 00000000000..b4e83a80c9a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/go.mod
@@ -0,0 +1,20 @@
+module github.com/onsi/ginkgo/v2
+
+go 1.20
+
+require (
+ github.com/go-logr/logr v1.4.1
+ github.com/go-task/slim-sprig/v3 v3.0.0
+ github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6
+ github.com/onsi/gomega v1.33.1
+ golang.org/x/net v0.25.0
+ golang.org/x/sys v0.20.0
+ golang.org/x/tools v0.21.0
+)
+
+require (
+ github.com/google/go-cmp v0.6.0 // indirect
+ golang.org/x/text v0.15.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/go.sum b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/go.sum
new file mode 100644
index 00000000000..3928989661d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/go.sum
@@ -0,0 +1,27 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
+github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw=
+golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/counter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
new file mode 100644
index 00000000000..712d85afb0b
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
@@ -0,0 +1,9 @@
+package internal
+
+func MakeIncrementingIndexCounter() func() (int, error) {
+ idx := -1
+ return func() (int, error) {
+ idx += 1
+ return idx, nil
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/failer/failer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
similarity index 61%
rename from integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
rename to integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
index 678ea2514a6..e9bd9565fc7 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/internal/failer/failer.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
@@ -1,32 +1,44 @@
-package failer
+package internal
import (
"fmt"
"sync"
- "github.com/onsi/ginkgo/types"
+ "github.com/onsi/ginkgo/v2/types"
)
type Failer struct {
lock *sync.Mutex
- failure types.SpecFailure
+ failure types.Failure
state types.SpecState
}
-func New() *Failer {
+func NewFailer() *Failer {
return &Failer{
lock: &sync.Mutex{},
state: types.SpecStatePassed,
}
}
+func (f *Failer) GetState() types.SpecState {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.state
+}
+
+func (f *Failer) GetFailure() types.Failure {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ return f.failure
+}
+
func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
f.state = types.SpecStatePanicked
- f.failure = types.SpecFailure{
+ f.failure = types.Failure{
Message: "Test Panicked",
Location: location,
ForwardedPanic: fmt.Sprintf("%v", forwardedPanic),
@@ -34,59 +46,54 @@ func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{})
}
}
-func (f *Failer) Timeout(location types.CodeLocation) {
+func (f *Failer) Fail(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
- f.state = types.SpecStateTimedOut
- f.failure = types.SpecFailure{
- Message: "Timed out",
+ f.state = types.SpecStateFailed
+ f.failure = types.Failure{
+ Message: message,
Location: location,
}
}
}
-func (f *Failer) Fail(message string, location types.CodeLocation) {
+func (f *Failer) Skip(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
if f.state == types.SpecStatePassed {
- f.state = types.SpecStateFailed
- f.failure = types.SpecFailure{
+ f.state = types.SpecStateSkipped
+ f.failure = types.Failure{
Message: message,
Location: location,
}
}
}
-func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int, componentCodeLocation types.CodeLocation) (types.SpecFailure, types.SpecState) {
+func (f *Failer) AbortSuite(message string, location types.CodeLocation) {
f.lock.Lock()
defer f.lock.Unlock()
- failure := f.failure
- outcome := f.state
- if outcome != types.SpecStatePassed {
- failure.ComponentType = componentType
- failure.ComponentIndex = componentIndex
- failure.ComponentCodeLocation = componentCodeLocation
+ if f.state == types.SpecStatePassed {
+ f.state = types.SpecStateAborted
+ f.failure = types.Failure{
+ Message: message,
+ Location: location,
+ }
}
-
- f.state = types.SpecStatePassed
- f.failure = types.SpecFailure{}
-
- return failure, outcome
}
-func (f *Failer) Skip(message string, location types.CodeLocation) {
+func (f *Failer) Drain() (types.SpecState, types.Failure) {
f.lock.Lock()
defer f.lock.Unlock()
- if f.state == types.SpecStatePassed {
- f.state = types.SpecStateSkipped
- f.failure = types.SpecFailure{
- Message: message,
- Location: location,
- }
- }
+ failure := f.failure
+ outcome := f.state
+
+ f.state = types.SpecStatePassed
+ f.failure = types.Failure{}
+
+ return outcome, failure
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
new file mode 100644
index 00000000000..e3da7d14dd1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
@@ -0,0 +1,122 @@
+package internal
+
+import (
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
+unmark the container's focus. This gives developers a more intuitive experience when debugging specs.
+It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
+this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:
+
+As a common example, consider:
+
+ FDescribe("something to debug", function() {
+ It("works", function() {...})
+ It("works", function() {...})
+ FIt("doesn't work", function() {...})
+ It("works", function() {...})
+ })
+
+here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
+The nested policy applied by this function enables this behavior.
+*/
+func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
+ var walkTree func(tree *TreeNode) bool
+ walkTree = func(tree *TreeNode) bool {
+ if tree.Node.MarkedPending {
+ return false
+ }
+ hasFocusedDescendant := false
+ for _, child := range tree.Children {
+ childHasFocus := walkTree(child)
+ hasFocusedDescendant = hasFocusedDescendant || childHasFocus
+ }
+ tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
+ return tree.Node.MarkedFocus || hasFocusedDescendant
+ }
+
+ walkTree(tree)
+}
+
+/*
+Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
+It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.
+
+When both programmatic and file filters are provided their results are ANDed together. If multiple kinds of filters are provided, the file filters run first followed by the regex filters.
+
+This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
+- If there are no CLI arguments and no programmatic focus, do nothing.
+- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus.
+- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.
+
+*Note:* specs with pending nodes are Skipped when created by NewSpec.
+*/
+func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
+ focusString := strings.Join(suiteConfig.FocusStrings, "|")
+ skipString := strings.Join(suiteConfig.SkipStrings, "|")
+
+ type SkipCheck func(spec Spec) bool
+
+ // by default, skip any specs marked pending
+ skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
+ hasProgrammaticFocus := false
+
+ for _, spec := range specs {
+ if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
+ hasProgrammaticFocus = true
+ break
+ }
+ }
+
+ if hasProgrammaticFocus {
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
+ }
+
+ if suiteConfig.LabelFilter != "" {
+ labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
+ skipChecks = append(skipChecks, func(spec Spec) bool {
+ return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
+ })
+ }
+
+ if len(suiteConfig.FocusFiles) > 0 {
+ focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if len(suiteConfig.SkipFiles) > 0 {
+ skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
+ }
+
+ if focusString != "" {
+ // skip specs that don't match the focus string
+ re := regexp.MustCompile(focusString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
+ }
+
+ if skipString != "" {
+ // skip specs that match the skip string
+ re := regexp.MustCompile(skipString)
+ skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
+ }
+
+ // skip specs if shouldSkip() is true. note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status
+ processedSpecs := Specs{}
+ for _, spec := range specs {
+ for _, skipCheck := range skipChecks {
+ if skipCheck(spec) {
+ spec.Skip = true
+ break
+ }
+ }
+ processedSpecs = append(processedSpecs, spec)
+ }
+
+ return processedSpecs, hasProgrammaticFocus
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
new file mode 100644
index 00000000000..464e3c97ff8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
@@ -0,0 +1,28 @@
+package global
+
+import (
+ "github.com/onsi/ginkgo/v2/internal"
+)
+
+var Suite *internal.Suite
+var Failer *internal.Failer
+var backupSuite *internal.Suite
+
+func init() {
+ InitializeGlobals()
+}
+
+func InitializeGlobals() {
+ Failer = internal.NewFailer()
+ Suite = internal.NewSuite()
+}
+
+func PushClone() error {
+ var err error
+ backupSuite, err = Suite.Clone()
+ return err
+}
+
+func PopClone() {
+ Suite = backupSuite
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/group.go
new file mode 100644
index 00000000000..02c9fe4fcd4
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/group.go
@@ -0,0 +1,383 @@
+package internal
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type runOncePair struct {
+ //nodeId should only run once...
+ nodeID uint
+ nodeType types.NodeType
+ //...for specs in a hierarchy that includes this context
+ containerID uint
+}
+
+func (pair runOncePair) isZero() bool {
+ return pair.nodeID == 0
+}
+
+func runOncePairForNode(node Node, containerID uint) runOncePair {
+ return runOncePair{
+ nodeID: node.ID,
+ nodeType: node.NodeType,
+ containerID: containerID,
+ }
+}
+
+type runOncePairs []runOncePair
+
+func runOncePairsForSpec(spec Spec) runOncePairs {
+ pairs := runOncePairs{}
+
+ containers := spec.Nodes.WithType(types.NodeTypeContainer)
+ for _, node := range spec.Nodes {
+ if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) {
+ pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID))
+ } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered {
+ passedIntoAnOrderedContainer := false
+ firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool {
+ passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered
+ return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer
+ })
+ if firstOrderedContainerDeeperThanNode.IsZero() {
+ continue
+ }
+ pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID))
+ }
+ }
+
+ return pairs
+}
+
+func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair {
+ for i := range pairs {
+ if pairs[i].nodeID == nodeID {
+ return pairs[i]
+ }
+ }
+ return runOncePair{}
+}
+
+func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool {
+ for i := range pairs {
+ if pairs[i] == pair {
+ return true
+ }
+ }
+ return false
+}
+
+func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs {
+ count := 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(runOncePairs, count), 0
+ for i := range pairs {
+ if pairs[i].nodeType.Is(nodeTypes) {
+ out[j] = pairs[i]
+ j++
+ }
+ }
+ return out
+}
+
+type group struct {
+ suite *Suite
+ specs Specs
+ runOncePairs map[uint]runOncePairs
+ runOnceTracker map[runOncePair]types.SpecState
+
+ succeeded bool
+ failedInARunOnceBefore bool
+ continueOnFailure bool
+}
+
+func newGroup(suite *Suite) *group {
+ return &group{
+ suite: suite,
+ runOncePairs: map[uint]runOncePairs{},
+ runOnceTracker: map[runOncePair]types.SpecState{},
+ succeeded: true,
+ failedInARunOnceBefore: false,
+ continueOnFailure: false,
+ }
+}
+
+func (g *group) initialReportForSpec(spec Spec) types.SpecReport {
+ return types.SpecReport{
+ ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(),
+ ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(),
+ ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(),
+ LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation,
+ LeafNodeType: types.NodeTypeIt,
+ LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text,
+ LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels),
+ ParallelProcess: g.suite.config.ParallelProcess,
+ RunningInParallel: g.suite.isRunningInParallel(),
+ IsSerial: spec.Nodes.HasNodeMarkedSerial(),
+ IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(),
+ MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(),
+ MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(),
+ }
+}
+
+func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) {
+ if spec.Nodes.HasNodeMarkedPending() {
+ return types.SpecStatePending, types.Failure{}
+ }
+ if spec.Skip {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if !g.succeeded && !g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because an earlier spec in an ordered container failed")
+ }
+ if g.failedInARunOnceBefore && g.continueOnFailure {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ "Spec skipped because a BeforeAll node failed")
+ }
+ beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach)
+ for _, pair := range beforeOncePairs {
+ if g.runOnceTracker[pair].Is(types.SpecStateSkipped) {
+ return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt),
+ fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType))
+ }
+ }
+ if g.suite.config.DryRun {
+ return types.SpecStatePassed, types.Failure{}
+ }
+ return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure
+}
+
+func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool {
+ lastSpecID := uint(0)
+ for idx := range g.specs {
+ if g.specs[idx].Skip {
+ continue
+ }
+ sID := g.specs[idx].SubjectID()
+ if g.runOncePairs[sID].hasRunOncePair(pair) {
+ lastSpecID = sID
+ }
+ }
+ return lastSpecID == specID
+}
+
+func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool {
+ failedInARunOnceBefore := false
+ pairs := g.runOncePairs[spec.SubjectID()]
+
+ nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel()
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...)
+ nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt))
+ terminatingNode, terminatingPair := Node{}, runOncePair{}
+
+ deadline := time.Time{}
+ if spec.SpecTimeout() > 0 {
+ deadline = time.Now().Add(spec.SpecTimeout())
+ }
+
+ for _, node := range nodes {
+ oncePair := pairs.runOncePairFor(node.ID)
+ if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) {
+ continue
+ }
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if !oncePair.isZero() {
+ g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State
+ }
+ if g.suite.currentSpecReport.State != types.SpecStatePassed {
+ terminatingNode, terminatingPair = node, oncePair
+ failedInARunOnceBefore = !terminatingPair.isZero()
+ break
+ }
+ }
+
+ afterNodeWasRun := map[uint]bool{}
+ includeDeferCleanups := false
+ for {
+ nodes := spec.Nodes.WithType(types.NodeTypeAfterEach)
+ nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()
+ nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...)
+ if !terminatingNode.IsZero() {
+ nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel)
+ }
+ if includeDeferCleanups {
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...)
+ nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...)
+ }
+ nodes = nodes.Filter(func(node Node) bool {
+ if afterNodeWasRun[node.ID] {
+ //this node has already been run on this attempt, don't rerun it
+ return false
+ }
+ var pair runOncePair
+ switch node.NodeType {
+ case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll:
+ // check if we were generated in an AfterNode that has already run
+ if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] {
+ return true // we were, so we should definitely run this cleanup now
+ }
+				// looks like this cleanup node was generated by a before node or an It.
+				// the run-once status of a cleanup node is governed by the run-once status of its generator
+ pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated)
+ default:
+ pair = pairs.runOncePairFor(node.ID)
+ }
+ if pair.isZero() {
+ // this node is not governed by any run-once policy, we should run it
+ return true
+ }
+ // it's our last chance to run if we're the last spec for our oncePair
+ isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair)
+
+ switch g.suite.currentSpecReport.State {
+ case types.SpecStatePassed: //this attempt is passing...
+			return isLastSpecWithPair //...we should run once if this is our last chance
+ case types.SpecStateSkipped: //the spec was skipped by the user...
+ if isLastSpecWithPair {
+ return true //...we're the last spec, so we should run the AfterNode
+ }
+ if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel {
+ return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run
+ }
+ case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed...
+ if isFinalAttempt {
+ if g.continueOnFailure {
+ return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run)
+ } else {
+ return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run
+ }
+ }
+ if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) {
+ return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it
+ } else {
+ return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level
+ }
+ }
+ case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted
+ return true //...that means the test run is over and we should clean up the stack. Run the AfterNode
+ }
+ return false
+ })
+
+ if len(nodes) == 0 && includeDeferCleanups {
+ break
+ }
+
+ for _, node := range nodes {
+ afterNodeWasRun[node.ID] = true
+ state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node))
+ g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime)
+ if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted {
+ g.suite.currentSpecReport.State = state
+ g.suite.currentSpecReport.Failure = failure
+ } else if state.Is(types.SpecStateFailureStates) {
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure})
+ }
+ }
+ includeDeferCleanups = true
+ }
+
+ return failedInARunOnceBefore
+}
+
+func (g *group) run(specs Specs) {
+ g.specs = specs
+ g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure
+ for _, spec := range g.specs {
+ g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec)
+ }
+
+ for _, spec := range g.specs {
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = g.initialReportForSpec(spec)
+ g.suite.selectiveLock.Unlock()
+
+ g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec)
+ g.suite.reporter.WillRun(g.suite.currentSpecReport)
+ g.suite.reportEach(spec, types.NodeTypeReportBeforeEach)
+
+ skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending)
+
+ g.suite.currentSpecReport.StartTime = time.Now()
+ failedInARunOnceBefore := false
+ if !skip {
+ var maxAttempts = 1
+
+ if g.suite.config.MustPassRepeatedly > 0 {
+ maxAttempts = g.suite.config.MustPassRepeatedly
+ g.suite.currentSpecReport.MaxMustPassRepeatedly = maxAttempts
+ } else if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ maxAttempts = max(1, spec.MustPassRepeatedly())
+ } else if g.suite.config.FlakeAttempts > 0 {
+ maxAttempts = g.suite.config.FlakeAttempts
+ g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts
+ } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ maxAttempts = max(1, spec.FlakeAttempts())
+ }
+
+ for attempt := 0; attempt < maxAttempts; attempt++ {
+ g.suite.currentSpecReport.NumAttempts = attempt + 1
+ g.suite.writer.Truncate()
+ g.suite.outputInterceptor.StartInterceptingOutput()
+ if attempt > 0 {
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt})
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt})
+ }
+ }
+
+ failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec)
+
+ g.suite.currentSpecReport.EndTime = time.Now()
+ g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime)
+ g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes())
+ g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput()
+
+ if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) {
+ break
+ }
+ }
+ if g.suite.currentSpecReport.MaxFlakeAttempts > 0 {
+ if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) {
+ break
+ } else if attempt < maxAttempts-1 {
+ af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure}
+ af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message)
+ g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af)
+ }
+ }
+ }
+ }
+
+ g.suite.reportEach(spec, types.NodeTypeReportAfterEach)
+ g.suite.processCurrentSpecReport()
+ if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+ g.succeeded = false
+ g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore
+ }
+ g.suite.selectiveLock.Lock()
+ g.suite.currentSpecReport = types.SpecReport{}
+ g.suite.selectiveLock.Unlock()
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
new file mode 100644
index 00000000000..8ed86111f78
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
@@ -0,0 +1,177 @@
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+)
+
+var ABORT_POLLING_INTERVAL = 500 * time.Millisecond
+
+type InterruptCause uint
+
+const (
+ InterruptCauseInvalid InterruptCause = iota
+ InterruptCauseSignal
+ InterruptCauseAbortByOtherProcess
+)
+
+type InterruptLevel uint
+
+const (
+ InterruptLevelUninterrupted InterruptLevel = iota
+ InterruptLevelCleanupAndReport
+ InterruptLevelReportOnly
+ InterruptLevelBailOut
+)
+
+func (ic InterruptCause) String() string {
+ switch ic {
+ case InterruptCauseSignal:
+ return "Interrupted by User"
+ case InterruptCauseAbortByOtherProcess:
+ return "Interrupted by Other Ginkgo Process"
+ }
+ return "INVALID_INTERRUPT_CAUSE"
+}
+
+type InterruptStatus struct {
+ Channel chan interface{}
+ Level InterruptLevel
+ Cause InterruptCause
+}
+
+func (s InterruptStatus) Interrupted() bool {
+ return s.Level != InterruptLevelUninterrupted
+}
+
+func (s InterruptStatus) Message() string {
+ return s.Cause.String()
+}
+
+func (s InterruptStatus) ShouldIncludeProgressReport() bool {
+ return s.Cause != InterruptCauseAbortByOtherProcess
+}
+
+type InterruptHandlerInterface interface {
+ Status() InterruptStatus
+}
+
+type InterruptHandler struct {
+ c chan interface{}
+ lock *sync.Mutex
+ level InterruptLevel
+ cause InterruptCause
+ client parallel_support.Client
+ stop chan interface{}
+ signals []os.Signal
+ requestAbortCheck chan interface{}
+}
+
+func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler {
+ if len(signals) == 0 {
+ signals = []os.Signal{os.Interrupt, syscall.SIGTERM}
+ }
+ handler := &InterruptHandler{
+ c: make(chan interface{}),
+ lock: &sync.Mutex{},
+ stop: make(chan interface{}),
+ requestAbortCheck: make(chan interface{}),
+ client: client,
+ signals: signals,
+ }
+ handler.registerForInterrupts()
+ return handler
+}
+
+func (handler *InterruptHandler) Stop() {
+ close(handler.stop)
+}
+
+func (handler *InterruptHandler) registerForInterrupts() {
+ // os signal handling
+ signalChannel := make(chan os.Signal, 1)
+ signal.Notify(signalChannel, handler.signals...)
+
+ // cross-process abort handling
+ var abortChannel chan interface{}
+ if handler.client != nil {
+ abortChannel = make(chan interface{})
+ go func() {
+ pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL)
+ for {
+ select {
+ case <-pollTicker.C:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.requestAbortCheck:
+ if handler.client.ShouldAbort() {
+ close(abortChannel)
+ pollTicker.Stop()
+ return
+ }
+ case <-handler.stop:
+ pollTicker.Stop()
+ return
+ }
+ }
+ }()
+ }
+
+ go func(abortChannel chan interface{}) {
+ var interruptCause InterruptCause
+ for {
+ select {
+ case <-signalChannel:
+ interruptCause = InterruptCauseSignal
+ case <-abortChannel:
+ interruptCause = InterruptCauseAbortByOtherProcess
+ case <-handler.stop:
+ signal.Stop(signalChannel)
+ return
+ }
+ abortChannel = nil
+
+ handler.lock.Lock()
+ oldLevel := handler.level
+ handler.cause = interruptCause
+ if handler.level == InterruptLevelUninterrupted {
+ handler.level = InterruptLevelCleanupAndReport
+ } else if handler.level == InterruptLevelCleanupAndReport {
+ handler.level = InterruptLevelReportOnly
+ } else if handler.level == InterruptLevelReportOnly {
+ handler.level = InterruptLevelBailOut
+ }
+ if handler.level != oldLevel {
+ close(handler.c)
+ handler.c = make(chan interface{})
+ }
+ handler.lock.Unlock()
+ }
+ }(abortChannel)
+}
+
+func (handler *InterruptHandler) Status() InterruptStatus {
+ handler.lock.Lock()
+ status := InterruptStatus{
+ Level: handler.level,
+ Channel: handler.c,
+ Cause: handler.cause,
+ }
+ handler.lock.Unlock()
+
+ if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() {
+ close(handler.requestAbortCheck)
+ <-status.Channel
+ return handler.Status()
+ }
+
+ return status
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
new file mode 100644
index 00000000000..bf0de496dc3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
@@ -0,0 +1,15 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package interrupt_handler
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+func SwallowSigQuit() {
+ c := make(chan os.Signal, 1024)
+ signal.Notify(c, syscall.SIGQUIT)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
new file mode 100644
index 00000000000..fcf8da8335f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package interrupt_handler
+
+func SwallowSigQuit() {
+ //noop
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/node.go
new file mode 100644
index 00000000000..6a15f19ae04
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/node.go
@@ -0,0 +1,935 @@
+package internal
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _global_node_id_counter = uint(0)
+var _global_id_mutex = &sync.Mutex{}
+
+func UniqueNodeID() uint {
+	// There's a race in the internal integration tests if we don't make
+	// accessing _global_node_id_counter safe across goroutines.
+ _global_id_mutex.Lock()
+ defer _global_id_mutex.Unlock()
+ _global_node_id_counter += 1
+ return _global_node_id_counter
+}
+
+type Node struct {
+ ID uint
+ NodeType types.NodeType
+
+ Text string
+ Body func(SpecContext)
+ CodeLocation types.CodeLocation
+ NestingLevel int
+ HasContext bool
+
+ SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte
+ SynchronizedBeforeSuiteProc1BodyHasContext bool
+ SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte)
+ SynchronizedBeforeSuiteAllProcsBodyHasContext bool
+
+ SynchronizedAfterSuiteAllProcsBody func(SpecContext)
+ SynchronizedAfterSuiteAllProcsBodyHasContext bool
+ SynchronizedAfterSuiteProc1Body func(SpecContext)
+ SynchronizedAfterSuiteProc1BodyHasContext bool
+
+ ReportEachBody func(SpecContext, types.SpecReport)
+ ReportSuiteBody func(SpecContext, types.Report)
+
+ MarkedFocus bool
+ MarkedPending bool
+ MarkedSerial bool
+ MarkedOrdered bool
+ MarkedContinueOnFailure bool
+ MarkedOncePerOrdered bool
+ FlakeAttempts int
+ MustPassRepeatedly int
+ Labels Labels
+ PollProgressAfter time.Duration
+ PollProgressInterval time.Duration
+ NodeTimeout time.Duration
+ SpecTimeout time.Duration
+ GracePeriod time.Duration
+
+ NodeIDWhereCleanupWasGenerated uint
+}
+
+// Decoration Types
+type focusType bool
+type pendingType bool
+type serialType bool
+type orderedType bool
+type continueOnFailureType bool
+type honorsOrderedType bool
+type suppressProgressReporting bool
+
+const Focus = focusType(true)
+const Pending = pendingType(true)
+const Serial = serialType(true)
+const Ordered = orderedType(true)
+const ContinueOnFailure = continueOnFailureType(true)
+const OncePerOrdered = honorsOrderedType(true)
+const SuppressProgressReporting = suppressProgressReporting(true)
+
+type FlakeAttempts uint
+type MustPassRepeatedly uint
+type Offset uint
+type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing
+type Labels []string
+type PollProgressInterval time.Duration
+type PollProgressAfter time.Duration
+type NodeTimeout time.Duration
+type SpecTimeout time.Duration
+type GracePeriod time.Duration
+
+func (l Labels) MatchesLabelFilter(query string) bool {
+ return types.MustParseLabelFilter(query)(l)
+}
+
+func UnionOfLabels(labels ...Labels) Labels {
+ out := Labels{}
+ seen := map[string]bool{}
+ for _, labelSet := range labels {
+ for _, label := range labelSet {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) {
+ decorations := []interface{}{}
+ remainingArgs := []interface{}{}
+ for _, arg := range args {
+ if isDecoration(arg) {
+ decorations = append(decorations, arg)
+ } else {
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+ return decorations, remainingArgs
+}
+
+func isDecoration(arg interface{}) bool {
+ switch t := reflect.TypeOf(arg); {
+ case t == nil:
+ return false
+ case t == reflect.TypeOf(Offset(0)):
+ return true
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ return true
+ case t == reflect.TypeOf(Focus):
+ return true
+ case t == reflect.TypeOf(Pending):
+ return true
+ case t == reflect.TypeOf(Serial):
+ return true
+ case t == reflect.TypeOf(Ordered):
+ return true
+ case t == reflect.TypeOf(ContinueOnFailure):
+ return true
+ case t == reflect.TypeOf(OncePerOrdered):
+ return true
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ return true
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ return true
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ return true
+ case t == reflect.TypeOf(Labels{}):
+ return true
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ return true
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ return true
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ return true
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ return true
+ case t == reflect.TypeOf(GracePeriod(0)):
+ return true
+ case t.Kind() == reflect.Slice && isSliceOfDecorations(arg):
+ return true
+ default:
+ return false
+ }
+}
+
+func isSliceOfDecorations(slice interface{}) bool {
+ vSlice := reflect.ValueOf(slice)
+ if vSlice.Len() == 0 {
+ return false
+ }
+ for i := 0; i < vSlice.Len(); i++ {
+ if !isDecoration(vSlice.Index(i).Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
+func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) {
+ baseOffset := 2
+ node := Node{
+ ID: UniqueNodeID(),
+ NodeType: nodeType,
+ Text: text,
+ Labels: Labels{},
+ CodeLocation: types.NewCodeLocation(baseOffset),
+ NestingLevel: -1,
+ PollProgressAfter: -1,
+ PollProgressInterval: -1,
+ GracePeriod: -1,
+ }
+
+ errors := []error{}
+ appendError := func(err error) {
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ args = unrollInterfaceSlice(args)
+
+ remainingArgs := []interface{}{}
+ // First get the CodeLocation up-to-date
+ for _, arg := range args {
+ switch v := arg.(type) {
+ case Offset:
+ node.CodeLocation = types.NewCodeLocation(baseOffset + int(v))
+ case types.CodeLocation:
+ node.CodeLocation = v
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ labelsSeen := map[string]bool{}
+ trackedFunctionError := false
+ args = remainingArgs
+ remainingArgs = []interface{}{}
+ // now process the rest of the args
+ for _, arg := range args {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(float64(0)):
+ break // ignore deprecated timeouts
+ case t == reflect.TypeOf(Focus):
+ node.MarkedFocus = bool(arg.(focusType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus"))
+ }
+ case t == reflect.TypeOf(Pending):
+ node.MarkedPending = bool(arg.(pendingType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending"))
+ }
+ case t == reflect.TypeOf(Serial):
+ node.MarkedSerial = bool(arg.(serialType))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial"))
+ }
+ case t == reflect.TypeOf(Ordered):
+ node.MarkedOrdered = bool(arg.(orderedType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered"))
+ }
+ case t == reflect.TypeOf(ContinueOnFailure):
+ node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType))
+ if !nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure"))
+ }
+ case t == reflect.TypeOf(OncePerOrdered):
+ node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType))
+ if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered"))
+ }
+ case t == reflect.TypeOf(SuppressProgressReporting):
+ deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting())
+ case t == reflect.TypeOf(FlakeAttempts(0)):
+ node.FlakeAttempts = int(arg.(FlakeAttempts))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts"))
+ }
+ case t == reflect.TypeOf(MustPassRepeatedly(0)):
+ node.MustPassRepeatedly = int(arg.(MustPassRepeatedly))
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly"))
+ }
+ case t == reflect.TypeOf(PollProgressAfter(0)):
+ node.PollProgressAfter = time.Duration(arg.(PollProgressAfter))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter"))
+ }
+ case t == reflect.TypeOf(PollProgressInterval(0)):
+ node.PollProgressInterval = time.Duration(arg.(PollProgressInterval))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval"))
+ }
+ case t == reflect.TypeOf(NodeTimeout(0)):
+ node.NodeTimeout = time.Duration(arg.(NodeTimeout))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout"))
+ }
+ case t == reflect.TypeOf(SpecTimeout(0)):
+ node.SpecTimeout = time.Duration(arg.(SpecTimeout))
+ if !nodeType.Is(types.NodeTypeIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout"))
+ }
+ case t == reflect.TypeOf(GracePeriod(0)):
+ node.GracePeriod = time.Duration(arg.(GracePeriod))
+ if nodeType.Is(types.NodeTypeContainer) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod"))
+ }
+ case t == reflect.TypeOf(Labels{}):
+ if !nodeType.Is(types.NodeTypesForContainerAndIt) {
+ appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label"))
+ }
+ for _, label := range arg.(Labels) {
+ if !labelsSeen[label] {
+ labelsSeen[label] = true
+ label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation)
+ node.Labels = append(node.Labels, label)
+ appendError(err)
+ }
+ }
+ case t.Kind() == reflect.Func:
+ if nodeType.Is(types.NodeTypeContainer) {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if t.NumOut() > 0 || t.NumIn() > 0 {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body := arg.(func())
+ node.Body = func(SpecContext) { body() }
+ } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
+ if node.ReportEachBody == nil {
+ if fn, ok := arg.(func(types.SpecReport)); ok {
+ node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+ } else {
+ node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+ node.HasContext = true
+ }
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+ if node.ReportSuiteBody == nil {
+ if fn, ok := arg.(func(types.Report)); ok {
+ node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+ } else {
+ node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+ node.HasContext = true
+ }
+ } else {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) {
+ if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedBeforeSuiteProc1Body == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext
+ } else if node.SynchronizedBeforeSuiteAllProcsBody == nil {
+ body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation))
+ trackedFunctionError = true
+ }
+ node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext
+ }
+ } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) {
+ if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ if node.SynchronizedAfterSuiteAllProcsBody == nil {
+ node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext
+ } else if node.SynchronizedAfterSuiteProc1Body == nil {
+ node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext
+ }
+ } else {
+ if node.Body != nil {
+ appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg)
+ if node.Body == nil {
+ appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType))
+ trackedFunctionError = true
+ break
+ }
+ }
+ default:
+ remainingArgs = append(remainingArgs, arg)
+ }
+ }
+
+ // validations
+ if node.MarkedPending && node.MarkedFocus {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
+ }
+
+ if node.MarkedContinueOnFailure && !node.MarkedOrdered {
+ appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation))
+ }
+
+ hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+
+ if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 {
+ appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType))
+ }
+
+ if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) {
+ appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType))
+ }
+
+ for _, arg := range remainingArgs {
+ appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg))
+ }
+
+ if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 {
+ appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType))
+ }
+
+ if len(errors) > 0 {
+ return Node{}, errors
+ }
+
+ return node, errors
+}
+
+var doneType = reflect.TypeOf(make(Done))
+
+func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) {
+ t := reflect.TypeOf(arg)
+ if t.NumOut() > 0 || t.NumIn() > 1 {
+ return nil, false
+ }
+ if t.NumIn() == 1 {
+ if t.In(0) == doneType {
+ deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl)
+ deprecatedAsyncBody := arg.(func(Done))
+ return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false
+ } else if t.In(0).Implements(specContextType) {
+ return arg.(func(SpecContext)), true
+ } else if t.In(0).Implements(contextType) {
+ body := arg.(func(context.Context))
+ return func(c SpecContext) { body(c) }, true
+ }
+
+ return nil, false
+ }
+
+ body := arg.(func())
+ return func(SpecContext) { body() }, false
+}
+
+var byteType = reflect.TypeOf([]byte{})
+
+func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+
+ if t.NumOut() > 1 || t.NumIn() > 1 {
+ return nil, false
+ } else if t.NumOut() == 1 && t.Out(0) != byteType {
+ return nil, false
+ } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) {
+ return nil, false
+ }
+ hasContext := t.NumIn() == 1
+
+ return func(c SpecContext) []byte {
+ var out []reflect.Value
+ if hasContext {
+ out = v.Call([]reflect.Value{reflect.ValueOf(c)})
+ } else {
+ out = v.Call([]reflect.Value{})
+ }
+ if len(out) == 1 {
+ return (out[0].Interface()).([]byte)
+ } else {
+ return []byte{}
+ }
+ }, hasContext
+}
+
+func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) {
+ t := reflect.TypeOf(arg)
+ v := reflect.ValueOf(arg)
+ hasContext, hasByte := false, false
+
+ if t.NumOut() > 0 || t.NumIn() > 2 {
+ return nil, false
+ } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType {
+ hasContext, hasByte = true, true
+ } else if t.NumIn() == 1 && t.In(0).Implements(contextType) {
+ hasContext = true
+ } else if t.NumIn() == 1 && t.In(0) == byteType {
+ hasByte = true
+ } else if t.NumIn() != 0 {
+ return nil, false
+ }
+
+ return func(c SpecContext, b []byte) {
+ in := []reflect.Value{}
+ if hasContext {
+ in = append(in, reflect.ValueOf(c))
+ }
+ if hasByte {
+ in = append(in, reflect.ValueOf(b))
+ }
+ v.Call(in)
+ }, hasContext
+}
+
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+
+func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) {
+ decorations, remainingArgs := PartitionDecorations(args...)
+ baseOffset := 2
+ cl := types.NewCodeLocation(baseOffset)
+ finalArgs := []interface{}{}
+ for _, arg := range decorations {
+ switch t := reflect.TypeOf(arg); {
+ case t == reflect.TypeOf(Offset(0)):
+ cl = types.NewCodeLocation(baseOffset + int(arg.(Offset)))
+ case t == reflect.TypeOf(types.CodeLocation{}):
+ cl = arg.(types.CodeLocation)
+ default:
+ finalArgs = append(finalArgs, arg)
+ }
+ }
+ finalArgs = append(finalArgs, cl)
+
+ if len(remainingArgs) == 0 {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callback := reflect.ValueOf(remainingArgs[0])
+ if !(callback.Kind() == reflect.Func) {
+ return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)}
+ }
+
+ callArgs := []reflect.Value{}
+ for _, arg := range remainingArgs[1:] {
+ callArgs = append(callArgs, reflect.ValueOf(arg))
+ }
+
+ hasContext := false
+ t := callback.Type()
+ if t.NumIn() > 0 {
+ if t.In(0).Implements(specContextType) {
+ hasContext = true
+ } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) {
+ hasContext = true
+ }
+ }
+
+ handleFailure := func(out []reflect.Value) {
+ if len(out) == 0 {
+ return
+ }
+ last := out[len(out)-1]
+ if last.Type().Implements(errInterface) && !last.IsNil() {
+ fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl)
+ }
+ }
+
+ if hasContext {
+ finalArgs = append(finalArgs, func(c SpecContext) {
+ out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...))
+ handleFailure(out)
+ })
+ } else {
+ finalArgs = append(finalArgs, func() {
+ out := callback.Call(callArgs)
+ handleFailure(out)
+ })
+ }
+
+ return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...)
+}
+
+func (n Node) IsZero() bool {
+ return n.ID == 0
+}
+
+/* Nodes */
+type Nodes []Node
+
+func (n Nodes) Clone() Nodes {
+ nodes := make(Nodes, len(n))
+ copy(nodes, n)
+ return nodes
+}
+
+func (n Nodes) CopyAppend(nodes ...Node) Nodes {
+ numN := len(n)
+ out := make(Nodes, numN+len(nodes))
+ copy(out, n)
+ for j, node := range nodes {
+ out[numN+j] = node
+ }
+ return out
+}
+
+func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) {
+ pivotIdx := len(n)
+ for i := range n {
+ if n[i].ID == pivot.ID {
+ pivotIdx = i
+ break
+ }
+ }
+ left := n[:pivotIdx]
+ right := Nodes{}
+ if pivotIdx+1 < len(n) {
+ right = n[pivotIdx+1:]
+ }
+
+ return left, right
+}
+
+func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes {
+ count := 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if !n[i].NodeType.Is(nodeTypes) {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) WithoutNode(nodeToExclude Node) Nodes {
+ idxToExclude := len(n)
+ for i := range n {
+ if n[i].ID == nodeToExclude.ID {
+ idxToExclude = i
+ break
+ }
+ }
+ if idxToExclude == len(n) {
+ return n
+ }
+ out, j := make(Nodes, len(n)-1), 0
+ for i := range n {
+ if i == idxToExclude {
+ continue
+ }
+ out[j] = n[i]
+ j++
+ }
+ return out
+}
+
+func (n Nodes) Filter(filter func(Node) bool) Nodes {
+ trufa, count := make([]bool, len(n)), 0
+ for i := range n {
+ if filter(n[i]) {
+ trufa[i] = true
+ count += 1
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if trufa[i] {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) FirstSatisfying(filter func(Node) bool) Node {
+ for i := range n {
+ if filter(n[i]) {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) WithinNestingLevel(deepestNestingLevel int) Nodes {
+ count := 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ count++
+ }
+ }
+ out, j := make(Nodes, count), 0
+ for i := range n {
+ if n[i].NestingLevel <= deepestNestingLevel {
+ out[j] = n[i]
+ j++
+ }
+ }
+ return out
+}
+
+func (n Nodes) SortedByDescendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel > out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) SortedByAscendingNestingLevel() Nodes {
+ out := make(Nodes, len(n))
+ copy(out, n)
+ sort.SliceStable(out, func(i int, j int) bool {
+ return out[i].NestingLevel < out[j].NestingLevel
+ })
+
+ return out
+}
+
+func (n Nodes) FirstWithNestingLevel(level int) Node {
+ for i := range n {
+ if n[i].NestingLevel == level {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) Reverse() Nodes {
+ out := make(Nodes, len(n))
+ for i := range n {
+ out[len(n)-1-i] = n[i]
+ }
+ return out
+}
+
+func (n Nodes) Texts() []string {
+ out := make([]string, len(n))
+ for i := range n {
+ out[i] = n[i].Text
+ }
+ return out
+}
+
+func (n Nodes) Labels() [][]string {
+ out := make([][]string, len(n))
+ for i := range n {
+ if n[i].Labels == nil {
+ out[i] = []string{}
+ } else {
+ out[i] = []string(n[i].Labels)
+ }
+ }
+ return out
+}
+
+func (n Nodes) UnionOfLabels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for i := range n {
+ for _, label := range n[i].Labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ return out
+}
+
+func (n Nodes) CodeLocations() []types.CodeLocation {
+ out := make([]types.CodeLocation, len(n))
+ for i := range n {
+ out[i] = n[i].CodeLocation
+ }
+ return out
+}
+
+func (n Nodes) BestTextFor(node Node) string {
+ if node.Text != "" {
+ return node.Text
+ }
+ parentNestingLevel := node.NestingLevel - 1
+ for i := range n {
+ if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel {
+ return n[i].Text
+ }
+ }
+
+ return ""
+}
+
+func (n Nodes) ContainsNodeID(id uint) bool {
+ for i := range n {
+ if n[i].ID == id {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedPending() bool {
+ for i := range n {
+ if n[i].MarkedPending {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedFocus() bool {
+ for i := range n {
+ if n[i].MarkedFocus {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) HasNodeMarkedSerial() bool {
+ for i := range n {
+ if n[i].MarkedSerial {
+ return true
+ }
+ }
+ return false
+}
+
+func (n Nodes) FirstNodeMarkedOrdered() Node {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return n[i]
+ }
+ }
+ return Node{}
+}
+
+func (n Nodes) IndexOfFirstNodeMarkedOrdered() int {
+ for i := range n {
+ if n[i].MarkedOrdered {
+ return i
+ }
+ }
+ return -1
+}
+
+func (n Nodes) GetMaxFlakeAttempts() int {
+ maxFlakeAttempts := 0
+ for i := range n {
+ if n[i].FlakeAttempts > 0 {
+ maxFlakeAttempts = n[i].FlakeAttempts
+ }
+ }
+ return maxFlakeAttempts
+}
+
+func (n Nodes) GetMaxMustPassRepeatedly() int {
+ maxMustPassRepeatedly := 0
+ for i := range n {
+ if n[i].MustPassRepeatedly > 0 {
+ maxMustPassRepeatedly = n[i].MustPassRepeatedly
+ }
+ }
+ return maxMustPassRepeatedly
+}
+
+func unrollInterfaceSlice(args interface{}) []interface{} {
+ v := reflect.ValueOf(args)
+ if v.Kind() != reflect.Slice {
+ return []interface{}{args}
+ }
+ out := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ el := reflect.ValueOf(v.Index(i).Interface())
+ if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) {
+ out = append(out, unrollInterfaceSlice(el.Interface())...)
+ } else {
+ out = append(out, v.Index(i).Interface())
+ }
+ }
+ return out
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
new file mode 100644
index 00000000000..84eea0a59e6
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
@@ -0,0 +1,171 @@
+package internal
+
+import (
+ "math/rand"
+ "sort"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type SortableSpecs struct {
+ Specs Specs
+ Indexes []int
+}
+
+func NewSortableSpecs(specs Specs) *SortableSpecs {
+ indexes := make([]int, len(specs))
+ for i := range specs {
+ indexes[i] = i
+ }
+ return &SortableSpecs{
+ Specs: specs,
+ Indexes: indexes,
+ }
+}
+func (s *SortableSpecs) Len() int { return len(s.Indexes) }
+func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] }
+func (s *SortableSpecs) Less(i, j int) bool {
+ a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]]
+
+ aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt)
+
+ firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered()
+ if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID {
+ // strictly preserve order within an ordered container. ID will track this as IDs are generated monotonically
+ return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID
+ }
+
+ // if either spec is in an ordered container - only use the nodes up to the outermost ordered container
+ if firstOrderedAIdx > -1 {
+ aNodes = aNodes[:firstOrderedAIdx+1]
+ }
+ if firstOrderedBIdx > -1 {
+ bNodes = bNodes[:firstOrderedBIdx+1]
+ }
+
+ for i := 0; i < len(aNodes) && i < len(bNodes); i++ {
+ aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation
+ if aCL.FileName != bCL.FileName {
+ return aCL.FileName < bCL.FileName
+ }
+ if aCL.LineNumber != bCL.LineNumber {
+ return aCL.LineNumber < bCL.LineNumber
+ }
+ }
+ // either everything is equal or we have different lengths of CLs
+ if len(aNodes) != len(bNodes) {
+ return len(aNodes) < len(bNodes)
+ }
+ // ok, now we are sure everything was equal. so we use the spec text to break ties
+ for i := 0; i < len(aNodes); i++ {
+ if aNodes[i].Text != bNodes[i].Text {
+ return aNodes[i].Text < bNodes[i].Text
+ }
+ }
+ // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort
+ return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID
+}
+
+type GroupedSpecIndices []SpecIndices
+type SpecIndices []int
+
+func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) {
+ /*
+ Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same
+ order for a given seed across test runs.
+
+ By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging
+ experience - specs within a given container run in the order they appear in the file.
+
+ Developers can set -randomizeAllSpecs to shuffle _all_ specs.
+
+ In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled.
+
+ Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished.
+ */
+
+ // Seed a new random source based on the configured random seed.
+ r := rand.New(rand.NewSource(suiteConfig.RandomSeed))
+
+ // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs.
+ sortableSpecs := NewSortableSpecs(specs)
+ sort.Sort(sortableSpecs)
+
+ // then we break things into execution groups
+ // a group represents a single unit of execution and is a collection of SpecIndices
+ // usually a group is just a single spec, however ordered containers must be preserved as a single group
+ executionGroupIDs := []uint{}
+ executionGroups := map[uint]SpecIndices{}
+ for _, idx := range sortableSpecs.Indexes {
+ spec := specs[idx]
+ groupNode := spec.Nodes.FirstNodeMarkedOrdered()
+ if groupNode.IsZero() {
+ groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt)
+ }
+ executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx)
+ if len(executionGroups[groupNode.ID]) == 1 {
+ executionGroupIDs = append(executionGroupIDs, groupNode.ID)
+ }
+ }
+
+ // now, we only shuffle all the execution groups if we're randomizing all specs, otherwise
+ // we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs
+ shufflableGroupingIDs := []uint{}
+ shufflableGroupingIDToGroupIDs := map[uint][]uint{}
+
+ // for each execution group we're going to have to pick a node to represent how the
+ // execution group is grouped for shuffling:
+ nodeTypesToShuffle := types.NodeTypesForContainerAndIt
+ if suiteConfig.RandomizeAllSpecs {
+ nodeTypesToShuffle = types.NodeTypeIt
+ }
+
+ //so, for each execution group:
+ for _, groupID := range executionGroupIDs {
+ // pick out a representative spec
+ representativeSpec := specs[executionGroups[groupID][0]]
+
+ // and grab the node on the spec that will represent which shufflable group this execution group belongs to
+ shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle)
+
+ //add the execution group to its shufflable group
+ shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID)
+
+ //and if it's the first one in
+ if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 {
+ // record the shufflable group ID
+ shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID)
+ }
+ }
+
+ // now we permute the sorted shufflable grouping IDs and build the ordered Groups
+ orderedGroups := GroupedSpecIndices{}
+ permutation := r.Perm(len(shufflableGroupingIDs))
+ for _, j := range permutation {
+ //let's get the execution group IDs for this shufflable group:
+ executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]]
+ // and we'll add their associated specindices to the orderedGroups slice:
+ for _, executionGroupID := range executionGroupIDsForJ {
+ orderedGroups = append(orderedGroups, executionGroups[executionGroupID])
+ }
+ }
+
+ // If we're running in series, we're done.
+ if suiteConfig.ParallelTotal == 1 {
+ return orderedGroups, GroupedSpecIndices{}
+ }
+
+ // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set.
+ // The parallelizable groups will run across all Ginkgo processes...
+ // ...the serial groups will only run on Process #1 after all other processes have exited.
+ parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{}
+ for _, specIndices := range orderedGroups {
+ if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() {
+ serialGroups = append(serialGroups, specIndices)
+ } else {
+ parallelizableGroups = append(parallelizableGroups, specIndices)
+ }
+ }
+
+ return parallelizableGroups, serialGroups
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
new file mode 100644
index 00000000000..4a1c0946127
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
@@ -0,0 +1,250 @@
+package internal
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "time"
+)
+
+const BAILOUT_TIME = 1 * time.Second
+const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.
+
+When running in parallel, Ginkgo captures stdout and stderr output
+and attaches it to the running spec. It looks like that process is getting
+stuck for this suite.
+
+This usually happens if you, or a library you are using, spin up an external
+process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This
+causes the external process to keep Ginkgo's output interceptor pipe open and
+causes output interception to hang.
+
+Ginkgo has detected this and shortcircuited the capture process. The specs
+will continue running after this message however output from the external
+process that caused this issue will not be captured.
+
+You have several options to fix this. In preferred order they are:
+
+1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
+2. Ensure your process exits before the current spec completes. If your
+process is long-lived and must cross spec boundaries, this option won't
+work for you.
+3. Pause Ginkgo's output interceptor before starting your process and then
+resume it after. Use PauseOutputInterception() and ResumeOutputInterception()
+to do this.
+4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will
+turn off all output interception but allow specs to run in parallel without this
+issue. You may miss important output if you do this including output from Go's
+race detector.
+
+More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
+`
+
+/*
+The OutputInterceptor is used to
+intercept and capture all stdout and stderr output during a test run.
+*/
+type OutputInterceptor interface {
+ StartInterceptingOutput()
+ StartInterceptingOutputAndForwardTo(io.Writer)
+ StopInterceptingAndReturnOutput() string
+
+ PauseIntercepting()
+ ResumeIntercepting()
+
+ Shutdown()
+}
+
+type NoopOutputInterceptor struct{}
+
+func (interceptor NoopOutputInterceptor) StartInterceptingOutput() {}
+func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
+func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string { return "" }
+func (interceptor NoopOutputInterceptor) PauseIntercepting() {}
+func (interceptor NoopOutputInterceptor) ResumeIntercepting() {}
+func (interceptor NoopOutputInterceptor) Shutdown() {}
+
+type pipePair struct {
+ reader *os.File
+ writer *os.File
+}
+
+func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
+ for {
+ //make the next pipe...
+ pair := pipePair{}
+ pair.reader, pair.writer, _ = os.Pipe()
+ select {
+ //...and provide it to the next consumer (they are responsible for closing the files)
+ case pipeChannel <- pair:
+ continue
+ //...or close the files if we were told to shutdown
+ case <-shutdown:
+ pair.reader.Close()
+ pair.writer.Close()
+ return
+ }
+ }
+}
+
+type interceptorImplementation interface {
+ CreateStdoutStderrClones() (*os.File, *os.File)
+ ConnectPipeToStdoutStderr(*os.File)
+ RestoreStdoutStderrFromClones(*os.File, *os.File)
+ ShutdownClones(*os.File, *os.File)
+}
+
+type genericOutputInterceptor struct {
+ intercepting bool
+
+ stdoutClone *os.File
+ stderrClone *os.File
+ pipe pipePair
+
+ shutdown chan interface{}
+ emergencyBailout chan interface{}
+ pipeChannel chan pipePair
+ interceptedContent chan string
+
+ forwardTo io.Writer
+ accumulatedOutput string
+
+ implementation interceptorImplementation
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
+ interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
+}
+
+func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.accumulatedOutput = ""
+ interceptor.forwardTo = w
+ interceptor.ResumeIntercepting()
+}
+
+func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
+ if interceptor.intercepting {
+ interceptor.PauseIntercepting()
+ }
+ return interceptor.accumulatedOutput
+}
+
+func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
+ if interceptor.intercepting {
+ return
+ }
+ interceptor.intercepting = true
+ if interceptor.stdoutClone == nil {
+ interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
+ interceptor.shutdown = make(chan interface{})
+ go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
+ }
+
+	// Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is trying to log to stdout and stderr)
+ // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
+ interceptor.pipe = <-interceptor.pipeChannel
+
+ interceptor.emergencyBailout = make(chan interface{})
+
+ //Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
+ go func() {
+ buffer := &bytes.Buffer{}
+ destination := io.MultiWriter(buffer, interceptor.forwardTo)
+ copyFinished := make(chan interface{})
+ reader := interceptor.pipe.reader
+ go func() {
+ io.Copy(destination, reader)
+ reader.Close() // close the read end of the pipe so we don't leak a file descriptor
+ close(copyFinished)
+ }()
+ select {
+ case <-copyFinished:
+ interceptor.interceptedContent <- buffer.String()
+ case <-interceptor.emergencyBailout:
+ interceptor.interceptedContent <- ""
+ }
+ }()
+
+ interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
+}
+
+func (interceptor *genericOutputInterceptor) PauseIntercepting() {
+ if !interceptor.intercepting {
+ return
+ }
+ // first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
+ // to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
+ interceptor.pipe.writer.Close() // the pipewriter itself
+
+ // we also need to stop intercepting. we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors;
+	// this also closes #1 and #2 before pointing them at their original stdout and stderr file descriptions
+ interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)
+
+ var content string
+ select {
+ case content = <-interceptor.interceptedContent:
+ case <-time.After(BAILOUT_TIME):
+ /*
+ By closing all the pipe writer's file descriptors associated with the pipe writer's file description the io.Copy reading from the reader
+ should eventually receive an EOF and exit.
+
+ **However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process
+ will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.
+
+ That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever
+ content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine
+		and file descriptor (though these will be cleaned up when the process exits).
+
+ We tack on a message to notify the user that they've hit this edgecase and encourage them to address it.
+ */
+ close(interceptor.emergencyBailout)
+ content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
+ }
+
+ interceptor.accumulatedOutput += content
+ interceptor.intercepting = false
+}
+
+func (interceptor *genericOutputInterceptor) Shutdown() {
+ interceptor.PauseIntercepting()
+
+ if interceptor.stdoutClone != nil {
+ close(interceptor.shutdown)
+ interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
+ interceptor.stdoutClone = nil
+ interceptor.stderrClone = nil
+ }
+}
+
+/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
+func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &osGlobalReassigningOutputInterceptorImpl{},
+ }
+}
+
+type osGlobalReassigningOutputInterceptorImpl struct{}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ return os.Stdout, os.Stderr
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ os.Stdout = pipeWriter
+ os.Stderr = pipeWriter
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+ os.Stdout = stdoutClone
+ os.Stderr = stderrClone
+}
+
+func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
+ //noop
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
new file mode 100644
index 00000000000..8a237f44637
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
@@ -0,0 +1,73 @@
+//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris
+// +build freebsd openbsd netbsd dragonfly darwin linux solaris
+
+package internal
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &genericOutputInterceptor{
+ interceptedContent: make(chan string),
+ pipeChannel: make(chan pipePair),
+ shutdown: make(chan interface{}),
+ implementation: &dupSyscallOutputInterceptorImpl{},
+ }
+}
+
+type dupSyscallOutputInterceptorImpl struct{}
+
+func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
+ // To clone stdout and stderr we:
+ // First, create two clone file descriptors that point to the stdout and stderr file descriptions
+ stdoutCloneFD, _ := unix.Dup(1)
+ stderrCloneFD, _ := unix.Dup(2)
+
+	// Important: set the fds to FD_CLOEXEC to prevent them from leaking into child processes
+ // https://github.com/onsi/ginkgo/issues/1191
+ flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+ flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0)
+ if err == nil {
+ unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC)
+ }
+
+ // And then wrap the clone file descriptors in files.
+ // One benefit of this (that we don't use yet) is that we can actually write
+ // to these files to emit output to the console even though we're intercepting output
+ stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone")
+ stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone")
+
+ //these clones remain alive throughout the lifecycle of the suite and don't need to be recreated
+ //this speeds things up a bit, actually.
+ return stdoutClone, stderrClone
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
+ // To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things)
+ // to the write end of the pipe.
+ // We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter
+ // This effectively shunts data written to stdout and stderr to the write end of our pipe
+ unix.Dup2(int(pipeWriter.Fd()), 1)
+ unix.Dup2(int(pipeWriter.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
+	// To restore stdout/stderr from the clones we have the 1 and 2 file descriptors
+ // point to the original file descriptions that we saved off in the clones.
+ // This has the added benefit of closing the connection between these descriptors and the write end of the pipe
+ // which is important to cause the io.Copy on the pipe.Reader to end.
+ unix.Dup2(int(stdoutClone.Fd()), 1)
+ unix.Dup2(int(stderrClone.Fd()), 2)
+}
+
+func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) {
+ // We're done with the clones so we can close them to clean up after ourselves
+ stdoutClone.Close()
+ stderrClone.Close()
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
new file mode 100644
index 00000000000..4c374935b8a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
@@ -0,0 +1,7 @@
+//go:build wasm
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return &NoopOutputInterceptor{}
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
new file mode 100644
index 00000000000..30c2851a818
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package internal
+
+func NewOutputInterceptor() OutputInterceptor {
+ return NewOSGlobalReassigningOutputInterceptor()
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
new file mode 100644
index 00000000000..b3cd64292ab
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
@@ -0,0 +1,72 @@
+package parallel_support
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type BeforeSuiteState struct {
+ Data []byte
+ State types.SpecState
+}
+
+type ParallelIndexCounter struct {
+ Index int
+}
+
+var ErrorGone = fmt.Errorf("gone")
+var ErrorFailed = fmt.Errorf("failed")
+var ErrorEarly = fmt.Errorf("early")
+
+var POLLING_INTERVAL = 50 * time.Millisecond
+
+type Server interface {
+ Start()
+ Close()
+ Address() string
+ RegisterAlive(node int, alive func() bool)
+ GetSuiteDone() chan interface{}
+ GetOutputDestination() io.Writer
+ SetOutputDestination(io.Writer)
+}
+
+type Client interface {
+ Connect() bool
+ Close() error
+
+ PostSuiteWillBegin(report types.Report) error
+ PostDidRun(report types.SpecReport) error
+ PostSuiteDidEnd(report types.Report) error
+ PostReportBeforeSuiteCompleted(state types.SpecState) error
+ BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error)
+ PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error
+ BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error)
+ BlockUntilNonprimaryProcsHaveFinished() error
+ BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error)
+ FetchNextCounter() (int, error)
+ PostAbort() error
+ ShouldAbort() bool
+ PostEmitProgressReport(report types.ProgressReport) error
+ Write(p []byte) (int, error)
+}
+
+func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpServer(parallelTotal, reporter)
+ } else {
+ return newRPCServer(parallelTotal, reporter)
+ }
+}
+
+func NewClient(serverHost string) Client {
+ if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" {
+ return newHttpClient(serverHost)
+ } else {
+ return newRPCClient(serverHost)
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
new file mode 100644
index 00000000000..6547c7a66e5
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
@@ -0,0 +1,169 @@
+package parallel_support
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type httpClient struct {
+ serverHost string
+}
+
+func newHttpClient(serverHost string) *httpClient {
+ return &httpClient{
+ serverHost: serverHost,
+ }
+}
+
+func (client *httpClient) Connect() bool {
+ resp, err := http.Get(client.serverHost + "/up")
+ if err != nil {
+ return false
+ }
+ resp.Body.Close()
+ return resp.StatusCode == http.StatusOK
+}
+
+func (client *httpClient) Close() error {
+ return nil
+}
+
+func (client *httpClient) post(path string, data interface{}) error {
+ var body io.Reader
+ if data != nil {
+ encoded, err := json.Marshal(data)
+ if err != nil {
+ return err
+ }
+ body = bytes.NewBuffer(encoded)
+ }
+ resp, err := http.Post(client.serverHost+path, "application/json", body)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ return nil
+}
+
+func (client *httpClient) poll(path string, data interface{}) error {
+ for {
+ resp, err := http.Get(client.serverHost + path)
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode == http.StatusTooEarly {
+ resp.Body.Close()
+ time.Sleep(POLLING_INTERVAL)
+ continue
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode == http.StatusGone {
+ return ErrorGone
+ }
+ if resp.StatusCode == http.StatusFailedDependency {
+ return ErrorFailed
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("received unexpected status code %d", resp.StatusCode)
+ }
+ if data != nil {
+ return json.NewDecoder(resp.Body).Decode(data)
+ }
+ return nil
+ }
+}
+
+func (client *httpClient) PostSuiteWillBegin(report types.Report) error {
+ return client.post("/suite-will-begin", report)
+}
+
+func (client *httpClient) PostDidRun(report types.SpecReport) error {
+ return client.post("/did-run", report)
+}
+
+func (client *httpClient) PostSuiteDidEnd(report types.Report) error {
+ return client.post("/suite-did-end", report)
+}
+
+func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.post("/progress-report", report)
+}
+
+func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.post("/report-before-suite-completed", state)
+}
+
+func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("/report-before-suite-state", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.post("/before-suite-completed", beforeSuiteState)
+}
+
+func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("/before-suite-state", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("/have-nonprimary-procs-finished", nil)
+}
+
+func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("/aggregated-nonprimary-procs-report", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *httpClient) FetchNextCounter() (int, error) {
+ var counter ParallelIndexCounter
+ err := client.poll("/counter", &counter)
+ return counter.Index, err
+}
+
+func (client *httpClient) PostAbort() error {
+ return client.post("/abort", nil)
+}
+
+func (client *httpClient) ShouldAbort() bool {
+ err := client.poll("/abort", nil)
+ if err == ErrorGone {
+ return true
+ }
+ return false
+}
+
+func (client *httpClient) Write(p []byte) (int, error) {
+ resp, err := http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p))
+ resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return 0, fmt.Errorf("failed to emit output")
+ }
+ return len(p), err
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
new file mode 100644
index 00000000000..d2c71ab1b25
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
@@ -0,0 +1,242 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principle, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "encoding/json"
+ "io"
+ "net"
+ "net/http"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type httpServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+// Create a new server, automatically selecting a port
+func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &httpServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+// Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *httpServer) Start() {
+ httpServer := &http.Server{}
+ mux := http.NewServeMux()
+ httpServer.Handler = mux
+
+ //streaming endpoints
+ mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin)
+ mux.HandleFunc("/did-run", server.didRun)
+ mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd)
+ mux.HandleFunc("/emit-output", server.emitOutput)
+ mux.HandleFunc("/progress-report", server.emitProgressReport)
+
+ //synchronization endpoints
+ mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted)
+ mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState)
+ mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted)
+ mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState)
+ mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished)
+ mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport)
+ mux.HandleFunc("/counter", server.handleCounter)
+ mux.HandleFunc("/up", server.handleUp)
+ mux.HandleFunc("/abort", server.handleAbort)
+
+ go httpServer.Serve(server.listener)
+}
+
+// Stop the server
+func (server *httpServer) Close() {
+ server.listener.Close()
+}
+
+// The address the server can be reached at. Pass this into the `ForwardingReporter`.
+func (server *httpServer) Address() string {
+ return "http://" + server.listener.Addr().String()
+}
+
+func (server *httpServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *httpServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *httpServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *httpServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
+
+//
+// Streaming Endpoints
+//
+
+// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters`
+func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool {
+ defer request.Body.Close()
+ if json.NewDecoder(request.Body).Decode(object) != nil {
+ writer.WriteHeader(http.StatusBadRequest)
+ return false
+ }
+ return true
+}
+
+func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool {
+ if err == nil {
+ return false
+ }
+ switch err {
+ case ErrorEarly:
+ writer.WriteHeader(http.StatusTooEarly)
+ case ErrorGone:
+ writer.WriteHeader(http.StatusGone)
+ case ErrorFailed:
+ writer.WriteHeader(http.StatusFailedDependency)
+ default:
+ writer.WriteHeader(http.StatusInternalServerError)
+ }
+ return true
+}
+
+func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer)
+}
+
+func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) {
+ var report types.SpecReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+
+ server.handleError(server.handler.DidRun(report, voidReceiver), writer)
+}
+
+func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) {
+ var report types.Report
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer)
+}
+
+func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) {
+ output, err := io.ReadAll(request.Body)
+ if err != nil {
+ writer.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ var n int
+ server.handleError(server.handler.EmitOutput(output, &n), writer)
+}
+
+func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) {
+ var report types.ProgressReport
+ if !server.decode(writer, request, &report) {
+ return
+ }
+ server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if !server.decode(writer, request, &state) {
+ return
+ }
+
+ server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer)
+}
+
+func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var state types.SpecState
+ if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(state)
+}
+
+func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if !server.decode(writer, request, &beforeSuiteState) {
+ return
+ }
+
+ server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer)
+}
+
+func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) {
+ var beforeSuiteState BeforeSuiteState
+ if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(beforeSuiteState)
+}
+
+func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) {
+ if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, voidReceiver), writer) {
+ return
+ }
+ writer.WriteHeader(http.StatusOK)
+}
+
+func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) {
+ var aggregatedReport types.Report
+ if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(aggregatedReport)
+}
+
+func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) {
+ var n int
+ if server.handleError(server.handler.Counter(voidSender, &n), writer) {
+ return
+ }
+ json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n})
+}
+
+func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) {
+ writer.WriteHeader(http.StatusOK)
+}
+
+func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) {
+ if request.Method == "GET" {
+ var shouldAbort bool
+ server.handler.ShouldAbort(voidSender, &shouldAbort)
+ if shouldAbort {
+ writer.WriteHeader(http.StatusGone)
+ } else {
+ writer.WriteHeader(http.StatusOK)
+ }
+ } else {
+ server.handler.Abort(voidSender, voidReceiver)
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
new file mode 100644
index 00000000000..59e8e6fd0a6
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
@@ -0,0 +1,136 @@
+package parallel_support
+
+import (
+ "net/rpc"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type rpcClient struct {
+ serverHost string
+ client *rpc.Client
+}
+
+func newRPCClient(serverHost string) *rpcClient {
+ return &rpcClient{
+ serverHost: serverHost,
+ }
+}
+
+func (client *rpcClient) Connect() bool {
+ var err error
+ if client.client != nil {
+ return true
+ }
+ client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/")
+ if err != nil {
+ client.client = nil
+ return false
+ }
+ return true
+}
+
+func (client *rpcClient) Close() error {
+ return client.client.Close()
+}
+
+func (client *rpcClient) poll(method string, data interface{}) error {
+ for {
+ err := client.client.Call(method, voidSender, data)
+ if err == nil {
+ return nil
+ }
+ switch err.Error() {
+ case ErrorEarly.Error():
+ time.Sleep(POLLING_INTERVAL)
+ case ErrorGone.Error():
+ return ErrorGone
+ case ErrorFailed.Error():
+ return ErrorFailed
+ default:
+ return err
+ }
+ }
+}
+
+func (client *rpcClient) PostSuiteWillBegin(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver)
+}
+
+func (client *rpcClient) PostDidRun(report types.SpecReport) error {
+ return client.client.Call("Server.DidRun", report, voidReceiver)
+}
+
+func (client *rpcClient) PostSuiteDidEnd(report types.Report) error {
+ return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver)
+}
+
+func (client *rpcClient) Write(p []byte) (int, error) {
+ var n int
+ err := client.client.Call("Server.EmitOutput", p, &n)
+ return n, err
+}
+
+func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error {
+ return client.client.Call("Server.EmitProgressReport", report, voidReceiver)
+}
+
+func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error {
+ return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) {
+ var state types.SpecState
+ err := client.poll("Server.ReportBeforeSuiteState", &state)
+ if err == ErrorGone {
+ return types.SpecStateFailed, nil
+ }
+ return state, err
+}
+
+func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error {
+ beforeSuiteState := BeforeSuiteState{
+ State: state,
+ Data: data,
+ }
+ return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) {
+ var beforeSuiteState BeforeSuiteState
+ err := client.poll("Server.BeforeSuiteState", &beforeSuiteState)
+ if err == ErrorGone {
+ return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1()
+ }
+ return beforeSuiteState.State, beforeSuiteState.Data, err
+}
+
+func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error {
+ return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver)
+}
+
+func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) {
+ var report types.Report
+ err := client.poll("Server.AggregatedNonprimaryProcsReport", &report)
+ if err == ErrorGone {
+ return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing()
+ }
+ return report, err
+}
+
+func (client *rpcClient) FetchNextCounter() (int, error) {
+ var counter int
+ err := client.client.Call("Server.Counter", voidSender, &counter)
+ return counter, err
+}
+
+func (client *rpcClient) PostAbort() error {
+ return client.client.Call("Server.Abort", voidSender, voidReceiver)
+}
+
+func (client *rpcClient) ShouldAbort() bool {
+ var shouldAbort bool
+ client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort)
+ return shouldAbort
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
new file mode 100644
index 00000000000..2620fd562d3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
@@ -0,0 +1,75 @@
+/*
+
+The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners.
+This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser).
+
+*/
+
+package parallel_support
+
+import (
+ "io"
+ "net"
+ "net/http"
+ "net/rpc"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+)
+
+/*
+RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter.
+It then forwards that communication to attached reporters.
+*/
+type RPCServer struct {
+ listener net.Listener
+ handler *ServerHandler
+}
+
+//Create a new server, automatically selecting a port
+func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ return &RPCServer{
+ listener: listener,
+ handler: newServerHandler(parallelTotal, reporter),
+ }, nil
+}
+
+//Start the server. You don't need to `go s.Start()`, just `s.Start()`
+func (server *RPCServer) Start() {
+ rpcServer := rpc.NewServer()
+ rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server
+
+ httpServer := &http.Server{}
+ httpServer.Handler = rpcServer
+
+ go httpServer.Serve(server.listener)
+}
+
+//Stop the server
+func (server *RPCServer) Close() {
+ server.listener.Close()
+}
+
+//The address the server can be reached it. Pass this into the `ForwardingReporter`.
+func (server *RPCServer) Address() string {
+ return server.listener.Addr().String()
+}
+
+func (server *RPCServer) GetSuiteDone() chan interface{} {
+ return server.handler.done
+}
+
+func (server *RPCServer) GetOutputDestination() io.Writer {
+ return server.handler.outputDestination
+}
+
+func (server *RPCServer) SetOutputDestination(w io.Writer) {
+ server.handler.outputDestination = w
+}
+
+func (server *RPCServer) RegisterAlive(node int, alive func() bool) {
+ server.handler.registerAlive(node, alive)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
new file mode 100644
index 00000000000..a6d98793e9f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
@@ -0,0 +1,234 @@
+package parallel_support
+
+import (
+ "io"
+ "os"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Void struct{}
+
+var voidReceiver *Void = &Void{}
+var voidSender Void
+
+// ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server.
+// It handles all the business logic to avoid duplication between the two servers
+
+type ServerHandler struct {
+ done chan interface{}
+ outputDestination io.Writer
+ reporter reporters.Reporter
+ alives []func() bool
+ lock *sync.Mutex
+ beforeSuiteState BeforeSuiteState
+ reportBeforeSuiteState types.SpecState
+ parallelTotal int
+ counter int
+ counterLock *sync.Mutex
+ shouldAbort bool
+
+ numSuiteDidBegins int
+ numSuiteDidEnds int
+ aggregatedReport types.Report
+ reportHoldingArea []types.SpecReport
+}
+
+func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler {
+ return &ServerHandler{
+ reporter: reporter,
+ lock: &sync.Mutex{},
+ counterLock: &sync.Mutex{},
+ alives: make([]func() bool, parallelTotal),
+ beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid},
+
+ parallelTotal: parallelTotal,
+ outputDestination: os.Stdout,
+ done: make(chan interface{}),
+ }
+}
+
+func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ handler.numSuiteDidBegins += 1
+
+ // all summaries are identical, so it's fine to simply emit the last one of these
+ if handler.numSuiteDidBegins == handler.parallelTotal {
+ handler.reporter.SuiteWillBegin(report)
+
+ for _, summary := range handler.reportHoldingArea {
+ handler.reporter.WillRun(summary)
+ handler.reporter.DidRun(summary)
+ }
+
+ handler.reportHoldingArea = nil
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ if handler.numSuiteDidBegins == handler.parallelTotal {
+ handler.reporter.WillRun(report)
+ handler.reporter.DidRun(report)
+ } else {
+ handler.reportHoldingArea = append(handler.reportHoldingArea, report)
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+
+ handler.numSuiteDidEnds += 1
+ if handler.numSuiteDidEnds == 1 {
+ handler.aggregatedReport = report
+ } else {
+ handler.aggregatedReport = handler.aggregatedReport.Add(report)
+ }
+
+ if handler.numSuiteDidEnds == handler.parallelTotal {
+ handler.reporter.SuiteDidEnd(handler.aggregatedReport)
+ close(handler.done)
+ }
+
+ return nil
+}
+
+func (handler *ServerHandler) EmitOutput(output []byte, n *int) error {
+ var err error
+ *n, err = handler.outputDestination.Write(output)
+ return err
+}
+
+func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reporter.EmitProgressReport(report)
+ return nil
+}
+
+func (handler *ServerHandler) registerAlive(proc int, alive func() bool) {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.alives[proc-1] = alive
+}
+
+func (handler *ServerHandler) procIsAlive(proc int) bool {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ alive := handler.alives[proc-1]
+ if alive == nil {
+ return true
+ }
+ return alive()
+}
+
+func (handler *ServerHandler) haveNonprimaryProcsFinished() bool {
+ for i := 2; i <= handler.parallelTotal; i++ {
+ if handler.procIsAlive(i) {
+ return false
+ }
+ }
+ return true
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.reportBeforeSuiteState = reportBeforeSuiteState
+
+ return nil
+}
+
+func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error {
+ proc1IsAlive := handler.procIsAlive(1)
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.reportBeforeSuiteState == types.SpecStateInvalid {
+ if proc1IsAlive {
+ return ErrorEarly
+ } else {
+ return ErrorGone
+ }
+ }
+ *reportBeforeSuiteState = handler.reportBeforeSuiteState
+ return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.beforeSuiteState = beforeSuiteState
+
+ return nil
+}
+
+func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error {
+ proc1IsAlive := handler.procIsAlive(1)
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.beforeSuiteState.State == types.SpecStateInvalid {
+ if proc1IsAlive {
+ return ErrorEarly
+ } else {
+ return ErrorGone
+ }
+ }
+ *beforeSuiteState = handler.beforeSuiteState
+ return nil
+}
+
+func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error {
+ if handler.haveNonprimaryProcsFinished() {
+ return nil
+ } else {
+ return ErrorEarly
+ }
+}
+
+func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error {
+ if handler.haveNonprimaryProcsFinished() {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ if handler.numSuiteDidEnds == handler.parallelTotal-1 {
+ *report = handler.aggregatedReport
+ return nil
+ } else {
+ return ErrorGone
+ }
+ } else {
+ return ErrorEarly
+ }
+}
+
+func (handler *ServerHandler) Counter(_ Void, counter *int) error {
+ handler.counterLock.Lock()
+ defer handler.counterLock.Unlock()
+ *counter = handler.counter
+ handler.counter++
+ return nil
+}
+
+func (handler *ServerHandler) Abort(_ Void, _ *Void) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ handler.shouldAbort = true
+ return nil
+}
+
+func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error {
+ handler.lock.Lock()
+ defer handler.lock.Unlock()
+ *shouldAbort = handler.shouldAbort
+ return nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
new file mode 100644
index 00000000000..11269cf1f24
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
@@ -0,0 +1,287 @@
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+var _SOURCE_CACHE = map[string][]string{}
+
+type ProgressSignalRegistrar func(func()) context.CancelFunc
+
+func RegisterForProgressSignal(handler func()) context.CancelFunc {
+ signalChannel := make(chan os.Signal, 1)
+ if len(PROGRESS_SIGNALS) > 0 {
+ signal.Notify(signalChannel, PROGRESS_SIGNALS...)
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ for {
+ select {
+ case <-signalChannel:
+ handler()
+ case <-ctx.Done():
+ signal.Stop(signalChannel)
+ return
+ }
+ }
+ }()
+
+ return cancel
+}
+
+type ProgressStepCursor struct {
+ Text string
+ CodeLocation types.CodeLocation
+ StartTime time.Time
+}
+
+func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
+ pr := types.ProgressReport{
+ ParallelProcess: report.ParallelProcess,
+ RunningInParallel: isRunningInParallel,
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ LeafNodeText: report.LeafNodeText,
+ LeafNodeLocation: report.LeafNodeLocation,
+ SpecStartTime: report.StartTime,
+
+ CurrentNodeType: currentNode.NodeType,
+ CurrentNodeText: currentNode.Text,
+ CurrentNodeLocation: currentNode.CodeLocation,
+ CurrentNodeStartTime: currentNodeStartTime,
+
+ CurrentStepText: currentStep.Message,
+ CurrentStepLocation: currentStep.CodeLocation,
+ CurrentStepStartTime: currentStep.TimelineLocation.Time,
+
+ AdditionalReports: additionalReports,
+
+ CapturedGinkgoWriterOutput: gwOutput,
+ TimelineLocation: timelineLocation,
+ }
+
+ goroutines, err := extractRunningGoroutines()
+ if err != nil {
+ return pr, err
+ }
+ pr.Goroutines = goroutines
+
+ // now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
+ packagesOfInterest := map[string]bool{}
+ packageFromFilename := func(filename string) string {
+ return filepath.Dir(filename)
+ }
+ addPackageFor := func(filename string) {
+ if filename != "" {
+ packagesOfInterest[packageFromFilename(filename)] = true
+ }
+ }
+ isPackageOfInterest := func(filename string) bool {
+ stackPackage := packageFromFilename(filename)
+ for packageOfInterest := range packagesOfInterest {
+ if strings.HasPrefix(stackPackage, packageOfInterest) {
+ return true
+ }
+ }
+ return false
+ }
+ for _, location := range report.ContainerHierarchyLocations {
+ addPackageFor(location.FileName)
+ }
+ addPackageFor(report.LeafNodeLocation.FileName)
+ addPackageFor(currentNode.CodeLocation.FileName)
+ addPackageFor(currentStep.CodeLocation.FileName)
+
+ //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
+ specGoRoutineIdx := -1
+ runNodeFunctionCallIdx := -1
+OUTER:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
+ specGoRoutineIdx = goroutineIdx
+ runNodeFunctionCallIdx = functionCallIdx
+ break OUTER
+ }
+ }
+ }
+
+ //Now, we find the first non-Ginkgo function call
+ if specGoRoutineIdx > -1 {
+ for runNodeFunctionCallIdx >= 0 {
+ fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
+ file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
+ // these are all things that could potentially happen from within ginkgo
+ if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
+ runNodeFunctionCallIdx--
+ continue
+ }
+ if strings.Contains(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function, "ginkgo/table_dsl") {
+
+ }
+ //found it! lets add its package of interest
+ addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
+ break
+ }
+ }
+
+ ginkgoEntryPointIdx := -1
+OUTER_GINKGO_ENTRY_POINT:
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ for _, functionCall := range goroutine.Stack {
+ if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
+ ginkgoEntryPointIdx = goroutineIdx
+ break OUTER_GINKGO_ENTRY_POINT
+ }
+ }
+ }
+
+ // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
+ // Any goroutines with highlighted lines end up in the HighlightGoRoutines
+ for goroutineIdx, goroutine := range pr.Goroutines {
+ if goroutineIdx == ginkgoEntryPointIdx {
+ continue
+ }
+ if goroutineIdx == specGoRoutineIdx {
+ pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
+ }
+ for functionCallIdx, functionCall := range goroutine.Stack {
+ if isPackageOfInterest(functionCall.Filename) {
+ goroutine.Stack[functionCallIdx].Highlight = true
+ goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
+ }
+ }
+ }
+
+ if !includeAll {
+ goroutines := []types.Goroutine{pr.SpecGoroutine()}
+ goroutines = append(goroutines, pr.HighlightedGoroutines()...)
+ pr.Goroutines = goroutines
+ }
+
+ return pr, nil
+}
+
+func extractRunningGoroutines() ([]types.Goroutine, error) {
+ var stack []byte
+ for size := 64 * 1024; ; size *= 2 {
+ stack = make([]byte, size)
+ if n := runtime.Stack(stack, true); n < size {
+ stack = stack[:n]
+ break
+ }
+ }
+ r := bufio.NewReader(bytes.NewReader(stack))
+ out := []types.Goroutine{}
+ idx := -1
+ for {
+ line, err := r.ReadString('\n')
+ if err == io.EOF {
+ break
+ }
+
+ line = strings.TrimSuffix(line, "\n")
+
+ //skip blank lines
+ if line == "" {
+ continue
+ }
+
+ //parse headers for new goroutine frames
+ if strings.HasPrefix(line, "goroutine") {
+ out = append(out, types.Goroutine{})
+ idx = len(out) - 1
+
+ line = strings.TrimPrefix(line, "goroutine ")
+ line = strings.TrimSuffix(line, ":")
+ fields := strings.SplitN(line, " ", 2)
+ if len(fields) != 2 {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
+ }
+ out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1]))
+ }
+
+ out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
+ continue
+ }
+
+ //if we are here we must be at a function call entry in the stack
+ functionCall := types.FunctionCall{
+ Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
+ }
+
+ line, err = r.ReadString('\n')
+ line = strings.TrimSuffix(line, "\n")
+ if err == io.EOF {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
+ }
+ line = strings.TrimLeft(line, " \t")
+ delimiterIdx := strings.LastIndex(line, ":")
+ if delimiterIdx == -1 {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
+ }
+ functionCall.Filename = line[:delimiterIdx]
+ line = strings.Split(line[delimiterIdx+1:], " ")[0]
+ lineNumber, err := strconv.ParseInt(line, 10, 64)
+ functionCall.Line = int(lineNumber)
+ if err != nil {
+ return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
+ }
+ out[idx].Stack = append(out[idx].Stack, functionCall)
+ }
+
+ return out, nil
+}
+
+func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
+ if filename == "" {
+ return []string{}, 0
+ }
+
+ var lines []string
+ var ok bool
+ if lines, ok = _SOURCE_CACHE[filename]; !ok {
+ sourceRoots := []string{""}
+ sourceRoots = append(sourceRoots, configuredSourceRoots...)
+ var data []byte
+ var err error
+ var found bool
+ for _, root := range sourceRoots {
+ data, err = os.ReadFile(filepath.Join(root, filename))
+ if err == nil {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return []string{}, 0
+ }
+ lines = strings.Split(string(data), "\n")
+ _SOURCE_CACHE[filename] = lines
+ }
+
+ startIndex := lineNumber - span - 1
+ endIndex := startIndex + span + span + 1
+ if startIndex < 0 {
+ startIndex = 0
+ }
+ if endIndex > len(lines) {
+ endIndex = len(lines)
+ }
+ highlightIndex := lineNumber - 1 - startIndex
+ return lines[startIndex:endIndex], highlightIndex
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
new file mode 100644
index 00000000000..61e0ed30667
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
@@ -0,0 +1,11 @@
+//go:build freebsd || openbsd || netbsd || darwin || dragonfly
+// +build freebsd openbsd netbsd darwin dragonfly
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
new file mode 100644
index 00000000000..ad30de459da
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
@@ -0,0 +1,11 @@
+//go:build linux || solaris
+// +build linux solaris
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
new file mode 100644
index 00000000000..8c53fe0adad
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
@@ -0,0 +1,10 @@
+//go:build wasm
+
+package internal
+
+import (
+ "os"
+ "syscall"
+)
+
+var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
new file mode 100644
index 00000000000..0eca2516adb
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package internal
+
+import "os"
+
+var PROGRESS_SIGNALS = []os.Signal{}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
new file mode 100644
index 00000000000..2c6e260f7d7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
@@ -0,0 +1,79 @@
+package internal
+
+import (
+ "context"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ProgressReporterManager struct {
+ lock *sync.Mutex
+ progressReporters map[int]func() string
+ prCounter int
+}
+
+func NewProgressReporterManager() *ProgressReporterManager {
+ return &ProgressReporterManager{
+ progressReporters: map[int]func() string{},
+ lock: &sync.Mutex{},
+ }
+}
+
+func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ prm.prCounter += 1
+ prCounter := prm.prCounter
+ prm.progressReporters[prCounter] = reporter
+
+ return func() {
+ prm.lock.Lock()
+ defer prm.lock.Unlock()
+ delete(prm.progressReporters, prCounter)
+ }
+}
+
+func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
+ prm.lock.Lock()
+ keys := []int{}
+ for key := range prm.progressReporters {
+ keys = append(keys, key)
+ }
+ sort.Ints(keys)
+ reporters := []func() string{}
+ for _, key := range keys {
+ reporters = append(reporters, prm.progressReporters[key])
+ }
+ prm.lock.Unlock()
+
+ if len(reporters) == 0 {
+ return nil
+ }
+ out := []string{}
+ for _, reporter := range reporters {
+ reportC := make(chan string, 1)
+ go func() {
+ defer func() {
+ e := recover()
+ if e != nil {
+ failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
+ reportC <- "failed to query attached progress reporter"
+ }
+ }()
+ reportC <- reporter()
+ }()
+ var report string
+ select {
+ case report = <-reportC:
+ case <-ctx.Done():
+ return out
+ }
+ if strings.TrimSpace(report) != "" {
+ out = append(out, report)
+ }
+ }
+ return out
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
new file mode 100644
index 00000000000..cc351a39bd6
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
@@ -0,0 +1,39 @@
+package internal
+
+import (
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type ReportEntry = types.ReportEntry
+
+func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) {
+ out := ReportEntry{
+ Visibility: types.ReportEntryVisibilityAlways,
+ Name: name,
+ Location: cl,
+ Time: time.Now(),
+ }
+ var didSetValue = false
+ for _, arg := range args {
+ switch x := arg.(type) {
+ case types.ReportEntryVisibility:
+ out.Visibility = x
+ case types.CodeLocation:
+ out.Location = x
+ case Offset:
+ out.Location = types.NewCodeLocation(2 + int(x))
+ case time.Time:
+ out.Time = x
+ default:
+ if didSetValue {
+ return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg)
+ }
+ out.Value = types.WrapEntryValue(arg)
+ didSetValue = true
+ }
+ }
+
+ return out, nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
new file mode 100644
index 00000000000..7c4ee5bb7f1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
@@ -0,0 +1,87 @@
+package internal
+
+import (
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Spec struct {
+ Nodes Nodes
+ Skip bool
+}
+
+func (s Spec) SubjectID() uint {
+ return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID
+}
+
+func (s Spec) Text() string {
+ texts := []string{}
+ for i := range s.Nodes {
+ if s.Nodes[i].Text != "" {
+ texts = append(texts, s.Nodes[i].Text)
+ }
+ }
+ return strings.Join(texts, " ")
+}
+
+func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node {
+ return s.Nodes.FirstNodeWithType(nodeTypes)
+}
+
+func (s Spec) FlakeAttempts() int {
+ flakeAttempts := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].FlakeAttempts > 0 {
+ flakeAttempts = s.Nodes[i].FlakeAttempts
+ }
+ }
+
+ return flakeAttempts
+}
+
+func (s Spec) MustPassRepeatedly() int {
+ mustPassRepeatedly := 0
+ for i := range s.Nodes {
+ if s.Nodes[i].MustPassRepeatedly > 0 {
+ mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly
+ }
+ }
+
+ return mustPassRepeatedly
+}
+
+func (s Spec) SpecTimeout() time.Duration {
+ return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout
+}
+
+type Specs []Spec
+
+func (s Specs) HasAnySpecsMarkedPending() bool {
+ for i := range s {
+ if s[i].Nodes.HasNodeMarkedPending() {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (s Specs) CountWithoutSkip() int {
+ n := 0
+ for i := range s {
+ if !s[i].Skip {
+ n += 1
+ }
+ }
+ return n
+}
+
+func (s Specs) AtIndices(indices SpecIndices) Specs {
+ out := make(Specs, len(indices))
+ for i, idx := range indices {
+ out[i] = s[idx]
+ }
+ return out
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
new file mode 100644
index 00000000000..2d2ea2fc357
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
@@ -0,0 +1,47 @@
+package internal
+
+import (
+ "context"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type SpecContext interface {
+ context.Context
+
+ SpecReport() types.SpecReport
+ AttachProgressReporter(func() string) func()
+}
+
+type specContext struct {
+ context.Context
+ *ProgressReporterManager
+
+ cancel context.CancelCauseFunc
+
+ suite *Suite
+}
+
+/*
+SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context.
+
+Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses.
+
+This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation.
+*/
+func NewSpecContext(suite *Suite) *specContext {
+ ctx, cancel := context.WithCancelCause(context.Background())
+ sc := &specContext{
+ cancel: cancel,
+ suite: suite,
+ ProgressReporterManager: NewProgressReporterManager(),
+ }
+ ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
+ sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies
+
+ return sc
+}
+
+func (sc *specContext) SpecReport() types.SpecReport {
+ return sc.suite.CurrentSpecReport()
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
new file mode 100644
index 00000000000..a3c9e6bf18f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
@@ -0,0 +1,1046 @@
+package internal
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/internal/interrupt_handler"
+ "github.com/onsi/ginkgo/v2/internal/parallel_support"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+ "golang.org/x/net/context"
+)
+
+type Phase uint // lifecycle phase of a Suite; advances monotonically BuildTopLevel -> BuildTree -> Run
+
+const (
+	PhaseBuildTopLevel Phase = iota // top-level containers are collected but not entered (see PushNode)
+	PhaseBuildTree                  // collected containers are entered to construct the spec tree (see BuildTree)
+	PhaseRun                        // specs are executing (see Run)
+)
+
+var PROGRESS_REPORTER_DEADLING = 5 * time.Second // budget for querying progress reporters; NOTE(review): "DEADLING" is an upstream typo for "DEADLINE" — do not rename in vendored code
+
+type Suite struct {
+	tree               *TreeNode // root of the spec tree, built during the Build phases
+	topLevelContainers Nodes     // containers seen during PhaseBuildTopLevel, entered later by BuildTree
+
+	*ProgressReporterManager
+
+	phase Phase
+
+	suiteNodes   Nodes // BeforeSuite/AfterSuite-family and ReportBefore/AfterSuite nodes
+	cleanupNodes Nodes // DeferCleanup nodes registered at run time (guarded by selectiveLock)
+
+	failer            *Failer
+	reporter          reporters.Reporter
+	writer            WriterInterface
+	outputInterceptor OutputInterceptor
+	interruptHandler  interrupt_handler.InterruptHandlerInterface
+	config            types.SuiteConfig
+	deadline          time.Time // suite-wide deadline derived from config.Timeout; zero means none
+
+	skipAll              bool
+	report               types.Report
+	currentSpecReport    types.SpecReport
+	currentNode          Node
+	currentNodeStartTime time.Time
+
+	currentSpecContext *specContext
+
+	currentByStep types.SpecEvent
+	timelineOrder int
+
+	/*
+		We don't need to lock around all operations. Just those that *could* happen concurrently.
+
+		Suite, generally, only runs one node at a time - and so the possibility for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in.
+
+		However, there are some operations that can happen concurrently:
+
+		- AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficient.
+		- generateProgressReport can be invoked at any point in time by an interrupt or a progress poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we don't need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself.
+	*/
+	selectiveLock *sync.Mutex
+
+	client parallel_support.Client
+}
+
+func NewSuite() *Suite { // returns a Suite ready for PhaseBuildTopLevel; run-time collaborators arrive via Run
+	return &Suite{
+		tree:                    &TreeNode{},
+		phase:                   PhaseBuildTopLevel,
+		ProgressReporterManager: NewProgressReporterManager(),
+
+		selectiveLock: &sync.Mutex{},
+	}
+}
+
+func (suite *Suite) Clone() (*Suite, error) { // copies the collected (not-yet-entered) containers and suite nodes; only legal before BuildTree
+	if suite.phase != PhaseBuildTopLevel {
+		return nil, fmt.Errorf("cannot clone suite after tree has been built")
+	}
+	return &Suite{
+		tree:                    &TreeNode{}, // fresh tree: the clone must run BuildTree itself
+		phase:                   PhaseBuildTopLevel,
+		ProgressReporterManager: NewProgressReporterManager(),
+		topLevelContainers:      suite.topLevelContainers.Clone(),
+		suiteNodes:              suite.suiteNodes.Clone(),
+		selectiveLock:           &sync.Mutex{},
+	}, nil
+}
+
+func (suite *Suite) BuildTree() error {
+	// During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelContainers and entered
+	// We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree
+	suite.phase = PhaseBuildTree
+	for _, topLevelContainer := range suite.topLevelContainers {
+		err := suite.PushNode(topLevelContainer) // re-pushing in PhaseBuildTree actually enters the container body
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) {
+	if suite.phase != PhaseBuildTree {
+		panic("cannot run before building the tree = call suite.BuildTree() first") // NOTE(review): "=" looks like an upstream typo for "-"; runtime string left untouched in vendored code
+	}
+	ApplyNestedFocusPolicyToTree(suite.tree)
+	specs := GenerateSpecsFromTreeRoot(suite.tree)
+	specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig)
+
+	suite.phase = PhaseRun
+	suite.client = client
+	suite.failer = failer
+	suite.reporter = reporter
+	suite.writer = writer
+	suite.outputInterceptor = outputInterceptor
+	suite.interruptHandler = interruptHandler
+	suite.config = suiteConfig
+
+	if suite.config.Timeout > 0 {
+		suite.deadline = time.Now().Add(suite.config.Timeout) // suite-wide deadline; checked again in runSpecs after everything ran
+	}
+
+	cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) // e.g. SIGINFO/SIGUSR1 -> progress report
+
+	success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs)
+
+	cancelProgressHandler()
+
+	return success, hasProgrammaticFocus
+}
+
+func (suite *Suite) InRunPhase() bool { // true once Run has been invoked (specs are executing)
+	return suite.phase == PhaseRun
+}
+
+/*
+ Tree Construction methods
+
+ PushNode is used during PhaseBuildTopLevel and PhaseBuildTree
+*/
+
+func (suite *Suite) PushNode(node Node) error { // routes a node to cleanup/suite handling, validates decorations, then places it in the tree
+	if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+		return suite.pushCleanupNode(node)
+	}
+
+	if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
+		return suite.pushSuiteNode(node)
+	}
+
+	if suite.phase == PhaseRun {
+		return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation)
+	}
+
+	if node.MarkedSerial { // Serial inside an Ordered container is only valid if the container itself is Serial
+		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+		if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial {
+			return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType)
+		}
+	}
+
+	if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { // BeforeAll/AfterAll require an Ordered ancestor
+		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+		if firstOrderedNode.IsZero() {
+			return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType)
+		}
+	}
+
+	if node.MarkedContinueOnFailure { // ContinueOnFailure is only valid on the outermost Ordered container
+		firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered()
+		if !firstOrderedNode.IsZero() {
+			return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)
+		}
+	}
+
+	if node.NodeType == types.NodeTypeContainer {
+		// During PhaseBuildTopLevel we only track the top level containers without entering them
+		// We only enter the top level container nodes during PhaseBuildTree
+		//
+		// This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives
+		// the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs`
+		// is invoked. This makes the lifecycle easier to reason about and solves issues like #693.
+		if suite.phase == PhaseBuildTopLevel {
+			suite.topLevelContainers = append(suite.topLevelContainers, node)
+			return nil
+		}
+		if suite.phase == PhaseBuildTree {
+			parentTree := suite.tree
+			suite.tree = &TreeNode{Node: node} // temporarily point the tree at this container so nested PushNode calls attach here
+			parentTree.AppendChild(suite.tree)
+			err := func() (err error) {
+				defer func() {
+					if e := recover(); e != nil {
+						err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation)
+					}
+				}()
+				node.Body(nil) // entering the container body triggers nested PushNode calls
+				return err
+			}()
+			suite.tree = parentTree // restore regardless of error
+			return err
+		}
+	} else {
+		suite.tree.AppendChild(&TreeNode{Node: node})
+		return nil
+	}
+
+	return nil
+}
+
+func (suite *Suite) pushSuiteNode(node Node) error { // validates and records a suite-level node; only legal during PhaseBuildTopLevel
+	if suite.phase == PhaseBuildTree {
+		return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation)
+	}
+
+	if suite.phase == PhaseRun {
+		return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation)
+	}
+
+	switch node.NodeType { // at most one BeforeSuite-family and one AfterSuite-family node is allowed
+	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
+		existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+		if len(existingBefores) > 0 {
+			return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation)
+		}
+	case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+		existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+		if len(existingAfters) > 0 {
+			return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation)
+		}
+	}
+
+	suite.suiteNodes = append(suite.suiteNodes, node)
+	return nil
+}
+
+func (suite *Suite) pushCleanupNode(node Node) error { // classifies a DeferCleanup node based on the node currently running and queues it
+	if suite.phase != PhaseRun || suite.currentNode.IsZero() {
+		return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation)
+	}
+
+	switch suite.currentNode.NodeType { // the enclosing node determines when the cleanup fires
+	case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
+		node.NodeType = types.NodeTypeCleanupAfterSuite
+	case types.NodeTypeBeforeAll, types.NodeTypeAfterAll:
+		node.NodeType = types.NodeTypeCleanupAfterAll
+	case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite:
+		return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType)
+	case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite:
+		return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation)
+	default:
+		node.NodeType = types.NodeTypeCleanupAfterEach
+	}
+
+	node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID
+	node.NestingLevel = suite.currentNode.NestingLevel
+	suite.selectiveLock.Lock() // cleanupNodes can be appended from user goroutines while the suite reads it
+	suite.cleanupNodes = append(suite.cleanupNodes, node)
+	suite.selectiveLock.Unlock()
+
+	return nil
+}
+
+func (suite *Suite) generateTimelineLocation() types.TimelineLocation { // thread-safe: mints a monotonically ordered location in the spec's timeline
+	suite.selectiveLock.Lock()
+	defer suite.selectiveLock.Unlock()
+
+	suite.timelineOrder += 1
+	return types.TimelineLocation{
+		Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(), // byte offset into the GinkgoWriter stream so far
+		Order:  suite.timelineOrder,
+		Time:   time.Now(),
+	}
+}
+
+func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent { // stamps, records, and emits the event; returns it stamped for later pairing with an end event
+	event.TimelineLocation = suite.generateTimelineLocation()
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+	suite.selectiveLock.Unlock()
+	suite.reporter.EmitSpecEvent(event)
+	return event
+}
+
+func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) { // records the paired end event, deriving Duration from the start event's stamp
+	event := startEvent
+	event.SpecEventType = eventType
+	event.TimelineLocation = suite.generateTimelineLocation()
+	event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time)
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event)
+	suite.selectiveLock.Unlock()
+	suite.reporter.EmitSpecEvent(event)
+}
+
+func (suite *Suite) By(text string, callback ...func()) error { // implements ginkgo.By: emits a ByStart event and, if a callback is given, runs it and emits ByEnd
+	cl := types.NewCodeLocation(2)
+	if suite.phase != PhaseRun {
+		return types.GinkgoErrors.ByNotDuringRunPhase(cl)
+	}
+
+	event := suite.handleSpecEvent(types.SpecEvent{
+		SpecEventType: types.SpecEventByStart,
+		CodeLocation:  cl,
+		Message:       text,
+	})
+	suite.selectiveLock.Lock()
+	suite.currentByStep = event // surfaced by progress reports as the "current step"
+	suite.selectiveLock.Unlock()
+
+	if len(callback) == 1 {
+		defer func() { // clear the current step and close the event even if the callback panics
+			suite.selectiveLock.Lock()
+			suite.currentByStep = types.SpecEvent{}
+			suite.selectiveLock.Unlock()
+			suite.handleSpecEventEnd(types.SpecEventByEnd, event)
+		}()
+		callback[0]()
+	} else if len(callback) > 1 {
+		panic("just one callback per By, please")
+	}
+	return nil
+}
+
+/*
+Spec Running methods - used during PhaseRun
+*/
+func (suite *Suite) CurrentSpecReport() types.SpecReport { // thread-safe snapshot of the in-flight report, including GinkgoWriter output captured so far
+	suite.selectiveLock.Lock()
+	defer suite.selectiveLock.Unlock()
+	report := suite.currentSpecReport
+	if suite.writer != nil {
+		report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+	}
+	report.ReportEntries = make([]ReportEntry, len(report.ReportEntries)) // deep-copy entries so the caller can't race with AddReportEntry
+	copy(report.ReportEntries, suite.currentSpecReport.ReportEntries)
+	return report
+}
+
+// Only valid in the preview context. In general suite.report only includes
+// the specs run by _this_ node - it is only at the end of the suite that
+// the parallel reports are aggregated. However in the preview context we run
+// in series and so suite.report is, therefore, complete.
+func (suite *Suite) GetPreviewReport() types.Report {
+	suite.selectiveLock.Lock()
+	defer suite.selectiveLock.Unlock()
+	return suite.report
+}
+
+func (suite *Suite) AddReportEntry(entry ReportEntry) error { // thread-safe: stamps the entry onto the timeline, records it, and emits it
+	if suite.phase != PhaseRun {
+		return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location)
+	}
+	entry.TimelineLocation = suite.generateTimelineLocation()
+	entry.Time = entry.TimelineLocation.Time
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry)
+	suite.selectiveLock.Unlock()
+	suite.reporter.EmitReportEntry(entry)
+	return nil
+}
+
+func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { // builds a snapshot of the running node; may be invoked concurrently by interrupts/polls (hence the lock)
+	timelineLocation := suite.generateTimelineLocation()
+	suite.selectiveLock.Lock()
+	defer suite.selectiveLock.Unlock()
+
+	deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING) // bound how long user-attached reporters may take
+	defer cancel()
+	var additionalReports []string
+	if suite.currentSpecContext != nil {
+		additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...)
+	}
+	additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
+	gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes())
+	pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport)
+
+	if err != nil {
+		fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error())
+	}
+	return pr // returned even on error (best-effort partial report)
+}
+
+func (suite *Suite) handleProgressSignal() { // registered via progressSignalRegistrar in Run; fires on a user-requested progress signal
+	report := suite.generateProgressReport(false)
+	report.Message = "{{bold}}You've requested a progress report:{{/}}"
+	suite.emitProgressReport(report)
+}
+
+func (suite *Suite) emitProgressReport(report types.ProgressReport) { // records the report on the spec, emits it locally, and forwards to proc 1 when parallel
+	suite.selectiveLock.Lock()
+	suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput()) // stripped copy keeps the stored report small
+	suite.selectiveLock.Unlock()
+
+	suite.reporter.EmitProgressReport(report)
+	if suite.isRunningInParallel() {
+		err := suite.client.PostEmitProgressReport(report)
+		if err != nil {
+			fmt.Println(err.Error()) // best-effort: a failed forward should not fail the spec
+		}
+	}
+}
+
+func (suite *Suite) isRunningInParallel() bool { // true when more than one parallel process is configured
+	return suite.config.ParallelTotal > 1
+}
+
+func (suite *Suite) processCurrentSpecReport() { // finalizes the current report: notifies reporters, records it, and applies fail-fast/abort policy
+	suite.reporter.DidRun(suite.currentSpecReport)
+	if suite.isRunningInParallel() {
+		suite.client.PostDidRun(suite.currentSpecReport)
+	}
+	suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport)
+
+	if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) {
+		suite.report.SuiteSucceeded = false
+		if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) {
+			suite.skipAll = true // remaining specs on this proc will be skipped
+			if suite.isRunningInParallel() {
+				suite.client.PostAbort() // tell the other procs to stop too
+			}
+		}
+	}
+}
+
+func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { // orchestrates the whole run: suite nodes, grouped specs, cleanup, and the final report; returns overall success
+	numSpecsThatWillBeRun := specs.CountWithoutSkip()
+
+	suite.report = types.Report{
+		SuitePath:                 suitePath,
+		SuiteDescription:          description,
+		SuiteLabels:               suiteLabels,
+		SuiteConfig:               suite.config,
+		SuiteHasProgrammaticFocus: hasProgrammaticFocus,
+		PreRunStats: types.PreRunStats{
+			TotalSpecs:       len(specs),
+			SpecsThatWillRun: numSpecsThatWillBeRun,
+		},
+		StartTime: time.Now(),
+	}
+
+	suite.reporter.SuiteWillBegin(suite.report)
+	if suite.isRunningInParallel() {
+		suite.client.PostSuiteWillBegin(suite.report)
+	}
+
+	suite.report.SuiteSucceeded = true // assume success; every failure path below flips this
+
+	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite)
+
+	ranBeforeSuite := suite.report.SuiteSucceeded // AfterSuite cleanup only runs if BeforeSuite was attempted
+	if suite.report.SuiteSucceeded {
+		suite.runBeforeSuite(numSpecsThatWillBeRun)
+	}
+
+	if suite.report.SuiteSucceeded {
+		groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config)
+		nextIndex := MakeIncrementingIndexCounter()
+		if suite.isRunningInParallel() {
+			nextIndex = suite.client.FetchNextCounter // parallel procs pull group indices from the shared server
+		}
+
+		for {
+			groupedSpecIdx, err := nextIndex()
+			if err != nil {
+				suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error()))
+				suite.report.SuiteSucceeded = false
+				break
+			}
+
+			if groupedSpecIdx >= len(groupedSpecIndices) {
+				if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 {
+					// proc 1 runs the Serial specs once every other proc has drained the parallel groups
+					groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter()
+					suite.client.BlockUntilNonprimaryProcsHaveFinished()
+					continue
+				}
+				break
+			}
+
+			// the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts
+			// we encapsulate that complexity in the notion of a Group that can run
+			// Group is really just an extension of suite so it gets passed a suite and has access to all its internals
+			// Note that group is stateful and intended for single use!
+			newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx]))
+		}
+
+		if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() {
+			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set")
+			suite.report.SuiteSucceeded = false
+		}
+
+		if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 {
+			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set")
+			suite.report.SuiteSucceeded = false
+		}
+	}
+
+	if ranBeforeSuite {
+		suite.runAfterSuiteCleanup(numSpecsThatWillBeRun)
+	}
+
+	interruptStatus := suite.interruptHandler.Status()
+	if interruptStatus.Interrupted() {
+		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String())
+		suite.report.SuiteSucceeded = false
+	}
+	suite.report.EndTime = time.Now()
+	suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime)
+	if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) { // suite-wide timeout set in Run
+		suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed")
+		suite.report.SuiteSucceeded = false
+	}
+
+	suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite)
+	suite.reporter.SuiteDidEnd(suite.report)
+	if suite.isRunningInParallel() {
+		suite.client.PostSuiteDidEnd(suite.report)
+	}
+
+	return suite.report.SuiteSucceeded
+}
+
+func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { // runs the (single) BeforeSuite-family node, but only if at least one spec will actually run
+	beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite)
+	if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+		suite.selectiveLock.Lock()
+		suite.currentSpecReport = types.SpecReport{
+			LeafNodeType:      beforeSuiteNode.NodeType,
+			LeafNodeLocation:  beforeSuiteNode.CodeLocation,
+			ParallelProcess:   suite.config.ParallelProcess,
+			RunningInParallel: suite.isRunningInParallel(),
+		}
+		suite.selectiveLock.Unlock()
+
+		suite.reporter.WillRun(suite.currentSpecReport)
+		suite.runSuiteNode(beforeSuiteNode)
+		if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { // a Skip() in BeforeSuite skips the entire suite
+			suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite")
+			suite.skipAll = true
+		}
+		suite.processCurrentSpecReport()
+	}
+}
+
+func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { // runs the AfterSuite-family node (if specs ran), then all suite-level DeferCleanups in reverse registration order
+	afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite)
+	if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 {
+		suite.selectiveLock.Lock()
+		suite.currentSpecReport = types.SpecReport{
+			LeafNodeType:      afterSuiteNode.NodeType,
+			LeafNodeLocation:  afterSuiteNode.CodeLocation,
+			ParallelProcess:   suite.config.ParallelProcess,
+			RunningInParallel: suite.isRunningInParallel(),
+		}
+		suite.selectiveLock.Unlock()
+
+		suite.reporter.WillRun(suite.currentSpecReport)
+		suite.runSuiteNode(afterSuiteNode)
+		suite.processCurrentSpecReport()
+	}
+
+	afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() // LIFO, mirroring defer semantics
+	if len(afterSuiteCleanup) > 0 {
+		for _, cleanupNode := range afterSuiteCleanup {
+			suite.selectiveLock.Lock()
+			suite.currentSpecReport = types.SpecReport{
+				LeafNodeType:      cleanupNode.NodeType,
+				LeafNodeLocation:  cleanupNode.CodeLocation,
+				ParallelProcess:   suite.config.ParallelProcess,
+				RunningInParallel: suite.isRunningInParallel(),
+			}
+			suite.selectiveLock.Unlock()
+
+			suite.reporter.WillRun(suite.currentSpecReport)
+			suite.runSuiteNode(cleanupNode)
+			suite.processCurrentSpecReport()
+		}
+	}
+}
+
+func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { // runs the spec's ReportBeforeEach/ReportAfterEach nodes, feeding each a snapshot of the current report
+	nodes := spec.Nodes.WithType(nodeType)
+	if nodeType == types.NodeTypeReportAfterEach {
+		nodes = nodes.SortedByDescendingNestingLevel() // innermost first after the spec
+	}
+	if nodeType == types.NodeTypeReportBeforeEach {
+		nodes = nodes.SortedByAscendingNestingLevel() // outermost first before the spec
+	}
+	if len(nodes) == 0 {
+		return
+	}
+
+	for i := range nodes {
+		suite.writer.Truncate()
+		suite.outputInterceptor.StartInterceptingOutput()
+		report := suite.currentSpecReport // snapshot handed to the reporter body
+		nodes[i].Body = func(ctx SpecContext) {
+			nodes[i].ReportEachBody(ctx, report)
+		}
+		state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
+
+		// If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state.
+		// Also, if the reporter is ever aborted - always override the state to propagate the abort
+		if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) {
+			suite.currentSpecReport.State = state
+			suite.currentSpecReport.Failure = failure
+		}
+		suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes())
+		suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+	}
+}
+
+func (suite *Suite) runSuiteNode(node Node) { // runs a suite-level node, handling the parallel choreography of the Synchronized variants; results land on currentSpecReport
+	if suite.config.DryRun {
+		suite.currentSpecReport.State = types.SpecStatePassed
+		return
+	}
+
+	suite.writer.Truncate()
+	suite.outputInterceptor.StartInterceptingOutput()
+	suite.currentSpecReport.StartTime = time.Now()
+
+	var err error
+	switch node.NodeType {
+	case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite:
+		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+	case types.NodeTypeCleanupAfterSuite:
+		if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 {
+			err = suite.client.BlockUntilNonprimaryProcsHaveFinished() // proc 1 cleans up last
+		}
+		if err == nil {
+			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+		}
+	case types.NodeTypeSynchronizedBeforeSuite:
+		var data []byte // payload produced by proc 1's body and shared with all procs
+		var runAllProcs bool
+		if suite.config.ParallelProcess == 1 {
+			if suite.config.ParallelTotal > 1 {
+				suite.outputInterceptor.StopInterceptingAndReturnOutput()
+				suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+			}
+			node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) }
+			node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext
+			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+			if suite.config.ParallelTotal > 1 {
+				suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+				suite.outputInterceptor.StartInterceptingOutput()
+				if suite.currentSpecReport.State.Is(types.SpecStatePassed) {
+					err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data)
+				} else {
+					err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil)
+				}
+			}
+			runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil
+		} else {
+			var proc1State types.SpecState
+			proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData() // wait for proc 1's outcome and payload
+			switch proc1State {
+			case types.SpecStatePassed:
+				runAllProcs = true
+			case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout:
+				err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1()
+			case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped:
+				suite.currentSpecReport.State = proc1State // mirror proc 1's state without failing
+			}
+		}
+		if runAllProcs {
+			node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) }
+			node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext
+			suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+		}
+	case types.NodeTypeSynchronizedAfterSuite:
+		node.Body = node.SynchronizedAfterSuiteAllProcsBody // every proc runs the all-procs body first
+		node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext
+		suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+		if suite.config.ParallelProcess == 1 {
+			if suite.config.ParallelTotal > 1 {
+				err = suite.client.BlockUntilNonprimaryProcsHaveFinished() // proc 1's body runs after everyone else is done
+			}
+			if err == nil {
+				if suite.config.ParallelTotal > 1 {
+					suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+					suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client)
+				}
+
+				node.Body = node.SynchronizedAfterSuiteProc1Body
+				node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext
+				state, failure := suite.runNode(node, time.Time{}, "")
+				if suite.currentSpecReport.State.Is(types.SpecStatePassed) { // don't overwrite an earlier failure from the all-procs body
+					suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure
+				}
+			}
+		}
+	}
+
+	if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { // surface client/coordination errors as a node failure
+		suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+		suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+	}
+
+	suite.currentSpecReport.EndTime = time.Now()
+	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+	suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) { // runs ReportBeforeSuite/ReportAfterSuite nodes with the proc-1 coordination both require
+	nodes := suite.suiteNodes.WithType(nodeType)
+	// only run ReportAfterSuite on proc 1
+	if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 {
+		return
+	}
+	// if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed
+	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 {
+		state, err := suite.client.BlockUntilReportBeforeSuiteCompleted()
+		if err != nil || state.Is(types.SpecStateFailed) { // proc 1's failure fails the suite on every proc
+			suite.report.SuiteSucceeded = false
+		}
+		return
+	}
+
+	for _, node := range nodes {
+		suite.selectiveLock.Lock()
+		suite.currentSpecReport = types.SpecReport{
+			LeafNodeType:      node.NodeType,
+			LeafNodeLocation:  node.CodeLocation,
+			LeafNodeText:      node.Text,
+			ParallelProcess:   suite.config.ParallelProcess,
+			RunningInParallel: suite.isRunningInParallel(),
+		}
+		suite.selectiveLock.Unlock()
+
+		suite.reporter.WillRun(suite.currentSpecReport)
+		suite.runReportSuiteNode(node, suite.report)
+		suite.processCurrentSpecReport()
+	}
+
+	// if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done
+	if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 {
+		if suite.report.SuiteSucceeded {
+			suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed)
+		} else {
+			suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed)
+		}
+	}
+}
+
+func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { // runs one ReportBeforeSuite/ReportAfterSuite node, feeding it the (possibly aggregated) report
+	suite.writer.Truncate()
+	suite.outputInterceptor.StartInterceptingOutput()
+	suite.currentSpecReport.StartTime = time.Now()
+
+	// if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and
+	// (b) always fetch the latest report as prior ReportAfterSuites will contribute to it
+	if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() {
+		aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport()
+		if err != nil {
+			suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error())
+			suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure)
+			return
+		}
+		report = report.Add(aggregatedReport) // merge the other procs' specs into the local report
+	}
+
+	node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
+	suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
+
+	suite.currentSpecReport.EndTime = time.Now()
+	suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime)
+	suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes())
+	suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput()
+}
+
+func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) {
+ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) {
+ suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node)
+ }
+
+ interruptStatus := suite.interruptHandler.Status()
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ return types.SpecStateSkipped, types.Failure{}
+ }
+
+ suite.selectiveLock.Lock()
+ suite.currentNode = node
+ suite.currentNodeStartTime = time.Now()
+ suite.currentByStep = types.SpecEvent{}
+ suite.selectiveLock.Unlock()
+ defer func() {
+ suite.selectiveLock.Lock()
+ suite.currentNode = Node{}
+ suite.currentNodeStartTime = time.Time{}
+ suite.selectiveLock.Unlock()
+ }()
+
+ if text == "" {
+ text = "TOP-LEVEL"
+ }
+ event := suite.handleSpecEvent(types.SpecEvent{
+ SpecEventType: types.SpecEventNodeStart,
+ NodeType: node.NodeType,
+ Message: text,
+ CodeLocation: node.CodeLocation,
+ })
+ defer func() {
+ suite.handleSpecEventEnd(types.SpecEventNodeEnd, event)
+ }()
+
+ var failure types.Failure
+ failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation
+ if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ failure.FailureNodeContext = types.FailureNodeIsLeafNode
+ } else if node.NestingLevel <= 0 {
+ failure.FailureNodeContext = types.FailureNodeAtTopLevel
+ } else {
+ failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1
+ }
+ var outcome types.SpecState
+
+ gracePeriod := suite.config.GracePeriod
+ if node.GracePeriod >= 0 {
+ gracePeriod = node.GracePeriod
+ }
+
+ now := time.Now()
+ deadline := suite.deadline
+ timeoutInPlay := "suite"
+ if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) {
+ deadline = specDeadline
+ timeoutInPlay = "spec"
+ }
+ if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ }
+ if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
+ // we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+ if node.NodeTimeout > 0 {
+ deadline = now.Add(node.NodeTimeout)
+ timeoutInPlay = "node"
+ } else {
+ deadline = now.Add(gracePeriod)
+ timeoutInPlay = "grace period"
+ }
+ }
+
+ if !node.HasContext {
+ // this maps onto the pre-context behavior:
+ // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt
+ // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted
+ gracePeriod = 0
+ }
+
+ sc := NewSpecContext(suite)
+ defer sc.cancel(fmt.Errorf("spec has finished"))
+
+ suite.selectiveLock.Lock()
+ suite.currentSpecContext = sc
+ suite.selectiveLock.Unlock()
+
+ var deadlineChannel <-chan time.Time
+ if !deadline.IsZero() {
+ deadlineChannel = time.After(deadline.Sub(now))
+ }
+ var gracePeriodChannel <-chan time.Time
+
+ outcomeC := make(chan types.SpecState)
+ failureC := make(chan types.Failure)
+
+ go func() {
+ finished := false
+ defer func() {
+ if e := recover(); e != nil || !finished {
+ suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e)
+ }
+
+ outcomeFromRun, failureFromRun := suite.failer.Drain()
+ failureFromRun.TimelineLocation = suite.generateTimelineLocation()
+ outcomeC <- outcomeFromRun
+ failureC <- failureFromRun
+ }()
+
+ node.Body(sc)
+ finished = true
+ }()
+
+ // progress polling timer and channel
+ var emitProgressNow <-chan time.Time
+ var progressPoller *time.Timer
+ var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval
+ if node.PollProgressAfter >= 0 {
+ pollProgressAfter = node.PollProgressAfter
+ }
+ if node.PollProgressInterval >= 0 {
+ pollProgressInterval = node.PollProgressInterval
+ }
+ if pollProgressAfter > 0 {
+ progressPoller = time.NewTimer(pollProgressAfter)
+ emitProgressNow = progressPoller.C
+ defer progressPoller.Stop()
+ }
+
+ // now we wait for an outcome, an interrupt, a timeout, or a progress poll
+ for {
+ select {
+ case outcomeFromRun := <-outcomeC:
+ failureFromRun := <-failureC
+ if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) {
+ // we've already been interrupted/timed out. we just managed to actually exit
+ // before the grace period elapsed
+ // if we have a failure message we attach it as an additional failure
+ if outcomeFromRun != types.SpecStatePassed {
+ additionalFailure := types.AdditionalFailure{
+ State: outcomeFromRun,
+ Failure: failure, // we make a copy - this will include all the configuration set up above...
+ }
+ // ...and then we update the failure with the details from failureFromRun
+ additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ additionalFailure.Failure.ProgressReport = types.ProgressReport{}
+ if outcome == types.SpecStateTimedout {
+ additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message)
+ } else {
+ additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message)
+ }
+ suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure)
+ failure.AdditionalFailure = &additionalFailure
+ }
+ return outcome, failure
+ }
+ if outcomeFromRun.Is(types.SpecStatePassed) {
+ return outcomeFromRun, types.Failure{}
+ } else {
+ failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
+ suite.reporter.EmitFailure(outcomeFromRun, failure)
+ return outcomeFromRun, failure
+ }
+ case <-gracePeriodChannel:
+ if node.HasContext && outcome.Is(types.SpecStateTimedout) {
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:"
+ suite.emitProgressReport(report)
+ }
+ return outcome, failure
+ case <-deadlineChannel:
+ // we're out of time - the outcome is a timeout and we capture the failure and progress report
+ outcome = types.SpecStateTimedout
+ failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation()
+ failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay)
+ deadlineChannel = nil
+ suite.reporter.EmitFailure(outcome, failure)
+
+ // tell the spec to stop. it's important we generate the progress report first to make sure we capture where
+ // the spec is actually stuck
+ sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
+ // and now we wait for the grace period
+ gracePeriodChannel = time.After(gracePeriod)
+ case <-interruptStatus.Channel:
+ interruptStatus = suite.interruptHandler.Status()
+ // ignore interruption from other process if we are cleaning up or reporting
+ if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess &&
+ node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) {
+ continue
+ }
+
+ deadlineChannel = nil // don't worry about deadlines, time's up now
+
+ failureTimelineLocation := suite.generateTimelineLocation()
+ progressReport := suite.generateProgressReport(true)
+
+ if outcome == types.SpecStateInvalid {
+ outcome = types.SpecStateInterrupted
+ failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation
+ if interruptStatus.ShouldIncludeProgressReport() {
+ failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput()
+ failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}"
+ }
+ suite.reporter.EmitFailure(outcome, failure)
+ }
+
+ progressReport = progressReport.WithoutOtherGoroutines()
+ sc.cancel(fmt.Errorf(interruptStatus.Message()))
+
+ if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut {
+ if interruptStatus.ShouldIncludeProgressReport() {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message())
+ suite.emitProgressReport(progressReport)
+ }
+ return outcome, failure
+ }
+ if interruptStatus.ShouldIncludeProgressReport() {
+ if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly {
+ progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message())
+ }
+ suite.emitProgressReport(progressReport)
+ }
+
+ if gracePeriodChannel == nil {
+ // we haven't given grace yet... so let's
+ gracePeriodChannel = time.After(gracePeriod)
+ } else {
+ // we've already given grace. time's up. now.
+ return outcome, failure
+ }
+ case <-emitProgressNow:
+ report := suite.generateProgressReport(false)
+ report.Message = "{{bold}}Automatically polling progress:{{/}}"
+ suite.emitProgressReport(report)
+ if pollProgressInterval > 0 {
+ progressPoller.Reset(pollProgressInterval)
+ }
+ }
+ }
+}
+
+// TODO: search for usages and consider if reporter.EmitFailure() is necessary
+func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure {
+ return types.Failure{
+ Message: message,
+ Location: node.CodeLocation,
+ TimelineLocation: suite.generateTimelineLocation(),
+ FailureNodeContext: types.FailureNodeIsLeafNode,
+ FailureNodeType: node.NodeType,
+ FailureNodeLocation: node.CodeLocation,
+ }
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
new file mode 100644
index 00000000000..73e26556568
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
@@ -0,0 +1,210 @@
+package testingtproxy
+
+import (
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type failFunc func(message string, callerSkip ...int)
+type skipFunc func(message string, callerSkip ...int)
+type cleanupFunc func(args ...any)
+type reportFunc func() types.SpecReport
+type addReportEntryFunc func(names string, args ...any)
+type ginkgoWriterInterface interface {
+ io.Writer
+
+ Print(a ...interface{})
+ Printf(format string, a ...interface{})
+ Println(a ...interface{})
+}
+type ginkgoRecoverFunc func()
+type attachProgressReporterFunc func(func() string) func()
+
+func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy {
+ return &ginkgoTestingTProxy{
+ fail: fail,
+ offset: offset,
+ writer: writer,
+ skip: skip,
+ cleanup: cleanup,
+ report: report,
+ addReportEntry: addReportEntry,
+ ginkgoRecover: ginkgoRecover,
+ attachProgressReporter: attachProgressReporter,
+ randomSeed: randomSeed,
+ parallelProcess: parallelProcess,
+ parallelTotal: parallelTotal,
+ f: formatter.NewWithNoColorBool(noColor),
+ }
+}
+
+type ginkgoTestingTProxy struct {
+ fail failFunc
+ skip skipFunc
+ cleanup cleanupFunc
+ report reportFunc
+ offset int
+ writer ginkgoWriterInterface
+ addReportEntry addReportEntryFunc
+ ginkgoRecover ginkgoRecoverFunc
+ attachProgressReporter attachProgressReporterFunc
+ randomSeed int64
+ parallelProcess int
+ parallelTotal int
+ f formatter.Formatter
+}
+
+// basic testing.T support
+
+func (t *ginkgoTestingTProxy) Cleanup(f func()) {
+ t.cleanup(f, internal.Offset(1))
+}
+
+func (t *ginkgoTestingTProxy) Setenv(key, value string) {
+ originalValue, exists := os.LookupEnv(key)
+ if exists {
+ t.cleanup(os.Setenv, key, originalValue, internal.Offset(1))
+ } else {
+ t.cleanup(os.Unsetenv, key, internal.Offset(1))
+ }
+
+ err := os.Setenv(key, value)
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1)
+ }
+}
+
+func (t *ginkgoTestingTProxy) Error(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fail() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) FailNow() {
+ t.fail("failed", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Failed() bool {
+ return t.report().Failed()
+}
+
+func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) {
+ t.fail(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) {
+ t.fail(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Helper() {
+ types.MarkAsHelper(1)
+}
+
+func (t *ginkgoTestingTProxy) Log(args ...interface{}) {
+ fmt.Fprintln(t.writer, args...)
+}
+
+func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) {
+ t.Log(fmt.Sprintf(format, args...))
+}
+
+func (t *ginkgoTestingTProxy) Name() string {
+ return t.report().FullText()
+}
+
+func (t *ginkgoTestingTProxy) Parallel() {
+ // No-op
+}
+
+func (t *ginkgoTestingTProxy) Skip(args ...interface{}) {
+ t.skip(fmt.Sprintln(args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) SkipNow() {
+ t.skip("skip", t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) {
+ t.skip(fmt.Sprintf(format, args...), t.offset)
+}
+
+func (t *ginkgoTestingTProxy) Skipped() bool {
+ return t.report().State.Is(types.SpecStateSkipped)
+}
+
+func (t *ginkgoTestingTProxy) TempDir() string {
+ tmpDir, err := os.MkdirTemp("", "ginkgo")
+ if err != nil {
+ t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1)
+ return ""
+ }
+ t.cleanup(os.RemoveAll, tmpDir)
+
+ return tmpDir
+}
+
+// FullGinkgoTInterface
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) {
+ finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever}
+ t.addReportEntry(name, append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) Print(a ...any) {
+ t.writer.Print(a...)
+}
+func (t *ginkgoTestingTProxy) Printf(format string, a ...any) {
+ t.writer.Printf(format, a...)
+}
+func (t *ginkgoTestingTProxy) Println(a ...any) {
+ t.writer.Println(a...)
+}
+func (t *ginkgoTestingTProxy) F(format string, args ...any) string {
+ return t.f.F(format, args...)
+}
+func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string {
+ return t.f.Fi(indentation, format, args...)
+}
+func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string {
+ return t.f.Fiw(indentation, maxWidth, format, args...)
+}
+func (t *ginkgoTestingTProxy) RenderTimeline() string {
+ return reporters.RenderTimeline(t.report(), false)
+}
+func (t *ginkgoTestingTProxy) GinkgoRecover() {
+ t.ginkgoRecover()
+}
+func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) {
+ finalArgs := []any{internal.Offset(1)}
+ t.cleanup(append(finalArgs, args...)...)
+}
+func (t *ginkgoTestingTProxy) RandomSeed() int64 {
+ return t.randomSeed
+}
+func (t *ginkgoTestingTProxy) ParallelProcess() int {
+ return t.parallelProcess
+}
+func (t *ginkgoTestingTProxy) ParallelTotal() int {
+ return t.parallelTotal
+}
+func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() {
+ return t.attachProgressReporter(f)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/tree.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
new file mode 100644
index 00000000000..f9d1eeb8f8e
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
@@ -0,0 +1,77 @@
+package internal
+
+import "github.com/onsi/ginkgo/v2/types"
+
+type TreeNode struct {
+ Node Node
+ Parent *TreeNode
+ Children TreeNodes
+}
+
+func (tn *TreeNode) AppendChild(child *TreeNode) {
+ tn.Children = append(tn.Children, child)
+ child.Parent = tn
+}
+
+func (tn *TreeNode) AncestorNodeChain() Nodes {
+ if tn.Parent == nil || tn.Parent.Node.IsZero() {
+ return Nodes{tn.Node}
+ }
+ return append(tn.Parent.AncestorNodeChain(), tn.Node)
+}
+
+type TreeNodes []*TreeNode
+
+func (tn TreeNodes) Nodes() Nodes {
+ out := make(Nodes, len(tn))
+ for i := range tn {
+ out[i] = tn[i].Node
+ }
+ return out
+}
+
+func (tn TreeNodes) WithID(id uint) *TreeNode {
+ for i := range tn {
+ if tn[i].Node.ID == id {
+ return tn[i]
+ }
+ }
+
+ return nil
+}
+
+func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
+ var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
+ walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
+ tests := Specs{}
+
+ nodes := make(Nodes, len(trees))
+ for i := range trees {
+ nodes[i] = trees[i].Node
+ nodes[i].NestingLevel = nestingLevel
+ }
+
+ for i := range nodes {
+ if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
+ continue
+ }
+ leftNodes, rightNodes := nodes.SplitAround(nodes[i])
+ leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
+ rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)
+
+ leftNodes = lNodes.CopyAppend(leftNodes...)
+ rightNodes = rightNodes.CopyAppend(rNodes...)
+
+ if nodes[i].NodeType.Is(types.NodeTypeIt) {
+ tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
+ } else {
+ treeNode := trees.WithID(nodes[i].ID)
+ tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
+ }
+ }
+
+ return tests
+ }
+
+ return walkTree(0, Nodes{}, Nodes{}, tree.Children)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
new file mode 100644
index 00000000000..aab42d5fb3c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
@@ -0,0 +1,144 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+type WriterMode uint
+
+const (
+ WriterModeStreamAndBuffer WriterMode = iota
+ WriterModeBufferOnly
+)
+
+type WriterInterface interface {
+ io.Writer
+
+ Truncate()
+ Bytes() []byte
+ Len() int
+}
+
+// Writer implements WriterInterface and GinkgoWriterInterface
+type Writer struct {
+ buffer *bytes.Buffer
+ outWriter io.Writer
+ lock *sync.Mutex
+ mode WriterMode
+
+ streamIndent []byte
+ indentNext bool
+
+ teeWriters []io.Writer
+}
+
+func NewWriter(outWriter io.Writer) *Writer {
+ return &Writer{
+ buffer: &bytes.Buffer{},
+ lock: &sync.Mutex{},
+ outWriter: outWriter,
+ mode: WriterModeStreamAndBuffer,
+ streamIndent: []byte(" "),
+ indentNext: true,
+ }
+}
+
+func (w *Writer) SetMode(mode WriterMode) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.mode = mode
+}
+
+func (w *Writer) Len() int {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ return w.buffer.Len()
+}
+
+var newline = []byte("\n")
+
+func (w *Writer) Write(b []byte) (n int, err error) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ for _, teeWriter := range w.teeWriters {
+ teeWriter.Write(b)
+ }
+
+ if w.mode == WriterModeStreamAndBuffer {
+ line, remaining, found := []byte{}, b, false
+ for len(remaining) > 0 {
+ line, remaining, found = bytes.Cut(remaining, newline)
+ if len(line) > 0 {
+ if w.indentNext {
+ w.outWriter.Write(w.streamIndent)
+ w.indentNext = false
+ }
+ w.outWriter.Write(line)
+ }
+ if found {
+ w.outWriter.Write(newline)
+ w.indentNext = true
+ }
+ }
+ }
+ return w.buffer.Write(b)
+}
+
+func (w *Writer) Truncate() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ w.buffer.Reset()
+}
+
+func (w *Writer) Bytes() []byte {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+ b := w.buffer.Bytes()
+ copied := make([]byte, len(b))
+ copy(copied, b)
+ return copied
+}
+
+// GinkgoWriterInterface
+func (w *Writer) TeeTo(writer io.Writer) {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = append(w.teeWriters, writer)
+}
+
+func (w *Writer) ClearTeeWriters() {
+ w.lock.Lock()
+ defer w.lock.Unlock()
+
+ w.teeWriters = []io.Writer{}
+}
+
+func (w *Writer) Print(a ...interface{}) {
+ fmt.Fprint(w, a...)
+}
+
+func (w *Writer) Printf(format string, a ...interface{}) {
+ fmt.Fprintf(w, format, a...)
+}
+
+func (w *Writer) Println(a ...interface{}) {
+ fmt.Fprintln(w, a...)
+}
+
+func GinkgoLogrFunc(writer *Writer) logr.Logger {
+ return funcr.New(func(prefix, args string) {
+ if prefix == "" {
+ writer.Printf("%s\n", args)
+ } else {
+ writer.Printf("%s %s\n", prefix, args)
+ }
+ }, funcr.Options{})
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
new file mode 100644
index 00000000000..480730486af
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
@@ -0,0 +1,788 @@
+/*
+Ginkgo's Default Reporter
+
+A number of command line flags are available to tweak Ginkgo's default output.
+
+These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
+*/
+package reporters
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type DefaultReporter struct {
+ conf types.ReporterConfig
+ writer io.Writer
+
+ // managing the emission stream
+ lastCharWasNewline bool
+ lastEmissionWasDelimiter bool
+
+ // rendering
+ specDenoter string
+ retryDenoter string
+ formatter formatter.Formatter
+
+ runningInParallel bool
+ lock *sync.Mutex
+}
+
+func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := NewDefaultReporter(conf, writer)
+ reporter.formatter = formatter.New(formatter.ColorModePassthrough)
+
+ return reporter
+}
+
+func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
+ reporter := &DefaultReporter{
+ conf: conf,
+ writer: writer,
+
+ lastCharWasNewline: true,
+ lastEmissionWasDelimiter: false,
+
+ specDenoter: "•",
+ retryDenoter: "↺",
+ formatter: formatter.NewWithNoColorBool(conf.NoColor),
+ lock: &sync.Mutex{},
+ }
+ if runtime.GOOS == "windows" {
+ reporter.specDenoter = "+"
+ reporter.retryDenoter = "R"
+ }
+
+ return reporter
+}
+
+/* The Reporter Interface */
+
+func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) {
+ r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription))
+ if len(report.SuiteLabels) > 0 {
+ r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
+ }
+ r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
+ }
+ } else {
+ banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath)
+ r.emitBlock(banner)
+ bannerWidth := len(banner)
+ if len(report.SuiteLabels) > 0 {
+ labels := strings.Join(report.SuiteLabels, ", ")
+ r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels))
+ if len(labels)+2 > bannerWidth {
+ bannerWidth = len(labels) + 2
+ }
+ }
+ r.emitBlock(strings.Repeat("=", bannerWidth))
+
+ out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
+ if report.SuiteConfig.RandomizeAllSpecs {
+ out += r.f(" - will randomize all specs")
+ }
+ r.emitBlock(out)
+ r.emit("\n")
+ r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
+ if report.SuiteConfig.ParallelTotal > 1 {
+ r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal))
+ }
+ }
+}
+
+func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
+ failures := report.SpecReports.WithState(types.SpecStateFailureStates)
+ if len(failures) > 0 {
+ r.emitBlock("\n")
+ if len(failures) > 1 {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
+ } else {
+ r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}"))
+ }
+ for _, specReport := range failures {
+ highlightColor, heading := "{{red}}", "[FAIL]"
+ switch specReport.State {
+ case types.SpecStatePanicked:
+ highlightColor, heading = "{{magenta}}", "[PANICKED!]"
+ case types.SpecStateAborted:
+ highlightColor, heading = "{{coral}}", "[ABORTED]"
+ case types.SpecStateTimedout:
+ highlightColor, heading = "{{orange}}", "[TIMEDOUT]"
+ case types.SpecStateInterrupted:
+ highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
+ }
+ locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true)
+ r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
+ }
+ }
+
+ //summarize the suite
+ if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded {
+ r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime))
+ return
+ }
+
+ r.emitBlock("\n")
+ color, status := "{{green}}{{bold}}", "SUCCESS!"
+ if !report.SuiteSucceeded {
+ color, status = "{{red}}{{bold}}", "FAIL!"
+ }
+
+ specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
+ r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
+ specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
+ report.PreRunStats.TotalSpecs,
+ report.RunTime.Seconds()),
+ )
+
+ switch len(report.SpecialSuiteFailureReasons) {
+ case 0:
+ r.emit(r.f(color+"%s{{/}} -- ", status))
+ case 1:
+ r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
+ default:
+ r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
+ }
+
+ if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
+ r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
+ } else {
+ r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
+ r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
+ if specs.CountOfFlakedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
+ }
+ if specs.CountOfRepeatedSpecs() > 0 {
+ r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs()))
+ }
+ r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
+ r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
+ }
+}
+
+func (r *DefaultReporter) WillRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel {
+ return
+ }
+
+ r.emitDelimiter(0)
+ r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
+}
+
+func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
+ r.emitBlock("\n")
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::group::%s", sectionName))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
+ }
+ fn()
+ if r.conf.GithubOutput {
+ r.emitBlock(r.fi(1, "::endgroup::"))
+ } else {
+ r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
+ }
+
+}
+
+func (r *DefaultReporter) DidRun(report types.SpecReport) {
+ v := r.conf.Verbosity()
+ inParallel := report.RunningInParallel
+
+ //should we completely omit this spec?
+ if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
+ return
+ }
+
+ header := r.specDenoter
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("[%s]", report.LeafNodeType)
+ }
+ highlightColor := r.highlightColorForState(report.State)
+
+ // have we already been streaming the timeline?
+ timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel
+
+ // should we show the timeline?
+ var timeline types.Timeline
+ showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed())
+ if showTimeline {
+ timeline = report.Timeline().WithoutHiddenReportEntries()
+ keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) ||
+ (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) ||
+ (report.Failed() && r.conf.ShowNodeEvents)
+ if !keepVeryVerboseSpecEvents {
+ timeline = timeline.WithoutVeryVerboseSpecEvents()
+ }
+ if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" {
+ // the timeline is completely empty - don't show it
+ showTimeline = false
+ }
+ if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 {
+ //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report
+ failure, isFailure := timeline[0].(types.Failure)
+ if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) {
+ showTimeline = false
+ }
+ }
+ }
+
+ // should we have a separate section for always-visible reports?
+ showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways)
+
+ // should we have a separate section for captured stdout/stderr
+ showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "")
+
+ // given all that - do we have any actual content to show? or are we a single denoter in a stream?
+ reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped))
+
+ // should we show a runtime?
+ includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "")
+
+ // should we show the codelocation block?
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)
+
+ switch report.State {
+ case types.SpecStatePassed:
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
+ return
+ }
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
+ header = fmt.Sprintf("%s PASSED", header)
+ }
+ if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
+ header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
+ }
+ case types.SpecStatePending:
+ header = "P"
+ if v.GT(types.VerbosityLevelSuccinct) {
+ header, reportHasContent = "P [PENDING]", true
+ }
+ case types.SpecStateSkipped:
+ header = "S"
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
+ header, reportHasContent = "S [SKIPPED]", true
+ }
+ default:
+ header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
+ if report.MaxMustPassRepeatedly > 1 {
+ header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
+ }
+ }
+
+ // If we have no content to show, just emit the header and return
+ if !reportHasContent {
+ r.emit(r.f(highlightColor + header + "{{/}}"))
+ if r.conf.ForceNewlines {
+ r.emit("\n")
+ }
+ return
+ }
+
+ if includeRuntime {
+ header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
+ }
+
+ // Emit header
+ if !timelineHasBeenStreaming {
+ r.emitDelimiter(0)
+ }
+ r.emitBlock(r.f(highlightColor + header + "{{/}}"))
+ if showCodeLocation {
+ r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
+ }
+
+ //Emit Stdout/Stderr Output
+ if showSeparateStdSection {
+ r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
+ r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
+ })
+ }
+
+ if showSeparateVisibilityAlwaysReportsSection {
+ r.wrapTextBlock("Report Entries", func() {
+ for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
+ r.emitReportEntry(1, entry)
+ }
+ })
+ }
+
+ if showTimeline {
+ r.wrapTextBlock("Timeline", func() {
+ r.emitTimeline(1, report, timeline)
+ })
+ }
+
+ // Emit Failure Message
+ if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
+ r.emitBlock("\n")
+ r.emitFailure(1, report.State, report.Failure, true)
+ if len(report.AdditionalFailures) > 0 {
+ r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}"))
+ }
+ }
+
+ r.emitDelimiter(0)
+}
+
+func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
+ switch state {
+ case types.SpecStatePassed:
+ return "{{green}}"
+ case types.SpecStatePending:
+ return "{{yellow}}"
+ case types.SpecStateSkipped:
+ return "{{cyan}}"
+ case types.SpecStateFailed:
+ return "{{red}}"
+ case types.SpecStateTimedout:
+ return "{{orange}}"
+ case types.SpecStatePanicked:
+ return "{{magenta}}"
+ case types.SpecStateInterrupted:
+ return "{{orange}}"
+ case types.SpecStateAborted:
+ return "{{coral}}"
+ default:
+ return "{{gray}}"
+ }
+}
+
+func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
+ return strings.ToUpper(state.String())
+}
+
// emitTimeline renders a spec's timeline: the captured GinkgoWriter output
// interleaved, by byte offset, with the structured timeline entries (failures,
// report entries, progress reports, and spec events) that occurred along it.
func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
	isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
	gw := report.CapturedGinkgoWriterOutput
	// cursor tracks how much of the GinkgoWriter output has been emitted so far.
	cursor := 0
	for _, entry := range timeline {
		tl := entry.GetTimelineLocation()
		if tl.Offset < len(gw) {
			// Flush the writer output that precedes this entry, then park the
			// cursor at the entry's offset.
			r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
			cursor = tl.Offset
		} else if cursor < len(gw) {
			// The entry lies beyond the captured output - flush the remainder.
			r.emit(r.fi(indent, "%s", gw[cursor:]))
			cursor = len(gw)
		}
		switch x := entry.(type) {
		case types.Failure:
			if isVeryVerbose {
				r.emitFailure(indent, report.State, x, false)
			} else {
				r.emitShortFailure(indent, report.State, x)
			}
		case types.AdditionalFailure:
			if isVeryVerbose {
				r.emitFailure(indent, x.State, x.Failure, true)
			} else {
				r.emitShortFailure(indent, x.State, x.Failure)
			}
		case types.ReportEntry:
			r.emitReportEntry(indent, x)
		case types.ProgressReport:
			r.emitProgressReport(indent, false, x)
		case types.SpecEvent:
			// Some events only appear at -vv unless node events were requested.
			if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
				r.emitSpecEvent(indent, x, isVeryVerbose)
			}
		}
	}
	// Flush any writer output that trails the final timeline entry.
	if cursor < len(gw) {
		r.emit(r.fi(indent, "%s", gw[cursor:]))
	}
}
+
+func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
+ if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
+ r.emitShortFailure(1, state, failure)
+ } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
+ r.emitFailure(1, state, failure, true)
+ }
+}
+
+func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
+ r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
+ r.humanReadableState(state),
+ failure.FailureNodeType,
+ failure.Location,
+ failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
+ ))
+}
+
// emitFailure renders a full failure report: the failure message, the node and
// location it occurred in (or a GitHub Actions annotation when --github-output
// is set), any forwarded panic, the stack trace when requested, an attached
// progress report if one was captured, and - optionally - any additional
// failure chained onto this one.
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
	highlightColor := r.highlightColorForState(state)
	r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
	if r.conf.GithubOutput {
		// Emit a GitHub workflow command so the failure is annotated in the UI.
		level := "error"
		if state.Is(types.SpecStateSkipped) {
			level = "notice"
		}
		r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	} else {
		r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	}
	if failure.ForwardedPanic != "" {
		r.emitBlock("\n")
		r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
	}

	// Panics always include the stack trace; otherwise it is opt-in via --trace.
	if r.conf.FullTrace || failure.ForwardedPanic != "" {
		r.emitBlock("\n")
		r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
		r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
	}

	if !failure.ProgressReport.IsZero() {
		r.emitBlock("\n")
		r.emitProgressReport(indent, false, failure.ProgressReport)
	}

	// Recurse (once per chained failure) to render the additional failure in full.
	if failure.AdditionalFailure != nil && includeAdditionalFailure {
		r.emitBlock("\n")
		r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
	}
}
+
+func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
+ r.emitDelimiter(1)
+
+ if report.RunningInParallel {
+ r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
+ }
+ shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
+ r.emitProgressReport(1, shouldEmitGW, report)
+ r.emitDelimiter(1)
+}
+
// emitProgressReport renders the body of a progress report: the current spec,
// node, and By-step context (with runtimes and locations), optionally the
// captured GinkgoWriter output, then the spec goroutine, any additional
// reports, and the remaining goroutines of interest. indent grows as each
// nested context is emitted so deeper contexts render further right.
func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
	if report.Message != "" {
		r.emitBlock(r.fi(indent, report.Message+"\n"))
		indent += 1
	}
	if report.LeafNodeText != "" {
		subjectIndent := indent
		if len(report.ContainerHierarchyTexts) > 0 {
			// Containers and subject share one line, so the subject itself
			// carries no extra indentation of its own.
			r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " ")))
			r.emit(" ")
			subjectIndent = 0
		}
		r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
		indent += 1
	}
	if report.CurrentNodeType != types.NodeTypeInvalid {
		r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType))
		// It nodes are skipped here: their text is the subject shown above.
		if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) {
			r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
		}

		r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
		indent += 1
	}
	if report.CurrentStepText != "" {
		r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
		indent += 1
	}

	// The sections below hang off the deepest context emitted above.
	if indent > 0 {
		indent -= 1
	}

	if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
		r.emit("\n")
		r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
		limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
		if len(lines) <= limit {
			r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
		} else {
			// Long output is truncated to roughly the last `limit` lines.
			r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
			for _, line := range lines[len(lines)-limit-1:] {
				r.emitBlock(r.fi(indent+1, "%s", line))
			}
		}
		r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
	}

	if !report.SpecGoroutine().IsZero() {
		r.emit("\n")
		r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n"))
		r.emitGoroutines(indent, report.SpecGoroutine())
	}

	if len(report.AdditionalReports) > 0 {
		r.emit("\n")
		r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}"))
		for i, additionalReport := range report.AdditionalReports {
			r.emit(r.fi(indent+1, additionalReport))
			// Separate reports with a short rule, but not after the last one.
			if i < len(report.AdditionalReports)-1 {
				r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
			}
		}
		r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}"))
	}

	highlightedGoroutines := report.HighlightedGoroutines()
	if len(highlightedGoroutines) > 0 {
		r.emit("\n")
		r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n"))
		r.emitGoroutines(indent, highlightedGoroutines...)
	}

	otherGoroutines := report.OtherGoroutines()
	if len(otherGoroutines) > 0 {
		r.emit("\n")
		r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
		r.emitGoroutines(indent, otherGoroutines...)
	}
}
+
+func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
+ if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
+ return
+ }
+ r.emitReportEntry(1, entry)
+}
+
// emitReportEntry renders a single report entry: a bold name with its location
// and timestamp, followed (when non-empty) by its indented string
// representation. Note that entry.Name is concatenated into the format string
// handed to fi, so any {{style}} tags in the name are processed by the formatter.
func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
	r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
	if representation := entry.StringRepresentation(); representation != "" {
		r.emitBlock(r.fi(indent+1, representation))
	}
}
+
+func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
+ v := r.conf.Verbosity()
+ if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
+ r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
+ }
+}
+
// emitSpecEvent renders a single spec event (By step start/end, node
// enter/exit, and repeat/retry attempts) with its timestamp and, when
// includeLocation is set, its code location. Invalid events emit nothing.
func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
	location := ""
	if includeLocation {
		location = fmt.Sprintf("- %s ", event.CodeLocation.String())
	}
	switch event.SpecEventType {
	case types.SpecEventInvalid:
		return
	case types.SpecEventByStart:
		r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	case types.SpecEventByEnd:
		// End events additionally carry the step's duration.
		r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
	case types.SpecEventNodeStart:
		r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	case types.SpecEventNodeEnd:
		r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
	case types.SpecEventSpecRepeat:
		r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	case types.SpecEventSpecRetry:
		r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	}
}
+
// emitGoroutines renders one or more goroutine stack dumps. Goroutines with
// highlighted frames are colored orange; each highlighted frame additionally
// shows its source snippet via emitSource.
func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
	for idx, g := range goroutines {
		color := "{{gray}}"
		if g.HasHighlights() {
			color = "{{orange}}"
		}
		r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State))
		for _, fc := range g.Stack {
			if fc.Highlight {
				r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function))
				r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line))
				r.emitSource(indent+3, fc)
			} else {
				r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function))
				r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line))
			}
		}

		// Blank line between goroutines, but not after the last one.
		if idx+1 < len(goroutines) {
			r.emit("\n")
		}
	}
}
+
+func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {
+ lines := fc.Source
+ if len(lines) == 0 {
+ return
+ }
+
+ lTrim := 100000
+ for _, line := range lines {
+ lTrimLine := len(line) - len(strings.TrimLeft(line, " \t"))
+ if lTrimLine < lTrim && len(line) > 0 {
+ lTrim = lTrimLine
+ }
+ }
+ if lTrim == 100000 {
+ lTrim = 0
+ }
+
+ for idx, line := range lines {
+ if len(line) > lTrim {
+ line = line[lTrim:]
+ }
+ if idx == fc.SourceHighlight {
+ r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line))
+ } else {
+ r.emit(r.fi(indent, "| %s\n", line))
+ }
+ }
+}
+
/* Emitting to the writer */

// emit writes s verbatim, with no surrounding-newline management.
func (r *DefaultReporter) emit(s string) {
	r._emit(s, false, false)
}
+
// emitBlock writes s as a block: _emit ensures it starts and ends on a fresh line.
func (r *DefaultReporter) emitBlock(s string) {
	r._emit(s, true, false)
}
+
// emitDelimiter writes a gray horizontal rule; _emit suppresses a delimiter
// that immediately follows another delimiter.
func (r *DefaultReporter) emitDelimiter(indent uint) {
	r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
}
+
// a bit ugly - but we're trying to minimize locking on this hot codepath
//
// _emit is the single synchronized write path. block=true brackets s with
// newlines as needed so it stands on its own line(s); isDelimiter=true marks s
// as a delimiter so back-to-back delimiters collapse into one.
func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
	if len(s) == 0 {
		return
	}
	r.lock.Lock()
	defer r.lock.Unlock()
	if isDelimiter && r.lastEmissionWasDelimiter {
		return
	}
	// Open the block on a fresh line if the previous emission did not end one.
	if block && !r.lastCharWasNewline {
		r.writer.Write([]byte("\n"))
	}
	// Record whether the payload itself ends in a newline before writing it.
	r.lastCharWasNewline = (s[len(s)-1:] == "\n")
	r.writer.Write([]byte(s))
	// Close the block with a newline if the payload did not supply one.
	if block && !r.lastCharWasNewline {
		r.writer.Write([]byte("\n"))
		r.lastCharWasNewline = true
	}
	r.lastEmissionWasDelimiter = isDelimiter
}
+
/* Rendering text */

// f formats the string, expanding {{style}} tags via the shared formatter.
func (r *DefaultReporter) f(format string, args ...interface{}) string {
	return r.formatter.F(format, args...)
}
+
// fi formats like f but indents the result by the given indentation level.
func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
	return r.formatter.Fi(indentation, format, args...)
}
+
// cycleJoin joins elements with joiner, alternating between default and gray
// styling so adjacent components remain visually distinct.
func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
	return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
}
+
// codeLocationBlock renders the spec's component hierarchy (container texts,
// leaf node, labels) together with code locations. In veryVerbose mode each
// component gets its own indented line plus its location; otherwise the
// hierarchy is joined onto a single line with the location(s) below. The
// component in which the failure occurred (if any) is highlighted with
// highlightColor; usePreciseFailureLocation selects failure.Location over the
// failure node's location.
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
	texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}
	texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)

	if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
		texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
	} else {
		texts = append(texts, r.f(report.LeafNodeText))
	}
	labels = append(labels, report.LeafNodeLabels)
	locations = append(locations, report.LeafNodeLocation)

	failureLocation := report.Failure.FailureNodeLocation
	if usePreciseFailureLocation {
		failureLocation = report.Failure.Location
	}

	// Work out which component to highlight based on where the failure occurred.
	highlightIndex := -1
	switch report.Failure.FailureNodeContext {
	case types.FailureNodeAtTopLevel:
		// Top-level setup nodes are not part of the hierarchy - prepend them.
		texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
		locations = append([]types.CodeLocation{failureLocation}, locations...)
		labels = append([][]string{{}}, labels...)
		highlightIndex = 0
	case types.FailureNodeInContainer:
		i := report.Failure.FailureNodeContainerIndex
		texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
		locations[i] = failureLocation
		highlightIndex = i
	case types.FailureNodeIsLeafNode:
		i := len(texts) - 1
		texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
		locations[i] = failureLocation
		highlightIndex = i
	default:
		//there is no failure, so we highlight the leaf node
		highlightIndex = len(texts) - 1
	}

	out := ""
	if veryVerbose {
		// One component per line, indented by depth, each with its location.
		for i := range texts {
			if i == highlightIndex {
				out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
			} else {
				out += r.fi(uint(i), "%s", texts[i])
			}
			if len(labels[i]) > 0 {
				out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
			}
			out += "\n"
			out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
		}
	} else {
		// Single line: alternate styles per component so they stay distinguishable.
		for i := range texts {
			style := "{{/}}"
			if i%2 == 1 {
				style = "{{gray}}"
			}
			if i == highlightIndex {
				style = highlightColor + "{{bold}}"
			}
			out += r.f(style+"%s", texts[i])
			if i < len(texts)-1 {
				out += " "
			} else {
				out += r.f("{{/}}")
			}
		}
		flattenedLabels := report.Labels()
		if len(flattenedLabels) > 0 {
			out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
		}
		out += "\n"
		if usePreciseFailureLocation {
			out += r.f("{{gray}}%s{{/}}", failureLocation)
		} else {
			leafLocation := locations[len(locations)-1]
			if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
				// The failure happened in a different node than the leaf - show both.
				out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
				out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
			} else {
				out += r.f("{{gray}}%s{{/}}", leafLocation)
			}
		}

	}
	return out
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
new file mode 100644
index 00000000000..613072ebf1c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
@@ -0,0 +1,149 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters
// this has been removed in V2.
// Please read the documentation at:
// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
// for Ginkgo's new behavior and for a migration path.
//
// ReportViaDeprecatedReporter (below) drives these methods in V1 order:
// SuiteWillBegin first, then per-node/per-spec callbacks, then SuiteDidEnd.
type DeprecatedReporter interface {
	SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
	BeforeSuiteDidRun(setupSummary *types.SetupSummary)
	SpecWillRun(specSummary *types.SpecSummary)
	SpecDidComplete(specSummary *types.SpecSummary)
	AfterSuiteDidRun(setupSummary *types.SetupSummary)
	SuiteDidEnd(summary *types.SuiteSummary)
}
+
// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and
// calls the custom reporter's methods with appropriately transformed data from the V2 report.
//
// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()`
//
// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new
// reporting support in V2. It will be removed in a future minor version of Ginkgo.
func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) {
	// Rebuild a V1-style config from the V2 suite config.
	conf := config.DeprecatedGinkgoConfigType{
		RandomSeed:        report.SuiteConfig.RandomSeed,
		RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs,
		FocusStrings:      report.SuiteConfig.FocusStrings,
		SkipStrings:       report.SuiteConfig.SkipStrings,
		FailOnPending:     report.SuiteConfig.FailOnPending,
		FailFast:          report.SuiteConfig.FailFast,
		FlakeAttempts:     report.SuiteConfig.FlakeAttempts,
		EmitSpecProgress:  false, // hard-coded false; not derived from the V2 config
		DryRun:            report.SuiteConfig.DryRun,
		ParallelNode:      report.SuiteConfig.ParallelProcess,
		ParallelTotal:     report.SuiteConfig.ParallelTotal,
		SyncHost:          report.SuiteConfig.ParallelHost,
		StreamHost:        report.SuiteConfig.ParallelHost,
	}

	summary := &types.DeprecatedSuiteSummary{
		SuiteDescription: report.SuiteDescription,
		SuiteID:          report.SuitePath,

		NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs,
		NumberOfTotalSpecs:                 report.PreRunStats.TotalSpecs,
		NumberOfSpecsThatWillBeRun:         report.PreRunStats.SpecsThatWillRun,
	}

	reporter.SuiteWillBegin(conf, summary)

	for _, spec := range report.SpecReports {
		switch spec.LeafNodeType {
		case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
			setupSummary := &types.DeprecatedSetupSummary{
				ComponentType:  spec.LeafNodeType,
				CodeLocation:   spec.LeafNodeLocation,
				State:          spec.State,
				RunTime:        spec.RunTime,
				Failure:        failureFor(spec),
				CapturedOutput: spec.CombinedOutput(),
				SuiteID:        report.SuitePath,
			}
			reporter.BeforeSuiteDidRun(setupSummary)
		case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
			setupSummary := &types.DeprecatedSetupSummary{
				ComponentType:  spec.LeafNodeType,
				CodeLocation:   spec.LeafNodeLocation,
				State:          spec.State,
				RunTime:        spec.RunTime,
				Failure:        failureFor(spec),
				CapturedOutput: spec.CombinedOutput(),
				SuiteID:        report.SuitePath,
			}
			reporter.AfterSuiteDidRun(setupSummary)
		case types.NodeTypeIt:
			// V1 reporters expect the full component hierarchy (containers + leaf).
			componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{}
			componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...)
			componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...)
			componentTexts = append(componentTexts, spec.LeafNodeText)
			componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)

			specSummary := &types.DeprecatedSpecSummary{
				ComponentTexts:         componentTexts,
				ComponentCodeLocations: componentCodeLocations,
				State:                  spec.State,
				RunTime:                spec.RunTime,
				Failure:                failureFor(spec),
				NumberOfSamples:        spec.NumAttempts,
				CapturedOutput:         spec.CombinedOutput(),
				SuiteID:                report.SuitePath,
			}
			// V1 emitted these around the running spec; here, reporting after
			// the fact, they are emitted back-to-back.
			reporter.SpecWillRun(specSummary)
			reporter.SpecDidComplete(specSummary)

			// Accumulate the suite-level tallies V1 reporters read from the summary.
			switch spec.State {
			case types.SpecStatePending:
				summary.NumberOfPendingSpecs += 1
			case types.SpecStateSkipped:
				summary.NumberOfSkippedSpecs += 1
			case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
				summary.NumberOfFailedSpecs += 1
			case types.SpecStatePassed:
				summary.NumberOfPassedSpecs += 1
				if spec.NumAttempts > 1 {
					summary.NumberOfFlakedSpecs += 1
				}
			}
		}
	}

	summary.SuiteSucceeded = report.SuiteSucceeded
	summary.RunTime = report.RunTime

	reporter.SuiteDidEnd(summary)
}
+
+func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
+ if spec.Failure.IsZero() {
+ return types.DeprecatedSpecFailure{}
+ }
+
+ index := 0
+ switch spec.Failure.FailureNodeContext {
+ case types.FailureNodeInContainer:
+ index = spec.Failure.FailureNodeContainerIndex
+ case types.FailureNodeAtTopLevel:
+ index = -1
+ case types.FailureNodeIsLeafNode:
+ index = len(spec.ContainerHierarchyTexts) - 1
+ if spec.LeafNodeText != "" {
+ index += 1
+ }
+ }
+
+ return types.DeprecatedSpecFailure{
+ Message: spec.Failure.Message,
+ Location: spec.Failure.Location,
+ ForwardedPanic: spec.Failure.ForwardedPanic,
+ ComponentIndex: index,
+ ComponentType: spec.Failure.FailureNodeType,
+ ComponentCodeLocation: spec.Failure.FailureNodeLocation,
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
new file mode 100644
index 00000000000..5d3e8db994b
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
@@ -0,0 +1,69 @@
+package reporters
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+// GenerateJSONReport produces a JSON-formatted report at the passed in destination
+func GenerateJSONReport(report types.Report, destination string) error {
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode([]types.Report{
+ report,
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources
+// It skips over reports that fail to decode but reports on them via the returned messages []string
+func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
+ messages := []string{}
+ allReports := []types.Report{}
+ for _, source := range sources {
+ reports := []types.Report{}
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = json.Unmarshal(data, &reports)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ allReports = append(allReports, reports...)
+ }
+
+ if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(destination)
+ if err != nil {
+ return messages, err
+ }
+ defer f.Close()
+ enc := json.NewEncoder(f)
+ enc.SetIndent("", " ")
+ err = enc.Encode(allReports)
+ if err != nil {
+ return messages, err
+ }
+ return messages, nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
new file mode 100644
index 00000000000..562e0f62ba2
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
@@ -0,0 +1,390 @@
+/*
+
+JUnit XML Reporter for Ginkgo
+
+For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output
+
+The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/
+
+*/
+
+package reporters
+
+import (
+ "encoding/xml"
+ "fmt"
+ "os"
+ "path"
+ "regexp"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/config"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type JunitReportConfig struct {
+ // Spec States for which no timeline should be emitted for system-err
+ // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs
+ OmitTimelinesForSpecState types.SpecState
+
+ // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags
+ OmitFailureMessageAttr bool
+
+ //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out
+ OmitCapturedStdOutErr bool
+
+ // Enable OmitSpecLabels to prevent labels from appearing in the spec name
+ OmitSpecLabels bool
+
+ // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name
+ OmitLeafNodeType bool
+
+ // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes
+ OmitSuiteSetupNodes bool
+}
+
+type JUnitTestSuites struct {
+ XMLName xml.Name `xml:"testsuites"`
+ // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending and/or skipped
+ Disabled int `xml:"disabled,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute all test suites
+ Time float64 `xml:"time,attr"`
+
+ //The set of all test suites
+ TestSuites []JUnitTestSuite `xml:"testsuite"`
+}
+
+type JUnitTestSuite struct {
+ // Name maps onto the description of the test suite - maps onto Report.SuiteDescription
+ Name string `xml:"name,attr"`
+ // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath
+ Package string `xml:"package,attr"`
+ // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite)
+ Tests int `xml:"tests,attr"`
+ // Disabled maps onto specs that are pending
+ Disabled int `xml:"disabled,attr"`
+ // Skipped maps onto specs that are skipped
+ Skipped int `xml:"skipped,attr"`
+ // Errors maps onto specs that panicked or were interrupted
+ Errors int `xml:"errors,attr"`
+ // Failures maps onto specs that failed
+ Failures int `xml:"failures,attr"`
+ // Time is the time in seconds to execute all the test suite - maps onto Report.RunTime
+ Time float64 `xml:"time,attr"`
+ // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime
+ Timestamp string `xml:"timestamp,attr"`
+
+ //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs
+ Properties JUnitProperties `xml:"properties"`
+
+ //TestCases capture the individual specs
+ TestCases []JUnitTestCase `xml:"testcase"`
+}
+
+type JUnitProperties struct {
+ Properties []JUnitProperty `xml:"property"`
+}
+
+func (jup JUnitProperties) WithName(name string) string {
+ for _, property := range jup.Properties {
+ if property.Name == name {
+ return property.Value
+ }
+ }
+ return ""
+}
+
+type JUnitProperty struct {
+ Name string `xml:"name,attr"`
+ Value string `xml:"value,attr"`
+}
+
+var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`)
+
+type JUnitTestCase struct {
+ // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()"
+ Name string `xml:"name,attr"`
+ // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription
+ Classname string `xml:"classname,attr"`
+ // Status maps onto the string representation of SpecReport.State
+ Status string `xml:"status,attr"`
+ // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime
+ Time float64 `xml:"time,attr"`
+ // Owner is the owner of the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes.
+ Owner string `xml:"owner,attr,omitempty"`
+ //Skipped is populated with a message if the test was skipped or pending
+ Skipped *JUnitSkipped `xml:"skipped,omitempty"`
+ //Error is populated if the test panicked or was interrupted
+ Error *JUnitError `xml:"error,omitempty"`
+ //Failure is populated if the test failed
+ Failure *JUnitFailure `xml:"failure,omitempty"`
+ //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr
+ SystemOut string `xml:"system-out,omitempty"`
+ //SystemErr maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput
+ SystemErr string `xml:"system-err,omitempty"`
+}
+
+type JUnitSkipped struct {
+ // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON)
+ Message string `xml:"message,attr"`
+}
+
+type JUnitError struct {
+ //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted"
+ Message string `xml:"message,attr"`
+ //Type is one of "panicked" or "interrupted"
+ Type string `xml:"type,attr"`
+ //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines
+ Description string `xml:",chardata"`
+}
+
+type JUnitFailure struct {
+ //Message maps onto the failure message - equivalent to SpecReport.Failure.Message
+ Message string `xml:"message,attr"`
+ //Type is "failed"
+ Type string `xml:"type,attr"`
+ //Description maps onto the location and stack trace of the failure
+ Description string `xml:",chardata"`
+}
+
+func GenerateJUnitReport(report types.Report, dst string) error {
+ return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{})
+}
+
+func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error {
+ suite := JUnitTestSuite{
+ Name: report.SuiteDescription,
+ Package: report.SuitePath,
+ Time: report.RunTime.Seconds(),
+ Timestamp: report.StartTime.Format("2006-01-02T15:04:05"),
+ Properties: JUnitProperties{
+ Properties: []JUnitProperty{
+ {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)},
+ {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)},
+ {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")},
+ {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))},
+ {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)},
+ {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)},
+ {"LabelFilter", report.SuiteConfig.LabelFilter},
+ {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")},
+ {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")},
+ {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")},
+ {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")},
+ {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)},
+ {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)},
+ {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)},
+ {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)},
+ {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)},
+ {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)},
+ {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode},
+ },
+ },
+ }
+ for _, spec := range report.SpecReports {
+ if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt {
+ continue
+ }
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if config.OmitLeafNodeType {
+ name = ""
+ }
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 && !config.OmitSpecLabels {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ owner := ""
+ for _, label := range labels {
+ if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 {
+ owner = matches[1]
+ }
+ }
+ name = strings.TrimSpace(name)
+
+ test := JUnitTestCase{
+ Name: name,
+ Classname: report.SuiteDescription,
+ Status: spec.State.String(),
+ Time: spec.RunTime.Seconds(),
+ Owner: owner,
+ }
+ if !spec.State.Is(config.OmitTimelinesForSpecState) {
+ test.SystemErr = systemErrForUnstructuredReporters(spec)
+ }
+ if !config.OmitCapturedStdOutErr {
+ test.SystemOut = systemOutForUnstructuredReporters(spec)
+ }
+ suite.Tests += 1
+
+ switch spec.State {
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ test.Skipped = &JUnitSkipped{Message: message}
+ suite.Skipped += 1
+ case types.SpecStatePending:
+ test.Skipped = &JUnitSkipped{Message: "pending"}
+ suite.Disabled += 1
+ case types.SpecStateFailed:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "failed",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateTimedout:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "timedout",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Failures += 1
+ case types.SpecStateInterrupted:
+ test.Error = &JUnitError{
+ Message: spec.Failure.Message,
+ Type: "interrupted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStateAborted:
+ test.Failure = &JUnitFailure{
+ Message: spec.Failure.Message,
+ Type: "aborted",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Failure.Message = ""
+ }
+ suite.Errors += 1
+ case types.SpecStatePanicked:
+ test.Error = &JUnitError{
+ Message: spec.Failure.ForwardedPanic,
+ Type: "panicked",
+ Description: failureDescriptionForUnstructuredReporters(spec),
+ }
+ if config.OmitFailureMessageAttr {
+ test.Error.Message = ""
+ }
+ suite.Errors += 1
+ }
+
+ suite.TestCases = append(suite.TestCases, test)
+ }
+
+ junitReport := JUnitTestSuites{
+ Tests: suite.Tests,
+ Disabled: suite.Disabled + suite.Skipped,
+ Errors: suite.Errors,
+ Failures: suite.Failures,
+ Time: suite.Time,
+ TestSuites: []JUnitTestSuite{suite},
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(junitReport)
+
+ return f.Close()
+}
+
+func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ mergedReport := JUnitTestSuites{}
+ for _, source := range sources {
+ report := JUnitTestSuites{}
+ f, err := os.Open(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ err = xml.NewDecoder(f).Decode(&report)
+ _ = f.Close()
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+
+ mergedReport.Tests += report.Tests
+ mergedReport.Disabled += report.Disabled
+ mergedReport.Errors += report.Errors
+ mergedReport.Failures += report.Failures
+ mergedReport.Time += report.Time
+ mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...)
+ }
+
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return messages, err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return messages, err
+ }
+ f.WriteString(xml.Header)
+ encoder := xml.NewEncoder(f)
+ encoder.Indent(" ", " ")
+ encoder.Encode(mergedReport)
+
+ return messages, f.Close()
+}
+
+func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true)
+ if len(spec.AdditionalFailures) > 0 {
+ out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n")
+ }
+ return out.String()
+}
+
+func systemErrForUnstructuredReporters(spec types.SpecReport) string {
+ return RenderTimeline(spec, true)
+}
+
+func RenderTimeline(spec types.SpecReport, noColor bool) string {
+ out := &strings.Builder{}
+ NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline())
+ return out.String()
+}
+
+func systemOutForUnstructuredReporters(spec types.SpecReport) string {
+ return spec.CapturedStdOutErr
+}
+
+// Deprecated JUnitReporter (so folks can still compile their suites)
+type JUnitReporter struct{}
+
+func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} }
+func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {}
+func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {}
+func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {}
+func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
new file mode 100644
index 00000000000..5e726c464ef
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
@@ -0,0 +1,29 @@
+package reporters
+
+import (
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+type Reporter interface {
+ SuiteWillBegin(report types.Report)
+ WillRun(report types.SpecReport)
+ DidRun(report types.SpecReport)
+ SuiteDidEnd(report types.Report)
+
+ //Timeline emission
+ EmitFailure(state types.SpecState, failure types.Failure)
+ EmitProgressReport(progressReport types.ProgressReport)
+ EmitReportEntry(entry types.ReportEntry)
+ EmitSpecEvent(event types.SpecEvent)
+}
+
+type NoopReporter struct{}
+
+func (n NoopReporter) SuiteWillBegin(report types.Report) {}
+func (n NoopReporter) WillRun(report types.SpecReport) {}
+func (n NoopReporter) DidRun(report types.SpecReport) {}
+func (n NoopReporter) SuiteDidEnd(report types.Report) {}
+func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
+func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {}
+func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {}
+func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
new file mode 100644
index 00000000000..e990ad82e13
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
@@ -0,0 +1,105 @@
+/*
+
+TeamCity Reporter for Ginkgo
+
+Makes use of TeamCity's support for Service Messages
+http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests
+*/
+
+package reporters
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+func tcEscape(s string) string {
+ s = strings.ReplaceAll(s, "|", "||")
+ s = strings.ReplaceAll(s, "'", "|'")
+ s = strings.ReplaceAll(s, "\n", "|n")
+ s = strings.ReplaceAll(s, "\r", "|r")
+ s = strings.ReplaceAll(s, "[", "|[")
+ s = strings.ReplaceAll(s, "]", "|]")
+ return s
+}
+
+func GenerateTeamcityReport(report types.Report, dst string) error {
+ if err := os.MkdirAll(path.Dir(dst), 0770); err != nil {
+ return err
+ }
+ f, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+
+ name := report.SuiteDescription
+ labels := report.SuiteLabels
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name))
+ for _, spec := range report.SpecReports {
+ name := fmt.Sprintf("[%s]", spec.LeafNodeType)
+ if spec.FullText() != "" {
+ name = name + " " + spec.FullText()
+ }
+ labels := spec.Labels()
+ if len(labels) > 0 {
+ name = name + " [" + strings.Join(labels, ", ") + "]"
+ }
+
+ name = tcEscape(name)
+ fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name)
+ switch spec.State {
+ case types.SpecStatePending:
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name)
+ case types.SpecStateSkipped:
+ message := "skipped"
+ if spec.Failure.Message != "" {
+ message += " - " + spec.Failure.Message
+ }
+ fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message))
+ case types.SpecStateFailed:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStatePanicked:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details))
+ case types.SpecStateTimedout:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateInterrupted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ case types.SpecStateAborted:
+ details := failureDescriptionForUnstructuredReporters(spec)
+ fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details))
+ }
+
+ fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec)))
+ fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0))
+ }
+ fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription))
+
+ return f.Close()
+}
+
+func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) {
+ messages := []string{}
+ merged := []byte{}
+ for _, source := range sources {
+ data, err := os.ReadFile(source)
+ if err != nil {
+ messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
+ continue
+ }
+ os.Remove(source)
+ merged = append(merged, data...)
+ }
+ return messages, os.WriteFile(dst, merged, 0666)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
new file mode 100644
index 00000000000..aa1a35176aa
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go
@@ -0,0 +1,221 @@
+package ginkgo
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/internal/global"
+ "github.com/onsi/ginkgo/v2/reporters"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+Report represents the report for a Suite.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#Report
+*/
+type Report = types.Report
+
+/*
+SpecReport represents the report for a Spec.
+It is documented here: https://pkg.go.dev/github.com/onsi/ginkgo/v2/types#SpecReport
+*/
+type SpecReport = types.SpecReport
+
+/*
+CurrentSpecReport returns information about the current running spec.
+The returned object is a types.SpecReport which includes helper methods
+to make extracting information about the spec easier.
+
+You can learn more about SpecReport here: https://pkg.go.dev/github.com/onsi/ginkgo/types#SpecReport
+You can learn more about CurrentSpecReport() here: https://onsi.github.io/ginkgo/#getting-a-report-for-the-current-spec
+*/
+func CurrentSpecReport() SpecReport {
+ return global.Suite.CurrentSpecReport()
+}
+
+/*
+ ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+
+- ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted.
+- ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriters behavior).
+- ReportEntryVisibilityNever: the ReportEntry is never emitted though it appears in any generated machine-readable reports (e.g. by setting `--json-report`).
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+type ReportEntryVisibility = types.ReportEntryVisibility
+
+const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, ReportEntryVisibilityNever = types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose, types.ReportEntryVisibilityNever
+
+/*
+AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport.
+It can take any of the following arguments:
+ - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted.
+ - A ReportEntryVisibility enum to control the visibility of the ReportEntry
+ - An Offset or CodeLocation decoration to control the reported location of the ReportEntry
+
+If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console.
+
+AddReportEntry() must be called within a Subject or Setup node - not in a Container node.
+
+You can learn more about Report Entries here: https://onsi.github.io/ginkgo/#attaching-data-to-reports
+*/
+func AddReportEntry(name string, args ...interface{}) {
+ cl := types.NewCodeLocation(1)
+ reportEntry, err := internal.NewReportEntry(name, cl, args...)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Report Entry:\n%s", err.Error()), 1)
+ }
+ err = global.Suite.AddReportEntry(reportEntry)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to add Report Entry:\n%s", err.Error()), 1)
+ }
+}
+
+/*
+ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
+receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts.
+
+Example:
+
+ ReportBeforeEach(func(report SpecReport) { // process report })
+ ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
+You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportBeforeEach(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...))
+}
+
+/*
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+
+Example:
+
+ ReportAfterEach(func(report SpecReport) { // process report })
+ ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
+You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportAfterEach(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...))
+}
+
+/*
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportBeforeSuite(func(r Report) { // process report })
+ ReportBeforeSuite(func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
+ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite
+
+You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure.
+You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportBeforeSuite(body any, args ...any) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
+}
+
+/*
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion,
+and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior.
+
+Example Usage:
+
+ ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+ ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+ // process report
+ }, NodeTimeout(1 * time.Minute))
+
+They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+
+When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
+all parallel nodes
+
+In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags.
+
+You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
+You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+
+You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
+*/
+func ReportAfterSuite(text string, body any, args ...interface{}) bool {
+ combinedArgs := []interface{}{body}
+ combinedArgs = append(combinedArgs, args...)
+ return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
+}
+
+func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) {
+ body := func(report Report) {
+ if reporterConfig.JSONReport != "" {
+ err := reporters.GenerateJSONReport(report, reporterConfig.JSONReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JSON report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.JUnitReport != "" {
+ err := reporters.GenerateJUnitReport(report, reporterConfig.JUnitReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate JUnit report:\n%s", err.Error()))
+ }
+ }
+ if reporterConfig.TeamcityReport != "" {
+ err := reporters.GenerateTeamcityReport(report, reporterConfig.TeamcityReport)
+ if err != nil {
+ Fail(fmt.Sprintf("Failed to generate Teamcity report:\n%s", err.Error()))
+ }
+ }
+ }
+
+ flags := []string{}
+ if reporterConfig.JSONReport != "" {
+ flags = append(flags, "--json-report")
+ }
+ if reporterConfig.JUnitReport != "" {
+ flags = append(flags, "--junit-report")
+ }
+ if reporterConfig.TeamcityReport != "" {
+ flags = append(flags, "--teamcity-report")
+ }
+ pushNode(internal.NewNode(
+ deprecationTracker, types.NodeTypeReportAfterSuite,
+ fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")),
+ body,
+ types.NewCustomCodeLocation("autogenerated by Ginkgo"),
+ ))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
new file mode 100644
index 00000000000..c7de7a8be09
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/table_dsl.go
@@ -0,0 +1,386 @@
+package ginkgo
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/internal"
+ "github.com/onsi/ginkgo/v2/types"
+)
+
+/*
+The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via:
+
+ fmt.Sprintf(formatString, parameters...)
+
+where parameters are the parameters passed into the entry.
+
+When passed into an Entry, the EntryDescription is used to generate the name of that entry. When passed to DescribeTable, the EntryDescription is used to generate the names for any entries that have `nil` descriptions.
+
+You can learn more about generating EntryDescriptions here: https://onsi.github.io/ginkgo/#generating-entry-descriptions
+*/
type EntryDescription string

// render produces an entry name by treating ed as an fmt format string and
// interpolating the entry's parameters into it.
func (ed EntryDescription) render(args ...interface{}) string {
	format := string(ed)
	return fmt.Sprintf(format, args...)
}
+
+/*
+DescribeTable describes a table-driven spec.
+
+For example:
+
+ DescribeTable("a simple table",
+ func(x int, y int, expected bool) {
+ Ω(x > y).Should(Equal(expected))
+ },
+ Entry("x > y", 1, 0, true),
+ Entry("x == y", 0, 0, false),
+ Entry("x < y", 0, 1, false),
+ )
+
+You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
func DescribeTable(description string, args ...interface{}) bool {
	GinkgoHelper()
	// false: each Entry becomes an individual It, not a per-entry subtree.
	generateTable(description, false, args...)
	return true
}
+
+/*
+You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTable(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, false, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTable = PDescribeTable
+
+/*
+DescribeTableSubtree describes a table-driven spec that generates a set of tests for each entry.
+
+For example:
+
+ DescribeTableSubtree("a subtree table",
+ func(url string, code int, message string) {
+ var resp *http.Response
+ BeforeEach(func() {
+ var err error
+ resp, err = http.Get(url)
+ Expect(err).NotTo(HaveOccurred())
+ DeferCleanup(resp.Body.Close)
+ })
+
+ It("should return the expected status code", func() {
+ Expect(resp.StatusCode).To(Equal(code))
+ })
+
+ It("should return the expected message", func() {
+ body, err := ioutil.ReadAll(resp.Body)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(string(body)).To(Equal(message))
+ })
+ },
+ Entry("default response", "example.com/response", http.StatusOK, "hello world"),
+ Entry("missing response", "example.com/missing", http.StatusNotFound, "wat?"),
+ )
+
+Note that you **must** define an It inside the body function.
+
+You can learn more about DescribeTableSubtree here: https://onsi.github.io/ginkgo/#table-specs
+And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns
+*/
func DescribeTableSubtree(description string, args ...interface{}) bool {
	GinkgoHelper()
	// true: each entry's body is installed as a container (subtree) rather
	// than a single It; the body is expected to define setup nodes and Its.
	generateTable(description, true, args...)
	return true
}
+
+/*
+You can focus a table with `FDescribeTableSubtree`. This is equivalent to `FDescribe`.
+*/
+func FDescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Focus)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `PDescribeTableSubtree`. This is equivalent to `PDescribe`.
+*/
+func PDescribeTableSubtree(description string, args ...interface{}) bool {
+ GinkgoHelper()
+ args = append(args, internal.Pending)
+ generateTable(description, true, args...)
+ return true
+}
+
+/*
+You can mark a table as pending with `XDescribeTableSubtree`. This is equivalent to `XDescribe`.
+*/
+var XDescribeTableSubtree = PDescribeTableSubtree
+
+/*
+TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
+*/
type TableEntry struct {
	description  interface{}        // string, EntryDescription format, func returning string, or nil (falls back to the table-level description)
	decorations  []interface{}      // Ginkgo decorators (e.g. Focus, Pending) split out by PartitionDecorations
	parameters   []interface{}      // arguments forwarded to the table body function
	codeLocation types.CodeLocation // where the Entry was constructed, for error attribution
}
+
+/*
+Entry constructs a TableEntry.
+
+The first argument is a description. This can be a string, a function that accepts the parameters passed to the TableEntry and returns a string, an EntryDescription format string, or nil. If nil is provided then the name of the Entry is derived using the table-level entry description.
+Subsequent arguments accept any Ginkgo decorators. These are filtered out and the remaining arguments are passed into the Spec function associated with the table.
+
+Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in.
+
+If you want to generate interruptible specs simply write a Table function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators.
+
+You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs
+*/
+func Entry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can focus a particular entry with FEntry. This is equivalent to FIt.
+*/
+func FEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Focus)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
+*/
+func PEntry(description interface{}, args ...interface{}) TableEntry {
+ GinkgoHelper()
+ decorations, parameters := internal.PartitionDecorations(args...)
+ decorations = append(decorations, internal.Pending)
+ return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)}
+}
+
+/*
+You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
+*/
+var XEntry = PEntry
+
// Sentinel reflect.Types used by generateTable to decide whether a table
// body's first parameter is a context.Context or a Ginkgo SpecContext.
var contextType = reflect.TypeOf(new(context.Context)).Elem()
var specContextType = reflect.TypeOf(new(SpecContext)).Elem()
+
// generateTable is the shared implementation behind DescribeTable and
// DescribeTableSubtree. It classifies the variadic args (entries, a body
// function, an optional table-level description generator, decorators),
// then registers one container node whose closure emits a child node per
// entry: an It when isSubtree is false, a Container when it is true.
// Entry validation errors are deferred until the entry's node actually runs.
func generateTable(description string, isSubtree bool, args ...interface{}) {
	GinkgoHelper()
	cl := types.NewCodeLocation(0)
	containerNodeArgs := []interface{}{cl}

	entries := []TableEntry{}
	var internalBody interface{}
	var internalBodyType reflect.Type

	// Default naming for entries with nil descriptions: join the stringified
	// parameters. May be overridden below by an EntryDescription or a
	// string-returning function passed at the table level.
	var tableLevelEntryDescription interface{}
	tableLevelEntryDescription = func(args ...interface{}) string {
		out := []string{}
		for _, arg := range args {
			out = append(out, fmt.Sprint(arg))
		}
		return "Entry: " + strings.Join(out, ", ")
	}

	if len(args) == 1 {
		exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl))
	}

	// Classify each argument by runtime type. Note the case order matters:
	// string-returning funcs are description generators; any other func is
	// the (single) table body; everything else is forwarded to the container.
	for i, arg := range args {
		switch t := reflect.TypeOf(arg); {
		case t == nil:
			exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl))
		case t == reflect.TypeOf(TableEntry{}):
			entries = append(entries, arg.(TableEntry))
		case t == reflect.TypeOf([]TableEntry{}):
			entries = append(entries, arg.([]TableEntry)...)
		case t == reflect.TypeOf(EntryDescription("")):
			tableLevelEntryDescription = arg.(EntryDescription).render
		case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
			tableLevelEntryDescription = arg
		case t.Kind() == reflect.Func:
			if internalBody != nil {
				exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl))
			}
			internalBody = arg
			internalBodyType = reflect.TypeOf(internalBody)
		default:
			containerNodeArgs = append(containerNodeArgs, arg)
		}
	}

	containerNodeArgs = append(containerNodeArgs, func() {
		for _, entry := range entries {
			var err error
			entry := entry
			var description string
			// Resolve the entry's display name from whichever description
			// form it carries (nil -> table-level generator).
			switch t := reflect.TypeOf(entry.description); {
			case t == nil:
				err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false)
				if err == nil {
					description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String()
				}
			case t == reflect.TypeOf(EntryDescription("")):
				description = entry.description.(EntryDescription).render(entry.parameters...)
			case t == reflect.TypeOf(""):
				description = entry.description.(string)
			case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""):
				err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false)
				if err == nil {
					description = invokeFunction(entry.description, entry.parameters)[0].String()
				}
			default:
				err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation)
			}

			internalNodeArgs := []interface{}{entry.codeLocation}
			internalNodeArgs = append(internalNodeArgs, entry.decorations...)

			// hasContext: the body's first parameter is a SpecContext or
			// context.Context that Ginkgo should inject — unless the entry
			// already supplies its own context as the first parameter.
			hasContext := false
			if internalBodyType.NumIn() > 0 {
				if internalBodyType.In(0).Implements(specContextType) {
					hasContext = true
				} else if internalBodyType.In(0).Implements(contextType) {
					hasContext = true
					if len(entry.parameters) > 0 && reflect.TypeOf(entry.parameters[0]) != nil && reflect.TypeOf(entry.parameters[0]).Implements(contextType) {
						// we allow you to pass in a non-nil context
						hasContext = false
					}
				}
			}

			if err == nil {
				err = validateParameters(internalBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext)
			}

			// Any validation error is raised inside the node body so it is
			// reported as that entry's failure, not at table construction.
			if hasContext {
				internalNodeArgs = append(internalNodeArgs, func(c SpecContext) {
					if err != nil {
						panic(err)
					}
					invokeFunction(internalBody, append([]interface{}{c}, entry.parameters...))
				})
				if isSubtree {
					exitIfErr(types.GinkgoErrors.ContextsCannotBeUsedInSubtreeTables(cl))
				}
			} else {
				internalNodeArgs = append(internalNodeArgs, func() {
					if err != nil {
						panic(err)
					}
					invokeFunction(internalBody, entry.parameters)
				})
			}

			internalNodeType := types.NodeTypeIt
			if isSubtree {
				internalNodeType = types.NodeTypeContainer
			}

			pushNode(internal.NewNode(deprecationTracker, internalNodeType, description, internalNodeArgs...))
		}
	})

	pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, description, containerNodeArgs...))
}
+
+func invokeFunction(function interface{}, parameters []interface{}) []reflect.Value {
+ inValues := make([]reflect.Value, len(parameters))
+
+ funcType := reflect.TypeOf(function)
+ limit := funcType.NumIn()
+ if funcType.IsVariadic() {
+ limit = limit - 1
+ }
+
+ for i := 0; i < limit && i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], funcType.In(i))
+ }
+
+ if funcType.IsVariadic() {
+ variadicType := funcType.In(limit).Elem()
+ for i := limit; i < len(parameters); i++ {
+ inValues[i] = computeValue(parameters[i], variadicType)
+ }
+ }
+
+ return reflect.ValueOf(function).Call(inValues)
+}
+
// validateParameters checks that parameters are compatible with function's
// signature, returning a descriptive error (attributed to cl) on arity or
// type mismatches. kind names the function's role for error messages (e.g.
// "Table Body function"). When hasContext is true the function's first formal
// parameter is a Ginkgo-injected context, so it is excluded from the count
// (limit) and the comparison index is shifted by offset.
func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error {
	funcType := reflect.TypeOf(function)
	limit := funcType.NumIn()
	offset := 0
	if hasContext {
		limit = limit - 1
		offset = 1
	}
	if funcType.IsVariadic() {
		// The variadic slot accepts zero or more values, so it does not count
		// toward the required minimum.
		limit = limit - 1
	}
	if len(parameters) < limit {
		return types.GinkgoErrors.TooFewParametersToTableFunction(limit, len(parameters), kind, cl)
	}
	if len(parameters) > limit && !funcType.IsVariadic() {
		return types.GinkgoErrors.TooManyParametersToTableFunction(limit, len(parameters), kind, cl)
	}
	var i = 0
	for ; i < limit; i++ {
		actual := reflect.TypeOf(parameters[i])
		expected := funcType.In(i + offset)
		// A nil parameter (actual == nil) is allowed for any formal type; it
		// is later converted to the zero value by computeValue.
		if !(actual == nil) && !actual.AssignableTo(expected) {
			return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl)
		}
	}
	if funcType.IsVariadic() {
		// Remaining parameters must be assignable to the variadic element type.
		expected := funcType.In(limit + offset).Elem()
		for ; i < len(parameters); i++ {
			actual := reflect.TypeOf(parameters[i])
			if !(actual == nil) && !actual.AssignableTo(expected) {
				return types.GinkgoErrors.IncorrectVariadicParameterTypeToTableFunction(expected, actual, kind, cl)
			}
		}
	}

	return nil
}
+
// computeValue converts a raw parameter into a reflect.Value of type t.
// A nil parameter yields t's zero value, since reflect.ValueOf(nil) would
// produce an invalid Value that cannot be passed to Call.
func computeValue(parameter interface{}, t reflect.Type) reflect.Value {
	if parameter != nil {
		return reflect.ValueOf(parameter)
	}
	return reflect.Zero(t)
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
new file mode 100644
index 00000000000..57e87517e07
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
@@ -0,0 +1,159 @@
+package types
+
+import (
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "sync"
+)
+
// CodeLocation identifies a point in source code, optionally carrying a full
// stack trace or a custom display message that overrides file:line rendering.
type CodeLocation struct {
	FileName       string `json:",omitempty"`
	LineNumber     int    `json:",omitempty"`
	FullStackTrace string `json:",omitempty"`
	CustomMessage  string `json:",omitempty"`
}

// String renders the location as "file:line", unless a custom message was
// supplied, in which case the message is returned instead.
func (codeLocation CodeLocation) String() string {
	if codeLocation.CustomMessage != "" {
		return codeLocation.CustomMessage
	}
	return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber)
}

// ContentsOfLine returns the source text at the location, or "" when this is
// a custom location, the file cannot be read, or the line is out of range.
func (codeLocation CodeLocation) ContentsOfLine() string {
	if codeLocation.CustomMessage != "" {
		return ""
	}
	data, err := os.ReadFile(codeLocation.FileName)
	if err != nil {
		return ""
	}
	lines := strings.Split(string(data), "\n")
	if codeLocation.LineNumber > len(lines) {
		return ""
	}
	return lines[codeLocation.LineNumber-1]
}
+
// codeLocationLocator records which program counters / function names have
// been marked as "helpers" so getCodeLocation can skip them when attributing
// a code location to user code.
type codeLocationLocator struct {
	pcs     map[uintptr]bool // program counters already processed
	helpers map[string]bool  // fully-qualified function names marked as helpers
	lock    *sync.Mutex      // guards both maps
}

// addHelper registers the function containing pc as a helper (idempotent).
func (c *codeLocationLocator) addHelper(pc uintptr) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.pcs[pc] {
		return
	}
	// The lock is deliberately dropped around runtime.FuncForPC and
	// re-acquired afterwards (the deferred Unlock pairs with this re-Lock) —
	// presumably to avoid holding the lock across the runtime call. Two
	// goroutines may race past the pcs check for the same pc; the writes
	// below are idempotent, so the duplicate work is benign.
	c.lock.Unlock()
	f := runtime.FuncForPC(pc)
	c.lock.Lock()
	if f == nil {
		return
	}
	c.helpers[f.Name()] = true
	c.pcs[pc] = true
}

// hasHelper reports whether the named function has been marked as a helper.
func (c *codeLocationLocator) hasHelper(name string) bool {
	c.lock.Lock()
	defer c.lock.Unlock()
	return c.helpers[name]
}

// getCodeLocation walks up to 40 stack frames (starting skip+2 to step over
// runtime.Callers and this method) and returns the first frame that is not a
// registered helper. Returns the zero CodeLocation if nothing qualifies.
func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation {
	pc := make([]uintptr, 40)
	n := runtime.Callers(skip+2, pc)
	if n == 0 {
		return CodeLocation{}
	}
	pc = pc[:n]
	frames := runtime.CallersFrames(pc)
	for {
		frame, more := frames.Next()
		if !c.hasHelper(frame.Function) {
			return CodeLocation{FileName: frame.File, LineNumber: frame.Line}
		}
		if !more {
			break
		}
	}
	return CodeLocation{}
}

// clLocator is the package-wide helper registry used by NewCodeLocation and
// MarkAsHelper.
var clLocator = &codeLocationLocator{
	pcs:     map[uintptr]bool{},
	helpers: map[string]bool{},
	lock:    &sync.Mutex{},
}
+
+// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers.
+func MarkAsHelper(optionalSkip ...int) {
+ skip := 1
+ if len(optionalSkip) > 0 {
+ skip += optionalSkip[0]
+ }
+ pc, _, _, ok := runtime.Caller(skip)
+ if ok {
+ clLocator.addHelper(pc)
+ }
+}
+
+func NewCustomCodeLocation(message string) CodeLocation {
+ return CodeLocation{
+ CustomMessage: message,
+ }
+}
+
// NewCodeLocation returns the location of the nearest stack frame that is not
// a registered Ginkgo helper, skipping an additional `skip` frames (the +1
// steps over NewCodeLocation itself).
func NewCodeLocation(skip int) CodeLocation {
	return clLocator.getCodeLocation(skip + 1)
}

// NewCodeLocationWithStackTrace is like NewCodeLocation but also captures the
// current goroutine's stack trace, pruned of Ginkgo/runtime frames.
func NewCodeLocationWithStackTrace(skip int) CodeLocation {
	cl := clLocator.getCodeLocation(skip + 1)
	cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1)
	return cl
}
+
// PruneStack removes references to functions that are internal to Ginkgo and
// the Go runtime from a stack string, plus a certain number of leading stack
// entries. The input has the format produced by runtime/debug.Stack: an
// optional "goroutine N [...]:" header followed by pairs of lines (function
// name, then source location). debug.Stack adds itself as the first entry, so
// typically skip must be >= 1 to remove it. Setting GINKGO_PRUNE_STACK=FALSE
// disables the internal-frame filtering (entry skipping still applies).
func PruneStack(fullStackTrace string, skip int) string {
	lines := strings.Split(fullStackTrace, "\n")
	// Drop the optional goroutine header so function-name lines sit at even
	// indices and source-location lines at odd indices.
	if len(lines) > 0 && strings.HasPrefix(lines[0], "goroutine ") {
		lines = lines[1:]
	}
	// Skip the requested entries (two lines each); the "+1" accounts for
	// runtime/debug.Stack's own frame.
	if drop := 2 * (skip + 1); len(lines) > drop {
		lines = lines[drop:]
	}
	if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" {
		return strings.Join(lines, "\n")
	}
	internalFrame := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
	kept := []string{}
	for i := 0; i+1 < len(lines); i += 2 {
		// Filter each entry based on its source-location line.
		if !internalFrame.MatchString(lines[i+1]) {
			kept = append(kept, lines[i], lines[i+1])
		}
	}
	return strings.Join(kept, "\n")
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/config.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/config.go
new file mode 100644
index 00000000000..66463cf5ea1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/config.go
@@ -0,0 +1,770 @@
+/*
+Ginkgo accepts a number of configuration options.
+These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli)
+*/
+
+package types
+
+import (
+ "flag"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
// Configuration controlling how an individual test suite is run
type SuiteConfig struct {
	RandomSeed            int64
	RandomizeAllSpecs     bool
	FocusStrings          []string
	SkipStrings           []string
	FocusFiles            []string
	SkipFiles             []string
	LabelFilter           string
	FailOnPending         bool
	FailOnEmpty           bool
	FailFast              bool
	FlakeAttempts         int
	MustPassRepeatedly    int
	DryRun                bool
	PollProgressAfter     time.Duration
	PollProgressInterval  time.Duration
	Timeout               time.Duration
	EmitSpecProgress      bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually
	OutputInterceptorMode string
	SourceRoots           []string
	GracePeriod           time.Duration

	ParallelProcess int
	ParallelTotal   int
	ParallelHost    string
}

// NewDefaultSuiteConfig returns a SuiteConfig with Ginkgo's defaults: a
// time-derived random seed, a one-hour suite timeout, a 30s interrupt grace
// period, and serial execution (one process of one).
func NewDefaultSuiteConfig() SuiteConfig {
	cfg := SuiteConfig{}
	cfg.RandomSeed = time.Now().Unix()
	cfg.Timeout = time.Hour
	cfg.ParallelProcess = 1
	cfg.ParallelTotal = 1
	cfg.GracePeriod = 30 * time.Second
	return cfg
}
+
// VerbosityLevel is an ordered scale of reporter verbosity.
type VerbosityLevel uint

const (
	VerbosityLevelSuccinct VerbosityLevel = iota
	VerbosityLevelNormal
	VerbosityLevelVerbose
	VerbosityLevelVeryVerbose
)

// GT reports whether vl is strictly more verbose than comp.
func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { return vl > comp }

// GTE reports whether vl is at least as verbose as comp.
func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { return vl >= comp }

// Is reports whether vl equals comp.
func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { return vl == comp }

// LTE reports whether vl is at most as verbose as comp.
func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { return vl <= comp }

// LT reports whether vl is strictly less verbose than comp.
func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { return vl < comp }

// Configuration for Ginkgo's reporter
type ReporterConfig struct {
	NoColor        bool
	Succinct       bool
	Verbose        bool
	VeryVerbose    bool
	FullTrace      bool
	ShowNodeEvents bool
	GithubOutput   bool
	SilenceSkips   bool
	ForceNewlines  bool

	JSONReport     string
	JUnitReport    string
	TeamcityReport string
}

// Verbosity maps the boolean flags to a VerbosityLevel. Precedence when
// several flags are set: Succinct, then Verbose, then VeryVerbose; with none
// set the level is Normal.
func (rc ReporterConfig) Verbosity() VerbosityLevel {
	switch {
	case rc.Succinct:
		return VerbosityLevelSuccinct
	case rc.Verbose:
		return VerbosityLevelVerbose
	case rc.VeryVerbose:
		return VerbosityLevelVeryVerbose
	default:
		return VerbosityLevelNormal
	}
}

// WillGenerateReport reports whether any machine-readable report output has
// been requested.
func (rc ReporterConfig) WillGenerateReport() bool {
	for _, target := range []string{rc.JSONReport, rc.JUnitReport, rc.TeamcityReport} {
		if target != "" {
			return true
		}
	}
	return false
}

// NewDefaultReporterConfig returns a zero-valued ReporterConfig.
func NewDefaultReporterConfig() ReporterConfig {
	return ReporterConfig{}
}
+
// Configuration for the Ginkgo CLI
type CLIConfig struct {
	//for build, run, and watch
	Recurse      bool
	SkipPackage  string
	RequireSuite bool
	NumCompilers int

	//for run and watch only
	Procs                     int
	Parallel                  bool
	AfterRunHook              string
	OutputDir                 string
	KeepSeparateCoverprofiles bool
	KeepSeparateReports       bool

	//for run only
	KeepGoing       bool
	UntilItFails    bool
	Repeat          int
	RandomizeSuites bool

	//for watch only
	Depth       int
	WatchRegExp string
}

// NewDefaultCLIConfig returns a CLIConfig with watch defaults: depth 1 and a
// regexp matching .go files.
func NewDefaultCLIConfig() CLIConfig {
	return CLIConfig{Depth: 1, WatchRegExp: `\.go$`}
}

// ComputedProcs resolves the number of parallel processes: an explicit Procs
// value wins; otherwise 1 when not parallel, or NumCPU (minus one on machines
// with more than 4 CPUs, leaving a core free) when parallel.
func (g CLIConfig) ComputedProcs() int {
	if g.Procs > 0 {
		return g.Procs
	}
	if !g.Parallel {
		return 1
	}
	n := runtime.NumCPU()
	if n > 4 {
		return n - 1
	}
	return n
}

// ComputedNumCompilers resolves the number of concurrent compilers: an
// explicit NumCompilers value wins, otherwise NumCPU.
func (g CLIConfig) ComputedNumCompilers() int {
	if g.NumCompilers > 0 {
		return g.NumCompilers
	}
	return runtime.NumCPU()
}
+
// Configuration for the Ginkgo CLI capturing available go flags
// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags).
// More details can be found at:
// https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/
type GoFlagsConfig struct {
	//build-time flags for code-and-performance analysis
	Race      bool
	Cover     bool
	CoverMode string
	CoverPkg  string
	Vet       string

	//run-time flags for code-and-performance analysis
	BlockProfile         string
	BlockProfileRate     int
	CoverProfile         string
	CPUProfile           string
	MemProfile           string
	MemProfileRate       int
	MutexProfile         string
	MutexProfileFraction int
	Trace                string

	//build-time flags for building
	A             bool
	ASMFlags      string
	BuildMode     string
	Compiler      string
	GCCGoFlags    string
	GCFlags       string
	InstallSuffix string
	LDFlags       string
	LinkShared    bool
	Mod           string
	N             bool
	ModFile       string
	ModCacheRW    bool
	MSan          bool
	PkgDir        string
	Tags          string
	TrimPath      bool
	ToolExec      string
	Work          bool
	X             bool
}

// NewDefaultGoFlagsConfig returns a zero-valued GoFlagsConfig.
func NewDefaultGoFlagsConfig() GoFlagsConfig {
	return GoFlagsConfig{}
}

// BinaryMustBePreserved reports whether any run-time profile output was
// requested, in which case the compiled test binary must be kept so the
// profiles can be inspected against it.
func (g GoFlagsConfig) BinaryMustBePreserved() bool {
	for _, profile := range []string{g.BlockProfile, g.CPUProfile, g.MemProfile, g.MutexProfile} {
		if profile != "" {
			return true
		}
	}
	return false
}
+
// Configuration that were deprecated in 2.0. The fields are retained so the
// corresponding legacy flags can still be parsed (see the DeprecatedName
// entries in the flag tables below, whose "D." KeyPaths target this struct).
type deprecatedConfig struct {
	DebugParallel                   bool
	NoisySkippings                  bool
	NoisyPendings                   bool
	RegexScansFilePath              bool
	SlowSpecThresholdWithFLoatUnits float64
	Stream                          bool
	Notify                          bool
	EmitSpecProgress                bool
	SlowSpecThreshold               time.Duration
	AlwaysEmitGinkgoWriter          bool
}
+
+// Flags
+
// Flags sections used by both the CLI and the Ginkgo test process.
// Each section groups related flags under a heading; Style values look like
// formatter color/style tags (e.g. {{green}}) — presumably consumed by
// Ginkgo's output formatter.
var FlagSections = GinkgoFlagSections{
	{Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"},
	{Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"},
	{Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"},
	{Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism",
		Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."},
	{Key: "filter", Style: "{{cyan}}", Heading: "Filtering Tests"},
	{Key: "failure", Style: "{{red}}", Heading: "Failure Handling"},
	{Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"},
	{Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"},
	{Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"},
	{Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests",
		Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."},
	{Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"},
	{Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"},
	{Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true,
		Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."},
}
+
// SuiteConfigFlags provides flags for the Ginkgo test process, and CLI.
// KeyPaths beginning with "S." name SuiteConfig fields; "D." names
// deprecatedConfig fields (the suffixes match the struct field names above).
var SuiteConfigFlags = GinkgoFlags{
	{KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo",
		Usage: "The seed used to randomize the spec suite.", AlwaysExport: true},
	{KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."},

	{KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."},
	{KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will stop running a test suite after a failure occurs."},
	{KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."},
	{KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure",
		Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."},

	{KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags",
		Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."},
	{KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0",
		Usage: "Emit node progress reports periodically if node hasn't completed after this duration."},
	{KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s",
		Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."},
	{KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug",
		Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."},
	{KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h",
		Usage: "Test suite fails if it does not complete within the specified timeout."},
	{KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s",
		Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."},
	{KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none",
		Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."},

	{KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression",
		Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"},
	{KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter",
		Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."},
	{KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter",
		Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."},
	{KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
		Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."},
	{KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line",
		Usage: "If set, ginkgo will skip specs in matching files. Can be specified multiple times, values are ORed."},

	{KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"},
	{KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"},
	{KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug",
		DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."},
}
+
// ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI).
// These are normally set by the Ginkgo CLI when it launches worker processes;
// the "S." KeyPaths target the Parallel* fields of SuiteConfig.
var ParallelConfigFlags = GinkgoFlags{
	{KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
		Usage: "This worker process's (one-indexed) process number. For running specs in parallel."},
	{KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1",
		Usage: "The total number of worker processes. For running specs in parallel."},
	{KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI",
		Usage: "The address for the server that will synchronize the processes."},
}
+
+// ReporterConfigFlags provides flags for the Ginkgo test process, and CLI
+// Entries bound under "R." populate ReporterConfig; entries bound under "D."
+// are deprecated flags retained only to emit migration warnings.
+var ReporterConfigFlags = GinkgoFlags{
+ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, suppress color output in default reporter."},
+ {KeyPath: "R.Verbose", Name: "v", SectionKey: "output",
+ Usage: "If set, emits more output including GinkgoWriter contents."},
+ {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output",
+ Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."},
+ {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output",
+ Usage: "If set, default reporter prints out a very succinct report"},
+ {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output",
+ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"},
+ {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output",
+ Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"},
+ {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output",
+ Usage: "If set, default reporter prints easier to manage output in Github Actions."},
+ {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output",
+ Usage: "If set, default reporter will not print out skipped tests."},
+ {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output",
+ Usage: "If set, default reporter will ensure a newline appears after each test."},
+
+ // Machine-readable report outputs.
+ {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."},
+ {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure",
+ Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."},
+ {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output",
+ Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."},
+
+ // Deprecated flags ("D." bindings) kept so old invocations warn instead of erroring.
+ // NOTE: "SlowSpecThresholdWithFLoatUnits" reproduces the field name declared on
+ // deprecatedConfig elsewhere in this package — do not "correct" the spelling here.
+ {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: "slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold",
+ Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"},
+ {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"},
+ {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."},
+}
+
+// BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process
+// The returned flag set binds "S." key paths to suiteConfig, "R." to reporterConfig,
+// and routes deprecated "D." flags into a throwaway deprecatedConfig. All flags are
+// registered with a "ginkgo" prefix on the process-wide flag.CommandLine.
+func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...)
+ flags = flags.WithPrefix("ginkgo")
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "D": &deprecatedConfig{},
+ }
+ // Unrecognized go test flags are grouped under a separate gray "Go test flags" section.
+ extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"}
+
+ return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection)
+}
+
+// VetConfig validates that the Ginkgo test process' configuration is sound
+// It accumulates and returns every configuration error found (an empty slice
+// means the configuration is valid); it never mutates the passed-in configs.
+func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error {
+ errors := []error{}
+
+ // go test's -count/-parallel flags conflict with Ginkgo's own repeat/parallel machinery.
+ if flagSet.WasSet("count") || flagSet.WasSet("test.count") {
+ flag := flagSet.Lookup("count")
+ if flag == nil {
+ flag = flagSet.Lookup("test.count")
+ }
+ count, err := strconv.Atoi(flag.Value.String())
+ if err != nil || count != 1 {
+ errors = append(errors, GinkgoErrors.InvalidGoFlagCount())
+ }
+ }
+
+ if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") {
+ errors = append(errors, GinkgoErrors.InvalidGoFlagParallel())
+ }
+
+ // Parallelism invariants: total >= 1, 1 <= process <= total, and a sync
+ // host must be configured whenever more than one process runs.
+ if suiteConfig.ParallelTotal < 1 {
+ errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration())
+ }
+
+ if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 {
+ errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration())
+ }
+
+ if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" {
+ errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration())
+ }
+
+ if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 {
+ errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration())
+ }
+
+ if suiteConfig.GracePeriod <= 0 {
+ errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero())
+ }
+
+ // Filters are parsed here purely for validation; the parsed results are discarded.
+ if len(suiteConfig.FocusFiles) > 0 {
+ _, err := ParseFileFilters(suiteConfig.FocusFiles)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if len(suiteConfig.SkipFiles) > 0 {
+ _, err := ParseFileFilters(suiteConfig.SkipFiles)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ if suiteConfig.LabelFilter != "" {
+ _, err := ParseLabelFilter(suiteConfig.LabelFilter)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ // Only "", "dup", "swap", and "none" (case-insensitive) are valid interceptor modes.
+ switch strings.ToLower(suiteConfig.OutputInterceptorMode) {
+ case "", "dup", "swap", "none":
+ default:
+ errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode))
+ }
+
+ // --succinct, -v, and -vv are mutually exclusive verbosity levels.
+ numVerbosity := 0
+ for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} {
+ if v {
+ numVerbosity++
+ }
+ }
+ if numVerbosity > 1 {
+ errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration())
+ }
+
+ return errors
+}
+
+// GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands
+// All entries bind into CLIConfig via the "C." key-path prefix.
+var GinkgoCLISharedFlags = GinkgoFlags{
+ {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites",
+ Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."},
+ {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "comma-separated list of packages",
+ Usage: "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored."},
+ {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."},
+ {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)",
+ Usage: "When running multiple packages, the number of concurrent compilations to perform."},
+}
+
+// GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run)
+// Note that --procs and --nodes deliberately share the same KeyPath (C.Procs): --nodes is an alias.
+var GinkgoCLIRunAndWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "The number of parallel test nodes to run."},
+ {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)",
+ Usage: "--nodes is an alias for --procs"},
+ {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel",
+ Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."},
+ {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "Command to run when a test suite completes."},
+ {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support",
+ Usage: "A location to place all generated profiles and reports."},
+ {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis",
+ Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. The coverprofiles will remain in their respective package directories or in -output-dir if set."},
+ {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output",
+ Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."},
+
+ // Deprecated v1 flags retained only to surface migration warnings.
+ {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"},
+ {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"},
+}
+
+// GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands
+var GinkgoCLIRunFlags = GinkgoFlags{
+ {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, failures from earlier test suites do not prevent later test suites from running."},
+ {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."},
+ // Note: --repeat and --until-it-fails are mutually exclusive (enforced by VetAndInitializeCLIAndGoConfig).
+ {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once",
+ Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. If set to N the suite will be run N+1 times and will be required to pass each time."},
+ {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags",
+ Usage: "If set, ginkgo will randomize the order in which test suites run."},
+}
+
+// GinkgoCLIWatchFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands
+var GinkgoCLIWatchFlags = GinkgoFlags{
+ {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch",
+ Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."},
+ {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags",
+ UsageArgument: "Regular Expression",
+ UsageDefaultValue: `\.go$`,
+ Usage: "Only files matching this regular expression will be watched for changes."},
+}
+
+// GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI
+// All entries bind into GoFlagsConfig via the "Go." key-path prefix; usage text mirrors go's own documentation.
+var GoBuildFlags = GinkgoFlags{
+ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis",
+ Usage: "enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64, windows/amd64, linux/ppc64le and linux/arm64 (only for 48-bit VMA)."},
+ {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis",
+ Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`},
+ {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis",
+ Usage: "Enable coverage analysis. Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."},
+ {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis",
+ Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`},
+ {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis",
+ Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."},
+
+ // Pass-through go build flags; see 'go help build'.
+ {KeyPath: "Go.A", Name: "a", SectionKey: "go-build",
+ Usage: "force rebuilding of packages that are already up-to-date."},
+ {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool asm invocation."},
+ {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build",
+ Usage: "build mode to use. See 'go help buildmode' for more."},
+ {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build",
+ Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."},
+ {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each gccgo compiler/linker invocation."},
+ {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool compile invocation."},
+ {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build",
+ Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."},
+ {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build",
+ Usage: "arguments to pass on each go tool link invocation."},
+ {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build",
+ Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."},
+ {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build",
+ Usage: "module download mode to use: readonly, vendor, or mod. See 'go help modules' for more."},
+ {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build",
+ Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."},
+ {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build",
+ Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`},
+ {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build",
+ Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."},
+ {KeyPath: "Go.N", Name: "n", SectionKey: "go-build",
+ Usage: "print the commands but do not run them."},
+ {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build",
+ Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."},
+ {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build",
+ Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"},
+ {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build",
+ Usage: `remove all file system paths from the resulting executable. Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`},
+ {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build",
+ Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm '."},
+ {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build",
+ Usage: "print the name of the temporary work directory and do not delete it when exiting."},
+ {KeyPath: "Go.X", Name: "x", SectionKey: "go-build",
+ Usage: "print the commands."},
+}
+
+// GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI
+// When emitted for the test binary these are prefixed with "test." (see GenerateGinkgoTestRunArgs / GenerateGoTestRunArgs).
+var GoRunFlags = GinkgoFlags{
+ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis",
+ Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`},
+ {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`},
+ {KeyPath: "Go.CPUProfile", Name: "cpuprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a CPU profile to the specified file before exiting. Preserves test binary.`},
+ {KeyPath: "Go.MemProfile", Name: "memprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write an allocation profile to the file after all tests have passed. Preserves test binary.`},
+ {KeyPath: "Go.MemProfileRate", Name: "memprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis",
+ Usage: `Enable more precise (and expensive) memory allocation profiles by setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. To profile all memory allocations, use -test.memprofilerate=1.`},
+ {KeyPath: "Go.MutexProfile", Name: "mutexprofile", UsageArgument: "file", SectionKey: "performance-analysis",
+ Usage: `Write a mutex contention profile to the specified file when all tests are complete. Preserves test binary.`},
+ {KeyPath: "Go.MutexProfileFraction", Name: "mutexprofilefraction", UsageArgument: "n", SectionKey: "performance-analysis",
+ Usage: `if >= 0, calls runtime.SetMutexProfileFraction() Sample 1 in n stack traces of goroutines holding a contended mutex.`},
+ // Exposed to users as --execution-trace but exported to the binary as the standard -trace flag.
+ {KeyPath: "Go.Trace", Name: "execution-trace", UsageArgument: "file", ExportAs: "trace", SectionKey: "performance-analysis",
+ Usage: `Write an execution trace to the specified file before exiting.`},
+}
+
+// VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound
+// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers
+// Mutations performed: creates OutputDir if configured, forces Cover on when any
+// cover-related flag is set, and defaults CoverProfile to "coverprofile.out" when
+// coverage is enabled without an explicit profile path.
+func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) {
+ errors := []error{}
+
+ // --repeat and --until-it-fails are mutually exclusive.
+ if cliConfig.Repeat > 0 && cliConfig.UntilItFails {
+ errors = append(errors, GinkgoErrors.BothRepeatAndUntilItFails())
+ }
+
+ //initialize the output directory
+ if cliConfig.OutputDir != "" {
+ err := os.MkdirAll(cliConfig.OutputDir, 0777)
+ if err != nil {
+ errors = append(errors, err)
+ }
+ }
+
+ //ensure cover mode is configured appropriately
+ if goFlagsConfig.CoverMode != "" || goFlagsConfig.CoverPkg != "" || goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+ if goFlagsConfig.Cover && goFlagsConfig.CoverProfile == "" {
+ goFlagsConfig.CoverProfile = "coverprofile.out"
+ }
+
+ return cliConfig, goFlagsConfig, errors
+}
+
+// GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test
+// destination is the output binary path, packageToBuild is the package under test, and
+// pathToInvocationPath is used to re-root relative -coverpkg patterns so they resolve
+// from the directory where the user invoked ginkgo.
+func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) {
+ // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure
+ // the built test binary can generate a coverprofile
+ if goFlagsConfig.CoverProfile != "" {
+ goFlagsConfig.Cover = true
+ }
+
+ if goFlagsConfig.CoverPkg != "" {
+ coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",")
+ adjustedCoverPkgs := make([]string, len(coverPkgs))
+ for i, coverPkg := range coverPkgs {
+ coverPkg = strings.Trim(coverPkg, " ")
+ if strings.HasPrefix(coverPkg, "./") {
+ // this is a relative coverPkg - we need to reroot it
+ adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./"))
+ } else {
+ // this is a package name - don't touch it
+ adjustedCoverPkgs[i] = coverPkg
+ }
+ }
+ goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",")
+ }
+
+ // Base invocation: go test -c -o <destination> <package>; build flags are appended after.
+ args := []string{"test", "-c", "-o", destination, packageToBuild}
+ goArgs, err := GenerateFlagArgs(
+ GoBuildFlags,
+ map[string]interface{}{
+ "Go": &goFlagsConfig,
+ },
+ )
+
+ if err != nil {
+ return []string{}, err
+ }
+ args = append(args, goArgs...)
+ return args, nil
+}
+
+// GenerateGinkgoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled Ginkgo test binary
+// Suite, parallel, and reporter flags are emitted with the "ginkgo" prefix; go
+// run-time flags are emitted with the "test" prefix expected by test binaries.
+func GenerateGinkgoTestRunArgs(suiteConfig SuiteConfig, reporterConfig ReporterConfig, goFlagsConfig GoFlagsConfig) ([]string, error) {
+ var flags GinkgoFlags
+ flags = SuiteConfigFlags.WithPrefix("ginkgo")
+ flags = flags.CopyAppend(ParallelConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(ReporterConfigFlags.WithPrefix("ginkgo")...)
+ flags = flags.CopyAppend(GoRunFlags.WithPrefix("test")...)
+ bindings := map[string]interface{}{
+ "S": &suiteConfig,
+ "R": &reporterConfig,
+ "Go": &goFlagsConfig,
+ }
+
+ return GenerateFlagArgs(flags, bindings)
+}
+
+// GenerateGoTestRunArgs is used by the Ginkgo CLI to generate command line arguments to pass to the compiled non-Ginkgo test binary
+// Only go run-time flags (prefixed "test.") apply here, and --test.v is always appended.
+func GenerateGoTestRunArgs(goFlagsConfig GoFlagsConfig) ([]string, error) {
+ flags := GoRunFlags.WithPrefix("test")
+ bindings := map[string]interface{}{
+ "Go": &goFlagsConfig,
+ }
+
+ args, err := GenerateFlagArgs(flags, bindings)
+ if err != nil {
+ return args, err
+ }
+ args = append(args, "--test.v")
+ return args, nil
+}
+
+// BuildRunCommandFlagSet builds the FlagSet for the `ginkgo run` command
+// It aggregates suite, reporter, shared CLI, run/watch, run-only, go build,
+// and go run-time flags into a single standalone flag set.
+func BuildRunCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildWatchCommandFlagSet builds the FlagSet for the `ginkgo watch` command
+// Identical in structure to BuildRunCommandFlagSet but swaps the run-only
+// flags for the watch-only flags.
+func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig, cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := SuiteConfigFlags
+ flags = flags.CopyAppend(ReporterConfigFlags...)
+ flags = flags.CopyAppend(GinkgoCLISharedFlags...)
+ flags = flags.CopyAppend(GinkgoCLIRunAndWatchFlags...)
+ flags = flags.CopyAppend(GinkgoCLIWatchFlags...)
+ flags = flags.CopyAppend(GoBuildFlags...)
+ flags = flags.CopyAppend(GoRunFlags...)
+
+ bindings := map[string]interface{}{
+ "S": suiteConfig,
+ "R": reporterConfig,
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, FlagSections)
+}
+
+// BuildBuildCommandFlagSet builds the FlagSet for the `ginkgo build` command
+// Only shared CLI flags and go build flags apply; section headings are copied
+// and retitled so the help text reads correctly for the build command.
+func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags
+ flags = flags.CopyAppend(GoBuildFlags...)
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ "Go": goFlagsConfig,
+ "D": &deprecatedConfig{},
+ }
+
+ // Copy FlagSections so the retitled headings don't mutate the shared package-level slice.
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Building Multiple Suites"
+ }
+ if flagSections[i].Key == "go-build" {
+ flagSections[i] = GinkgoFlagSection{Key: "go-build", Style: "{{/}}", Heading: "Go Build Flags",
+ Description: "These flags are inherited from go build."}
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
+
+// BuildLabelsCommandFlagSet builds the FlagSet for the `ginkgo labels` command.
+// Only the -r and --skip-package shared flags apply; the "multiple-suites"
+// section heading is retitled on a copied section list to avoid mutating the
+// shared package-level FlagSections.
+func BuildLabelsCommandFlagSet(cliConfig *CLIConfig) (GinkgoFlagSet, error) {
+ flags := GinkgoCLISharedFlags.SubsetWithNames("r", "skip-package")
+
+ bindings := map[string]interface{}{
+ "C": cliConfig,
+ }
+
+ flagSections := make(GinkgoFlagSections, len(FlagSections))
+ copy(flagSections, FlagSections)
+ for i := range flagSections {
+ if flagSections[i].Key == "multiple-suites" {
+ flagSections[i].Heading = "Fetching Labels from Multiple Suites"
+ }
+ }
+
+ return NewGinkgoFlagSet(flags, bindings, flagSections)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
new file mode 100644
index 00000000000..17922304b63
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
@@ -0,0 +1,141 @@
+package types
+
+import (
+ "strconv"
+ "time"
+)
+
+/*
+ A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters.
+*/
+
+// Type aliases mapping the v1 reporter type names onto their v2 "Deprecated*" equivalents.
+type SuiteSummary = DeprecatedSuiteSummary
+type SetupSummary = DeprecatedSetupSummary
+type SpecSummary = DeprecatedSpecSummary
+type SpecMeasurement = DeprecatedSpecMeasurement
+type SpecComponentType = NodeType
+type SpecFailure = DeprecatedSpecFailure
+
+// v1 SpecComponentType constants, mapped onto the v2 NodeType values.
+var (
+ SpecComponentTypeInvalid = NodeTypeInvalid
+ SpecComponentTypeContainer = NodeTypeContainer
+ SpecComponentTypeIt = NodeTypeIt
+ SpecComponentTypeBeforeEach = NodeTypeBeforeEach
+ SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach
+ SpecComponentTypeAfterEach = NodeTypeAfterEach
+ SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach
+ SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite
+ SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite
+ SpecComponentTypeAfterSuite = NodeTypeAfterSuite
+ SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite
+)
+
+// DeprecatedSuiteSummary reproduces the v1 SuiteSummary shape for custom reporters
+// written against Ginkgo v1.
+type DeprecatedSuiteSummary struct {
+ SuiteDescription string
+ SuiteSucceeded bool
+ SuiteID string
+
+ NumberOfSpecsBeforeParallelization int
+ NumberOfTotalSpecs int
+ NumberOfSpecsThatWillBeRun int
+ NumberOfPendingSpecs int
+ NumberOfSkippedSpecs int
+ NumberOfPassedSpecs int
+ NumberOfFailedSpecs int
+ NumberOfFlakedSpecs int
+ RunTime time.Duration
+}
+
+// DeprecatedSetupSummary reproduces the v1 SetupSummary shape (per setup-node
+// results) for custom reporters written against Ginkgo v1.
+type DeprecatedSetupSummary struct {
+ ComponentType SpecComponentType
+ CodeLocation CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+
+ CapturedOutput string
+ SuiteID string
+}
+
+// DeprecatedSpecSummary reproduces the v1 SpecSummary shape (per-spec results)
+// for custom reporters written against Ginkgo v1.
+type DeprecatedSpecSummary struct {
+ ComponentTexts []string
+ ComponentCodeLocations []CodeLocation
+
+ State SpecState
+ RunTime time.Duration
+ Failure SpecFailure
+ IsMeasurement bool
+ NumberOfSamples int
+ Measurements map[string]*DeprecatedSpecMeasurement
+
+ CapturedOutput string
+ SuiteID string
+}
+
+// HasFailureState reports whether the spec ended in any failure-class state.
+func (s DeprecatedSpecSummary) HasFailureState() bool {
+ return s.State.Is(SpecStateFailureStates)
+}
+
+// TimedOut always returns false: the v1 timeout state has no v2 equivalent.
+func (s DeprecatedSpecSummary) TimedOut() bool {
+ return false
+}
+
+// Panicked reports whether the spec panicked.
+func (s DeprecatedSpecSummary) Panicked() bool {
+ return s.State == SpecStatePanicked
+}
+
+// Failed reports whether the spec failed (excluding panics and timeouts).
+func (s DeprecatedSpecSummary) Failed() bool {
+ return s.State == SpecStateFailed
+}
+
+// Passed reports whether the spec passed.
+func (s DeprecatedSpecSummary) Passed() bool {
+ return s.State == SpecStatePassed
+}
+
+// Skipped reports whether the spec was skipped.
+func (s DeprecatedSpecSummary) Skipped() bool {
+ return s.State == SpecStateSkipped
+}
+
+// Pending reports whether the spec was marked pending.
+func (s DeprecatedSpecSummary) Pending() bool {
+ return s.State == SpecStatePending
+}
+
+// DeprecatedSpecFailure reproduces the v1 SpecFailure shape describing where and
+// why a spec failed, for custom reporters written against Ginkgo v1.
+type DeprecatedSpecFailure struct {
+ Message string
+ Location CodeLocation
+ ForwardedPanic string
+
+ ComponentIndex int
+ ComponentType SpecComponentType
+ ComponentCodeLocation CodeLocation
+}
+
+// DeprecatedSpecMeasurement reproduces the v1 SpecMeasurement shape (aggregate
+// benchmark statistics) for custom reporters written against Ginkgo v1.
+type DeprecatedSpecMeasurement struct {
+ Name string
+ Info interface{}
+ Order int
+
+ Results []float64
+
+ Smallest float64
+ Largest float64
+ Average float64
+ StdDeviation float64
+
+ SmallestLabel string
+ LargestLabel string
+ AverageLabel string
+ Units string
+ Precision int
+}
+
+// PrecisionFmt returns the fmt verb for rendering this measurement's values:
+// "%f" when Precision is zero, otherwise "%.<Precision>f".
+func (s DeprecatedSpecMeasurement) PrecisionFmt() string {
+ if s.Precision == 0 {
+ return "%f"
+ }
+
+ str := strconv.Itoa(s.Precision)
+
+ return "%." + str + "f"
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
new file mode 100644
index 00000000000..e2519f673ff
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
@@ -0,0 +1,177 @@
+package types
+
+import (
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type Deprecation struct {
+ Message string
+ DocLink string
+ Version string
+}
+
+type deprecations struct{}
+
+var Deprecations = deprecations{}
+
+func (d deprecations) CustomReporter() Deprecation {
+ return Deprecation{
+ Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:",
+ DocLink: "removed-custom-reporters",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Async() Deprecation {
+ return Deprecation{
+ Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.",
+ DocLink: "removed-async-testing",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Measure() Deprecation {
+ return Deprecation{
+ Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.",
+ DocLink: "removed-measure",
+ Version: "1.16.3",
+ }
+}
+
+func (d deprecations) ParallelNode() Deprecation {
+ return Deprecation{
+ Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.",
+ DocLink: "renamed-ginkgoparallelnode",
+ Version: "1.16.4",
+ }
+}
+
+func (d deprecations) CurrentGinkgoTestDescription() Deprecation {
+ return Deprecation{
+ Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.",
+ DocLink: "changed-currentginkgotestdescription",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Convert() Deprecation {
+ return Deprecation{
+ Message: "The convert command is deprecated in Ginkgo V2",
+ DocLink: "removed-ginkgo-convert",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Blur() Deprecation {
+ return Deprecation{
+ Message: "The blur command is deprecated in Ginkgo V2. Use 'ginkgo unfocus' instead.",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) Nodot() Deprecation {
+ return Deprecation{
+ Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.",
+ DocLink: "removed-ginkgo-nodot",
+ Version: "1.16.0",
+ }
+}
+
+func (d deprecations) SuppressProgressReporting() Deprecation {
+ return Deprecation{
+ Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.",
+ Version: "2.5.0",
+ }
+}
+
+type DeprecationTracker struct {
+ deprecations map[Deprecation][]CodeLocation
+ lock *sync.Mutex
+}
+
+func NewDeprecationTracker() *DeprecationTracker {
+ return &DeprecationTracker{
+ deprecations: map[Deprecation][]CodeLocation{},
+ lock: &sync.Mutex{},
+ }
+}
+
+func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) {
+ ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS")
+ if deprecation.Version != "" && ackVersion != "" {
+ ack := ParseSemVer(ackVersion)
+ version := ParseSemVer(deprecation.Version)
+ if ack.GreaterThanOrEqualTo(version) {
+ return
+ }
+ }
+
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ if len(cl) == 1 {
+ d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0])
+ } else {
+ d.deprecations[deprecation] = []CodeLocation{}
+ }
+}
+
+func (d *DeprecationTracker) DidTrackDeprecations() bool {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ return len(d.deprecations) > 0
+}
+
+func (d *DeprecationTracker) DeprecationsReport() string {
+ d.lock.Lock()
+ defer d.lock.Unlock()
+ out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n")
+ out += formatter.F("{{light-yellow}}============================================={{/}}\n")
+ for deprecation, locations := range d.deprecations {
+ out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n")
+ if deprecation.DocLink != "" {
+ out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink)
+ }
+ for _, location := range locations {
+ out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location)
+ }
+ }
+ out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n")
+ out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION)
+ return out
+}
+
+type SemVer struct {
+ Major int
+ Minor int
+ Patch int
+}
+
+func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool {
+ return (s.Major > o.Major) ||
+ (s.Major == o.Major && s.Minor > o.Minor) ||
+ (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch)
+}
+
+func ParseSemVer(semver string) SemVer {
+ out := SemVer{}
+ semver = strings.TrimFunc(semver, func(r rune) bool {
+ return !(unicode.IsNumber(r) || r == '.')
+ })
+ components := strings.Split(semver, ".")
+ if len(components) > 0 {
+ out.Major, _ = strconv.Atoi(components[0])
+ }
+ if len(components) > 1 {
+ out.Minor, _ = strconv.Atoi(components[1])
+ }
+ if len(components) > 2 {
+ out.Patch, _ = strconv.Atoi(components[2])
+ }
+ return out
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
new file mode 100644
index 00000000000..1d96ae02800
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
@@ -0,0 +1,43 @@
+package types
+
+import "encoding/json"
+
+type EnumSupport struct {
+ toString map[uint]string
+ toEnum map[string]uint
+ maxEnum uint
+}
+
+func NewEnumSupport(toString map[uint]string) EnumSupport {
+ toEnum, maxEnum := map[string]uint{}, uint(0)
+ for k, v := range toString {
+ toEnum[v] = k
+ if maxEnum < k {
+ maxEnum = k
+ }
+ }
+ return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum}
+}
+
+func (es EnumSupport) String(e uint) string {
+ if e > es.maxEnum {
+ return es.toString[0]
+ }
+ return es.toString[e]
+}
+
+func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) {
+ var dec string
+ if err := json.Unmarshal(b, &dec); err != nil {
+ return 0, err
+ }
+ out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway
+ return out, nil
+}
+
+func (es EnumSupport) MarshJSON(e uint) ([]byte, error) {
+ if e == 0 || e > es.maxEnum {
+ return json.Marshal(nil)
+ }
+ return json.Marshal(es.toString[e])
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/errors.go
new file mode 100644
index 00000000000..6bb72d00ccd
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/errors.go
@@ -0,0 +1,639 @@
+package types
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type GinkgoError struct {
+ Heading string
+ Message string
+ DocLink string
+ CodeLocation CodeLocation
+}
+
+func (g GinkgoError) Error() string {
+ out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading)
+ if (g.CodeLocation != CodeLocation{}) {
+ contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ")
+ if contentsOfLine != "" {
+ out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine)
+ }
+ out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation)
+ }
+ if g.Message != "" {
+ out += formatter.Fiw(1, formatter.COLS, g.Message)
+ out += "\n\n"
+ }
+ if g.DocLink != "" {
+ out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink)
+ }
+
+ return out
+}
+
+type ginkgoErrors struct{}
+
+var GinkgoErrors = ginkgoErrors{}
+
+func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Your Test Panicked",
+ Message: `When you, or your assertion library, calls Ginkgo's Fail(),
+Ginkgo panics to prevent subsequent assertions from running.
+
+Normally Ginkgo rescues this panic so you shouldn't see it.
+
+However, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
+To circumvent this, you should call
+
+ defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic.
+
+Alternatively, you may have made an assertion outside of a Ginkgo
+leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to
+an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`,
+ DocLink: "mental-model-how-ginkgo-handles-failure",
+ CodeLocation: cl,
+ }
+}
+
+func (g ginkgoErrors) RerunningSuite() error {
+ return GinkgoError{
+ Heading: "Rerunning Suite",
+ Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`),
+ DocLink: "repeating-spec-runs-and-managing-flaky-specs",
+ }
+}
+
+/* Tree construction errors */
+
+func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node
+to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running.
+
+To enable randomization and parallelization Ginkgo requires the spec tree
+to be fully constructed up front. In practice, this means that you can
+only create nodes like {{bold}}[%s]{{/}} at the top-level or within the
+body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy",
+ }
+}
+
+func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Assertion or Panic detected during tree construction",
+ Message: formatter.F(
+ `Ginkgo detected a panic while constructing the spec tree.
+You may be trying to make an assertion in the body of a container node
+(i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}).
+
+Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}},
+{{bold}}It{{/}}, etc.
+
+{{bold}}Here's the content of the panic that was caught:{{/}}
+%v`, caughtPanic),
+ CodeLocation: cl,
+ DocLink: "no-assertions-in-container-nodes",
+ }
+}
+
+func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error {
+ docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
+ if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+ docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
+ }
+
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node.
+
+{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: docLink,
+ }
+}
+
+func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error {
+ docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite"
+ if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) {
+ docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite"
+ }
+
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running.
+
+{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType),
+ CodeLocation: cl,
+ DocLink: docLink,
+ }
+}
+
+func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation)
+}
+
+func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation)
+}
+
+func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(
+ `It looks like you are trying to add a {{bold}}[%s]{{/}} node but
+you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}.
+
+Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown),
+ CodeLocation: cl,
+ DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite",
+ }
+}
+
+/* Decorator errors */
+func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error {
+ return GinkgoError{
+ Heading: "Invalid Decorator",
+ Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Combination of Decorators: Focused and Pending",
+ Message: formatter.F(`[%s] node was decorated with both Focus and Pending. At most one is allowed.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly",
+ Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error {
+ return GinkgoError{
+ Heading: "Unknown Decorator",
+ Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error {
+ mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
+ if nodeType.Is(NodeTypeContainer) {
+ mustGet = "{{bold}}func(){{/}}"
+ }
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[%s] node must be passed `+mustGet+`.
+You passed {{bold}}%s{{/}} instead.`, nodeType, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error {
+ mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}"
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function.
+You passed {{bold}}%s{{/}} instead.`, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error {
+ mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}"
+ return GinkgoError{
+ Heading: "Invalid Function",
+ Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function.
+You passed {{bold}}%s{{/}} instead.`, t),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Multiple Functions",
+ Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Missing Functions",
+ Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod",
+ Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType),
+ CodeLocation: cl,
+ DocLink: "spec-timeouts-and-interruptible-nodes",
+ }
+}
+
+func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod",
+ Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`),
+ CodeLocation: cl,
+ DocLink: "spec-timeouts-and-interruptible-nodes",
+ }
+}
+
+/* Ordered Container errors */
+func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Invalid Serial Node in Non-Serial Ordered Container",
+ Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType),
+ CodeLocation: cl,
+ DocLink: "node-decorators-overview",
+ }
+}
+
+func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: "Setup Node not in Ordered Container",
+ Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType),
+ CodeLocation: cl,
+ DocLink: "ordered-containers",
+ }
+}
+
+func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "ContinueOnFailure not decorating an outermost Ordered Container",
+ Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.",
+ CodeLocation: cl,
+ DocLink: "ordered-containers",
+ }
+}
+
+/* DeferCleanup errors */
+func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup requires a valid function",
+ Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup must be called inside a setup or subject node",
+ Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType),
+ Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DeferCleanup cannot be called in a DeferCleanup callback",
+ Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup",
+ CodeLocation: cl,
+ DocLink: "cleaning-up-our-cleanup-code-defercleanup",
+ }
+}
+
+/* ReportEntry errors */
+func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error {
+ return GinkgoError{
+ Heading: "Too Many ReportEntry Values",
+ Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. Got unexpected value: %#v`, arg),
+ CodeLocation: cl,
+ DocLink: "attaching-data-to-reports",
+ }
+}
+
+func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
+ CodeLocation: cl,
+ DocLink: "attaching-data-to-reports",
+ }
+}
+
+/* By errors */
+func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Ginkgo detected an issue with your spec structure",
+ Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`),
+ CodeLocation: cl,
+ DocLink: "documenting-complex-specs-by",
+ }
+}
+
+/* FileFilter and SkipFilter errors */
+func (g ginkgoErrors) InvalidFileFilter(filter string) error {
+ return GinkgoError{
+ Heading: "Invalid File Filter",
+ Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter),
+ DocLink: "filtering-specs",
+ }
+}
+
+func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error {
+ return GinkgoError{
+ Heading: "Invalid File Filter Regular Expression",
+ Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. regexp.Compile error: %s`, filter, err),
+ DocLink: "filtering-specs",
+ }
+}
+
+/* Label Errors */
+func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error {
+ var message string
+ if location >= 0 {
+ for i, r := range input {
+ if i == location {
+ message += "{{red}}{{bold}}{{underline}}"
+ }
+ message += string(r)
+ if i == location {
+ message += "{{/}}"
+ }
+ }
+ } else {
+ message = input
+ }
+ message += "\n" + error
+ return GinkgoError{
+ Heading: "Syntax Error Parsing Label Filter",
+ Message: message,
+ DocLink: "spec-labels",
+ }
+}
+
+func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Label",
+ Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label),
+ CodeLocation: cl,
+ DocLink: "spec-labels",
+ }
+}
+
+func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Empty Label",
+ Message: "Labels cannot be empty",
+ CodeLocation: cl,
+ DocLink: "spec-labels",
+ }
+}
+
+/* Table errors */
+func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DescribeTable passed multiple functions",
+ Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. This function will be called for each Entry in the table.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Invalid Entry description",
+ Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "No parameters have been passed to the Table Function",
+ Message: "The Table Function expected at least 1 parameter",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "DescribeTable passed incorrect parameter type",
+ Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Too few parameters passed in to %s", kind),
+ Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Too many parameters passed in to %s", kind),
+ Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
+ Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> but you passed in <%s>", kind, i, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind),
+ Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual),
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error {
+ return GinkgoError{
+ Heading: "Contexts cannot be used in subtree tables",
+ Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.",
+ CodeLocation: cl,
+ DocLink: "table-specs",
+ }
+}
+
+/* Parallel Synchronization errors */
+
+func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error {
+ return GinkgoError{
+ Heading: "Test Report unavailable because a Ginkgo parallel process disappeared",
+ Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.",
+ }
+}
+
+func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error {
+ return GinkgoError{
+ Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1",
+ Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. This suite will now abort.",
+ }
+}
+
+func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error {
+ return GinkgoError{
+ Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back",
+ Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.",
+ }
+}
+
+/* Configuration errors */
+
+func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error {
+ return GinkgoError{
+ Heading: "Unknown Type passed to RunSpecs",
+ Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value),
+ }
+}
+
+var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead."
+
+func (g ginkgoErrors) InvalidParallelTotalConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.total must be >= 1",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) InvalidParallelProcessConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) MissingParallelHostConfiguration() error {
+ return GinkgoError{
+ Heading: "-ginkgo.parallel.host is missing",
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) UnreachableParallelHost(host string) error {
+ return GinkgoError{
+ Heading: "Could not reach ginkgo.parallel.host:" + host,
+ Message: sharedParallelErrorMessage,
+ DocLink: "spec-parallelization",
+ }
+}
+
+func (g ginkgoErrors) DryRunInParallelConfiguration() error {
+ return GinkgoError{
+ Heading: "Ginkgo only performs -dryRun in serial mode.",
+ Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.",
+ }
+}
+
+func (g ginkgoErrors) GracePeriodCannotBeZero() error {
+ return GinkgoError{
+ Heading: "Ginkgo requires a positive --grace-period.",
+ Message: "Please set --grace-period to a positive duration. The default is 30s.",
+ }
+}
+
+func (g ginkgoErrors) ConflictingVerbosityConfiguration() error {
+ return GinkgoError{
+ Heading: "Conflicting reporter verbosity settings.",
+ Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!",
+ }
+}
+
+func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error {
+ return GinkgoError{
+ Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value),
+ Message: "You must choose one of 'dup', 'swap', or 'none'.",
+ }
+}
+
+func (g ginkgoErrors) InvalidGoFlagCount() error {
+ return GinkgoError{
+ Heading: "Use of go test -count",
+ Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.",
+ }
+}
+
+func (g ginkgoErrors) InvalidGoFlagParallel() error {
+ return GinkgoError{
+ Heading: "Use of go test -parallel",
+ Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.",
+ }
+}
+
+func (g ginkgoErrors) BothRepeatAndUntilItFails() error {
+ return GinkgoError{
+ Heading: "--repeat and --until-it-fails are both set",
+ Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?",
+ }
+}
+
+/* Stack-Trace parsing errors */
+
+func (g ginkgoErrors) FailedToParseStackTrace(message string) error {
+ return GinkgoError{
+ Heading: "Failed to Parse Stack Trace",
+ Message: message,
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
new file mode 100644
index 00000000000..cc21df71ec8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
@@ -0,0 +1,106 @@
+package types
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+func ParseFileFilters(filters []string) (FileFilters, error) {
+ ffs := FileFilters{}
+ for _, filter := range filters {
+ ff := FileFilter{}
+ if filter == "" {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ components := strings.Split(filter, ":")
+ if !(len(components) == 1 || len(components) == 2) {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+
+ var err error
+ ff.Filename, err = regexp.Compile(components[0])
+ if err != nil {
+ return nil, err
+ }
+ if len(components) == 2 {
+ lineFilters := strings.Split(components[1], ",")
+ for _, lineFilter := range lineFilters {
+ components := strings.Split(lineFilter, "-")
+ if len(components) == 1 {
+ line, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1})
+ } else if len(components) == 2 {
+ line1, err := strconv.Atoi(strings.TrimSpace(components[0]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ line2, err := strconv.Atoi(strings.TrimSpace(components[1]))
+ if err != nil {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2})
+ } else {
+ return nil, GinkgoErrors.InvalidFileFilter(filter)
+ }
+ }
+ }
+ ffs = append(ffs, ff)
+ }
+ return ffs, nil
+}
+
+type FileFilter struct {
+ Filename *regexp.Regexp
+ LineFilters LineFilters
+}
+
+func (f FileFilter) Matches(locations []CodeLocation) bool {
+ for _, location := range locations {
+ if f.Filename.MatchString(location.FileName) &&
+ f.LineFilters.Matches(location.LineNumber) {
+ return true
+ }
+
+ }
+ return false
+}
+
+type FileFilters []FileFilter
+
+func (ffs FileFilters) Matches(locations []CodeLocation) bool {
+ for _, ff := range ffs {
+ if ff.Matches(locations) {
+ return true
+ }
+ }
+
+ return false
+}
+
+type LineFilter struct {
+ Min int
+ Max int
+}
+
+func (lf LineFilter) Matches(line int) bool {
+ return lf.Min <= line && line < lf.Max
+}
+
+type LineFilters []LineFilter
+
+func (lfs LineFilters) Matches(line int) bool {
+ if len(lfs) == 0 {
+ return true
+ }
+
+ for _, lf := range lfs {
+ if lf.Matches(line) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/flags.go
new file mode 100644
index 00000000000..de69f3022de
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/flags.go
@@ -0,0 +1,490 @@
+package types
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/onsi/ginkgo/v2/formatter"
+)
+
+type GinkgoFlag struct {
+ Name string
+ KeyPath string
+ SectionKey string
+
+ Usage string
+ UsageArgument string
+ UsageDefaultValue string
+
+ DeprecatedName string
+ DeprecatedDocLink string
+ DeprecatedVersion string
+
+ ExportAs string
+ AlwaysExport bool
+}
+
+type GinkgoFlags []GinkgoFlag
+
+func (f GinkgoFlags) CopyAppend(flags ...GinkgoFlag) GinkgoFlags {
+ out := GinkgoFlags{}
+ out = append(out, f...)
+ out = append(out, flags...)
+ return out
+}
+
+func (f GinkgoFlags) WithPrefix(prefix string) GinkgoFlags {
+ if prefix == "" {
+ return f
+ }
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ if flag.Name != "" {
+ flag.Name = prefix + "." + flag.Name
+ }
+ if flag.DeprecatedName != "" {
+ flag.DeprecatedName = prefix + "." + flag.DeprecatedName
+ }
+ if flag.ExportAs != "" {
+ flag.ExportAs = prefix + "." + flag.ExportAs
+ }
+ out = append(out, flag)
+ }
+ return out
+}
+
+func (f GinkgoFlags) SubsetWithNames(names ...string) GinkgoFlags {
+ out := GinkgoFlags{}
+ for _, flag := range f {
+ for _, name := range names {
+ if flag.Name == name {
+ out = append(out, flag)
+ break
+ }
+ }
+ }
+ return out
+}
+
+type GinkgoFlagSection struct {
+ Key string
+ Style string
+ Succinct bool
+ Heading string
+ Description string
+}
+
+type GinkgoFlagSections []GinkgoFlagSection
+
+func (gfs GinkgoFlagSections) Lookup(key string) (GinkgoFlagSection, bool) {
+ for _, section := range gfs {
+ if section.Key == key {
+ return section, true
+ }
+ }
+
+ return GinkgoFlagSection{}, false
+}
+
+type GinkgoFlagSet struct {
+ flags GinkgoFlags
+ bindings interface{}
+
+ sections GinkgoFlagSections
+ extraGoFlagsSection GinkgoFlagSection
+
+ flagSet *flag.FlagSet
+}
+
+// Call NewGinkgoFlagSet to create a GinkgoFlagSet that creates and binds to its own *flag.FlagSet
+func NewGinkgoFlagSet(flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ }, nil)
+}
+
+// Call NewAttachedGinkgoFlagSet to create a GinkgoFlagSet that extends an existing *flag.FlagSet
+func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings interface{}, sections GinkgoFlagSections, extraGoFlagsSection GinkgoFlagSection) (GinkgoFlagSet, error) {
+ return bindFlagSet(GinkgoFlagSet{
+ flags: flags,
+ bindings: bindings,
+ sections: sections,
+ extraGoFlagsSection: extraGoFlagsSection,
+ }, flagSet)
+}
+
+func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) {
+ if flagSet == nil {
+ f.flagSet = flag.NewFlagSet("", flag.ContinueOnError)
+ //suppress all output as Ginkgo is responsible for formatting usage
+ f.flagSet.SetOutput(io.Discard)
+ } else {
+ f.flagSet = flagSet
+ //we're piggybacking on an existing flagset (typically go test) so we have limited control
+ //on user feedback
+ f.flagSet.Usage = f.substituteUsage
+ }
+
+ for _, flag := range f.flags {
+ name := flag.Name
+
+ deprecatedUsage := "[DEPRECATED]"
+ deprecatedName := flag.DeprecatedName
+ if name != "" {
+ deprecatedUsage = fmt.Sprintf("[DEPRECATED] use --%s instead", name)
+ } else if flag.Usage != "" {
+ deprecatedUsage += " " + flag.Usage
+ }
+
+ value, ok := valueAtKeyPath(f.bindings, flag.KeyPath)
+ if !ok {
+ return GinkgoFlagSet{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface, addr := value.Interface(), value.Addr().Interface()
+
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if name != "" {
+ f.flagSet.StringVar(addr.(*string), name, iface.(string), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.StringVar(addr.(*string), deprecatedName, iface.(string), deprecatedUsage)
+ }
+ case reflect.TypeOf(int64(0)):
+ if name != "" {
+ f.flagSet.Int64Var(addr.(*int64), name, iface.(int64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Int64Var(addr.(*int64), deprecatedName, iface.(int64), deprecatedUsage)
+ }
+ case reflect.TypeOf(float64(0)):
+ if name != "" {
+ f.flagSet.Float64Var(addr.(*float64), name, iface.(float64), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Float64Var(addr.(*float64), deprecatedName, iface.(float64), deprecatedUsage)
+ }
+ case reflect.TypeOf(int(0)):
+ if name != "" {
+ f.flagSet.IntVar(addr.(*int), name, iface.(int), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.IntVar(addr.(*int), deprecatedName, iface.(int), deprecatedUsage)
+ }
+ case reflect.TypeOf(bool(true)):
+ if name != "" {
+ f.flagSet.BoolVar(addr.(*bool), name, iface.(bool), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.BoolVar(addr.(*bool), deprecatedName, iface.(bool), deprecatedUsage)
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if name != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), name, iface.(time.Duration), flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.DurationVar(addr.(*time.Duration), deprecatedName, iface.(time.Duration), deprecatedUsage)
+ }
+
+ case reflect.TypeOf([]string{}):
+ if name != "" {
+ f.flagSet.Var(stringSliceVar{value}, name, flag.Usage)
+ }
+ if deprecatedName != "" {
+ f.flagSet.Var(stringSliceVar{value}, deprecatedName, deprecatedUsage)
+ }
+ default:
+ return GinkgoFlagSet{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return f, nil
+}
+
+func (f GinkgoFlagSet) IsZero() bool {
+ return f.flagSet == nil
+}
+
+func (f GinkgoFlagSet) WasSet(name string) bool {
+ found := false
+ f.flagSet.Visit(func(f *flag.Flag) {
+ if f.Name == name {
+ found = true
+ }
+ })
+
+ return found
+}
+
+func (f GinkgoFlagSet) Lookup(name string) *flag.Flag {
+ return f.flagSet.Lookup(name)
+}
+
+func (f GinkgoFlagSet) Parse(args []string) ([]string, error) {
+ if f.IsZero() {
+ return args, nil
+ }
+ err := f.flagSet.Parse(args)
+ if err != nil {
+ return []string{}, err
+ }
+ return f.flagSet.Args(), nil
+}
+
+func (f GinkgoFlagSet) ValidateDeprecations(deprecationTracker *DeprecationTracker) {
+ if f.IsZero() {
+ return
+ }
+ f.flagSet.Visit(func(flag *flag.Flag) {
+ for _, ginkgoFlag := range f.flags {
+ if ginkgoFlag.DeprecatedName != "" && strings.HasSuffix(flag.Name, ginkgoFlag.DeprecatedName) {
+ message := fmt.Sprintf("--%s is deprecated", ginkgoFlag.DeprecatedName)
+ if ginkgoFlag.Name != "" {
+ message = fmt.Sprintf("--%s is deprecated, use --%s instead", ginkgoFlag.DeprecatedName, ginkgoFlag.Name)
+ } else if ginkgoFlag.Usage != "" {
+ message += " " + ginkgoFlag.Usage
+ }
+
+ deprecationTracker.TrackDeprecation(Deprecation{
+ Message: message,
+ DocLink: ginkgoFlag.DeprecatedDocLink,
+ Version: ginkgoFlag.DeprecatedVersion,
+ })
+ }
+ }
+ })
+}
+
+func (f GinkgoFlagSet) Usage() string {
+ if f.IsZero() {
+ return ""
+ }
+ groupedFlags := map[GinkgoFlagSection]GinkgoFlags{}
+ ungroupedFlags := GinkgoFlags{}
+ managedFlags := map[string]bool{}
+ extraGoFlags := []*flag.Flag{}
+
+ for _, flag := range f.flags {
+ managedFlags[flag.Name] = true
+ managedFlags[flag.DeprecatedName] = true
+
+ if flag.Name == "" {
+ continue
+ }
+
+ section, ok := f.sections.Lookup(flag.SectionKey)
+ if ok {
+ groupedFlags[section] = append(groupedFlags[section], flag)
+ } else {
+ ungroupedFlags = append(ungroupedFlags, flag)
+ }
+ }
+
+ f.flagSet.VisitAll(func(flag *flag.Flag) {
+ if !managedFlags[flag.Name] {
+ extraGoFlags = append(extraGoFlags, flag)
+ }
+ })
+
+ out := ""
+ for _, section := range f.sections {
+ flags := groupedFlags[section]
+ if len(flags) == 0 {
+ continue
+ }
+ out += f.usageForSection(section)
+ if section.Succinct {
+ succinctFlags := []string{}
+ for _, flag := range flags {
+ if flag.Name != "" {
+ succinctFlags = append(succinctFlags, fmt.Sprintf("--%s", flag.Name))
+ }
+ }
+ out += formatter.Fiw(1, formatter.COLS, section.Style+strings.Join(succinctFlags, ", ")+"{{/}}\n")
+ } else {
+ for _, flag := range flags {
+ out += f.usageForFlag(flag, section.Style)
+ }
+ }
+ out += "\n"
+ }
+ if len(ungroupedFlags) > 0 {
+ for _, flag := range ungroupedFlags {
+ out += f.usageForFlag(flag, "")
+ }
+ out += "\n"
+ }
+ if len(extraGoFlags) > 0 {
+ out += f.usageForSection(f.extraGoFlagsSection)
+ for _, goFlag := range extraGoFlags {
+ out += f.usageForGoFlag(goFlag)
+ }
+ }
+
+ return out
+}
+
+func (f GinkgoFlagSet) substituteUsage() {
+ fmt.Fprintln(f.flagSet.Output(), f.Usage())
+}
+
+func valueAtKeyPath(root interface{}, keyPath string) (reflect.Value, bool) {
+ if len(keyPath) == 0 {
+ return reflect.Value{}, false
+ }
+
+ val := reflect.ValueOf(root)
+ components := strings.Split(keyPath, ".")
+ for _, component := range components {
+ val = reflect.Indirect(val)
+ switch val.Kind() {
+ case reflect.Map:
+ val = val.MapIndex(reflect.ValueOf(component))
+ if val.Kind() == reflect.Interface {
+ val = reflect.ValueOf(val.Interface())
+ }
+ case reflect.Struct:
+ val = val.FieldByName(component)
+ default:
+ return reflect.Value{}, false
+ }
+ if (val == reflect.Value{}) {
+ return reflect.Value{}, false
+ }
+ }
+
+ return val, true
+}
+
+func (f GinkgoFlagSet) usageForSection(section GinkgoFlagSection) string {
+ out := formatter.F(section.Style + "{{bold}}{{underline}}" + section.Heading + "{{/}}\n")
+ if section.Description != "" {
+ out += formatter.Fiw(0, formatter.COLS, section.Description+"\n")
+ }
+ return out
+}
+
+func (f GinkgoFlagSet) usageForFlag(flag GinkgoFlag, style string) string {
+ argument := flag.UsageArgument
+ defValue := flag.UsageDefaultValue
+ if argument == "" {
+ value, _ := valueAtKeyPath(f.bindings, flag.KeyPath)
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ argument = "string"
+ case reflect.TypeOf(int64(0)), reflect.TypeOf(int(0)):
+ argument = "int"
+ case reflect.TypeOf(time.Duration(0)):
+ argument = "duration"
+ case reflect.TypeOf(float64(0)):
+ argument = "float"
+ case reflect.TypeOf([]string{}):
+ argument = "string"
+ }
+ }
+ if argument != "" {
+ argument = "[" + argument + "] "
+ }
+ if defValue != "" {
+ defValue = fmt.Sprintf("(default: %s)", defValue)
+ }
+ hyphens := "--"
+ if len(flag.Name) == 1 {
+ hyphens = "-"
+ }
+
+ out := formatter.Fi(1, style+"%s%s{{/}} %s{{gray}}%s{{/}}\n", hyphens, flag.Name, argument, defValue)
+ out += formatter.Fiw(2, formatter.COLS, "{{light-gray}}%s{{/}}\n", flag.Usage)
+ return out
+}
+
+func (f GinkgoFlagSet) usageForGoFlag(goFlag *flag.Flag) string {
+ //Taken directly from the flag package
+ out := fmt.Sprintf(" -%s", goFlag.Name)
+ name, usage := flag.UnquoteUsage(goFlag)
+ if len(name) > 0 {
+ out += " " + name
+ }
+ if len(out) <= 4 {
+ out += "\t"
+ } else {
+ out += "\n \t"
+ }
+ out += strings.ReplaceAll(usage, "\n", "\n \t")
+ out += "\n"
+ return out
+}
+
+type stringSliceVar struct {
+ slice reflect.Value
+}
+
+func (ssv stringSliceVar) String() string { return "" }
+func (ssv stringSliceVar) Set(s string) error {
+ ssv.slice.Set(reflect.AppendSlice(ssv.slice, reflect.ValueOf([]string{s})))
+ return nil
+}
+
+// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured.
+func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) {
+ result := []string{}
+ for _, flag := range flags {
+ name := flag.ExportAs
+ if name == "" {
+ name = flag.Name
+ }
+ if name == "" {
+ continue
+ }
+
+ value, ok := valueAtKeyPath(bindings, flag.KeyPath)
+ if !ok {
+ return []string{}, fmt.Errorf("could not load KeyPath: %s", flag.KeyPath)
+ }
+
+ iface := value.Interface()
+ switch value.Type() {
+ case reflect.TypeOf(string("")):
+ if iface.(string) != "" || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+ case reflect.TypeOf(int64(0)):
+ if iface.(int64) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(float64(0)):
+ if iface.(float64) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%f", name, iface))
+ }
+ case reflect.TypeOf(int(0)):
+ if iface.(int) != 0 || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%d", name, iface))
+ }
+ case reflect.TypeOf(bool(true)):
+ if iface.(bool) {
+ result = append(result, fmt.Sprintf("--%s", name))
+ }
+ case reflect.TypeOf(time.Duration(0)):
+ if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport {
+ result = append(result, fmt.Sprintf("--%s=%s", name, iface))
+ }
+
+ case reflect.TypeOf([]string{}):
+ strings := iface.([]string)
+ for _, s := range strings {
+ result = append(result, fmt.Sprintf("--%s=%s", name, s))
+ }
+ default:
+ return []string{}, fmt.Errorf("unsupported type %T", iface)
+ }
+ }
+
+ return result, nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
new file mode 100644
index 00000000000..7fdc8aa23f3
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
@@ -0,0 +1,583 @@
+package types
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+var DEBUG_LABEL_FILTER_PARSING = false
+
+type LabelFilter func([]string) bool
+
+func matchLabelAction(label string) LabelFilter {
+ expected := strings.ToLower(label)
+ return func(labels []string) bool {
+ for i := range labels {
+ if strings.ToLower(labels[i]) == expected {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter {
+ return func(labels []string) bool {
+ for i := range labels {
+ if regex.MatchString(labels[i]) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func notAction(filter LabelFilter) LabelFilter {
+ return func(labels []string) bool { return !filter(labels) }
+}
+
+func andAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) && b(labels) }
+}
+
+func orAction(a, b LabelFilter) LabelFilter {
+ return func(labels []string) bool { return a(labels) || b(labels) }
+}
+
+func labelSetFor(key string, labels []string) map[string]bool {
+ key = strings.ToLower(strings.TrimSpace(key))
+ out := map[string]bool{}
+ for _, label := range labels {
+ components := strings.SplitN(label, ":", 2)
+ if len(components) < 2 {
+ continue
+ }
+ if key == strings.ToLower(strings.TrimSpace(components[0])) {
+ out[strings.ToLower(strings.TrimSpace(components[1]))] = true
+ }
+ }
+
+ return out
+}
+
+func isEmptyLabelSetAction(key string) LabelFilter {
+ return func(labels []string) bool {
+ return len(labelSetFor(key, labels)) == 0
+ }
+}
+
+func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if set[value] {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ if len(set) != len(expectedValues) {
+ return false
+ }
+ for _, value := range expectedValues {
+ if !set[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter {
+ expectedSet := map[string]bool{}
+ for _, value := range expectedValues {
+ expectedSet[value] = true
+ }
+ return func(labels []string) bool {
+ set := labelSetFor(key, labels)
+ for value := range set {
+ if !expectedSet[value] {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+type lfToken uint
+
+const (
+ lfTokenInvalid lfToken = iota
+
+ lfTokenRoot
+ lfTokenOpenGroup
+ lfTokenCloseGroup
+ lfTokenNot
+ lfTokenAnd
+ lfTokenOr
+ lfTokenRegexp
+ lfTokenLabel
+ lfTokenSetKey
+ lfTokenSetOperation
+ lfTokenSetArgument
+ lfTokenEOF
+)
+
+func (l lfToken) Precedence() int {
+ switch l {
+ case lfTokenRoot, lfTokenOpenGroup:
+ return 0
+ case lfTokenOr:
+ return 1
+ case lfTokenAnd:
+ return 2
+ case lfTokenNot:
+ return 3
+ case lfTokenSetOperation:
+ return 4
+ }
+ return -1
+}
+
+func (l lfToken) String() string {
+ switch l {
+ case lfTokenRoot:
+ return "ROOT"
+ case lfTokenOpenGroup:
+ return "("
+ case lfTokenCloseGroup:
+ return ")"
+ case lfTokenNot:
+ return "!"
+ case lfTokenAnd:
+ return "&&"
+ case lfTokenOr:
+ return "||"
+ case lfTokenRegexp:
+ return "/regexp/"
+ case lfTokenLabel:
+ return "label"
+ case lfTokenSetKey:
+ return "set_key"
+ case lfTokenSetOperation:
+ return "set_operation"
+ case lfTokenSetArgument:
+ return "set_argument"
+ case lfTokenEOF:
+ return "EOF"
+ }
+ return "INVALID"
+}
+
+type treeNode struct {
+ token lfToken
+ location int
+ value string
+
+ parent *treeNode
+ leftNode *treeNode
+ rightNode *treeNode
+}
+
+func (tn *treeNode) setRightNode(node *treeNode) {
+ tn.rightNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) setLeftNode(node *treeNode) {
+ tn.leftNode = node
+ node.parent = tn
+}
+
+func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode {
+ if tn.token.Precedence() <= precedence {
+ return tn
+ }
+ return tn.parent.firstAncestorWithPrecedenceLEQ(precedence)
+}
+
+func (tn *treeNode) firstUnmatchedOpenNode() *treeNode {
+ if tn.token == lfTokenOpenGroup {
+ return tn
+ }
+ if tn.parent == nil {
+ return nil
+ }
+ return tn.parent.firstUnmatchedOpenNode()
+}
+
+func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) {
+ switch tn.token {
+ case lfTokenOpenGroup:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.")
+ case lfTokenLabel:
+ return matchLabelAction(tn.value), nil
+ case lfTokenRegexp:
+ re, err := regexp.Compile(tn.value)
+ if err != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err))
+ }
+ return matchLabelRegexAction(re), nil
+ case lfTokenSetOperation:
+ tokenSetOperation := strings.ToLower(tn.value)
+ if tokenSetOperation == "isempty" {
+ return isEmptyLabelSetAction(tn.leftNode.value), nil
+ }
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value))
+ }
+
+ rawValues := strings.Split(tn.rightNode.value, ",")
+ values := make([]string, len(rawValues))
+ for i := range rawValues {
+ values[i] = strings.ToLower(strings.TrimSpace(rawValues[i]))
+ if strings.ContainsAny(values[i], "&|!,()/") {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i]))
+ } else if values[i] == "" {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.")
+ }
+ }
+ switch tokenSetOperation {
+ case "containsany":
+ return containsAnyLabelSetAction(tn.leftNode.value, values), nil
+ case "containsall":
+ return containsAllLabelSetAction(tn.leftNode.value, values), nil
+ case "consistsof":
+ return consistsOfLabelSetAction(tn.leftNode.value, values), nil
+ case "issubsetof":
+ return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil
+ }
+ }
+
+ if tn.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.")
+ }
+ rightLF, err := tn.rightNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenRoot, lfTokenCloseGroup:
+ return rightLF, nil
+ case lfTokenNot:
+ return notAction(rightLF), nil
+ }
+
+ if tn.leftNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token))
+ }
+ leftLF, err := tn.leftNode.constructLabelFilter(input)
+ if err != nil {
+ return nil, err
+ }
+
+ switch tn.token {
+ case lfTokenAnd:
+ return andAction(leftLF, rightLF), nil
+ case lfTokenOr:
+ return orAction(leftLF, rightLF), nil
+ }
+
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token))
+}
+
+func (tn *treeNode) tokenString() string {
+ out := fmt.Sprintf("<%s", tn.token)
+ if tn.value != "" {
+ out += " | " + tn.value
+ }
+ out += ">"
+ return out
+}
+
+func (tn *treeNode) toString(indent int) string {
+ out := tn.tokenString() + "\n"
+ if tn.leftNode != nil {
+ out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1))
+ }
+ if tn.rightNode != nil {
+ out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1))
+ }
+ return out
+}
+
+var validSetOperations = map[string]string{
+ "containsany": "containsAny",
+ "containsall": "containsAll",
+ "consistsof": "consistsOf",
+ "issubsetof": "isSubsetOf",
+ "isempty": "isEmpty",
+}
+
+func tokenize(input string) func() (*treeNode, error) {
+ lastToken := lfTokenInvalid
+ lastValue := ""
+ runes, i := []rune(input), 0
+
+ peekIs := func(r rune) bool {
+ if i+1 < len(runes) {
+ return runes[i+1] == r
+ }
+ return false
+ }
+
+ consumeUntil := func(cutset string) (string, int) {
+ j := i
+ for ; j < len(runes); j++ {
+ if strings.IndexRune(cutset, runes[j]) >= 0 {
+ break
+ }
+ }
+ return string(runes[i:j]), j - i
+ }
+
+ return func() (*treeNode, error) {
+ for i < len(runes) && runes[i] == ' ' {
+ i += 1
+ }
+
+ if i >= len(runes) {
+ return &treeNode{token: lfTokenEOF}, nil
+ }
+
+ node := &treeNode{location: i}
+ defer func() {
+ lastToken = node.token
+ lastValue = node.value
+ }()
+
+ if lastToken == lfTokenSetKey {
+ //we should get a valid set operation next
+ value, n := consumeUntil(" )")
+ if validSetOperations[strings.ToLower(value)] == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value))
+ }
+ i += n
+ node.token, node.value = lfTokenSetOperation, value
+ return node, nil
+ }
+ if lastToken == lfTokenSetOperation {
+ //we should get an argument next, if we aren't isempty
+ var arg = ""
+ origI := i
+ if runes[i] == '{' {
+ i += 1
+ value, n := consumeUntil("}")
+ if i+n >= len(runes) {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?")
+ }
+ i += n + 1
+ arg = value
+ } else {
+ value, n := consumeUntil("&|!,()/")
+ i += n
+ arg = strings.TrimSpace(value)
+ }
+ if strings.ToLower(lastValue) == "isempty" && arg != "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg))
+ }
+ if arg == "" && strings.ToLower(lastValue) != "isempty" {
+ if i < len(runes) && runes[i] == '/' {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.")
+ } else {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue))
+ }
+ }
+			// note that we send an empty SetArgument token if we are isempty
+ node.token, node.value = lfTokenSetArgument, arg
+ return node, nil
+ }
+
+ switch runes[i] {
+ case '&':
+ if !peekIs('&') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?")
+ }
+ i += 2
+ node.token = lfTokenAnd
+ case '|':
+ if !peekIs('|') {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?")
+ }
+ i += 2
+ node.token = lfTokenOr
+ case '!':
+ i += 1
+ node.token = lfTokenNot
+ case ',':
+ i += 1
+ node.token = lfTokenOr
+ case '(':
+ i += 1
+ node.token = lfTokenOpenGroup
+ case ')':
+ i += 1
+ node.token = lfTokenCloseGroup
+ case '/':
+ i += 1
+ value, n := consumeUntil("/")
+ i += n + 1
+ node.token, node.value = lfTokenRegexp, value
+ default:
+ value, n := consumeUntil("&|!,()/:")
+ i += n
+ value = strings.TrimSpace(value)
+
+ //are we the beginning of a set operation?
+ if i < len(runes) && runes[i] == ':' {
+ if peekIs(' ') {
+ if value == "" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.")
+ }
+ i += 1
+ //we are the beginning of a set operation
+ node.token, node.value = lfTokenSetKey, value
+ return node, nil
+ }
+ additionalValue, n := consumeUntil("&|!,()/")
+ additionalValue = strings.TrimSpace(additionalValue)
+ if additionalValue == ":" {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.")
+ }
+ i += n
+ value += additionalValue
+ }
+
+ valueToCheckForSetOperation := strings.ToLower(value)
+ for setOperation := range validSetOperations {
+ idx := strings.Index(valueToCheckForSetOperation, " "+setOperation)
+ if idx > 0 {
+ return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation]))
+ }
+ }
+
+ node.token, node.value = lfTokenLabel, strings.TrimSpace(value)
+ }
+ return node, nil
+ }
+}
+
+func MustParseLabelFilter(input string) LabelFilter {
+ filter, err := ParseLabelFilter(input)
+ if err != nil {
+ panic(err)
+ }
+ return filter
+}
+
+func ParseLabelFilter(input string) (LabelFilter, error) {
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Println("\n==============")
+ fmt.Println("Input: ", input)
+ fmt.Print("Tokens: ")
+ }
+ if input == "" {
+ return func(_ []string) bool { return true }, nil
+ }
+ nextToken := tokenize(input)
+
+ root := &treeNode{token: lfTokenRoot}
+ current := root
+LOOP:
+ for {
+ node, err := nextToken()
+ if err != nil {
+ return nil, err
+ }
+
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Print(node.tokenString() + " ")
+ }
+
+ switch node.token {
+ case lfTokenEOF:
+ break LOOP
+ case lfTokenLabel, lfTokenRegexp, lfTokenSetKey:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. You need an operator between them.")
+ }
+ current.setRightNode(node)
+ case lfTokenNot, lfTokenOpenGroup:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token))
+ }
+ current.setRightNode(node)
+ current = node
+ case lfTokenAnd, lfTokenOr:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token))
+ }
+ nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence())
+ node.setLeftNode(nodeToStealFrom.rightNode)
+ nodeToStealFrom.setRightNode(node)
+ current = node
+ case lfTokenSetOperation:
+ if current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value))
+ }
+ node.setLeftNode(current.rightNode)
+ current.setRightNode(node)
+ current = node
+ case lfTokenSetArgument:
+ if current.rightNode != nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token))
+ }
+ current.setRightNode(node)
+ case lfTokenCloseGroup:
+ firstUnmatchedOpenNode := current.firstUnmatchedOpenNode()
+ if firstUnmatchedOpenNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.")
+ }
+ if firstUnmatchedOpenNode == current && current.rightNode == nil {
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.")
+ }
+ firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed
+ current = firstUnmatchedOpenNode.parent
+ default:
+ return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token))
+ }
+ }
+ if DEBUG_LABEL_FILTER_PARSING {
+ fmt.Printf("\n Tree:\n%s", root.toString(0))
+ }
+ return root.constructLabelFilter(input)
+}
+
+func ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) {
+ out := strings.TrimSpace(label)
+ if out == "" {
+ return "", GinkgoErrors.InvalidEmptyLabel(cl)
+ }
+ if strings.ContainsAny(out, "&|!,()/") {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if out[0] == ':' {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ if strings.Contains(out, ":") {
+ components := strings.SplitN(out, ":", 2)
+ if len(components) < 2 || components[1] == "" {
+ return "", GinkgoErrors.InvalidLabel(label, cl)
+ }
+ }
+ return out, nil
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
new file mode 100644
index 00000000000..7b1524b52e8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go
@@ -0,0 +1,190 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports
+// and across the network connection when running in parallel
+type ReportEntryValue struct {
+ raw interface{} //unexported to prevent gob from freaking out about unregistered structs
+ AsJSON string
+ Representation string
+}
+
+func WrapEntryValue(value interface{}) ReportEntryValue {
+ return ReportEntryValue{
+ raw: value,
+ }
+}
+
+func (rev ReportEntryValue) GetRawValue() interface{} {
+ return rev.raw
+}
+
+func (rev ReportEntryValue) String() string {
+ if rev.raw == nil {
+ return ""
+ }
+ if colorableStringer, ok := rev.raw.(ColorableStringer); ok {
+ return colorableStringer.ColorableString()
+ }
+
+ if stringer, ok := rev.raw.(fmt.Stringer); ok {
+ return stringer.String()
+ }
+ if rev.Representation != "" {
+ return rev.Representation
+ }
+ return fmt.Sprintf("%+v", rev.raw)
+}
+
+func (rev ReportEntryValue) MarshalJSON() ([]byte, error) {
+ //All this to capture the representation at encoding-time, not creating time
+ //This way users can Report on pointers and get their final values at reporting-time
+ out := struct {
+ AsJSON string
+ Representation string
+ }{
+ Representation: rev.String(),
+ }
+ asJSON, err := json.Marshal(rev.raw)
+ if err != nil {
+ return nil, err
+ }
+ out.AsJSON = string(asJSON)
+
+ return json.Marshal(out)
+}
+
+func (rev *ReportEntryValue) UnmarshalJSON(data []byte) error {
+ in := struct {
+ AsJSON string
+ Representation string
+ }{}
+ err := json.Unmarshal(data, &in)
+ if err != nil {
+ return err
+ }
+ rev.AsJSON = in.AsJSON
+ rev.Representation = in.Representation
+ return json.Unmarshal([]byte(in.AsJSON), &(rev.raw))
+}
+
+func (rev ReportEntryValue) GobEncode() ([]byte, error) {
+ return rev.MarshalJSON()
+}
+
+func (rev *ReportEntryValue) GobDecode(data []byte) error {
+ return rev.UnmarshalJSON(data)
+}
+
+// ReportEntry captures information attached to `SpecReport` via `AddReportEntry`
+type ReportEntry struct {
+ // Visibility captures the visibility policy for this ReportEntry
+ Visibility ReportEntryVisibility
+ // Location captures the location of the AddReportEntry call
+ Location CodeLocation
+
+ Time time.Time //need this for backwards compatibility
+ TimelineLocation TimelineLocation
+
+ // Name captures the name of this report
+ Name string
+ // Value captures the (optional) object passed into AddReportEntry - this can be
+ // anything the user wants. The value passed to AddReportEntry is wrapped in a ReportEntryValue to make
+ // encoding/decoding the value easier. To access the raw value call entry.GetRawValue()
+ Value ReportEntryValue
+}
+
+// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation.
+type ColorableStringer interface {
+ ColorableString() string
+}
+
+// StringRepresentation() returns the string representation of the value associated with the ReportEntry --
+// if value is nil, empty string is returned
+// if value is a `ColorableStringer` then `Value.ColorableString()` is returned
+// if value is a `fmt.Stringer` then `Value.String()` is returned
+// otherwise the value is formatted with "%+v"
+func (entry ReportEntry) StringRepresentation() string {
+ return entry.Value.String()
+}
+
+// GetRawValue returns the Value object that was passed to AddReportEntry
+// If called in-process this will be the same object that was passed into AddReportEntry.
+// If used from a rehydrated JSON file _or_ in a ReportAfterSuite when running in parallel this will be
+// a JSON-decoded interface{}. If you want to reconstitute your original object you can decode the entry.Value.AsJSON
+// field yourself.
+func (entry ReportEntry) GetRawValue() interface{} {
+ return entry.Value.GetRawValue()
+}
+
+func (entry ReportEntry) GetTimelineLocation() TimelineLocation {
+ return entry.TimelineLocation
+}
+
+type ReportEntries []ReportEntry
+
+func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool {
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ return true
+ }
+ }
+ return false
+}
+
+func (re ReportEntries) WithVisibility(visibilities ...ReportEntryVisibility) ReportEntries {
+ out := ReportEntries{}
+
+ for _, entry := range re {
+ if entry.Visibility.Is(visibilities...) {
+ out = append(out, entry)
+ }
+ }
+
+ return out
+}
+
+// ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter
+type ReportEntryVisibility uint
+
+const (
+ // Always print out this ReportEntry
+ ReportEntryVisibilityAlways ReportEntryVisibility = iota
+ // Only print out this ReportEntry if the spec fails or if the test is run with -v
+ ReportEntryVisibilityFailureOrVerbose
+ // Never print out this ReportEntry (note that ReportEntrys are always encoded in machine readable reports (e.g. JSON, JUnit, etc.))
+ ReportEntryVisibilityNever
+)
+
+var revEnumSupport = NewEnumSupport(map[uint]string{
+ uint(ReportEntryVisibilityAlways): "always",
+ uint(ReportEntryVisibilityFailureOrVerbose): "failure-or-verbose",
+ uint(ReportEntryVisibilityNever): "never",
+})
+
+func (rev ReportEntryVisibility) String() string {
+ return revEnumSupport.String(uint(rev))
+}
+func (rev *ReportEntryVisibility) UnmarshalJSON(b []byte) error {
+ out, err := revEnumSupport.UnmarshJSON(b)
+ *rev = ReportEntryVisibility(out)
+ return err
+}
+func (rev ReportEntryVisibility) MarshalJSON() ([]byte, error) {
+ return revEnumSupport.MarshJSON(uint(rev))
+}
+
+func (v ReportEntryVisibility) Is(visibilities ...ReportEntryVisibility) bool {
+ for _, visibility := range visibilities {
+ if v == visibility {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/types.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/types.go
new file mode 100644
index 00000000000..aae69b04c9d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/types.go
@@ -0,0 +1,914 @@
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+)
+
+const GINKGO_FOCUS_EXIT_CODE = 197
+const GINKGO_TIME_FORMAT = "01/02/06 15:04:05.999"
+
+// Report captures information about a Ginkgo test run
+type Report struct {
+ //SuitePath captures the absolute path to the test suite
+ SuitePath string
+
+ //SuiteDescription captures the description string passed to the DSL's RunSpecs() function
+ SuiteDescription string
+
+ //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function
+ SuiteLabels []string
+
+ //SuiteSucceeded captures the success or failure status of the test run
+ //If true, the test run is considered successful.
+ //If false, the test run is considered unsuccessful
+ SuiteSucceeded bool
+
+ //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused
+//(i.e. an `FIt` or an `FDescribe`)
+ SuiteHasProgrammaticFocus bool
+
+ //SpecialSuiteFailureReasons may contain special failure reasons
+ //For example, a test suite might be considered "failed" even if none of the individual specs
+ //have a failure state. For example, if the user has configured --fail-on-pending the test suite
+ //will have failed if there are pending tests even though all non-pending tests may have passed. In such
+ //cases, Ginkgo populates SpecialSuiteFailureReasons with a clear message indicating the reason for the failure.
+ //SpecialSuiteFailureReasons is also populated if the test suite is interrupted by the user.
+ //Since multiple special failure reasons can occur, this field is a slice.
+ SpecialSuiteFailureReasons []string
+
+ //PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+ //by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+ //and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+ PreRunStats PreRunStats
+
+ //StartTime and EndTime capture the start and end time of the test run
+ StartTime time.Time
+ EndTime time.Time
+
+ //RunTime captures the duration of the test run
+ RunTime time.Duration
+
+ //SuiteConfig captures the Ginkgo configuration governing this test run
+ //SuiteConfig includes information necessary for reproducing an identical test run,
+ //such as the random seed and any filters applied during the test run
+ SuiteConfig SuiteConfig
+
+ //SpecReports is a list of all SpecReports generated by this test run
+ //It is empty when the SuiteReport is provided to ReportBeforeSuite
+ SpecReports SpecReports
+}
+
+// PreRunStats contains a set of stats captured before the test run begins. This is primarily used
+// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs)
+// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters.
+type PreRunStats struct {
+ TotalSpecs int
+ SpecsThatWillRun int
+}
+
+// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes
+// to form a complete final report.
+func (report Report) Add(other Report) Report {
+ report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded
+
+ if other.StartTime.Before(report.StartTime) {
+ report.StartTime = other.StartTime
+ }
+
+ if other.EndTime.After(report.EndTime) {
+ report.EndTime = other.EndTime
+ }
+
+ specialSuiteFailureReasons := []string{}
+ reasonsLookup := map[string]bool{}
+ for _, reasons := range [][]string{report.SpecialSuiteFailureReasons, other.SpecialSuiteFailureReasons} {
+ for _, reason := range reasons {
+ if !reasonsLookup[reason] {
+ reasonsLookup[reason] = true
+ specialSuiteFailureReasons = append(specialSuiteFailureReasons, reason)
+ }
+ }
+ }
+ report.SpecialSuiteFailureReasons = specialSuiteFailureReasons
+ report.RunTime = report.EndTime.Sub(report.StartTime)
+
+ reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports))
+ copy(reports, report.SpecReports)
+ offset := len(report.SpecReports)
+ for i := range other.SpecReports {
+ reports[i+offset] = other.SpecReports[i]
+ }
+
+ report.SpecReports = reports
+ return report
+}
+
+// SpecReport captures information about a Ginkgo spec.
+type SpecReport struct {
+ // ContainerHierarchyTexts is a slice containing the text strings of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyTexts []string
+
+ // ContainerHierarchyLocations is a slice containing the CodeLocations of
+ // all Describe/Context/When containers in this spec's hierarchy.
+ ContainerHierarchyLocations []CodeLocation
+
+ // ContainerHierarchyLabels is a slice containing the labels of
+ // all Describe/Context/When containers in this spec's hierarchy
+ ContainerHierarchyLabels [][]string
+
+ // LeafNodeType, LeafNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text
+ // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be
+ // one of the NodeTypesForSuiteLevelNodes node types)
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+
+ // State captures whether the spec has passed, failed, etc.
+ State SpecState
+
+ // IsSerial captures whether the spec has the Serial decorator
+ IsSerial bool
+
+ // IsInOrderedContainer captures whether the spec appears in an Ordered container
+ IsInOrderedContainer bool
+
+ // StartTime and EndTime capture the start and end time of the spec
+ StartTime time.Time
+ EndTime time.Time
+
+ // RunTime captures the duration of the spec
+ RunTime time.Duration
+
+ // ParallelProcess captures the parallel process that this spec ran on
+ ParallelProcess int
+
+ // RunningInParallel captures whether this spec is part of a suite that ran in parallel
+ RunningInParallel bool
+
+ //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip())
+ //It includes detailed information about the Failure
+ Failure Failure
+
+ // NumAttempts captures the number of times this Spec was run.
+ // Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+ // Repeated specs can be retried with the use of the MustPassRepeatedly decorator
+ NumAttempts int
+
+ // MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator.
+ MaxFlakeAttempts int
+
+ // MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator
+ MaxMustPassRepeatedly int
+
+ // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter
+ CapturedGinkgoWriterOutput string
+
+ // CapturedStdOutErr contains text printed to stdout/stderr (when running in parallel)
+ // This is always empty when running in series or calling CurrentSpecReport()
+ // It is used internally by Ginkgo's reporter
+ CapturedStdOutErr string
+
+ // ReportEntries contains any reports added via `AddReportEntry`
+ ReportEntries ReportEntries
+
+ // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator
+ ProgressReports []ProgressReport
+
+ // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode.
+ AdditionalFailures []AdditionalFailure
+
+ // SpecEvents capture additional events that occur during the spec run
+ SpecEvents SpecEvents
+}
+
+func (report SpecReport) MarshalJSON() ([]byte, error) {
+ //All this to avoid emitting an empty Failure struct in the JSON
+ out := struct {
+ ContainerHierarchyTexts []string
+ ContainerHierarchyLocations []CodeLocation
+ ContainerHierarchyLabels [][]string
+ LeafNodeType NodeType
+ LeafNodeLocation CodeLocation
+ LeafNodeLabels []string
+ LeafNodeText string
+ State SpecState
+ StartTime time.Time
+ EndTime time.Time
+ RunTime time.Duration
+ ParallelProcess int
+ Failure *Failure `json:",omitempty"`
+ NumAttempts int
+ MaxFlakeAttempts int
+ MaxMustPassRepeatedly int
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ CapturedStdOutErr string `json:",omitempty"`
+ ReportEntries ReportEntries `json:",omitempty"`
+ ProgressReports []ProgressReport `json:",omitempty"`
+ AdditionalFailures []AdditionalFailure `json:",omitempty"`
+ SpecEvents SpecEvents `json:",omitempty"`
+ }{
+ ContainerHierarchyTexts: report.ContainerHierarchyTexts,
+ ContainerHierarchyLocations: report.ContainerHierarchyLocations,
+ ContainerHierarchyLabels: report.ContainerHierarchyLabels,
+ LeafNodeType: report.LeafNodeType,
+ LeafNodeLocation: report.LeafNodeLocation,
+ LeafNodeLabels: report.LeafNodeLabels,
+ LeafNodeText: report.LeafNodeText,
+ State: report.State,
+ StartTime: report.StartTime,
+ EndTime: report.EndTime,
+ RunTime: report.RunTime,
+ ParallelProcess: report.ParallelProcess,
+ Failure: nil,
+ ReportEntries: nil,
+ NumAttempts: report.NumAttempts,
+ MaxFlakeAttempts: report.MaxFlakeAttempts,
+ MaxMustPassRepeatedly: report.MaxMustPassRepeatedly,
+ CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput,
+ CapturedStdOutErr: report.CapturedStdOutErr,
+ }
+
+ if !report.Failure.IsZero() {
+ out.Failure = &(report.Failure)
+ }
+ if len(report.ReportEntries) > 0 {
+ out.ReportEntries = report.ReportEntries
+ }
+ if len(report.ProgressReports) > 0 {
+ out.ProgressReports = report.ProgressReports
+ }
+ if len(report.AdditionalFailures) > 0 {
+ out.AdditionalFailures = report.AdditionalFailures
+ }
+ if len(report.SpecEvents) > 0 {
+ out.SpecEvents = report.SpecEvents
+ }
+
+ return json.Marshal(out)
+}
+
+// CombinedOutput returns a single string representation of both CapturedStdOutErr and CapturedGinkgoWriterOutput
+// Note that both are empty when using CurrentSpecReport() so CurrentSpecReport().CombinedOutput() will always be empty.
+// CombinedOutput() is used internally by Ginkgo's reporter.
+func (report SpecReport) CombinedOutput() string {
+ if report.CapturedStdOutErr == "" {
+ return report.CapturedGinkgoWriterOutput
+ }
+ if report.CapturedGinkgoWriterOutput == "" {
+ return report.CapturedStdOutErr
+ }
+ return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput
+}
+
+// Failed returns true if report.State is one of the SpecStateFailureStates
+// (SpecStateFailed, SpecStateTimedout, SpecStatePanicked, SpecStateInterrupted, SpecStateAborted)
+func (report SpecReport) Failed() bool {
+ return report.State.Is(SpecStateFailureStates)
+}
+
+// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText
+func (report SpecReport) FullText() string {
+ texts := []string{}
+ texts = append(texts, report.ContainerHierarchyTexts...)
+ if report.LeafNodeText != "" {
+ texts = append(texts, report.LeafNodeText)
+ }
+ return strings.Join(texts, " ")
+}
+
+// Labels returns a deduped set of all the spec's Labels.
+func (report SpecReport) Labels() []string {
+ out := []string{}
+ seen := map[string]bool{}
+ for _, labels := range report.ContainerHierarchyLabels {
+ for _, label := range labels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+ }
+ for _, label := range report.LeafNodeLabels {
+ if !seen[label] {
+ seen[label] = true
+ out = append(out, label)
+ }
+ }
+
+ return out
+}
+
+// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query
+func (report SpecReport) MatchesLabelFilter(query string) (bool, error) {
+ filter, err := ParseLabelFilter(query)
+ if err != nil {
+ return false, err
+ }
+ return filter(report.Labels()), nil
+}
+
+// FileName() returns the name of the file containing the spec
+func (report SpecReport) FileName() string {
+ return report.LeafNodeLocation.FileName
+}
+
+// LineNumber() returns the line number of the leaf node
+func (report SpecReport) LineNumber() int {
+ return report.LeafNodeLocation.LineNumber
+}
+
+// FailureMessage() returns the failure message (or empty string if the test hasn't failed)
+func (report SpecReport) FailureMessage() string {
+ return report.Failure.Message
+}
+
+// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed)
+func (report SpecReport) FailureLocation() CodeLocation {
+ return report.Failure.Location
+}
+
+// Timeline() returns a timeline view of the report
+func (report SpecReport) Timeline() Timeline {
+ timeline := Timeline{}
+ if !report.Failure.IsZero() {
+ timeline = append(timeline, report.Failure)
+ if report.Failure.AdditionalFailure != nil {
+ timeline = append(timeline, *(report.Failure.AdditionalFailure))
+ }
+ }
+ for _, additionalFailure := range report.AdditionalFailures {
+ timeline = append(timeline, additionalFailure)
+ }
+ for _, reportEntry := range report.ReportEntries {
+ timeline = append(timeline, reportEntry)
+ }
+ for _, progressReport := range report.ProgressReports {
+ timeline = append(timeline, progressReport)
+ }
+ for _, specEvent := range report.SpecEvents {
+ timeline = append(timeline, specEvent)
+ }
+ sort.Sort(timeline)
+ return timeline
+}
+
+type SpecReports []SpecReport
+
+// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes
+func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ count++
+ }
+ }
+
+ out := make(SpecReports, count)
+ j := 0
+ for i := range reports {
+ if reports[i].LeafNodeType.Is(nodeTypes) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
+
+// WithState returns the subset of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) WithState(states SpecState) SpecReports {
+ count := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ count++
+ }
+ }
+
+ out, j := make(SpecReports, count), 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ out[j] = reports[i]
+ j++
+ }
+ }
+ return out
+}
+
+// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates
+func (reports SpecReports) CountWithState(states SpecState) int {
+ n := 0
+ for i := range reports {
+ if reports[i].State.Is(states) {
+ n += 1
+ }
+ }
+ return n
+}
+
+// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts.
+func (reports SpecReports) CountOfFlakedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts
+func (reports SpecReports) CountOfRepeatedSpecs() int {
+ n := 0
+ for i := range reports {
+ if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 {
+ n += 1
+ }
+ }
+ return n
+}
+
+// TimelineLocation captures the location of an event in the spec's timeline
+type TimelineLocation struct {
+ //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream
+ Offset int `json:",omitempty"`
+
+ //Order is the order of the event with respect to other events. The absolute value of Order
+ //is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order
+ Order int `json:",omitempty"`
+
+ Time time.Time
+}
+
+// TimelineEvent represent an event on the timeline
+// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it
+type TimelineEvent interface {
+ GetTimelineLocation() TimelineLocation
+}
+
+type Timeline []TimelineEvent
+
+func (t Timeline) Len() int { return len(t) }
+func (t Timeline) Less(i, j int) bool {
+ return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order
+}
+func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t Timeline) WithoutHiddenReportEntries() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline {
+ out := Timeline{}
+ for _, event := range t {
+ if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() {
+ continue
+ }
+ out = append(out, event)
+ }
+ return out
+}
+
+// Failure captures failure information for an individual test
+type Failure struct {
+ // Message - the failure message passed into Fail(...). When using a matcher library
+ // like Gomega, this will contain the failure message generated by Gomega.
+ //
+ // Message is also populated if the user has called Skip(...).
+ Message string
+
+ // Location - the CodeLocation where the failure occurred
+ // This CodeLocation will include a fully-populated StackTrace
+ Location CodeLocation
+
+ TimelineLocation TimelineLocation
+
+ // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked)
+ // then ForwardedPanic will be populated with a string representation of the captured panic.
+ ForwardedPanic string `json:",omitempty"`
+
+ // FailureNodeContext - one of three contexts describing the node in which the failure occurred:
+ // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated
+ // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated.
+ // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated.
+ //
+ // FailureNodeType will contain the NodeType of the node in which the failure occurred.
+ // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred.
+ // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
+ FailureNodeContext FailureNodeContext `json:",omitempty"`
+
+ FailureNodeType NodeType `json:",omitempty"`
+
+ FailureNodeLocation CodeLocation `json:",omitempty"`
+
+ FailureNodeContainerIndex int `json:",omitempty"`
+
+ //ProgressReport is populated if the spec was interrupted or timed out
+ ProgressReport ProgressReport `json:",omitempty"`
+
+ //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck.
+ AdditionalFailure *AdditionalFailure `json:",omitempty"`
+}
+
+func (f Failure) IsZero() bool {
+ return f.Message == "" && (f.Location == CodeLocation{})
+}
+
+func (f Failure) GetTimelineLocation() TimelineLocation {
+ return f.TimelineLocation
+}
+
+// FailureNodeContext captures the location context for the node containing the failing line of code
+type FailureNodeContext uint
+
+const (
+ FailureNodeContextInvalid FailureNodeContext = iota
+
+ FailureNodeIsLeafNode
+ FailureNodeAtTopLevel
+ FailureNodeInContainer
+)
+
+var fncEnumSupport = NewEnumSupport(map[uint]string{
+ uint(FailureNodeContextInvalid): "INVALID FAILURE NODE CONTEXT",
+ uint(FailureNodeIsLeafNode): "leaf-node",
+ uint(FailureNodeAtTopLevel): "top-level",
+ uint(FailureNodeInContainer): "in-container",
+})
+
+func (fnc FailureNodeContext) String() string {
+ return fncEnumSupport.String(uint(fnc))
+}
+func (fnc *FailureNodeContext) UnmarshalJSON(b []byte) error {
+ out, err := fncEnumSupport.UnmarshJSON(b)
+ *fnc = FailureNodeContext(out)
+ return err
+}
+func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) {
+ return fncEnumSupport.MarshJSON(uint(fnc))
+}
+
+// AdditionalFailure captures any additional failures that occur after the initial failure of a spec
+// these typically occur in clean up nodes after the spec has failed.
+// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is
+type AdditionalFailure struct {
+ State SpecState
+ Failure Failure
+}
+
+func (f AdditionalFailure) GetTimelineLocation() TimelineLocation {
+ return f.Failure.TimelineLocation
+}
+
+// SpecState captures the state of a spec
+// To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)`
+type SpecState uint
+
+const (
+ SpecStateInvalid SpecState = 0
+
+ SpecStatePending SpecState = 1 << iota
+ SpecStateSkipped
+ SpecStatePassed
+ SpecStateFailed
+ SpecStateAborted
+ SpecStatePanicked
+ SpecStateInterrupted
+ SpecStateTimedout
+)
+
+var ssEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecStateInvalid): "INVALID SPEC STATE",
+ uint(SpecStatePending): "pending",
+ uint(SpecStateSkipped): "skipped",
+ uint(SpecStatePassed): "passed",
+ uint(SpecStateFailed): "failed",
+ uint(SpecStateAborted): "aborted",
+ uint(SpecStatePanicked): "panicked",
+ uint(SpecStateInterrupted): "interrupted",
+ uint(SpecStateTimedout): "timedout",
+})
+
+func (ss SpecState) String() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss SpecState) GomegaString() string {
+ return ssEnumSupport.String(uint(ss))
+}
+func (ss *SpecState) UnmarshalJSON(b []byte) error {
+ out, err := ssEnumSupport.UnmarshJSON(b)
+ *ss = SpecState(out)
+ return err
+}
+func (ss SpecState) MarshalJSON() ([]byte, error) {
+ return ssEnumSupport.MarshJSON(uint(ss))
+}
+
+var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted
+
+func (ss SpecState) Is(states SpecState) bool {
+ return ss&states != 0
+}
+
+// ProgressReport captures the progress of the current spec. It is, effectively, a structured Ginkgo-aware stack trace
+type ProgressReport struct {
+ Message string `json:",omitempty"`
+ ParallelProcess int `json:",omitempty"`
+ RunningInParallel bool `json:",omitempty"`
+
+ ContainerHierarchyTexts []string `json:",omitempty"`
+ LeafNodeText string `json:",omitempty"`
+ LeafNodeLocation CodeLocation `json:",omitempty"`
+ SpecStartTime time.Time `json:",omitempty"`
+
+ CurrentNodeType NodeType `json:",omitempty"`
+ CurrentNodeText string `json:",omitempty"`
+ CurrentNodeLocation CodeLocation `json:",omitempty"`
+ CurrentNodeStartTime time.Time `json:",omitempty"`
+
+ CurrentStepText string `json:",omitempty"`
+ CurrentStepLocation CodeLocation `json:",omitempty"`
+ CurrentStepStartTime time.Time `json:",omitempty"`
+
+ AdditionalReports []string `json:",omitempty"`
+
+ CapturedGinkgoWriterOutput string `json:",omitempty"`
+ TimelineLocation TimelineLocation `json:",omitempty"`
+
+ Goroutines []Goroutine `json:",omitempty"`
+}
+
+func (pr ProgressReport) IsZero() bool {
+ return pr.CurrentNodeType == NodeTypeInvalid
+}
+
+func (pr ProgressReport) Time() time.Time {
+ return pr.TimelineLocation.Time
+}
+
+func (pr ProgressReport) SpecGoroutine() Goroutine {
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine {
+ return goroutine
+ }
+ }
+ return Goroutine{}
+}
+
+func (pr ProgressReport) HighlightedGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) OtherGoroutines() []Goroutine {
+ out := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ continue
+ }
+ out = append(out, goroutine)
+ }
+ return out
+}
+
+func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport {
+ out := pr
+ out.CapturedGinkgoWriterOutput = ""
+ return out
+}
+
+func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport {
+ out := pr
+ filteredGoroutines := []Goroutine{}
+ for _, goroutine := range pr.Goroutines {
+ if goroutine.IsSpecGoroutine || goroutine.HasHighlights() {
+ filteredGoroutines = append(filteredGoroutines, goroutine)
+ }
+ }
+ out.Goroutines = filteredGoroutines
+ return out
+}
+
+func (pr ProgressReport) GetTimelineLocation() TimelineLocation {
+ return pr.TimelineLocation
+}
+
+type Goroutine struct {
+ ID uint64
+ State string
+ Stack []FunctionCall
+ IsSpecGoroutine bool
+}
+
+func (g Goroutine) IsZero() bool {
+ return g.ID == 0
+}
+
+func (g Goroutine) HasHighlights() bool {
+ for _, fc := range g.Stack {
+ if fc.Highlight {
+ return true
+ }
+ }
+
+ return false
+}
+
+type FunctionCall struct {
+ Function string
+ Filename string
+ Line int
+ Highlight bool `json:",omitempty"`
+ Source []string `json:",omitempty"`
+ SourceHighlight int `json:",omitempty"`
+}
+
+// NodeType captures the type of a given Ginkgo Node
+type NodeType uint
+
+const (
+ NodeTypeInvalid NodeType = 0
+
+ NodeTypeContainer NodeType = 1 << iota
+ NodeTypeIt
+
+ NodeTypeBeforeEach
+ NodeTypeJustBeforeEach
+ NodeTypeAfterEach
+ NodeTypeJustAfterEach
+
+ NodeTypeBeforeAll
+ NodeTypeAfterAll
+
+ NodeTypeBeforeSuite
+ NodeTypeSynchronizedBeforeSuite
+ NodeTypeAfterSuite
+ NodeTypeSynchronizedAfterSuite
+
+ NodeTypeReportBeforeEach
+ NodeTypeReportAfterEach
+ NodeTypeReportBeforeSuite
+ NodeTypeReportAfterSuite
+
+ NodeTypeCleanupInvalid
+ NodeTypeCleanupAfterEach
+ NodeTypeCleanupAfterAll
+ NodeTypeCleanupAfterSuite
+)
+
+var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt
+var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite
+var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite
+
+var ntEnumSupport = NewEnumSupport(map[uint]string{
+ uint(NodeTypeInvalid): "INVALID NODE TYPE",
+ uint(NodeTypeContainer): "Container",
+ uint(NodeTypeIt): "It",
+ uint(NodeTypeBeforeEach): "BeforeEach",
+ uint(NodeTypeJustBeforeEach): "JustBeforeEach",
+ uint(NodeTypeAfterEach): "AfterEach",
+ uint(NodeTypeJustAfterEach): "JustAfterEach",
+ uint(NodeTypeBeforeAll): "BeforeAll",
+ uint(NodeTypeAfterAll): "AfterAll",
+ uint(NodeTypeBeforeSuite): "BeforeSuite",
+ uint(NodeTypeSynchronizedBeforeSuite): "SynchronizedBeforeSuite",
+ uint(NodeTypeAfterSuite): "AfterSuite",
+ uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite",
+ uint(NodeTypeReportBeforeEach): "ReportBeforeEach",
+ uint(NodeTypeReportAfterEach): "ReportAfterEach",
+ uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite",
+ uint(NodeTypeReportAfterSuite): "ReportAfterSuite",
+ uint(NodeTypeCleanupInvalid): "DeferCleanup",
+ uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)",
+ uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)",
+ uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)",
+})
+
+func (nt NodeType) String() string {
+ return ntEnumSupport.String(uint(nt))
+}
+func (nt *NodeType) UnmarshalJSON(b []byte) error {
+ out, err := ntEnumSupport.UnmarshJSON(b)
+ *nt = NodeType(out)
+ return err
+}
+func (nt NodeType) MarshalJSON() ([]byte, error) {
+ return ntEnumSupport.MarshJSON(uint(nt))
+}
+
+func (nt NodeType) Is(nodeTypes NodeType) bool {
+ return nt&nodeTypes != 0
+}
+
+/*
+SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events.
+*/
+type SpecEvent struct {
+ SpecEventType SpecEventType
+
+ CodeLocation CodeLocation
+ TimelineLocation TimelineLocation
+
+ Message string `json:",omitempty"`
+ Duration time.Duration `json:",omitempty"`
+ NodeType NodeType `json:",omitempty"`
+ Attempt int `json:",omitempty"`
+}
+
+func (se SpecEvent) GetTimelineLocation() TimelineLocation {
+ return se.TimelineLocation
+}
+
+func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool {
+ return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd)
+}
+
+func (se SpecEvent) GomegaString() string {
+ out := &strings.Builder{}
+ out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ")
+ if se.Message != "" {
+ out.WriteString("Message=")
+ out.WriteString(`"` + se.Message + `",`)
+ }
+ if se.Duration != 0 {
+ out.WriteString("Duration=" + se.Duration.String() + ",")
+ }
+ if se.NodeType != NodeTypeInvalid {
+ out.WriteString("NodeType=" + se.NodeType.String() + ",")
+ }
+ if se.Attempt != 0 {
+ out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",")
+ }
+ out.WriteString("CL=" + se.CodeLocation.String() + ",")
+ out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset))
+
+ return out.String()
+}
+
+type SpecEvents []SpecEvent
+
+func (se SpecEvents) WithType(seType SpecEventType) SpecEvents {
+ out := SpecEvents{}
+ for _, event := range se {
+ if event.SpecEventType.Is(seType) {
+ out = append(out, event)
+ }
+ }
+ return out
+}
+
+type SpecEventType uint
+
+const (
+ SpecEventInvalid SpecEventType = 0
+
+ SpecEventByStart SpecEventType = 1 << iota
+ SpecEventByEnd
+ SpecEventNodeStart
+ SpecEventNodeEnd
+ SpecEventSpecRepeat
+ SpecEventSpecRetry
+)
+
+var seEnumSupport = NewEnumSupport(map[uint]string{
+ uint(SpecEventInvalid): "INVALID SPEC EVENT",
+ uint(SpecEventByStart): "By",
+ uint(SpecEventByEnd): "By (End)",
+ uint(SpecEventNodeStart): "Node",
+ uint(SpecEventNodeEnd): "Node (End)",
+ uint(SpecEventSpecRepeat): "Repeat",
+ uint(SpecEventSpecRetry): "Retry",
+})
+
+func (se SpecEventType) String() string {
+ return seEnumSupport.String(uint(se))
+}
+func (se *SpecEventType) UnmarshalJSON(b []byte) error {
+ out, err := seEnumSupport.UnmarshJSON(b)
+ *se = SpecEventType(out)
+ return err
+}
+func (se SpecEventType) MarshalJSON() ([]byte, error) {
+ return seEnumSupport.MarshJSON(uint(se))
+}
+
+func (se SpecEventType) Is(specEventTypes SpecEventType) bool {
+ return se&specEventTypes != 0
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/version.go b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/version.go
new file mode 100644
index 00000000000..acab03492ac
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/ginkgo/v2/types/version.go
@@ -0,0 +1,3 @@
+package types
+
+const VERSION = "2.19.0"
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/.gitignore b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/.gitignore
index 720c13cba84..425d0a509fe 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/.gitignore
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/.gitignore
@@ -3,3 +3,5 @@
.
.idea
gomega.iml
+TODO
+.vscode
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/.travis.yml b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/.travis.yml
deleted file mode 100644
index c6391855a29..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-
-go:
- - 1.12.x
- - 1.13.x
- - gotip
-
-env:
- - GO111MODULE=on
-
-install:
- - go get -v ./...
- - go build ./...
- - go get github.com/onsi/ginkgo
- - go install github.com/onsi/ginkgo/ginkgo
-
-script: make test
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/CHANGELOG.md b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/CHANGELOG.md
index 59ad384aade..62af14ad2f2 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/CHANGELOG.md
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/CHANGELOG.md
@@ -1,3 +1,540 @@
+## 1.33.1
+
+### Fixes
+- fix confusing eventually docs [3a66379]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a]
+
+## 1.33.0
+
+### Features
+
+`Receive` now accepts `Receive(<POINTER>, MATCHER)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer.
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb]
+- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596]
+
+## 1.32.0
+
+### Maintenance
+- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197]
+
+ This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one.
+
+- chore: test with Go 1.22 (#733) [32ef35e]
+- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387]
+- Bump github-pages and jekyll-feed in /docs (#732) [b71e477]
+- docs: fix typo and broken anchor link to gstruct [f460154]
+- docs: fix HaveEach matcher signature [a2862e4]
+
+## 1.31.1
+
+### Fixes
+- Inverted arguments order of FailureMessage of BeComparableToMatcher [e0dd999]
+- Update test in case keeping msg is desired [ad1a367]
+
+### Maintenance
+- Show how to import the format sub package [24e958d]
+- tidy up go.sum [26661b8]
+- bump dependencies [bde8f7a]
+
+## 1.31.0
+
+### Features
+- Async assertions include context cancellation cause if present [121c37f]
+
+### Maintenance
+- Bump minimum go version [dee1e3c]
+- docs: fix typo in example usage "occured" -> "occurred" [49005fe]
+- Bump actions/setup-go from 4 to 5 (#714) [f1c8757]
+- Bump github/codeql-action from 2 to 3 (#715) [9836e76]
+- Bump github.com/onsi/ginkgo/v2 from 2.13.0 to 2.13.2 (#713) [54726f0]
+- Bump golang.org/x/net from 0.17.0 to 0.19.0 (#711) [df97ecc]
+- docs: fix `HaveExactElement` typo (#712) [a672c86]
+
+## 1.30.0
+
+### Features
+- BeTrueBecause and BeFalseBecause allow for better failure messages [4da4c7f]
+
+### Maintenance
+- Bump actions/checkout from 3 to 4 (#694) [6ca6e97]
+- doc: fix type on gleak go doc [f1b8343]
+
+## 1.29.0
+
+### Features
+- MatchError can now take an optional func(error) bool + description [2b39142]
+
+## 1.28.1
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.12.0 to 2.13.0 [635d196]
+- Bump github.com/google/go-cmp from 0.5.9 to 0.6.0 [14f8859]
+- Bump golang.org/x/net from 0.14.0 to 0.17.0 [d8a6508]
+- #703 doc(matchers): HaveEach() doc comment updated [2705bdb]
+- Minor typos (#699) [375648c]
+
+## 1.28.0
+
+### Features
+- Add VerifyHost handler to ghttp (#698) [0b03b36]
+
+### Fixes
+- Read Body for Newer Responses in HaveHTTPBodyMatcher (#686) [18d6673]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.11.0 to 2.12.0 (#693) [55a33f3]
+- Typo in matchers.go (#691) [de68e8f]
+- Bump commonmarker from 0.23.9 to 0.23.10 in /docs (#690) [ab17f5e]
+- chore: update test matrix for Go 1.21 (#689) [5069017]
+- Bump golang.org/x/net from 0.12.0 to 0.14.0 (#688) [babe25f]
+
+## 1.27.10
+
+### Fixes
+- fix: go 1.21 adding goroutine ID to creator+location (#685) [bdc7803]
+
+## 1.27.9
+
+### Fixes
+- Prevent nil-dereference in format.Object for boxed nil error (#681) [3b31fc3]
+
+### Maintenance
+- Bump golang.org/x/net from 0.11.0 to 0.12.0 (#679) [360849b]
+- chore: use String() instead of fmt.Sprintf (#678) [86f3659]
+- Bump golang.org/x/net from 0.10.0 to 0.11.0 (#674) [642ead0]
+- chore: unnecessary use of fmt.Sprintf (#677) [ceb9ca6]
+- Bump github.com/onsi/ginkgo/v2 from 2.10.0 to 2.11.0 (#675) [a2087d8]
+- docs: fix ContainSubstring references (#673) [fc9a89f]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.7 to 2.10.0 (#671) [9076019]
+
+## 1.27.8
+
+### Fixes
+- HaveExactElement should not call FailureMessage if a submatcher returned an error [096f392]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.5 to 2.9.7 (#669) [8884bee]
+
+## 1.27.7
+
+### Fixes
+- fix: gcustom.MakeMatcher accepts nil as actual value (#666) [57054d5]
+
+### Maintenance
+- update gitignore [05c1bc6]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.4 to 2.9.5 (#663) [7cadcf6]
+- Bump golang.org/x/net from 0.9.0 to 0.10.0 (#662) [b524839]
+- Bump github.com/onsi/ginkgo/v2 from 2.9.2 to 2.9.4 (#661) [5f44694]
+- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#657) [05dc99a]
+- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#658) [3a033d1]
+- Replace deprecated NewGomegaWithT with NewWithT (#659) [a19238f]
+- Bump golang.org/x/net from 0.8.0 to 0.9.0 (#656) [29ed041]
+- Bump actions/setup-go from 3 to 4 (#651) [11b2080]
+
+## 1.27.6
+
+### Fixes
+- Allow collections matchers to work correctly when expected has nil elements [60e7cf3]
+
+### Maintenance
+- updates MatchError godoc comment to also accept a Gomega matcher (#654) [67b869d]
+
+## 1.27.5
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.1 to 2.9.2 (#653) [a215021]
+- Bump github.com/go-task/slim-sprig (#652) [a26fed8]
+
+## 1.27.4
+
+### Fixes
+- improve error formatting and remove duplication of error message in Eventually/Consistently [854f075]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.9.0 to 2.9.1 (#650) [ccebd9b]
+
+## 1.27.3
+
+### Fixes
+- format.Object now always includes err.Error() when passed an error [86d97ef]
+- Fix HaveExactElements to work inside ContainElement or other collection matchers (#648) [636757e]
+
+### Maintenance
+- Bump github.com/golang/protobuf from 1.5.2 to 1.5.3 (#649) [cc16689]
+- Bump github.com/onsi/ginkgo/v2 from 2.8.4 to 2.9.0 (#646) [e783366]
+
+## 1.27.2
+
+### Fixes
+- improve poll progress message when polling a consistently that has been passing [28a319b]
+
+### Maintenance
+- bump ginkgo
+- remove tools.go hack as Ginkgo 2.8.2 automatically pulls in the cli dependencies [81443b3]
+
+## 1.27.1
+
+### Maintenance
+
+- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#640) [bc686cd]
+
+## 1.27.0
+
+### Features
+- Add HaveExactElements matcher (#634) [9d50783]
+- update Gomega docs to discuss GinkgoHelper() [be32774]
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.8.0 to 2.8.1 (#639) [296a68b]
+- Bump golang.org/x/net from 0.5.0 to 0.6.0 (#638) [c2b098b]
+- Bump github-pages from 227 to 228 in /docs (#636) [a9069ab]
+- test: update matrix for Go 1.20 (#635) [6bd25c8]
+- Bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.8.0 (#631) [5445f8b]
+- Bump webrick from 1.7.0 to 1.8.1 in /docs (#630) [03e93bb]
+- codeql: add ruby language (#626) [63c7d21]
+- dependabot: add bundler package-ecosystem for docs (#625) [d92f963]
+
+## 1.26.0
+
+### Features
+- When a polled function returns an error, keep track of the actual and report on the matcher state of the last non-errored actual [21f3090]
+- improve eventually failure message output [c530fb3]
+
+### Fixes
+- fix several documentation spelling issues [e2eff1f]
+
+
+## 1.25.0
+
+### Features
+- add `MustPassRepeatedly(int)` to asyncAssertion (#619) [4509f72]
+- compare unwrapped errors using DeepEqual (#617) [aaeaa5d]
+
+### Maintenance
+- Bump golang.org/x/net from 0.4.0 to 0.5.0 (#614) [c7cfea4]
+- Bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0 (#615) [71b8adb]
+- Docs: Fix typo "MUltiple" -> "Multiple" (#616) [9351dda]
+- clean up go.sum [cd1dc1d]
+
+## 1.24.2
+
+### Fixes
+- Correctly handle assertion failure panics for eventually/consistently "g Gomega"s in a goroutine [78f1660]
+- docs:Fix typo "you an" -> "you can" (#607) [3187c1f]
+- fixes issue #600 (#606) [808d192]
+
+### Maintenance
+- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8]
+- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9]
+
+## 1.24.1
+
+### Fixes
+- maintain backward compatibility for Eventually and Consistently's signatures [4c7df5e]
+- fix small typo (#601) [ea0ebe6]
+
+### Maintenance
+- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372]
+- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb]
+- fix label-filter in test.yml [d795db6]
+- stop running flakey tests and rely on external network dependencies in CI [7133290]
+
+## 1.24.0
+
+### Features
+
+Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers.
+
+This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable.
+
+### Maintenance
+
+- Update BeComparableTo documentation [756eaa0]
+
+## 1.23.0
+
+### Features
+- Custom formatting on a per-type basis can be provided using `format.RegisterCustomFormatter()` -- see the docs [here](https://onsi.github.io/gomega/#adjusting-output)
+
+- Substantial improvement have been made to `StopTrying()`:
+ - Users can now use `StopTrying().Wrap(err)` to wrap errors and `StopTrying().Attach(description, object)` to attach arbitrary objects to the `StopTrying()` error
+ - `StopTrying()` is now always interpreted as a failure. If you are an early adopter of `StopTrying()` you may need to change your code as the prior version would match against the returned value even if `StopTrying()` was returned. Going forward the `StopTrying()` api should remain stable.
+ - `StopTrying()` and `StopTrying().Now()` can both be used in matchers - not just polled functions.
+
+- `TryAgainAfter(duration)` is used like `StopTrying()` but instructs `Eventually` and `Consistently` that the poll should be tried again after the specified duration. This allows you to dynamically adjust the polling duration.
+
+- `ctx` can now be passed-in as the first argument to `Eventually` and `Consistently`.
+
+## Maintenance
+
+- Bump github.com/onsi/ginkgo/v2 from 2.3.0 to 2.3.1 (#597) [afed901]
+- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#599) [7c691b3]
+- Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#587) [ff22665]
+
+## 1.22.1
+
+## Fixes
+- When passed a context and no explicit timeout, Eventually will only timeout when the context is cancelled [e5105cf]
+- Allow StopTrying() to be wrapped [bf3cba9]
+
+## Maintenance
+- bump to ginkgo v2.3.0 [c5d5c39]
+
+## 1.22.0
+
+### Features
+
+Several improvements have been made to `Eventually` and `Consistently` in this and the most recent releases:
+
+- Eventually and Consistently can take a context.Context [65c01bc]
+ This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+- Eventually/Consistently will forward an attached context to functions that ask for one [e2091c5]
+- Eventually/Consistently supports passing arguments to functions via WithArguments() [a2dc7c3]
+- Eventually and Consistently can now be stopped early with StopTrying(message) and StopTrying(message).Now() [52976bb]
+
+These improvements are all documented in [Gomega's docs](https://onsi.github.io/gomega/#making-asynchronous-assertions)
+
+## Fixes
+
+## Maintenance
+
+## 1.21.1
+
+### Features
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9]
+
+## 1.21.0
+
+### Features
+- Eventually and Consistently can take a context.Context [65c01bc]
+ This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts.
+- Introduces Eventually.Within.ProbeEvery with tests and documentation (#591) [f633800]
+- New BeKeyOf matcher with documentation and unit tests (#590) [fb586b3]
+
+## Fixes
+- Cover the entire gmeasure suite with leak detection [8c54344]
+- Fix gmeasure leak [119d4ce]
+- Ignore new Ginkgo ProgressSignal goroutine in gleak [ba548e2]
+
+## Maintenance
+
+- Fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency (#596) [12469a0]
+
+
+## 1.20.2
+
+## Fixes
+- label specs that rely on remote access; bump timeout on short-circuit test to make it less flaky [35eeadf]
+- gexec: allow more headroom for SIGABRT-related unit tests (#581) [5b78f40]
+- Enable reading from a closed gbytes.Buffer (#575) [061fd26]
+
+## Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.1.5 to 2.1.6 (#583) [55d895b]
+- Bump github.com/onsi/ginkgo/v2 from 2.1.4 to 2.1.5 (#582) [346de7c]
+
+## 1.20.1
+
+## Fixes
+- fix false positive gleaks when using ginkgo -p (#577) [cb46517]
+- Fix typos in gomega_dsl.go (#569) [5f71ed2]
+- don't panic on Eventually(nil), fixing #555 (#567) [9d1186f]
+- vet optional description args in assertions, fixing #560 (#566) [8e37808]
+
+## Maintenance
+- test: add new Go 1.19 to test matrix (#571) [40d7efe]
+- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#564) [5f26371]
+
+## 1.20.0
+
+## Features
+- New [`gleak`](https://onsi.github.io/gomega/#codegleakcode-finding-leaked-goroutines) experimental goroutine leak detection package! (#538) [85ba7bc]
+- New `BeComparableTo` matcher(#546) that uses `gocmp` to make comparisons [e77ea75]
+- New `HaveExistingField` matcher (#553) [fd130e1]
+- Document how to wrap Gomega (#539) [56714a4]
+
+## Fixes
+- Support pointer receivers in HaveField; fixes #543 (#544) [8dab36e]
+
+## Maintenance
+- Bump various dependencies:
+ - Upgrade to yaml.v3 (#556) [f5a83b1]
+ - Bump github/codeql-action from 1 to 2 (#549) [52f5adf]
+ - Bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#551) [5f3942d]
+ - Bump nokogiri from 1.13.4 to 1.13.6 in /docs (#554) [eb4b4c2]
+ - Use latest ginkgo (#535) [1c29028]
+ - Bump nokogiri from 1.13.3 to 1.13.4 in /docs (#541) [1ce84d5]
+ - Bump actions/setup-go from 2 to 3 (#540) [755485e]
+ - Bump nokogiri from 1.12.5 to 1.13.3 in /docs (#522) [4fbb0dc]
+ - Bump actions/checkout from 2 to 3 (#526) [ac49202]
+
+## 1.19.0
+
+## Features
+- New [`HaveEach`](https://onsi.github.io/gomega/#haveeachelement-interface) matcher to ensure that each and every element in an `array`, `slice`, or `map` satisfies the passed in matcher. (#523) [9fc2ae2] (#524) [c8ba582]
+- Users can now wrap the `Gomega` interface to implement custom behavior on each assertion. (#521) [1f2e714]
+- [`ContainElement`](https://onsi.github.io/gomega/#containelementelement-interface) now accepts an additional pointer argument. Elements that satisfy the matcher are stored in the pointer enabling developers to easily add subsequent, more detailed, assertions against the matching element. (#527) [1a4e27f]
+
+## Fixes
+- update RELEASING instructions to match ginkgo [0917cde]
+- Bump github.com/onsi/ginkgo/v2 from 2.0.0 to 2.1.3 (#519) [49ab4b0]
+- Fix CVE-2021-38561 (#534) [f1b4456]
+- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497]
+- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5]
+- Fix for Go 1.18 (#532) [56d2a29]
+- Document precedence of timeouts (#533) [b607941]
+
+## 1.18.1
+
+## Fixes
+- Add pointer support to HaveField matcher (#495) [79e41a3]
+
+## 1.18.0
+
+## Features
+- Docs now live on the master branch in the docs folder which will make for easier PRs. The docs also use Ginkgo 2.0's new docs html/css/js. [2570272]
+- New HaveValue matcher can handle actuals that are either values (in which case they are passed on unscathed) or pointers (in which case they are indirected). [Docs here.](https://onsi.github.io/gomega/#working-with-values) (#485) [bdc087c]
+- Gmeasure has been declared GA [360db9d]
+
+## Fixes
+- Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0]
+
+## Maintenance
+- Remove Travis workflow (#491) [72e6040]
+- Upgrade to Ginkgo 2.0.0 GA [f383637]
+- chore: fix description of HaveField matcher (#487) [2b4b2c0]
+- use tools.go to ensure Ginkgo cli dependencies are included [f58a52b]
+- remove dockerfile and simplify github actions to match ginkgo's actions [3f8160d]
+
+## 1.17.0
+
+### Features
+- Add HaveField matcher [3a26311]
+- add Error() assertions on the final error value of multi-return values (#480) [2f96943]
+- separate out offsets and timeouts (#478) [18a4723]
+- fix transformation error reporting (#479) [e001fab]
+- allow transform functions to report errors (#472) [bf93408]
+
+### Fixes
+Stop using deprecated ioutil package (#467) [07f405d]
+
+## 1.16.0
+
+### Features
+- feat: HaveHTTPStatus multiple expected values (#465) [aa69f1b]
+- feat: HaveHTTPHeaderWithValue() matcher (#463) [dd83a96]
+- feat: HaveHTTPBody matcher (#462) [504e1f2]
+- feat: formatter for HTTP responses (#461) [e5b3157]
+
+## 1.15.0
+
+### Fixes
+The previous version (1.14.0) introduced a change to allow `Eventually` and `Consistently` to support functions that make assertions. This was accomplished by overriding the global fail handler when running the callbacks passed to `Eventually/Consistently` in order to capture any resulting errors. Issue #457 uncovered a flaw with this approach: when multiple `Eventually`s are running concurrently they race when overriding the singleton global fail handler.
+
+1.15.0 resolves this by requiring users who want to make assertions in `Eventually/Consistently` call backs to explicitly pass in a function that takes a `Gomega` as an argument. The passed-in `Gomega` instance can be used to make assertions. Any failures will cause `Eventually` to retry the callback. This cleaner interface avoids the issue of swapping out globals but comes at the cost of changing the contract introduced in v1.14.0. As such 1.15.0 introduces a breaking change with respect to 1.14.0 - however we expect that adoption of this feature in 1.14.0 remains limited.
+
+In addition, 1.15.0 cleans up some of Gomega's internals. Most users shouldn't notice any differences stemming from the refactoring that was made.
+
+## 1.14.0
+
+### Features
+- gmeasure.SamplingConfig now supports a MinSamplingInterval [e94dbca]
+- Eventually and Consistently support functions that make assertions [2f04e6e]
+ - Eventually and Consistently now allow their passed-in functions to make assertions.
+ These assertions must pass or the function is considered to have failed and is retried.
+ - Eventually and Consistently can now take functions with no return values. These implicitly return nil
+ if they contain no failed assertion. Otherwise they return an error wrapping the first assertion failure. This allows
+ these functions to be used with the Succeed() matcher.
+ - Introduce InterceptGomegaFailure - an analogue to InterceptGomegaFailures - that captures the first assertion failure
+ and halts execution in its passed-in callback.
+
+### Fixes
+- Call Verify GHTTPWithGomega receiver funcs (#454) [496e6fd]
+- Build a binary with an expected name (#446) [7356360]
+
+## 1.13.0
+
+### Features
+- gmeasure provides BETA support for benchmarking (#447) [8f2dfbf]
+- Set consistently and eventually defaults on init (#443) [12eb778]
+
+## 1.12.0
+
+### Features
+- Add Satisfy() matcher (#437) [c548f31]
+- tweak truncation message [3360b8c]
+- Add format.GomegaStringer (#427) [cc80b6f]
+- Add Clear() method to gbytes.Buffer [c3c0920]
+
+### Fixes
+- Fix error message in BeNumericallyMatcher (#432) [09c074a]
+- Bump github.com/onsi/ginkgo from 1.12.1 to 1.16.2 (#442) [e5f6ea0]
+- Bump github.com/golang/protobuf from 1.4.3 to 1.5.2 (#431) [adae3bf]
+- Bump golang.org/x/net (#441) [3275b35]
+
+## 1.11.0
+
+### Features
+- feature: add index to gstruct element func (#419) [334e00d]
+- feat(gexec) Add CompileTest functions. Close #410 (#411) [47c613f]
+
+### Fixes
+- Check more carefully for nils in WithTransform (#423) [3c60a15]
+- fix: typo in Makefile [b82522a]
+- Allow WithTransform function to accept a nil value (#422) [b75d2f2]
+- fix: print value type for interface{} containers (#409) [f08e2dc]
+- fix(BeElementOf): consistently flatten expected values [1fa9468]
+
+## 1.10.5
+
+### Fixes
+- fix: collections matchers should display type of expectation (#408) [6b4eb5a]
+- fix(ContainElements): consistently flatten expected values [073b880]
+- fix(ConsistOf): consistently flatten expected values [7266efe]
+
+## 1.10.4
+
+### Fixes
+- update golang net library to more recent version without vulnerability (#406) [817a8b9]
+- Correct spelling: alloted -> allotted (#403) [0bae715]
+- fix a panic in MessageWithDiff with long message (#402) [ea06b9b]
+
+## 1.10.3
+
+### Fixes
+- updates golang/x/net to fix vulnerability detected by snyk (#394) [c479356]
+
+## 1.10.2
+
+### Fixes
+- Add ExpectWithOffset, EventuallyWithOffset and ConsistentlyWithOffset to WithT (#391) [990941a]
+
+## 1.10.1
+
+### Fixes
+- Update dependencies (#389) [9f5eecd]
+
+## 1.10.0
+
+### Features
+- Add HaveHTTPStatusMatcher (#378) [f335c94]
+- Changed matcher for content-type in VerifyJSONRepresenting (#377) [6024f5b]
+- Make ghttp usable with x-unit style tests (#376) [c0be499]
+- Implement PanicWith matcher (#381) [f8032b4]
+
+## 1.9.0
+
+### Features
+- Add ContainElements matcher (#370) [2f57380]
+- Output missing and extra elements in ConsistOf failure message [a31eda7]
+- Document method LargestMatching [7c5a280]
+
## 1.8.1
### Fixes
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/Makefile b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/Makefile
deleted file mode 100644
index c92cd56e3ad..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-test:
- [ -z "`gofmt -s -w -l -e .`" ]
- go vet
- ginkgo -p -r --randomizeAllSpecs --failOnPending --randomizeSuites --race
-
-.PHONY: test
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/README.md b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/README.md
index 76aa6b55851..d45a8c4e596 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/README.md
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/README.md
@@ -1,6 +1,6 @@
![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png)
-[![Build Status](https://travis-ci.org/onsi/gomega.svg?branch=master)](https://travis-ci.org/onsi/gomega)
+[![test](https://github.com/onsi/gomega/actions/workflows/test.yml/badge.svg)](https://github.com/onsi/gomega/actions/workflows/test.yml)
Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers).
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/RELEASING.md b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/RELEASING.md
index 998d64ee75b..9973fff49e0 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/RELEASING.md
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/RELEASING.md
@@ -1,12 +1,23 @@
A Gomega release is a tagged sha and a GitHub release. To cut a release:
1. Ensure CHANGELOG.md is up to date.
- - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release
+ - Use
+ ```bash
+ LAST_VERSION=$(git tag --sort=version:refname | tail -n1)
+ CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION)
+ echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md
+ ```
+ to update the changelog
- Categorize the changes into
- Breaking Changes (requires a major version)
- New Features (minor version)
- Fixes (fix version)
- Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact)
-2. Update GOMEGA_VERSION in `gomega_dsl.go`
-3. Push a commit with the version number as the commit message (e.g. `v1.3.0`)
-4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes.
+1. Update GOMEGA_VERSION in `gomega_dsl.go`
+1. Commit, push, and release:
+ ```
+ git commit -m "vM.m.p"
+ git push
+ gh release create "vM.m.p"
+ git fetch --tags origin master
+ ```
\ No newline at end of file
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/format/format.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/format/format.go
index fae25adceb7..6c1680638bf 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/format/format.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/format/format.go
@@ -7,6 +7,7 @@ Gomega's format package pretty-prints objects. It explores input objects recurs
package format
import (
+ "context"
"fmt"
"reflect"
"strconv"
@@ -17,6 +18,10 @@ import (
// Use MaxDepth to set the maximum recursion depth when printing deeply nested objects
var MaxDepth = uint(10)
+// MaxLength of the string representation of an object.
+// If MaxLength is set to 0, the Object will not be truncated.
+var MaxLength = 4000
+
/*
By default, all objects (even those that implement fmt.Stringer and fmt.GoStringer) are recursively inspected to generate output.
@@ -44,23 +49,68 @@ var TruncateThreshold uint = 50
// after the first diff location in a truncated string assertion error message.
var CharactersAroundMismatchToInclude uint = 5
-// Ctx interface defined here to keep backwards compatibility with go < 1.7
-// It matches the context.Context interface
-type Ctx interface {
- Deadline() (deadline time.Time, ok bool)
- Done() <-chan struct{}
- Err() error
- Value(key interface{}) interface{}
-}
-
-var contextType = reflect.TypeOf((*Ctx)(nil)).Elem()
+var contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
var timeType = reflect.TypeOf(time.Time{})
-//The default indentation string emitted by the format package
+// The default indentation string emitted by the format package
var Indent = " "
var longFormThreshold = 20
+// GomegaStringer allows for custom formating of objects for gomega.
+type GomegaStringer interface {
+ // GomegaString will be used to custom format an object.
+ // It does not follow UseStringerRepresentation value and will always be called regardless.
+ // It also ignores the MaxLength value.
+ GomegaString() string
+}
+
+/*
+CustomFormatters can be registered with Gomega via RegisterCustomFormatter()
+Any value to be rendered by Gomega is passed to each registered CustomFormatters.
+The CustomFormatter signals that it will handle formatting the value by returning (formatted-string, true)
+If the CustomFormatter does not want to handle the object it should return ("", false)
+
+Strings returned by CustomFormatters are not truncated
+*/
+type CustomFormatter func(value interface{}) (string, bool)
+type CustomFormatterKey uint
+
+var customFormatterKey CustomFormatterKey = 1
+
+type customFormatterKeyPair struct {
+ CustomFormatter
+ CustomFormatterKey
+}
+
+/*
+RegisterCustomFormatter registers a CustomFormatter and returns a CustomFormatterKey
+
+You can call UnregisterCustomFormatter with the returned key to unregister the associated CustomFormatter
+*/
+func RegisterCustomFormatter(customFormatter CustomFormatter) CustomFormatterKey {
+ key := customFormatterKey
+ customFormatterKey += 1
+ customFormatters = append(customFormatters, customFormatterKeyPair{customFormatter, key})
+ return key
+}
+
+/*
+UnregisterCustomFormatter unregisters a previously registered CustomFormatter. You should pass in the key returned by RegisterCustomFormatter
+*/
+func UnregisterCustomFormatter(key CustomFormatterKey) {
+ formatters := []customFormatterKeyPair{}
+ for _, f := range customFormatters {
+ if f.CustomFormatterKey == key {
+ continue
+ }
+ formatters = append(formatters, f)
+ }
+ customFormatters = formatters
+}
+
+var customFormatters = []customFormatterKeyPair{}
+
/*
Generates a formatted matcher success/failure message of the form:
@@ -105,7 +155,13 @@ func MessageWithDiff(actual, message, expected string) string {
tabLength := 4
spaceFromMessageToActual := tabLength + len(": ") - len(message)
- padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|"
+
+ paddingCount := spaceFromMessageToActual + spacesBeforeFormattedMismatch
+ if paddingCount < 0 {
+ return Message(formattedActual, message, formattedExpected)
+ }
+
+ padding := strings.Repeat(" ", paddingCount) + "|"
return Message(formattedActual, message+padding, formattedExpected)
}
@@ -161,6 +217,33 @@ func findFirstMismatch(a, b string) int {
return 0
}
+const truncateHelpText = `
+Gomega truncated this representation as it exceeds 'format.MaxLength'.
+Consider having the object provide a custom 'GomegaStringer' representation
+or adjust the parameters in Gomega's 'format' package.
+
+Learn more here: https://onsi.github.io/gomega/#adjusting-output
+`
+
+func truncateLongStrings(s string) string {
+ if MaxLength > 0 && len(s) > MaxLength {
+ var sb strings.Builder
+ for i, r := range s {
+ if i < MaxLength {
+ sb.WriteRune(r)
+ continue
+ }
+ break
+ }
+
+ sb.WriteString("...\n")
+ sb.WriteString(truncateHelpText)
+
+ return sb.String()
+ }
+ return s
+}
+
/*
Pretty prints the passed in object at the passed in indentation level.
@@ -175,45 +258,51 @@ Set PrintContextObjects to true to print the content of objects implementing con
func Object(object interface{}, indentation uint) string {
indent := strings.Repeat(Indent, int(indentation))
value := reflect.ValueOf(object)
- return fmt.Sprintf("%s<%s>: %s", indent, formatType(object), formatValue(value, indentation))
+ commonRepresentation := ""
+ if err, ok := object.(error); ok && !isNilValue(value) { // isNilValue check needed here to avoid nil deref due to boxed nil
+ commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent
+ }
+ return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation))
}
/*
IndentString takes a string and indents each line by the specified amount.
*/
func IndentString(s string, indentation uint) string {
+ return indentString(s, indentation, true)
+}
+
+func indentString(s string, indentation uint, indentFirstLine bool) string {
+ result := &strings.Builder{}
components := strings.Split(s, "\n")
- result := ""
indent := strings.Repeat(Indent, int(indentation))
for i, component := range components {
- result += indent + component
+ if i > 0 || indentFirstLine {
+ result.WriteString(indent)
+ }
+ result.WriteString(component)
if i < len(components)-1 {
- result += "\n"
+ result.WriteString("\n")
}
}
- return result
+ return result.String()
}
-func formatType(object interface{}) string {
- t := reflect.TypeOf(object)
- if t == nil {
+func formatType(v reflect.Value) string {
+ switch v.Kind() {
+ case reflect.Invalid:
return "nil"
- }
- switch t.Kind() {
case reflect.Chan:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+ return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap())
case reflect.Ptr:
- return fmt.Sprintf("%T | %p", object, object)
+ return fmt.Sprintf("%s | 0x%x", v.Type(), v.Pointer())
case reflect.Slice:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d, cap:%d", object, v.Len(), v.Cap())
+ return fmt.Sprintf("%s | len:%d, cap:%d", v.Type(), v.Len(), v.Cap())
case reflect.Map:
- v := reflect.ValueOf(object)
- return fmt.Sprintf("%T | len:%d", object, v.Len())
+ return fmt.Sprintf("%s | len:%d", v.Type(), v.Len())
default:
- return fmt.Sprintf("%T", object)
+ return v.Type().String()
}
}
@@ -226,14 +315,30 @@ func formatValue(value reflect.Value, indentation uint) string {
return "nil"
}
- if UseStringerRepresentation {
- if value.CanInterface() {
- obj := value.Interface()
+ if value.CanInterface() {
+ obj := value.Interface()
+
+ // if a CustomFormatter handles this values, we'll go with that
+ for _, customFormatter := range customFormatters {
+ formatted, handled := customFormatter.CustomFormatter(obj)
+ // do not truncate a user-provided CustomFormatter()
+ if handled {
+ return indentString(formatted, indentation+1, false)
+ }
+ }
+
+ // GomegaStringer will take precedence to other representations and disregards UseStringerRepresentation
+ if x, ok := obj.(GomegaStringer); ok {
+ // do not truncate a user-defined GomegaString() value
+ return indentString(x.GomegaString(), indentation+1, false)
+ }
+
+ if UseStringerRepresentation {
switch x := obj.(type) {
case fmt.GoStringer:
- return x.GoString()
+ return indentString(truncateLongStrings(x.GoString()), indentation+1, false)
case fmt.Stringer:
- return x.String()
+ return indentString(truncateLongStrings(x.String()), indentation+1, false)
}
}
}
@@ -264,26 +369,26 @@ func formatValue(value reflect.Value, indentation uint) string {
case reflect.Ptr:
return formatValue(value.Elem(), indentation)
case reflect.Slice:
- return formatSlice(value, indentation)
+ return truncateLongStrings(formatSlice(value, indentation))
case reflect.String:
- return formatString(value.String(), indentation)
+ return truncateLongStrings(formatString(value.String(), indentation))
case reflect.Array:
- return formatSlice(value, indentation)
+ return truncateLongStrings(formatSlice(value, indentation))
case reflect.Map:
- return formatMap(value, indentation)
+ return truncateLongStrings(formatMap(value, indentation))
case reflect.Struct:
if value.Type() == timeType && value.CanInterface() {
t, _ := value.Interface().(time.Time)
return t.Format(time.RFC3339Nano)
}
- return formatStruct(value, indentation)
+ return truncateLongStrings(formatStruct(value, indentation))
case reflect.Interface:
- return formatValue(value.Elem(), indentation)
+ return formatInterface(value, indentation)
default:
if value.CanInterface() {
- return fmt.Sprintf("%#v", value.Interface())
+ return truncateLongStrings(fmt.Sprintf("%#v", value.Interface()))
}
- return fmt.Sprintf("%#v", value)
+ return truncateLongStrings(fmt.Sprintf("%#v", value))
}
}
@@ -373,6 +478,10 @@ func formatStruct(v reflect.Value, indentation uint) string {
return fmt.Sprintf("{%s}", strings.Join(result, ", "))
}
+func formatInterface(v reflect.Value, indentation uint) string {
+ return fmt.Sprintf("<%s>%s", formatType(v.Elem()), formatValue(v.Elem(), indentation))
+}
+
func isNilValue(a reflect.Value) bool {
switch a.Kind() {
case reflect.Invalid:
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.mod b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.mod
index 1eb0dfa6820..8bad5680fba 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.mod
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.mod
@@ -1,17 +1,20 @@
module github.com/onsi/gomega
+go 1.20
+
require (
- github.com/fsnotify/fsnotify v1.4.7 // indirect
- github.com/golang/protobuf v1.2.0
- github.com/hpcloud/tail v1.0.0 // indirect
- github.com/onsi/ginkgo v1.6.0
- golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
- golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
- golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e // indirect
- golang.org/x/text v0.3.0 // indirect
- golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7
- gopkg.in/fsnotify.v1 v1.4.7 // indirect
- gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
- gopkg.in/yaml.v2 v2.2.4
+ github.com/google/go-cmp v0.6.0
+ github.com/onsi/ginkgo/v2 v2.17.2
+ golang.org/x/net v0.24.0
+ google.golang.org/protobuf v1.33.0
+ gopkg.in/yaml.v3 v3.0.1
)
+require (
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
+ github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
+ golang.org/x/sys v0.19.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ golang.org/x/tools v0.20.0 // indirect
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.sum b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.sum
index b872e8a0dff..7e1979f6493 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.sum
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/go.sum
@@ -1,26 +1,27 @@
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
+github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
+golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
+golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
+golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gomega_dsl.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gomega_dsl.go
index 4cb94d22f2a..9697d5134ff 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gomega_dsl.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gomega_dsl.go
@@ -14,103 +14,165 @@ Gomega is MIT-Licensed
package gomega
import (
+ "errors"
"fmt"
- "reflect"
"time"
- "github.com/onsi/gomega/internal/assertion"
- "github.com/onsi/gomega/internal/asyncassertion"
- "github.com/onsi/gomega/internal/testingtsupport"
+ "github.com/onsi/gomega/internal"
"github.com/onsi/gomega/types"
)
-const GOMEGA_VERSION = "1.8.1"
+const GOMEGA_VERSION = "1.33.1"
-const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil.
+const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
If you're using Ginkgo then you probably forgot to put your assertion in an It().
Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT().
Depending on your vendoring solution you may be inadvertently importing gomega and subpackages (e.g. ghhtp, gexec,...) from different locations.
`
-var globalFailWrapper *types.GomegaFailWrapper
+// Gomega describes the essential Gomega DSL. This interface allows libraries
+// to abstract between the standard package-level function implementations
+// and alternatives like *WithT.
+//
+// The types in the top-level DSL have gotten a bit messy due to earlier deprecations that avoid stuttering
+// and due to an accidental use of a concrete type (*WithT) in an earlier release.
+//
+// As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object
+// however one (the Ginkgo variant) is exported as an interface (types.Gomega) whereas the other (the withT variant)
+// is shared as a concrete type (*WithT, which is aliased to *internal.Gomega). 1.15 did not clean this mess up to ensure
+// that declarations of *WithT in existing code are not broken by the upgrade to 1.15.
+type Gomega = types.Gomega
+
+// DefaultGomega supplies the standard package-level implementation
+var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle()))
+
+// NewGomega returns an instance of Gomega wired into the passed-in fail handler.
+// You generally don't need to use this when using Ginkgo - RegisterFailHandler will wire up the global gomega
+// However creating a NewGomega with a custom fail handler can be useful in contexts where you want to use Gomega's
+// rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures
+// or for use in a non-test setting.
+func NewGomega(fail types.GomegaFailHandler) Gomega {
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithFailHandler(fail)
+}
-var defaultEventuallyTimeout = time.Second
-var defaultEventuallyPollingInterval = 10 * time.Millisecond
-var defaultConsistentlyDuration = 100 * time.Millisecond
-var defaultConsistentlyPollingInterval = 10 * time.Millisecond
+// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
+// Gomega's rich ecosystem of matchers in standard `testing` test suites.
+//
+// Use `NewWithT` to instantiate a `WithT`
+//
+// As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object
+// however one (the Ginkgo variant) is exported as an interface (types.Gomega) whereas the other (the withT variant)
+// is shared as a concrete type (*WithT, which is aliased to *internal.Gomega). 1.15 did not clean this mess up to ensure
+// that declarations of *WithT in existing code are not broken by the upgrade to 1.15.
+type WithT = internal.Gomega
-// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
-// the fail handler passed into RegisterFailHandler is called.
-func RegisterFailHandler(handler types.GomegaFailHandler) {
- RegisterFailHandlerWithT(testingtsupport.EmptyTWithHelper{}, handler)
-}
+// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
+type GomegaWithT = WithT
-// RegisterFailHandlerWithT ensures that the given types.TWithHelper and fail handler
-// are used globally.
-func RegisterFailHandlerWithT(t types.TWithHelper, handler types.GomegaFailHandler) {
- if handler == nil {
- globalFailWrapper = nil
- return
- }
+// inner is an interface that allows users to provide a wrapper around Default. The wrapper
+// must implement the inner interface and return either the original Default or the result of
+// a call to NewGomega().
+type inner interface {
+ Inner() Gomega
+}
- globalFailWrapper = &types.GomegaFailWrapper{
- Fail: handler,
- TWithHelper: t,
+func internalGomega(g Gomega) *internal.Gomega {
+ if v, ok := g.(inner); ok {
+ return v.Inner().(*internal.Gomega)
}
+ return g.(*internal.Gomega)
}
-// RegisterTestingT connects Gomega to Golang's XUnit style
-// Testing.T tests. It is now deprecated and you should use NewWithT() instead.
-//
-// Legacy Documentation:
-//
-// You'll need to call this at the top of each XUnit style test:
-//
-// func TestFarmHasCow(t *testing.T) {
-// RegisterTestingT(t)
-//
-// f := farm.New([]string{"Cow", "Horse"})
-// Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
-// }
-//
-// Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to
-// pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests
-// in parallel as the global fail handler cannot point to more than one testing.T at a time.
+// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
+// Gomega's rich ecosystem of matchers in standard `testing` test suits.
//
-// NewWithT() does not have this limitation
+// func TestFarmHasCow(t *testing.T) {
+// g := gomega.NewWithT(t)
//
-// (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*).
+// f := farm.New([]string{"Cow", "Horse"})
+// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
+// }
+func NewWithT(t types.GomegaTestingT) *WithT {
+ return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t)
+}
+
+// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
+var NewGomegaWithT = NewWithT
+
+// RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails
+// the fail handler passed into RegisterFailHandler is called.
+func RegisterFailHandler(fail types.GomegaFailHandler) {
+ internalGomega(Default).ConfigureWithFailHandler(fail)
+}
+
+// RegisterFailHandlerWithT is deprecated and will be removed in a future release.
+// users should use RegisterFailHandler, or RegisterTestingT
+func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) {
+ fmt.Println("RegisterFailHandlerWithT is deprecated. Please use RegisterFailHandler or RegisterTestingT instead.")
+ internalGomega(Default).ConfigureWithFailHandler(fail)
+}
+
+// RegisterTestingT connects Gomega to Golang's XUnit style
+// Testing.T tests. It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test.
func RegisterTestingT(t types.GomegaTestingT) {
- tWithHelper, hasHelper := t.(types.TWithHelper)
- if !hasHelper {
- RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail)
- return
- }
- RegisterFailHandlerWithT(tWithHelper, testingtsupport.BuildTestingTGomegaFailWrapper(t).Fail)
+ internalGomega(Default).ConfigureWithT(t)
}
// InterceptGomegaFailures runs a given callback and returns an array of
// failure messages generated by any Gomega assertions within the callback.
-//
-// This is accomplished by temporarily replacing the *global* fail handler
-// with a fail handler that simply annotates failures. The original fail handler
-// is reset when InterceptGomegaFailures returns.
+// Execution continues after the first failure allowing users to collect all failures
+// in the callback.
//
// This is most useful when testing custom matchers, but can also be used to check
// on a value using a Gomega assertion without causing a test failure.
func InterceptGomegaFailures(f func()) []string {
- originalHandler := globalFailWrapper.Fail
+ originalHandler := internalGomega(Default).Fail
failures := []string{}
- RegisterFailHandler(func(message string, callerSkip ...int) {
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
failures = append(failures, message)
- })
+ }
+ defer func() {
+ internalGomega(Default).Fail = originalHandler
+ }()
f()
- RegisterFailHandler(originalHandler)
return failures
}
+// InterceptGomegaFailure runs a given callback and returns the first
+// failure message generated by any Gomega assertions within the callback, wrapped in an error.
+//
+// The callback ceases execution as soon as the first failed assertion occurs, however Gomega
+// does not register a failure with the FailHandler registered via RegisterFailHandler - it is up
+// to the user to decide what to do with the returned error
+func InterceptGomegaFailure(f func()) (err error) {
+ originalHandler := internalGomega(Default).Fail
+ internalGomega(Default).Fail = func(message string, callerSkip ...int) {
+ err = errors.New(message)
+ panic("stop execution")
+ }
+
+ defer func() {
+ internalGomega(Default).Fail = originalHandler
+ if e := recover(); e != nil {
+ if err == nil {
+ panic(e)
+ }
+ }
+ }()
+
+ f()
+ return err
+}
+
+func ensureDefaultGomegaIsConfigured() {
+ if !internalGomega(Default).IsConfigured() {
+ panic(nilGomegaPanic)
+ }
+}
+
// Ω wraps an actual value allowing assertions to be made on it:
-// Ω("foo").Should(Equal("foo"))
+//
+// Ω("foo").Should(Equal("foo"))
//
// If Ω is passed more than one argument it will pass the *first* argument to the matcher.
// All subsequent arguments will be required to be nil/zero.
@@ -119,175 +181,314 @@ func InterceptGomegaFailures(f func()) []string {
// a value and an error - a common patter in Go.
//
// For example, given a function with signature:
-// func MyAmazingThing() (int, error)
+//
+// func MyAmazingThing() (int, error)
//
// Then:
-// Ω(MyAmazingThing()).Should(Equal(3))
+//
+// Ω(MyAmazingThing()).Should(Equal(3))
+//
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Ω and Expect are identical
func Ω(actual interface{}, extra ...interface{}) Assertion {
- return ExpectWithOffset(0, actual, extra...)
+ ensureDefaultGomegaIsConfigured()
+ return Default.Ω(actual, extra...)
}
// Expect wraps an actual value allowing assertions to be made on it:
-// Expect("foo").To(Equal("foo"))
+//
+// Expect("foo").To(Equal("foo"))
//
// If Expect is passed more than one argument it will pass the *first* argument to the matcher.
// All subsequent arguments will be required to be nil/zero.
//
// This is convenient if you want to make an assertion on a method/function that returns
-// a value and an error - a common patter in Go.
+// a value and an error - a common pattern in Go.
//
// For example, given a function with signature:
-// func MyAmazingThing() (int, error)
+//
+// func MyAmazingThing() (int, error)
//
// Then:
-// Expect(MyAmazingThing()).Should(Equal(3))
+//
+// Expect(MyAmazingThing()).Should(Equal(3))
+//
// Will succeed only if `MyAmazingThing()` returns `(3, nil)`
//
// Expect and Ω are identical
func Expect(actual interface{}, extra ...interface{}) Assertion {
- return ExpectWithOffset(0, actual, extra...)
+ ensureDefaultGomegaIsConfigured()
+ return Default.Expect(actual, extra...)
}
// ExpectWithOffset wraps an actual value allowing assertions to be made on it:
-// ExpectWithOffset(1, "foo").To(Equal("foo"))
+//
+// ExpectWithOffset(1, "foo").To(Equal("foo"))
//
// Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument
-// that is used to modify the call-stack offset when computing line numbers.
+// that is used to modify the call-stack offset when computing line numbers. It is
+// the same as `Expect(...).WithOffset`.
//
// This is most useful in helper functions that make assertions. If you want Gomega's
// error message to refer to the calling line in the test (as opposed to the line in the helper function)
// set the first argument of `ExpectWithOffset` appropriately.
func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion {
- if globalFailWrapper == nil {
- panic(nilFailHandlerPanic)
- }
- return assertion.New(actual, globalFailWrapper, offset, extra...)
+ ensureDefaultGomegaIsConfigured()
+ return Default.ExpectWithOffset(offset, actual, extra...)
}
-// Eventually wraps an actual value allowing assertions to be made on it.
-// The assertion is tried periodically until it passes or a timeout occurs.
-//
-// Both the timeout and polling interval are configurable as optional arguments:
-// The first optional argument is the timeout
-// The second optional argument is the polling interval
-//
-// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
-// last case they are interpreted as seconds.
-//
-// If Eventually is passed an actual that is a function taking no arguments and returning at least one value,
-// then Eventually will call the function periodically and try the matcher against the function's first return value.
-//
-// Example:
-//
-// Eventually(func() int {
-// return thingImPolling.Count()
-// }).Should(BeNumerically(">=", 17))
-//
-// Note that this example could be rewritten:
-//
-// Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17))
-//
-// If the function returns more than one value, then Eventually will pass the first value to the matcher and
-// assert that all other values are nil/zero.
-// This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
-//
-// For example, consider a method that returns a value and an error:
-// func FetchFromDB() (string, error)
-//
-// Then
-// Eventually(FetchFromDB).Should(Equal("hasselhoff"))
-//
-// Will pass only if the the returned error is nil and the returned string passes the matcher.
-//
-// Eventually's default timeout is 1 second, and its default polling interval is 10ms
-func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
- return EventuallyWithOffset(0, actual, intervals...)
+/*
+Eventually enables making assertions on asynchronous behavior.
+
+Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments.
+The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition an optional context.Context can be passed in - Eventually will keep trying until either the timeout expires or the context is cancelled, whichever comes first.
+
+Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value:
+
+**Category 1: Making Eventually assertions on values**
+
+There are several examples of values that can change over time. These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example:
+
+ c := make(chan bool)
+ go DoStuff(c)
+ Eventually(c, "50ms").Should(BeClosed())
+
+will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first.
+
+Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfully via:
+
+ Eventually(session).Should(gexec.Exit(0))
+
+And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen:
+
+ Eventually(buffer).Should(gbytes.Say("hello there"))
+
+In these examples, both `session` and `buffer` are designed to be thread-safe when polled by the `Exit` and `Say` matchers. This is not true in general of most raw values, so while it is tempting to do something like:
+
+ // THIS IS NOT THREAD-SAFE
+ var s *string
+ go mutateStringEventually(s)
+ Eventually(s).Should(Equal("I've changed"))
+
+this will trigger Go's race detector as the goroutine polling via Eventually will race over the value of s with the goroutine mutating the string. For cases like this you can use channels or introduce your own locking around s by passing Eventually a function.
+
+**Category 2: Make Eventually assertions on functions**
+
+Eventually can be passed functions that **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher.
+
+For example:
+
+ Eventually(func() int {
+ return client.FetchCount()
+ }).Should(BeNumerically(">=", 17))
+
+ will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17)))
+
+If multiple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go.
+
+For example, consider a method that returns a value and an error:
+
+ func FetchFromDB() (string, error)
+
+Then
+
+ Eventually(FetchFromDB).Should(Equal("got it"))
+
+will pass only if and when the returned error is nil *and* the returned string satisfies the matcher.
+
+Eventually can also accept functions that take arguments, however you must provide those arguments using .WithArguments(). For example, consider a function that takes a user-id and makes a network request to fetch a full name:
+
+ func FetchFullName(userId int) (string, error)
+
+You can poll this function like so:
+
+ Eventually(FetchFullName).WithArguments(1138).Should(Equal("Wookie"))
+
+It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. A common practice here is to use a context. Here's an example that combines Ginkgo's spec timeout support with Eventually:
+
+ It("fetches the correct count", func(ctx SpecContext) {
+ Eventually(ctx, func() int {
+ return client.FetchCount(ctx, "/users")
+ }).Should(BeNumerically(">=", 17))
+ }, SpecTimeout(time.Second))
+
+you can also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with passed-in arguments as long as the context appears first. You can rewrite the above example as:
+
+ It("fetches the correct count", func(ctx SpecContext) {
+ Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17))
+ }, SpecTimeout(time.Second))
+
+Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit.
+
+**Category 3: Making assertions _in_ the function passed into Eventually**
+
+When testing complex systems it can be valuable to assert that a _set_ of assertions passes Eventually. Eventually supports this by accepting functions that take a single Gomega argument and return zero or more values.
+
+Here's an example that makes some assertions and returns a value and error:
+
+ Eventually(func(g Gomega) (Widget, error) {
+ ids, err := client.FetchIDs()
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(ids).To(ContainElement(1138))
+ return client.FetchWidget(1138)
+ }).Should(Equal(expectedWidget))
+
+will pass only if all the assertions in the polled function pass and the return value satisfies the matcher.
+
+Eventually also supports a special case polling function that takes a single Gomega argument and returns no values. Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed.
+For example:
+
+ Eventually(func(g Gomega) {
+ model, err := client.Find(1138)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(model.Reticulate()).To(Succeed())
+ g.Expect(model.IsReticulated()).To(BeTrue())
+ g.Expect(model.Save()).To(Succeed())
+ }).Should(Succeed())
+
+will rerun the function until all assertions pass.
+
+You can also pass additional arguments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that is the second argument. For example:
+
+ Eventually(func(g Gomega, ctx context.Context, path string, expected ...string){
+ tok, err := client.GetToken(ctx)
+ g.Expect(err).NotTo(HaveOccurred())
+
+ elements, err := client.Fetch(ctx, tok, path)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(elements).To(ConsistOf(expected))
+ }).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed())
+
+You can ensure that you get a number of consecutive successful tries before succeeding using `MustPassRepeatedly(int)`. For Example:
+
+ count := 0
+ Eventually(func() bool {
+ count++
+ return count > 2
+ }).MustPassRepeatedly(2).Should(BeTrue())
+ // Because we had to wait for 2 calls that returned true
+ Expect(count).To(Equal(3))
+
+Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods:
+
+ Eventually(..., "10s", "2s", ctx).Should(...)
+
+is equivalent to
+
+ Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
+*/
+func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.Eventually(actualOrCtx, args...)
}
// EventuallyWithOffset operates like Eventually but takes an additional
// initial argument to indicate an offset in the call stack. This is useful when building helper
// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
- if globalFailWrapper == nil {
- panic(nilFailHandlerPanic)
- }
- timeoutInterval := defaultEventuallyTimeout
- pollingInterval := defaultEventuallyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
-}
-
-// Consistently wraps an actual value allowing assertions to be made on it.
-// The assertion is tried periodically and is required to pass for a period of time.
//
-// Both the total time and polling interval are configurable as optional arguments:
-// The first optional argument is the duration that Consistently will run for
-// The second optional argument is the polling interval
+// `EventuallyWithOffset` is the same as `Eventually(...).WithOffset`.
//
-// Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the
-// last case they are interpreted as seconds.
-//
-// If Consistently is passed an actual that is a function taking no arguments and returning at least one value,
-// then Consistently will call the function periodically and try the matcher against the function's first return value.
-//
-// If the function returns more than one value, then Consistently will pass the first value to the matcher and
-// assert that all other values are nil/zero.
-// This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go.
-//
-// Consistently is useful in cases where you want to assert that something *does not happen* over a period of time.
-// For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could:
-//
-// Consistently(channel).ShouldNot(Receive())
-//
-// Consistently's default duration is 100ms, and its default polling interval is 10ms
-func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
- return ConsistentlyWithOffset(0, actual, intervals...)
+// `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are
+// the same as `Eventually(...).WithOffset(...).WithTimeout` or
+// `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`.
+func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.EventuallyWithOffset(offset, actualOrCtx, args...)
}
-// ConsistentlyWithOffset operates like Consistnetly but takes an additional
+/*
+Consistently, like Eventually, enables making assertions on asynchronous behavior.
+
+Consistently blocks when called for a specified duration. During that duration Consistently repeatedly polls its matcher and ensures that it is satisfied. If the matcher is consistently satisfied, then Consistently will pass. Otherwise Consistently will fail.
+
+Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional argument is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. You can also pass in an optional context.Context - Consistently will exit early (with a failure) if the context is cancelled before the waiting duration expires.
+
+Consistently accepts the same three categories of actual as Eventually, check the Eventually docs to learn more.
+
+Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write:
+
+ Consistently(channel, "200ms").ShouldNot(Receive())
+
+This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received.
+*/
+func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.Consistently(actualOrCtx, args...)
+}
+
+// ConsistentlyWithOffset operates like Consistently but takes an additional
// initial argument to indicate an offset in the call stack. This is useful when building helper
// functions that contain matchers. To learn more, read about `ExpectWithOffset`.
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
- if globalFailWrapper == nil {
- panic(nilFailHandlerPanic)
- }
- timeoutInterval := defaultConsistentlyDuration
- pollingInterval := defaultConsistentlyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailWrapper, timeoutInterval, pollingInterval, offset)
+//
+// `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and
+// optional `WithTimeout` and `WithPolling`.
+func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion {
+ ensureDefaultGomegaIsConfigured()
+ return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...)
}
+/*
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+
+You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
+
+You can also wrap StopTrying around an error with `StopTrying("message").Wrap(err)` and can attach additional objects via `StopTrying("message").Attach("description", object)`. When rendered, the signal will include the wrapped error and any attached objects rendered using Gomega's default formatting.
+
+Here are a couple of examples. This is how you might use StopTrying() as an error to signal that Eventually should stop:
+
+ playerIndex, numPlayers := 0, 11
+ Eventually(func() (string, error) {
+ if playerIndex == numPlayers {
+ return "", StopTrying("no more players left")
+ }
+ name := client.FetchPlayer(playerIndex)
+ playerIndex += 1
+ return name, nil
+ }).Should(Equal("Patrick Mahomes"))
+
+And here's an example where `StopTrying().Now()` is called to halt execution immediately:
+
+ Eventually(func() []string {
+ names, err := client.FetchAllPlayers()
+ if err == client.IRRECOVERABLE_ERROR {
+ StopTrying("Irrecoverable error occurred").Wrap(err).Now()
+ }
+ return names
+ }).Should(ContainElement("Patrick Mahomes"))
+*/
+var StopTrying = internal.StopTrying
+
+/*
+TryAgainAfter() allows you to adjust the polling interval for the _next_ iteration of `Eventually` or `Consistently`. Like `StopTrying` you can either return `TryAgainAfter` as an error or trigger it immediately with `.Now()`
+
+When `TryAgainAfter` is triggered `Eventually` and `Consistently` will wait for that duration. If a timeout occurs before the next poll is triggered both `Eventually` and `Consistently` will always fail with the content of the TryAgainAfter message. As with StopTrying you can `.Wrap()` an error and `.Attach()` additional objects to `TryAgainAfter`.
+*/
+var TryAgainAfter = internal.TryAgainAfter
+
+/*
+PollingSignalError is the error returned by StopTrying() and TryAgainAfter()
+*/
+type PollingSignalError = internal.PollingSignalError
+
// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
func SetDefaultEventuallyTimeout(t time.Duration) {
- defaultEventuallyTimeout = t
+ Default.SetDefaultEventuallyTimeout(t)
}
// SetDefaultEventuallyPollingInterval sets the default polling interval for Eventually.
func SetDefaultEventuallyPollingInterval(t time.Duration) {
- defaultEventuallyPollingInterval = t
+ Default.SetDefaultEventuallyPollingInterval(t)
}
// SetDefaultConsistentlyDuration sets the default duration for Consistently. Consistently will verify that your condition is satisfied for this long.
func SetDefaultConsistentlyDuration(t time.Duration) {
- defaultConsistentlyDuration = t
+ Default.SetDefaultConsistentlyDuration(t)
}
// SetDefaultConsistentlyPollingInterval sets the default polling interval for Consistently.
func SetDefaultConsistentlyPollingInterval(t time.Duration) {
- defaultConsistentlyPollingInterval = t
+ Default.SetDefaultConsistentlyPollingInterval(t)
}
// AsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against
@@ -303,15 +504,12 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
//
// Example:
//
-// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
-// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
-type AsyncAssertion interface {
- Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
-}
+// Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+// Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
+type AsyncAssertion = types.AsyncAssertion
// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter.
-type GomegaAsyncAssertion = AsyncAssertion
+type GomegaAsyncAssertion = types.AsyncAssertion
// Assertion is returned by Ω and Expect and compares the actual value to the matcher
// passed to the Should/ShouldNot and To/ToNot/NotTo methods.
@@ -329,106 +527,11 @@ type GomegaAsyncAssertion = AsyncAssertion
//
// Example:
//
-// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
-type Assertion interface {
- Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
-
- To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
- NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool
-}
+// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm)
+type Assertion = types.Assertion
// GomegaAssertion is deprecated in favor of Assertion, which does not stutter.
-type GomegaAssertion = Assertion
+type GomegaAssertion = types.Assertion
// OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it
-type OmegaMatcher types.GomegaMatcher
-
-// WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage
-// Gomega's rich ecosystem of matchers in standard `testing` test suites.
-//
-// Use `NewWithT` to instantiate a `WithT`
-type WithT struct {
- t types.GomegaTestingT
-}
-
-// GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter.
-type GomegaWithT = WithT
-
-// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with
-// Gomega's rich ecosystem of matchers in standard `testing` test suits.
-//
-// func TestFarmHasCow(t *testing.T) {
-// g := gomega.NewWithT(t)
-//
-// f := farm.New([]string{"Cow", "Horse"})
-// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow")
-// }
-func NewWithT(t types.GomegaTestingT) *WithT {
- return &WithT{
- t: t,
- }
-}
-
-// NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter.
-func NewGomegaWithT(t types.GomegaTestingT) *GomegaWithT {
- return NewWithT(t)
-}
-
-// Expect is used to make assertions. See documentation for Expect.
-func (g *WithT) Expect(actual interface{}, extra ...interface{}) Assertion {
- return assertion.New(actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), 0, extra...)
-}
-
-// Eventually is used to make asynchronous assertions. See documentation for Eventually.
-func (g *WithT) Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion {
- timeoutInterval := defaultEventuallyTimeout
- pollingInterval := defaultEventuallyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
-}
-
-// Consistently is used to make asynchronous assertions. See documentation for Consistently.
-func (g *WithT) Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion {
- timeoutInterval := defaultConsistentlyDuration
- pollingInterval := defaultConsistentlyPollingInterval
- if len(intervals) > 0 {
- timeoutInterval = toDuration(intervals[0])
- }
- if len(intervals) > 1 {
- pollingInterval = toDuration(intervals[1])
- }
- return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, testingtsupport.BuildTestingTGomegaFailWrapper(g.t), timeoutInterval, pollingInterval, 0)
-}
-
-func toDuration(input interface{}) time.Duration {
- duration, ok := input.(time.Duration)
- if ok {
- return duration
- }
-
- value := reflect.ValueOf(input)
- kind := reflect.TypeOf(input).Kind()
-
- if reflect.Int <= kind && kind <= reflect.Int64 {
- return time.Duration(value.Int()) * time.Second
- } else if reflect.Uint <= kind && kind <= reflect.Uint64 {
- return time.Duration(value.Uint()) * time.Second
- } else if reflect.Float32 <= kind && kind <= reflect.Float64 {
- return time.Duration(value.Float() * float64(time.Second))
- } else if reflect.String == kind {
- duration, err := time.ParseDuration(value.String())
- if err != nil {
- panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input))
- }
- return duration
- }
-
- panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input))
-}
+type OmegaMatcher = types.GomegaMatcher
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/elements.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/elements.go
new file mode 100644
index 00000000000..b5e5ef2e459
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/elements.go
@@ -0,0 +1,231 @@
+// untested sections: 6
+
+package gstruct
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime/debug"
+ "strconv"
+
+ "github.com/onsi/gomega/format"
+ errorsutil "github.com/onsi/gomega/gstruct/errors"
+ "github.com/onsi/gomega/types"
+)
+
+//MatchAllElements succeeds if every element of a slice matches the element matcher it maps to
+//through the id function, and every element matcher is matched.
+// idFn := func(element interface{}) string {
+// return fmt.Sprintf("%v", element)
+// }
+//
+// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{
+// "a": Equal("a"),
+// "b": Equal("b"),
+// }))
+func MatchAllElements(identifier Identifier, elements Elements) types.GomegaMatcher {
+ return &ElementsMatcher{
+ Identifier: identifier,
+ Elements: elements,
+ }
+}
+
+//MatchAllElementsWithIndex succeeds if every element of a slice matches the element matcher it maps to
+//through the id with index function, and every element matcher is matched.
+// idFn := func(index int, element interface{}) string {
+// return strconv.Itoa(index)
+// }
+//
+// Expect([]string{"a", "b"}).To(MatchAllElements(idFn, Elements{
+// "0": Equal("a"),
+// "1": Equal("b"),
+// }))
+func MatchAllElementsWithIndex(identifier IdentifierWithIndex, elements Elements) types.GomegaMatcher {
+ return &ElementsMatcher{
+ Identifier: identifier,
+ Elements: elements,
+ }
+}
+
+//MatchElements succeeds if each element of a slice matches the element matcher it maps to
+//through the id function. It can ignore extra elements and/or missing elements.
+// idFn := func(element interface{}) string {
+// return fmt.Sprintf("%v", element)
+// }
+//
+// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{
+// "a": Equal("a"),
+// "b": Equal("b"),
+// }))
+// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{
+// "a": Equal("a"),
+// "b": Equal("b"),
+// "c": Equal("c"),
+// "d": Equal("d"),
+// }))
+func MatchElements(identifier Identifier, options Options, elements Elements) types.GomegaMatcher {
+ return &ElementsMatcher{
+ Identifier: identifier,
+ Elements: elements,
+ IgnoreExtras: options&IgnoreExtras != 0,
+ IgnoreMissing: options&IgnoreMissing != 0,
+ AllowDuplicates: options&AllowDuplicates != 0,
+ }
+}
+
+//MatchElementsWithIndex succeeds if each element of a slice matches the element matcher it maps to
+//through the id with index function. It can ignore extra elements and/or missing elements.
+// idFn := func(index int, element interface{}) string {
+// return strconv.Itoa(index)
+// }
+//
+// Expect([]string{"a", "b", "c"}).To(MatchElements(idFn, IgnoreExtras, Elements{
+// "0": Equal("a"),
+// "1": Equal("b"),
+// }))
+// Expect([]string{"a", "c"}).To(MatchElements(idFn, IgnoreMissing, Elements{
+// "0": Equal("a"),
+// "1": Equal("b"),
+// "2": Equal("c"),
+// "3": Equal("d"),
+// }))
+func MatchElementsWithIndex(identifier IdentifierWithIndex, options Options, elements Elements) types.GomegaMatcher {
+ return &ElementsMatcher{
+ Identifier: identifier,
+ Elements: elements,
+ IgnoreExtras: options&IgnoreExtras != 0,
+ IgnoreMissing: options&IgnoreMissing != 0,
+ AllowDuplicates: options&AllowDuplicates != 0,
+ }
+}
+
+// ElementsMatcher is a NestingMatcher that applies custom matchers to each element of a slice mapped
+// by the Identifier function.
+// TODO: Extend this to work with arrays & maps (map the key) as well.
+type ElementsMatcher struct {
+ // Matchers for each element.
+ Elements Elements
+ // Function mapping an element to the string key identifying its matcher.
+ Identifier Identify
+
+ // Whether to ignore extra elements or consider it an error.
+ IgnoreExtras bool
+ // Whether to ignore missing elements or consider it an error.
+ IgnoreMissing bool
+ // Whether to key duplicates when matching IDs.
+ AllowDuplicates bool
+
+ // State.
+ failures []error
+}
+
+// Element ID to matcher.
+type Elements map[string]types.GomegaMatcher
+
+// Function for identifying (mapping) elements.
+type Identifier func(element interface{}) string
+
+// Calls the underlying function with the provided params.
+// Identifier drops the index.
+func (i Identifier) WithIndexAndElement(index int, element interface{}) string {
+ return i(element)
+}
+
+// Uses the index and element to generate an element name
+type IdentifierWithIndex func(index int, element interface{}) string
+
+// Calls the underlying function with the provided params.
+// IdentifierWithIndex uses the index.
+func (i IdentifierWithIndex) WithIndexAndElement(index int, element interface{}) string {
+ return i(index, element)
+}
+
+// Interface for identifying the element
+type Identify interface {
+ WithIndexAndElement(i int, element interface{}) string
+}
+
+// IndexIdentity is a helper function for using an index as
+// the key in the element map
+func IndexIdentity(index int, _ interface{}) string {
+ return strconv.Itoa(index)
+}
+
+func (m *ElementsMatcher) Match(actual interface{}) (success bool, err error) {
+ if reflect.TypeOf(actual).Kind() != reflect.Slice {
+ return false, fmt.Errorf("%v is type %T, expected slice", actual, actual)
+ }
+
+ m.failures = m.matchElements(actual)
+ if len(m.failures) > 0 {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (m *ElementsMatcher) matchElements(actual interface{}) (errs []error) {
+ // Provide more useful error messages in the case of a panic.
+ defer func() {
+ if err := recover(); err != nil {
+ errs = append(errs, fmt.Errorf("panic checking %+v: %v\n%s", actual, err, debug.Stack()))
+ }
+ }()
+
+ val := reflect.ValueOf(actual)
+ elements := map[string]bool{}
+ for i := 0; i < val.Len(); i++ {
+ element := val.Index(i).Interface()
+ id := m.Identifier.WithIndexAndElement(i, element)
+ if elements[id] {
+ if !m.AllowDuplicates {
+ errs = append(errs, fmt.Errorf("found duplicate element ID %s", id))
+ continue
+ }
+ }
+ elements[id] = true
+
+ matcher, expected := m.Elements[id]
+ if !expected {
+ if !m.IgnoreExtras {
+ errs = append(errs, fmt.Errorf("unexpected element %s", id))
+ }
+ continue
+ }
+
+ match, err := matcher.Match(element)
+ if match {
+ continue
+ }
+
+ if err == nil {
+ if nesting, ok := matcher.(errorsutil.NestingMatcher); ok {
+ err = errorsutil.AggregateError(nesting.Failures())
+ } else {
+ err = errors.New(matcher.FailureMessage(element))
+ }
+ }
+ errs = append(errs, errorsutil.Nest(fmt.Sprintf("[%s]", id), err))
+ }
+
+ for id := range m.Elements {
+ if !elements[id] && !m.IgnoreMissing {
+ errs = append(errs, fmt.Errorf("missing expected element %s", id))
+ }
+ }
+
+ return errs
+}
+
+func (m *ElementsMatcher) FailureMessage(actual interface{}) (message string) {
+ failure := errorsutil.AggregateError(m.failures)
+ return format.Message(actual, fmt.Sprintf("to match elements: %v", failure))
+}
+
+func (m *ElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to match elements")
+}
+
+func (m *ElementsMatcher) Failures() []error {
+ return m.failures
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go
new file mode 100644
index 00000000000..188492b212f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go
@@ -0,0 +1,72 @@
+package errors
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/onsi/gomega/types"
+)
+
+// A stateful matcher that nests other matchers within it and preserves the error types of the
+// nested matcher failures.
+type NestingMatcher interface {
+ types.GomegaMatcher
+
+ // Returns the failures of nested matchers.
+ Failures() []error
+}
+
+// An error type for labeling errors on deeply nested matchers.
+type NestedError struct {
+ Path string
+ Err error
+}
+
+func (e *NestedError) Error() string {
+ // Indent Errors.
+ indented := strings.Replace(e.Err.Error(), "\n", "\n\t", -1)
+ return fmt.Sprintf("%s:\n\t%v", e.Path, indented)
+}
+
+// Create a NestedError with the given path.
+// If err is a NestedError, prepend the path to it.
+// If err is an AggregateError, recursively Nest each error.
+func Nest(path string, err error) error {
+ if ag, ok := err.(AggregateError); ok {
+ var errs AggregateError
+ for _, e := range ag {
+ errs = append(errs, Nest(path, e))
+ }
+ return errs
+ }
+ if ne, ok := err.(*NestedError); ok {
+ return &NestedError{
+ Path: path + ne.Path,
+ Err: ne.Err,
+ }
+ }
+ return &NestedError{
+ Path: path,
+ Err: err,
+ }
+}
+
+// An error type for treating multiple errors as a single error.
+type AggregateError []error
+
+// Error is part of the error interface.
+func (err AggregateError) Error() string {
+ if len(err) == 0 {
+ // This should never happen, really.
+ return ""
+ }
+ if len(err) == 1 {
+ return err[0].Error()
+ }
+ result := fmt.Sprintf("[%s", err[0].Error())
+ for i := 1; i < len(err); i++ {
+ result += fmt.Sprintf(", %s", err[i].Error())
+ }
+ result += "]"
+ return result
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/fields.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/fields.go
new file mode 100644
index 00000000000..faf07b1a25d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/fields.go
@@ -0,0 +1,165 @@
+// untested sections: 6
+
+package gstruct
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime/debug"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+ errorsutil "github.com/onsi/gomega/gstruct/errors"
+ "github.com/onsi/gomega/types"
+)
+
+//MatchAllFields succeeds if every field of a struct matches the field matcher associated with
+//it, and every element matcher is matched.
+// actual := struct{
+// A int
+// B []bool
+// C string
+// }{
+// A: 5,
+// B: []bool{true, false},
+// C: "foo",
+// }
+//
+// Expect(actual).To(MatchAllFields(Fields{
+// "A": Equal(5),
+// "B": ConsistOf(true, false),
+// "C": Equal("foo"),
+// }))
+func MatchAllFields(fields Fields) types.GomegaMatcher {
+ return &FieldsMatcher{
+ Fields: fields,
+ }
+}
+
+//MatchFields succeeds if each element of a struct matches the field matcher associated with
+//it. It can ignore extra fields and/or missing fields.
+// actual := struct{
+// A int
+// B []bool
+// C string
+// }{
+// A: 5,
+// B: []bool{true, false},
+// C: "foo",
+// }
+//
+// Expect(actual).To(MatchFields(IgnoreExtras, Fields{
+// "A": Equal(5),
+// "B": ConsistOf(true, false),
+// }))
+// Expect(actual).To(MatchFields(IgnoreMissing, Fields{
+// "A": Equal(5),
+// "B": ConsistOf(true, false),
+// "C": Equal("foo"),
+// "D": Equal("extra"),
+// }))
+func MatchFields(options Options, fields Fields) types.GomegaMatcher {
+ return &FieldsMatcher{
+ Fields: fields,
+ IgnoreExtras: options&IgnoreExtras != 0,
+ IgnoreMissing: options&IgnoreMissing != 0,
+ }
+}
+
+type FieldsMatcher struct {
+ // Matchers for each field.
+ Fields Fields
+
+ // Whether to ignore extra elements or consider it an error.
+ IgnoreExtras bool
+ // Whether to ignore missing elements or consider it an error.
+ IgnoreMissing bool
+
+ // State.
+ failures []error
+}
+
+// Field name to matcher.
+type Fields map[string]types.GomegaMatcher
+
+func (m *FieldsMatcher) Match(actual interface{}) (success bool, err error) {
+ if reflect.TypeOf(actual).Kind() != reflect.Struct {
+ return false, fmt.Errorf("%v is type %T, expected struct", actual, actual)
+ }
+
+ m.failures = m.matchFields(actual)
+ if len(m.failures) > 0 {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (m *FieldsMatcher) matchFields(actual interface{}) (errs []error) {
+ val := reflect.ValueOf(actual)
+ typ := val.Type()
+ fields := map[string]bool{}
+ for i := 0; i < val.NumField(); i++ {
+ fieldName := typ.Field(i).Name
+ fields[fieldName] = true
+
+ err := func() (err error) {
+ // This test relies heavily on reflect, which tends to panic.
+ // Recover here to provide more useful error messages in that case.
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack())
+ }
+ }()
+
+ matcher, expected := m.Fields[fieldName]
+ if !expected {
+ if !m.IgnoreExtras {
+ return fmt.Errorf("unexpected field %s: %+v", fieldName, actual)
+ }
+ return nil
+ }
+
+ field := val.Field(i).Interface()
+
+ match, err := matcher.Match(field)
+ if err != nil {
+ return err
+ } else if !match {
+ if nesting, ok := matcher.(errorsutil.NestingMatcher); ok {
+ return errorsutil.AggregateError(nesting.Failures())
+ }
+ return errors.New(matcher.FailureMessage(field))
+ }
+ return nil
+ }()
+ if err != nil {
+ errs = append(errs, errorsutil.Nest("."+fieldName, err))
+ }
+ }
+
+ for field := range m.Fields {
+ if !fields[field] && !m.IgnoreMissing {
+ errs = append(errs, fmt.Errorf("missing expected field %s", field))
+ }
+ }
+
+ return errs
+}
+
+func (m *FieldsMatcher) FailureMessage(actual interface{}) (message string) {
+ failures := make([]string, len(m.failures))
+ for i := range m.failures {
+ failures[i] = m.failures[i].Error()
+ }
+ return format.Message(reflect.TypeOf(actual).Name(),
+ fmt.Sprintf("to match fields: {\n%v\n}\n", strings.Join(failures, "\n")))
+}
+
+func (m *FieldsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to match fields")
+}
+
+func (m *FieldsMatcher) Failures() []error {
+ return m.failures
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/ignore.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/ignore.go
new file mode 100644
index 00000000000..4396573e440
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/ignore.go
@@ -0,0 +1,39 @@
+// untested sections: 2
+
+package gstruct
+
+import (
+ "github.com/onsi/gomega/types"
+)
+
+//Ignore ignores the actual value and always succeeds.
+// Expect(nil).To(Ignore())
+// Expect(true).To(Ignore())
+func Ignore() types.GomegaMatcher {
+ return &IgnoreMatcher{true}
+}
+
+//Reject ignores the actual value and always fails. It can be used in conjunction with IgnoreMissing
+//to catch problematic elements, or to verify tests are running.
+// Expect(nil).NotTo(Reject())
+// Expect(true).NotTo(Reject())
+func Reject() types.GomegaMatcher {
+ return &IgnoreMatcher{false}
+}
+
+// A matcher that either always succeeds or always fails.
+type IgnoreMatcher struct {
+ Succeed bool
+}
+
+func (m *IgnoreMatcher) Match(actual interface{}) (bool, error) {
+ return m.Succeed, nil
+}
+
+func (m *IgnoreMatcher) FailureMessage(_ interface{}) (message string) {
+ return "Unconditional failure"
+}
+
+func (m *IgnoreMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+ return "Unconditional success"
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/keys.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/keys.go
new file mode 100644
index 00000000000..56aed4bab72
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/keys.go
@@ -0,0 +1,126 @@
+// untested sections: 6
+
+package gstruct
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime/debug"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+ errorsutil "github.com/onsi/gomega/gstruct/errors"
+ "github.com/onsi/gomega/types"
+)
+
+func MatchAllKeys(keys Keys) types.GomegaMatcher {
+ return &KeysMatcher{
+ Keys: keys,
+ }
+}
+
+func MatchKeys(options Options, keys Keys) types.GomegaMatcher {
+ return &KeysMatcher{
+ Keys: keys,
+ IgnoreExtras: options&IgnoreExtras != 0,
+ IgnoreMissing: options&IgnoreMissing != 0,
+ }
+}
+
+type KeysMatcher struct {
+ // Matchers for each key.
+ Keys Keys
+
+ // Whether to ignore extra keys or consider it an error.
+ IgnoreExtras bool
+ // Whether to ignore missing keys or consider it an error.
+ IgnoreMissing bool
+
+ // State.
+ failures []error
+}
+
+type Keys map[interface{}]types.GomegaMatcher
+
+func (m *KeysMatcher) Match(actual interface{}) (success bool, err error) {
+ if reflect.TypeOf(actual).Kind() != reflect.Map {
+ return false, fmt.Errorf("%v is type %T, expected map", actual, actual)
+ }
+
+ m.failures = m.matchKeys(actual)
+ if len(m.failures) > 0 {
+ return false, nil
+ }
+ return true, nil
+}
+
+func (m *KeysMatcher) matchKeys(actual interface{}) (errs []error) {
+ actualValue := reflect.ValueOf(actual)
+ keys := map[interface{}]bool{}
+ for _, keyValue := range actualValue.MapKeys() {
+ key := keyValue.Interface()
+ keys[key] = true
+
+ err := func() (err error) {
+ // This test relies heavily on reflect, which tends to panic.
+ // Recover here to provide more useful error messages in that case.
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("panic checking %+v: %v\n%s", actual, r, debug.Stack())
+ }
+ }()
+
+ matcher, ok := m.Keys[key]
+ if !ok {
+ if !m.IgnoreExtras {
+ return fmt.Errorf("unexpected key %s: %+v", key, actual)
+ }
+ return nil
+ }
+
+ valInterface := actualValue.MapIndex(keyValue).Interface()
+
+ match, err := matcher.Match(valInterface)
+ if err != nil {
+ return err
+ }
+
+ if !match {
+ if nesting, ok := matcher.(errorsutil.NestingMatcher); ok {
+ return errorsutil.AggregateError(nesting.Failures())
+ }
+ return errors.New(matcher.FailureMessage(valInterface))
+ }
+ return nil
+ }()
+ if err != nil {
+ errs = append(errs, errorsutil.Nest(fmt.Sprintf(".%#v", key), err))
+ }
+ }
+
+ for key := range m.Keys {
+ if !keys[key] && !m.IgnoreMissing {
+ errs = append(errs, fmt.Errorf("missing expected key %s", key))
+ }
+ }
+
+ return errs
+}
+
+func (m *KeysMatcher) FailureMessage(actual interface{}) (message string) {
+ failures := make([]string, len(m.failures))
+ for i := range m.failures {
+ failures[i] = m.failures[i].Error()
+ }
+ return format.Message(reflect.TypeOf(actual).Name(),
+ fmt.Sprintf("to match keys: {\n%v\n}\n", strings.Join(failures, "\n")))
+}
+
+func (m *KeysMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to match keys")
+}
+
+func (m *KeysMatcher) Failures() []error {
+ return m.failures
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/pointer.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/pointer.go
new file mode 100644
index 00000000000..cc828a3251d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/pointer.go
@@ -0,0 +1,58 @@
+// untested sections: 3
+
+package gstruct
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+//PointTo applies the given matcher to the value pointed to by actual. It fails if the pointer is
+//nil.
+// actual := 5
+// Expect(&actual).To(PointTo(Equal(5)))
+func PointTo(matcher types.GomegaMatcher) types.GomegaMatcher {
+ return &PointerMatcher{
+ Matcher: matcher,
+ }
+}
+
+type PointerMatcher struct {
+ Matcher types.GomegaMatcher
+
+ // Failure message.
+ failure string
+}
+
+func (m *PointerMatcher) Match(actual interface{}) (bool, error) {
+ val := reflect.ValueOf(actual)
+
+ // return error if actual type is not a pointer
+ if val.Kind() != reflect.Ptr {
+ return false, fmt.Errorf("PointerMatcher expects a pointer but we have '%s'", val.Kind())
+ }
+
+ if !val.IsValid() || val.IsNil() {
+ m.failure = format.Message(actual, "not to be ")
+ return false, nil
+ }
+
+ // Forward the value.
+ elem := val.Elem().Interface()
+ match, err := m.Matcher.Match(elem)
+ if !match {
+ m.failure = m.Matcher.FailureMessage(elem)
+ }
+ return match, err
+}
+
+func (m *PointerMatcher) FailureMessage(_ interface{}) (message string) {
+ return m.failure
+}
+
+func (m *PointerMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return m.Matcher.NegatedFailureMessage(actual)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/types.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/types.go
new file mode 100644
index 00000000000..48cbbe8f667
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/gstruct/types.go
@@ -0,0 +1,15 @@
+package gstruct
+
+//Options is the type for options passed to some matchers.
+type Options int
+
+const (
+ //IgnoreExtras tells the matcher to ignore extra elements or fields, rather than triggering a failure.
+ IgnoreExtras Options = 1 << iota
+ //IgnoreMissing tells the matcher to ignore missing elements or fields, rather than triggering a failure.
+ IgnoreMissing
+ //AllowDuplicates tells the matcher to permit multiple members of the slice to produce the same ID when
+ //considered by the indentifier function. All members that map to a given key must still match successfully
+ //with the matcher that is provided for that key.
+ AllowDuplicates
+)
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/assertion.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/assertion.go
new file mode 100644
index 00000000000..08356a610bb
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/assertion.go
@@ -0,0 +1,161 @@
+package internal
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type Assertion struct {
+ actuals []interface{} // actual value plus all extra values
+ actualIndex int // value to pass to the matcher
+ vet vetinari // the vet to call before calling Gomega matcher
+ offset int
+ g *Gomega
+}
+
+// ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right.
+type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool
+
+func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion {
+ return &Assertion{
+ actuals: append([]interface{}{actualInput}, extra...),
+ actualIndex: 0,
+ vet: (*Assertion).vetActuals,
+ offset: offset,
+ g: g,
+ }
+}
+
+func (assertion *Assertion) WithOffset(offset int) types.Assertion {
+ assertion.offset = offset
+ return assertion
+}
+
+func (assertion *Assertion) Error() types.Assertion {
+ return &Assertion{
+ actuals: assertion.actuals,
+ actualIndex: len(assertion.actuals) - 1,
+ vet: (*Assertion).vetError,
+ offset: assertion.offset,
+ g: assertion.g,
+ }
+}
+
+func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Assertion", optionalDescription...)
+ return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
+ switch len(optionalDescription) {
+ case 0:
+ return ""
+ case 1:
+ if describe, ok := optionalDescription[0].(func() string); ok {
+ return describe() + "\n"
+ }
+ }
+ return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+}
+
+func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+ actualInput := assertion.actuals[assertion.actualIndex]
+ matches, err := matcher.Match(actualInput)
+ assertion.g.THelper()
+ if err != nil {
+ description := assertion.buildDescription(optionalDescription...)
+ assertion.g.Fail(description+err.Error(), 2+assertion.offset)
+ return false
+ }
+ if matches != desiredMatch {
+ var message string
+ if desiredMatch {
+ message = matcher.FailureMessage(actualInput)
+ } else {
+ message = matcher.NegatedFailureMessage(actualInput)
+ }
+ description := assertion.buildDescription(optionalDescription...)
+ assertion.g.Fail(description+message, 2+assertion.offset)
+ return false
+ }
+
+ return true
+}
+
+// vetActuals vets the actual values, with the (optional) exception of a
+// specific value, such as the first value in case non-error assertions, or the
+// last value in case of Error()-based assertions.
+func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool {
+ success, message := vetActuals(assertion.actuals, assertion.actualIndex)
+ if success {
+ return true
+ }
+
+ description := assertion.buildDescription(optionalDescription...)
+ assertion.g.THelper()
+ assertion.g.Fail(description+message, 2+assertion.offset)
+ return false
+}
+
+// vetError vets the actual values, except for the final error value, in case
+// the final error value is non-zero. Otherwise, it doesn't vet the actual
+// values, as these are allowed to take on any values unless there is a non-zero
+// error value.
+func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool {
+ if err := assertion.actuals[assertion.actualIndex]; err != nil {
+ // Go error result idiom: all other actual values must be zero values.
+ return assertion.vetActuals(optionalDescription...)
+ }
+ return true
+}
+
+// vetActuals vets a slice of actual values, optionally skipping a particular
+// value slice element, such as the first or last value slice element.
+func vetActuals(actuals []interface{}, skipIndex int) (bool, string) {
+ for i, actual := range actuals {
+ if i == skipIndex {
+ continue
+ }
+ if actual != nil {
+ zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface()
+ if !reflect.DeepEqual(zeroValue, actual) {
+ var message string
+ if err, ok := actual.(error); ok {
+ message = fmt.Sprintf("Unexpected error: %s\n%s", err, format.Object(err, 1))
+ } else {
+ message = fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual)
+ }
+ return false, message
+ }
+ }
+ }
+ return true, ""
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/assertion/assertion.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
deleted file mode 100644
index a248298f474..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/assertion/assertion.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package assertion
-
-import (
- "fmt"
- "reflect"
-
- "github.com/onsi/gomega/types"
-)
-
-type Assertion struct {
- actualInput interface{}
- failWrapper *types.GomegaFailWrapper
- offset int
- extra []interface{}
-}
-
-func New(actualInput interface{}, failWrapper *types.GomegaFailWrapper, offset int, extra ...interface{}) *Assertion {
- return &Assertion{
- actualInput: actualInput,
- failWrapper: failWrapper,
- offset: offset,
- extra: extra,
- }
-}
-
-func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
-}
-
-func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, true, optionalDescription...)
-}
-
-func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.vetExtras(optionalDescription...) && assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string {
- switch len(optionalDescription) {
- case 0:
- return ""
- case 1:
- if describe, ok := optionalDescription[0].(func() string); ok {
- return describe() + "\n"
- }
- }
- return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
-}
-
-func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
- matches, err := matcher.Match(assertion.actualInput)
- assertion.failWrapper.TWithHelper.Helper()
- if err != nil {
- description := assertion.buildDescription(optionalDescription...)
- assertion.failWrapper.Fail(description+err.Error(), 2+assertion.offset)
- return false
- }
- if matches != desiredMatch {
- var message string
- if desiredMatch {
- message = matcher.FailureMessage(assertion.actualInput)
- } else {
- message = matcher.NegatedFailureMessage(assertion.actualInput)
- }
- description := assertion.buildDescription(optionalDescription...)
- assertion.failWrapper.Fail(description+message, 2+assertion.offset)
- return false
- }
-
- return true
-}
-
-func (assertion *Assertion) vetExtras(optionalDescription ...interface{}) bool {
- success, message := vetExtras(assertion.extra)
- if success {
- return true
- }
-
- description := assertion.buildDescription(optionalDescription...)
- assertion.failWrapper.TWithHelper.Helper()
- assertion.failWrapper.Fail(description+message, 2+assertion.offset)
- return false
-}
-
-func vetExtras(extras []interface{}) (bool, string) {
- for i, extra := range extras {
- if extra != nil {
- zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
- if !reflect.DeepEqual(zeroValue, extra) {
- message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
- return false, message
- }
- }
- }
- return true, ""
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/async_assertion.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/async_assertion.go
new file mode 100644
index 00000000000..cde9e2ec8bd
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/async_assertion.go
@@ -0,0 +1,576 @@
+package internal
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+var errInterface = reflect.TypeOf((*error)(nil)).Elem()
+var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem()
+var contextType = reflect.TypeOf(new(context.Context)).Elem()
+
+type formattedGomegaError interface {
+ FormattedGomegaError() string
+}
+
+type asyncPolledActualError struct {
+ message string
+}
+
+func (err *asyncPolledActualError) Error() string {
+ return err.message
+}
+
+func (err *asyncPolledActualError) FormattedGomegaError() string {
+ return err.message
+}
+
+type contextWithAttachProgressReporter interface {
+ AttachProgressReporter(func() string) func()
+}
+
+type asyncGomegaHaltExecutionError struct{}
+
+func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {}
+func (a asyncGomegaHaltExecutionError) Error() string {
+ return `An assertion has failed in a goroutine. You should call
+
+ defer GinkgoRecover()
+
+at the top of the goroutine that caused this panic. This will allow Ginkgo and Gomega to correctly capture and manage this panic.`
+}
+
+type AsyncAssertionType uint
+
+const (
+ AsyncAssertionTypeEventually AsyncAssertionType = iota
+ AsyncAssertionTypeConsistently
+)
+
+func (at AsyncAssertionType) String() string {
+ switch at {
+ case AsyncAssertionTypeEventually:
+ return "Eventually"
+ case AsyncAssertionTypeConsistently:
+ return "Consistently"
+ }
+ return "INVALID ASYNC ASSERTION TYPE"
+}
+
+type AsyncAssertion struct {
+ asyncType AsyncAssertionType
+
+ actualIsFunc bool
+ actual interface{}
+ argsToForward []interface{}
+
+ timeoutInterval time.Duration
+ pollingInterval time.Duration
+ mustPassRepeatedly int
+ ctx context.Context
+ offset int
+ g *Gomega
+}
+
+func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion {
+ out := &AsyncAssertion{
+ asyncType: asyncType,
+ timeoutInterval: timeoutInterval,
+ pollingInterval: pollingInterval,
+ mustPassRepeatedly: mustPassRepeatedly,
+ offset: offset,
+ ctx: ctx,
+ g: g,
+ }
+
+ out.actual = actualInput
+ if actualInput != nil && reflect.TypeOf(actualInput).Kind() == reflect.Func {
+ out.actualIsFunc = true
+ }
+
+ return out
+}
+
+func (assertion *AsyncAssertion) WithOffset(offset int) types.AsyncAssertion {
+ assertion.offset = offset
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithTimeout(interval time.Duration) types.AsyncAssertion {
+ assertion.timeoutInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.AsyncAssertion {
+ assertion.pollingInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) Within(timeout time.Duration) types.AsyncAssertion {
+ assertion.timeoutInterval = timeout
+ return assertion
+}
+
+func (assertion *AsyncAssertion) ProbeEvery(interval time.Duration) types.AsyncAssertion {
+ assertion.pollingInterval = interval
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAssertion {
+ assertion.ctx = ctx
+ return assertion
+}
+
+func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion {
+ assertion.argsToForward = argsToForward
+ return assertion
+}
+
+func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssertion {
+ assertion.mustPassRepeatedly = count
+ return assertion
+}
+
+func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Asynchronous assertion", optionalDescription...)
+ return assertion.match(matcher, true, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
+ assertion.g.THelper()
+ vetOptionalDescription("Asynchronous assertion", optionalDescription...)
+ return assertion.match(matcher, false, optionalDescription...)
+}
+
+func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
+ switch len(optionalDescription) {
+ case 0:
+ return ""
+ case 1:
+ if describe, ok := optionalDescription[0].(func() string); ok {
+ return describe() + "\n"
+ }
+ }
+ return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
+}
+
+func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) {
+ if len(values) == 0 {
+ return nil, &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType),
+ }
+ }
+
+ actual := values[0].Interface()
+ if _, ok := AsPollingSignalError(actual); ok {
+ return actual, actual.(error)
+ }
+
+ var err error
+ for i, extraValue := range values[1:] {
+ extra := extraValue.Interface()
+ if extra == nil {
+ continue
+ }
+ if _, ok := AsPollingSignalError(extra); ok {
+ return actual, extra.(error)
+ }
+ extraType := reflect.TypeOf(extra)
+ zero := reflect.Zero(extraType).Interface()
+ if reflect.DeepEqual(extra, zero) {
+ continue
+ }
+ if i == len(values)-2 && extraType.Implements(errInterface) {
+ err = extra.(error)
+ }
+ if err == nil {
+ err = &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s had an unexpected non-nil/non-zero return value at index %d:\n%s", assertion.asyncType, i+1, format.Object(extra, 1)),
+ }
+ }
+ }
+
+ return actual, err
+}
+
+func (assertion *AsyncAssertion) invalidFunctionError(t reflect.Type) error {
+ return fmt.Errorf(`The function passed to %s had an invalid signature of %s. Functions passed to %s must either:
+
+ (a) have return values or
+ (b) take a Gomega interface as their first argument and use that Gomega instance to make assertions.
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, t, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) noConfiguredContextForFunctionError() error {
+ return fmt.Errorf(`The function passed to %s requested a context.Context, but no context has been provided. Please pass one in using %s().WithContext().
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvided int) error {
+ have := "have"
+ if numProvided == 1 {
+ have = "has"
+ }
+ return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments.
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) invalidMustPassRepeatedlyError(reason string) error {
+ return fmt.Errorf(`Invalid use of MustPassRepeatedly with %s %s
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, reason)
+}
+
+func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) {
+ if !assertion.actualIsFunc {
+ return func() (interface{}, error) { return assertion.actual, nil }, nil
+ }
+ actualValue := reflect.ValueOf(assertion.actual)
+ actualType := reflect.TypeOf(assertion.actual)
+ numIn, numOut, isVariadic := actualType.NumIn(), actualType.NumOut(), actualType.IsVariadic()
+
+ if numIn == 0 && numOut == 0 {
+ return nil, assertion.invalidFunctionError(actualType)
+ }
+ takesGomega, takesContext := false, false
+ if numIn > 0 {
+ takesGomega, takesContext = actualType.In(0).Implements(gomegaType), actualType.In(0).Implements(contextType)
+ }
+ if takesGomega && numIn > 1 && actualType.In(1).Implements(contextType) {
+ takesContext = true
+ }
+ if takesContext && len(assertion.argsToForward) > 0 && reflect.TypeOf(assertion.argsToForward[0]).Implements(contextType) {
+ takesContext = false
+ }
+ if !takesGomega && numOut == 0 {
+ return nil, assertion.invalidFunctionError(actualType)
+ }
+ if takesContext && assertion.ctx == nil {
+ return nil, assertion.noConfiguredContextForFunctionError()
+ }
+
+ var assertionFailure error
+ inValues := []reflect.Value{}
+ if takesGomega {
+ inValues = append(inValues, reflect.ValueOf(NewGomega(assertion.g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) {
+ skip := 0
+ if len(callerSkip) > 0 {
+ skip = callerSkip[0]
+ }
+ _, file, line, _ := runtime.Caller(skip + 1)
+ assertionFailure = &asyncPolledActualError{
+ message: fmt.Sprintf("The function passed to %s failed at %s:%d with:\n%s", assertion.asyncType, file, line, message),
+ }
+ // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine
+ panic(asyncGomegaHaltExecutionError{})
+ })))
+ }
+ if takesContext {
+ inValues = append(inValues, reflect.ValueOf(assertion.ctx))
+ }
+ for _, arg := range assertion.argsToForward {
+ inValues = append(inValues, reflect.ValueOf(arg))
+ }
+
+ if !isVariadic && numIn != len(inValues) {
+ return nil, assertion.argumentMismatchError(actualType, len(inValues))
+ } else if isVariadic && len(inValues) < numIn-1 {
+ return nil, assertion.argumentMismatchError(actualType, len(inValues))
+ }
+
+ if assertion.mustPassRepeatedly != 1 && assertion.asyncType != AsyncAssertionTypeEventually {
+ return nil, assertion.invalidMustPassRepeatedlyError("it can only be used with Eventually")
+ }
+ if assertion.mustPassRepeatedly < 1 {
+ return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1")
+ }
+
+ return func() (actual interface{}, err error) {
+ var values []reflect.Value
+ assertionFailure = nil
+ defer func() {
+ if numOut == 0 && takesGomega {
+ actual = assertionFailure
+ } else {
+ actual, err = assertion.processReturnValues(values)
+ _, isAsyncError := AsPollingSignalError(err)
+ if assertionFailure != nil && !isAsyncError {
+ err = assertionFailure
+ }
+ }
+ if e := recover(); e != nil {
+ if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
+ err = e.(error)
+ } else if assertionFailure == nil {
+ panic(e)
+ }
+ }
+ }()
+ values = actualValue.Call(inValues)
+ return
+ }, nil
+}
+
+func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time {
+ if assertion.timeoutInterval >= 0 {
+ return time.After(assertion.timeoutInterval)
+ }
+
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ return time.After(assertion.g.DurationBundle.ConsistentlyDuration)
+ } else {
+ if assertion.ctx == nil {
+ return time.After(assertion.g.DurationBundle.EventuallyTimeout)
+ } else {
+ return nil
+ }
+ }
+}
+
+func (assertion *AsyncAssertion) afterPolling() <-chan time.Time {
+ if assertion.pollingInterval >= 0 {
+ return time.After(assertion.pollingInterval)
+ }
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ return time.After(assertion.g.DurationBundle.ConsistentlyPollingInterval)
+ } else {
+ return time.After(assertion.g.DurationBundle.EventuallyPollingInterval)
+ }
+}
+
+func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool {
+ if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) {
+ return false
+ }
+ return true
+}
+
+func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ if _, isAsyncError := AsPollingSignalError(e); isAsyncError {
+ err = e.(error)
+ } else {
+ panic(e)
+ }
+ }
+ }()
+
+ matches, err = matcher.Match(value)
+
+ return
+}
+
+func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
+ timer := time.Now()
+ timeout := assertion.afterTimeout()
+ lock := sync.Mutex{}
+
+ var matches, hasLastValidActual bool
+ var actual, lastValidActual interface{}
+ var actualErr, matcherErr error
+ var oracleMatcherSaysStop bool
+
+ assertion.g.THelper()
+
+ pollActual, buildActualPollerErr := assertion.buildActualPoller()
+ if buildActualPollerErr != nil {
+ assertion.g.Fail(buildActualPollerErr.Error(), 2+assertion.offset)
+ return false
+ }
+
+ actual, actualErr = pollActual()
+ if actualErr == nil {
+ lastValidActual = actual
+ hasLastValidActual = true
+ oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual)
+ matches, matcherErr = assertion.pollMatcher(matcher, actual)
+ }
+
+ renderError := func(preamble string, err error) string {
+ message := ""
+ if pollingSignalErr, ok := AsPollingSignalError(err); ok {
+ message = err.Error()
+ for _, attachment := range pollingSignalErr.Attachments {
+ message += fmt.Sprintf("\n%s:\n", attachment.Description)
+ message += format.Object(attachment.Object, 1)
+ }
+ } else {
+ message = preamble + "\n" + format.Object(err, 1)
+ }
+ return message
+ }
+
+ messageGenerator := func() string {
+ // can be called out of band by Ginkgo if the user requests a progress report
+ lock.Lock()
+ defer lock.Unlock()
+ message := ""
+
+ if actualErr == nil {
+ if matcherErr == nil {
+ if desiredMatch != matches {
+ if desiredMatch {
+ message += matcher.FailureMessage(actual)
+ } else {
+ message += matcher.NegatedFailureMessage(actual)
+ }
+ } else {
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ message += "There is no failure as the matcher passed to Consistently has not yet failed"
+ } else {
+ message += "There is no failure as the matcher passed to Eventually succeeded on its most recent iteration"
+ }
+ }
+ } else {
+ var fgErr formattedGomegaError
+ if errors.As(actualErr, &fgErr) {
+ message += fgErr.FormattedGomegaError() + "\n"
+ } else {
+ message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr)
+ }
+ }
+ } else {
+ var fgErr formattedGomegaError
+ if errors.As(actualErr, &fgErr) {
+ message += fgErr.FormattedGomegaError() + "\n"
+ } else {
+ message += renderError(fmt.Sprintf("The function passed to %s returned the following error:", assertion.asyncType), actualErr)
+ }
+ if hasLastValidActual {
+ message += fmt.Sprintf("\nAt one point, however, the function did return successfully.\nYet, %s failed because", assertion.asyncType)
+ _, e := matcher.Match(lastValidActual)
+ if e != nil {
+ message += renderError(" the matcher returned the following error:", e)
+ } else {
+ message += " the matcher was not satisfied:\n"
+ if desiredMatch {
+ message += matcher.FailureMessage(lastValidActual)
+ } else {
+ message += matcher.NegatedFailureMessage(lastValidActual)
+ }
+ }
+ }
+ }
+
+ description := assertion.buildDescription(optionalDescription...)
+ return fmt.Sprintf("%s%s", description, message)
+ }
+
+ fail := func(preamble string) {
+ assertion.g.THelper()
+ assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s", preamble, time.Since(timer).Seconds(), messageGenerator()), 3+assertion.offset)
+ }
+
+ var contextDone <-chan struct{}
+ if assertion.ctx != nil {
+ contextDone = assertion.ctx.Done()
+ if v, ok := assertion.ctx.Value("GINKGO_SPEC_CONTEXT").(contextWithAttachProgressReporter); ok {
+ detach := v.AttachProgressReporter(messageGenerator)
+ defer detach()
+ }
+ }
+
+ // Used to count the number of times in a row a step passed
+ passedRepeatedlyCount := 0
+ for {
+ var nextPoll <-chan time.Time = nil
+ var isTryAgainAfterError = false
+
+ for _, err := range []error{actualErr, matcherErr} {
+ if pollingSignalErr, ok := AsPollingSignalError(err); ok {
+ if pollingSignalErr.IsStopTrying() {
+ fail("Told to stop trying")
+ return false
+ }
+ if pollingSignalErr.IsTryAgainAfter() {
+ nextPoll = time.After(pollingSignalErr.TryAgainDuration())
+ isTryAgainAfterError = true
+ }
+ }
+ }
+
+ if actualErr == nil && matcherErr == nil && matches == desiredMatch {
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ passedRepeatedlyCount += 1
+ if passedRepeatedlyCount == assertion.mustPassRepeatedly {
+ return true
+ }
+ }
+ } else if !isTryAgainAfterError {
+ if assertion.asyncType == AsyncAssertionTypeConsistently {
+ fail("Failed")
+ return false
+ }
+ // Reset the consecutive pass count
+ passedRepeatedlyCount = 0
+ }
+
+ if oracleMatcherSaysStop {
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ fail("No future change is possible. Bailing out early")
+ return false
+ } else {
+ return true
+ }
+ }
+
+ if nextPoll == nil {
+ nextPoll = assertion.afterPolling()
+ }
+
+ select {
+ case <-nextPoll:
+ a, e := pollActual()
+ lock.Lock()
+ actual, actualErr = a, e
+ lock.Unlock()
+ if actualErr == nil {
+ lock.Lock()
+ lastValidActual = actual
+ hasLastValidActual = true
+ lock.Unlock()
+ oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual)
+ m, e := assertion.pollMatcher(matcher, actual)
+ lock.Lock()
+ matches, matcherErr = m, e
+ lock.Unlock()
+ }
+ case <-contextDone:
+ err := context.Cause(assertion.ctx)
+ if err != nil && err != context.Canceled {
+ fail(fmt.Sprintf("Context was cancelled (cause: %s)", err))
+ } else {
+ fail("Context was cancelled")
+ }
+ return false
+ case <-timeout:
+ if assertion.asyncType == AsyncAssertionTypeEventually {
+ fail("Timed out")
+ return false
+ } else {
+ if isTryAgainAfterError {
+ fail("Timed out while waiting on TryAgainAfter")
+ return false
+ }
+ return true
+ }
+ }
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
deleted file mode 100644
index 5204836bff0..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/asyncassertion/async_assertion.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// untested sections: 2
-
-package asyncassertion
-
-import (
- "errors"
- "fmt"
- "reflect"
- "time"
-
- "github.com/onsi/gomega/internal/oraclematcher"
- "github.com/onsi/gomega/types"
-)
-
-type AsyncAssertionType uint
-
-const (
- AsyncAssertionTypeEventually AsyncAssertionType = iota
- AsyncAssertionTypeConsistently
-)
-
-type AsyncAssertion struct {
- asyncType AsyncAssertionType
- actualInput interface{}
- timeoutInterval time.Duration
- pollingInterval time.Duration
- failWrapper *types.GomegaFailWrapper
- offset int
-}
-
-func New(asyncType AsyncAssertionType, actualInput interface{}, failWrapper *types.GomegaFailWrapper, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion {
- actualType := reflect.TypeOf(actualInput)
- if actualType.Kind() == reflect.Func {
- if actualType.NumIn() != 0 || actualType.NumOut() == 0 {
- panic("Expected a function with no arguments and one or more return values.")
- }
- }
-
- return &AsyncAssertion{
- asyncType: asyncType,
- actualInput: actualInput,
- failWrapper: failWrapper,
- timeoutInterval: timeoutInterval,
- pollingInterval: pollingInterval,
- offset: offset,
- }
-}
-
-func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.match(matcher, true, optionalDescription...)
-}
-
-func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool {
- assertion.failWrapper.TWithHelper.Helper()
- return assertion.match(matcher, false, optionalDescription...)
-}
-
-func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string {
- switch len(optionalDescription) {
- case 0:
- return ""
- case 1:
- if describe, ok := optionalDescription[0].(func() string); ok {
- return describe() + "\n"
- }
- }
- return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n"
-}
-
-func (assertion *AsyncAssertion) actualInputIsAFunction() bool {
- actualType := reflect.TypeOf(assertion.actualInput)
- return actualType.Kind() == reflect.Func && actualType.NumIn() == 0 && actualType.NumOut() > 0
-}
-
-func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
- if assertion.actualInputIsAFunction() {
- values := reflect.ValueOf(assertion.actualInput).Call([]reflect.Value{})
-
- extras := []interface{}{}
- for _, value := range values[1:] {
- extras = append(extras, value.Interface())
- }
-
- success, message := vetExtras(extras)
-
- if !success {
- return nil, errors.New(message)
- }
-
- return values[0].Interface(), nil
- }
-
- return assertion.actualInput, nil
-}
-
-func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool {
- if assertion.actualInputIsAFunction() {
- return true
- }
-
- return oraclematcher.MatchMayChangeInTheFuture(matcher, value)
-}
-
-func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool {
- timer := time.Now()
- timeout := time.After(assertion.timeoutInterval)
-
- var matches bool
- var err error
- mayChange := true
- value, err := assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
- }
-
- assertion.failWrapper.TWithHelper.Helper()
-
- fail := func(preamble string) {
- errMsg := ""
- message := ""
- if err != nil {
- errMsg = "Error: " + err.Error()
- } else {
- if desiredMatch {
- message = matcher.FailureMessage(value)
- } else {
- message = matcher.NegatedFailureMessage(value)
- }
- }
- assertion.failWrapper.TWithHelper.Helper()
- description := assertion.buildDescription(optionalDescription...)
- assertion.failWrapper.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset)
- }
-
- if assertion.asyncType == AsyncAssertionTypeEventually {
- for {
- if err == nil && matches == desiredMatch {
- return true
- }
-
- if !mayChange {
- fail("No future change is possible. Bailing out early")
- return false
- }
-
- select {
- case <-time.After(assertion.pollingInterval):
- value, err = assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
- }
- case <-timeout:
- fail("Timed out")
- return false
- }
- }
- } else if assertion.asyncType == AsyncAssertionTypeConsistently {
- for {
- if !(err == nil && matches == desiredMatch) {
- fail("Failed")
- return false
- }
-
- if !mayChange {
- return true
- }
-
- select {
- case <-time.After(assertion.pollingInterval):
- value, err = assertion.pollActual()
- if err == nil {
- mayChange = assertion.matcherMayChange(matcher, value)
- matches, err = matcher.Match(value)
- }
- case <-timeout:
- return true
- }
- }
- }
-
- return false
-}
-
-func vetExtras(extras []interface{}) (bool, string) {
- for i, extra := range extras {
- if extra != nil {
- zeroValue := reflect.Zero(reflect.TypeOf(extra)).Interface()
- if !reflect.DeepEqual(zeroValue, extra) {
- message := fmt.Sprintf("Unexpected non-nil/non-zero extra argument at index %d:\n\t<%T>: %#v", i+1, extra, extra)
- return false, message
- }
- }
- }
- return true, ""
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/duration_bundle.go
new file mode 100644
index 00000000000..6e0d90d3a1a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/duration_bundle.go
@@ -0,0 +1,71 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "time"
+)
+
+type DurationBundle struct {
+ EventuallyTimeout time.Duration
+ EventuallyPollingInterval time.Duration
+ ConsistentlyDuration time.Duration
+ ConsistentlyPollingInterval time.Duration
+}
+
+const (
+ EventuallyTimeoutEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT"
+ EventuallyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL"
+
+ ConsistentlyDurationEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_DURATION"
+ ConsistentlyPollingIntervalEnvVarName = "GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL"
+)
+
+func FetchDefaultDurationBundle() DurationBundle {
+ return DurationBundle{
+ EventuallyTimeout: durationFromEnv(EventuallyTimeoutEnvVarName, time.Second),
+ EventuallyPollingInterval: durationFromEnv(EventuallyPollingIntervalEnvVarName, 10*time.Millisecond),
+
+ ConsistentlyDuration: durationFromEnv(ConsistentlyDurationEnvVarName, 100*time.Millisecond),
+ ConsistentlyPollingInterval: durationFromEnv(ConsistentlyPollingIntervalEnvVarName, 10*time.Millisecond),
+ }
+}
+
+func durationFromEnv(key string, defaultDuration time.Duration) time.Duration {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultDuration
+ }
+ duration, err := time.ParseDuration(value)
+ if err != nil {
+ panic(fmt.Sprintf("Expected a duration when using %s! Parse error %v", key, err))
+ }
+ return duration
+}
+
+func toDuration(input interface{}) (time.Duration, error) {
+ duration, ok := input.(time.Duration)
+ if ok {
+ return duration, nil
+ }
+
+ value := reflect.ValueOf(input)
+ kind := reflect.TypeOf(input).Kind()
+
+ if reflect.Int <= kind && kind <= reflect.Int64 {
+ return time.Duration(value.Int()) * time.Second, nil
+ } else if reflect.Uint <= kind && kind <= reflect.Uint64 {
+ return time.Duration(value.Uint()) * time.Second, nil
+ } else if reflect.Float32 <= kind && kind <= reflect.Float64 {
+ return time.Duration(value.Float() * float64(time.Second)), nil
+ } else if reflect.String == kind {
+ duration, err := time.ParseDuration(value.String())
+ if err != nil {
+ return 0, fmt.Errorf("%#v is not a valid parsable duration string: %w", input, err)
+ }
+ return duration, nil
+ }
+
+ return 0, fmt.Errorf("%#v is not a valid interval. Must be a time.Duration, a parsable duration string, or a number.", input)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gomega.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gomega.go
new file mode 100644
index 00000000000..de1f4f336e8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gomega.go
@@ -0,0 +1,129 @@
+package internal
+
+import (
+ "context"
+ "time"
+
+ "github.com/onsi/gomega/types"
+)
+
+type Gomega struct {
+ Fail types.GomegaFailHandler
+ THelper func()
+ DurationBundle DurationBundle
+}
+
+func NewGomega(bundle DurationBundle) *Gomega {
+ return &Gomega{
+ Fail: nil,
+ THelper: nil,
+ DurationBundle: bundle,
+ }
+}
+
+func (g *Gomega) IsConfigured() bool {
+ return g.Fail != nil && g.THelper != nil
+}
+
+func (g *Gomega) ConfigureWithFailHandler(fail types.GomegaFailHandler) *Gomega {
+ g.Fail = fail
+ g.THelper = func() {}
+ return g
+}
+
+func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega {
+ g.Fail = func(message string, _ ...int) {
+ t.Helper()
+ t.Fatalf("\n%s", message)
+ }
+ g.THelper = t.Helper
+ return g
+}
+
+func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
+}
+
+func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion {
+ return g.ExpectWithOffset(0, actual, extra...)
+}
+
+func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion {
+ return NewAssertion(actual, g, offset, extra...)
+}
+
+func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...)
+}
+
+func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...)
+}
+
+func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...)
+}
+
+func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...)
+}
+
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion {
+ baseOffset := 3
+ timeoutInterval := -time.Duration(1)
+ pollingInterval := -time.Duration(1)
+ intervals := []interface{}{}
+ var ctx context.Context
+
+ actual := actualOrCtx
+ startingIndex := 0
+ if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 {
+ // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration
+ // this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual
+ if _, err := toDuration(args[0]); err != nil {
+ ctx = actualOrCtx.(context.Context)
+ actual = args[0]
+ startingIndex = 1
+ }
+ }
+
+ for _, arg := range args[startingIndex:] {
+ switch v := arg.(type) {
+ case context.Context:
+ ctx = v
+ default:
+ intervals = append(intervals, arg)
+ }
+ }
+ var err error
+ if len(intervals) > 0 {
+ timeoutInterval, err = toDuration(intervals[0])
+ if err != nil {
+ g.Fail(err.Error(), offset+baseOffset)
+ }
+ }
+ if len(intervals) > 1 {
+ pollingInterval, err = toDuration(intervals[1])
+ if err != nil {
+ g.Fail(err.Error(), offset+baseOffset)
+ }
+ }
+
+ return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, 1, ctx, offset)
+}
+
+func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) {
+ g.DurationBundle.EventuallyTimeout = t
+}
+
+func (g *Gomega) SetDefaultEventuallyPollingInterval(t time.Duration) {
+ g.DurationBundle.EventuallyPollingInterval = t
+}
+
+func (g *Gomega) SetDefaultConsistentlyDuration(t time.Duration) {
+ g.DurationBundle.ConsistentlyDuration = t
+}
+
+func (g *Gomega) SetDefaultConsistentlyPollingInterval(t time.Duration) {
+ g.DurationBundle.ConsistentlyPollingInterval = t
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
new file mode 100644
index 00000000000..6864055a5a7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gutil/post_ioutil.go
@@ -0,0 +1,48 @@
+//go:build go1.16
+// +build go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.16 and higher, this implementation
+// uses the ioutil replacement functions in "io" and "os" with some
+// Gomega specifics. This means that we should not get deprecation warnings
+// for ioutil when they are added.
+package gutil
+
+import (
+ "io"
+ "os"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return io.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return io.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ entries, err := os.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, entry := range entries {
+ names = append(names, entry.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return os.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return os.MkdirTemp(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return os.WriteFile(filename, data, 0644)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
new file mode 100644
index 00000000000..5c0ce1ee3da
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/gutil/using_ioutil.go
@@ -0,0 +1,47 @@
+//go:build !go1.16
+// +build !go1.16
+
+// Package gutil is a replacement for ioutil, which should not be used in new
+// code as of Go 1.16. With Go 1.15 and lower, this implementation
+// uses the ioutil functions, meaning that although Gomega is not officially
+// supported on these versions, it is still likely to work.
+package gutil
+
+import (
+ "io"
+ "io/ioutil"
+)
+
+func NopCloser(r io.Reader) io.ReadCloser {
+ return ioutil.NopCloser(r)
+}
+
+func ReadAll(r io.Reader) ([]byte, error) {
+ return ioutil.ReadAll(r)
+}
+
+func ReadDir(dirname string) ([]string, error) {
+ files, err := ioutil.ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+
+ var names []string
+ for _, file := range files {
+ names = append(names, file.Name())
+ }
+
+ return names, nil
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ return ioutil.ReadFile(filename)
+}
+
+func MkdirTemp(dir, pattern string) (string, error) {
+ return ioutil.TempDir(dir, pattern)
+}
+
+func WriteFile(filename string, data []byte) error {
+ return ioutil.WriteFile(filename, data, 0644)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
deleted file mode 100644
index 66cad88a1fb..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/oraclematcher/oracle_matcher.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package oraclematcher
-
-import "github.com/onsi/gomega/types"
-
-/*
-GomegaMatchers that also match the OracleMatcher interface can convey information about
-whether or not their result will change upon future attempts.
-
-This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
-
-For example, a process' exit code can never change. So, gexec's Exit matcher returns `true`
-for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
-*/
-type OracleMatcher interface {
- MatchMayChangeInTheFuture(actual interface{}) bool
-}
-
-func MatchMayChangeInTheFuture(matcher types.GomegaMatcher, value interface{}) bool {
- oracleMatcher, ok := matcher.(OracleMatcher)
- if !ok {
- return true
- }
-
- return oracleMatcher.MatchMayChangeInTheFuture(value)
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
new file mode 100644
index 00000000000..83b04b1a4c9
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -0,0 +1,106 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+type PollingSignalErrorType int
+
+const (
+ PollingSignalErrorTypeStopTrying PollingSignalErrorType = iota
+ PollingSignalErrorTypeTryAgainAfter
+)
+
+type PollingSignalError interface {
+ error
+ Wrap(err error) PollingSignalError
+ Attach(description string, obj any) PollingSignalError
+ Now()
+}
+
+var StopTrying = func(message string) PollingSignalError {
+ return &PollingSignalErrorImpl{
+ message: message,
+ pollingSignalErrorType: PollingSignalErrorTypeStopTrying,
+ }
+}
+
+var TryAgainAfter = func(duration time.Duration) PollingSignalError {
+ return &PollingSignalErrorImpl{
+ message: fmt.Sprintf("told to try again after %s", duration),
+ duration: duration,
+ pollingSignalErrorType: PollingSignalErrorTypeTryAgainAfter,
+ }
+}
+
+type PollingSignalErrorAttachment struct {
+ Description string
+ Object any
+}
+
+type PollingSignalErrorImpl struct {
+ message string
+ wrappedErr error
+ pollingSignalErrorType PollingSignalErrorType
+ duration time.Duration
+ Attachments []PollingSignalErrorAttachment
+}
+
+func (s *PollingSignalErrorImpl) Wrap(err error) PollingSignalError {
+ s.wrappedErr = err
+ return s
+}
+
+func (s *PollingSignalErrorImpl) Attach(description string, obj any) PollingSignalError {
+ s.Attachments = append(s.Attachments, PollingSignalErrorAttachment{description, obj})
+ return s
+}
+
+func (s *PollingSignalErrorImpl) Error() string {
+ if s.wrappedErr == nil {
+ return s.message
+ } else {
+ return s.message + ": " + s.wrappedErr.Error()
+ }
+}
+
+func (s *PollingSignalErrorImpl) Unwrap() error {
+ if s == nil {
+ return nil
+ }
+ return s.wrappedErr
+}
+
+func (s *PollingSignalErrorImpl) Now() {
+ panic(s)
+}
+
+func (s *PollingSignalErrorImpl) IsStopTrying() bool {
+ return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying
+}
+
+func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool {
+ return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter
+}
+
+func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration {
+ return s.duration
+}
+
+func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) {
+ if actual == nil {
+ return nil, false
+ }
+ if actualErr, ok := actual.(error); ok {
+ var target *PollingSignalErrorImpl
+ if errors.As(actualErr, &target) {
+ return target, true
+ } else {
+ return nil, false
+ }
+ }
+
+ return nil, false
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
deleted file mode 100644
index bb27032f6c4..00000000000
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package testingtsupport
-
-import (
- "regexp"
- "runtime/debug"
- "strings"
-
- "github.com/onsi/gomega/types"
-)
-
-var StackTracePruneRE = regexp.MustCompile(`\/gomega\/|\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`)
-
-type EmptyTWithHelper struct{}
-
-func (e EmptyTWithHelper) Helper() {}
-
-type gomegaTestingT interface {
- Fatalf(format string, args ...interface{})
-}
-
-func BuildTestingTGomegaFailWrapper(t gomegaTestingT) *types.GomegaFailWrapper {
- tWithHelper, hasHelper := t.(types.TWithHelper)
- if !hasHelper {
- tWithHelper = EmptyTWithHelper{}
- }
-
- fail := func(message string, callerSkip ...int) {
- if hasHelper {
- tWithHelper.Helper()
- t.Fatalf("\n%s", message)
- } else {
- skip := 2
- if len(callerSkip) > 0 {
- skip += callerSkip[0]
- }
- stackTrace := pruneStack(string(debug.Stack()), skip)
- t.Fatalf("\n%s\n%s\n", stackTrace, message)
- }
- }
-
- return &types.GomegaFailWrapper{
- Fail: fail,
- TWithHelper: tWithHelper,
- }
-}
-
-func pruneStack(fullStackTrace string, skip int) string {
- stack := strings.Split(fullStackTrace, "\n")[1:]
- if len(stack) > 2*skip {
- stack = stack[2*skip:]
- }
- prunedStack := []string{}
- for i := 0; i < len(stack)/2; i++ {
- if !StackTracePruneRE.Match([]byte(stack[i*2])) {
- prunedStack = append(prunedStack, stack[i*2])
- prunedStack = append(prunedStack, stack[i*2+1])
- }
- }
- return strings.Join(prunedStack, "\n")
-}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
new file mode 100644
index 00000000000..f2958764171
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/internal/vetoptdesc.go
@@ -0,0 +1,22 @@
+package internal
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/types"
+)
+
+// vetOptionalDescription vets the optional description args: if it finds any
+// Gomega matcher at the beginning it panics. This allows for rendering Gomega
+// matchers as part of an optional Description, as long as they're not in the
+// first slot.
+func vetOptionalDescription(assertion string, optionalDescription ...interface{}) {
+ if len(optionalDescription) == 0 {
+ return
+ }
+ if _, isGomegaMatcher := optionalDescription[0].(types.GomegaMatcher); isGomegaMatcher {
+ panic(fmt.Sprintf("%s has a GomegaMatcher as the first element of optionalDescription.\n\t"+
+ "Do you mean to use And/Or/SatisfyAll/SatisfyAny to combine multiple matchers?",
+ assertion))
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers.go
index 9ec8893cba9..7ef27dc9c95 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers.go
@@ -1,173 +1,238 @@
package gomega
import (
+ "fmt"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/onsi/gomega/matchers"
"github.com/onsi/gomega/types"
)
-//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
-//types when performing comparisons.
-//It is an error for both actual and expected to be nil. Use BeNil() instead.
+// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about
+// types when performing comparisons.
+// It is an error for both actual and expected to be nil. Use BeNil() instead.
func Equal(expected interface{}) types.GomegaMatcher {
return &matchers.EqualMatcher{
Expected: expected,
}
}
-//BeEquivalentTo is more lax than Equal, allowing equality between different types.
-//This is done by converting actual to have the type of expected before
-//attempting equality with reflect.DeepEqual.
-//It is an error for actual and expected to be nil. Use BeNil() instead.
+// BeEquivalentTo is more lax than Equal, allowing equality between different types.
+// This is done by converting actual to have the type of expected before
+// attempting equality with reflect.DeepEqual.
+// It is an error for actual and expected to be nil. Use BeNil() instead.
func BeEquivalentTo(expected interface{}) types.GomegaMatcher {
return &matchers.BeEquivalentToMatcher{
Expected: expected,
}
}
-//BeIdenticalTo uses the == operator to compare actual with expected.
-//BeIdenticalTo is strict about types when performing comparisons.
-//It is an error for both actual and expected to be nil. Use BeNil() instead.
+// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison.
+// You can pass cmp.Option as options.
+// It is an error for actual and expected to be nil. Use BeNil() instead.
+func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher {
+ return &matchers.BeComparableToMatcher{
+ Expected: expected,
+ Options: opts,
+ }
+}
+
+// BeIdenticalTo uses the == operator to compare actual with expected.
+// BeIdenticalTo is strict about types when performing comparisons.
+// It is an error for both actual and expected to be nil. Use BeNil() instead.
func BeIdenticalTo(expected interface{}) types.GomegaMatcher {
return &matchers.BeIdenticalToMatcher{
Expected: expected,
}
}
-//BeNil succeeds if actual is nil
+// BeNil succeeds if actual is nil
func BeNil() types.GomegaMatcher {
return &matchers.BeNilMatcher{}
}
-//BeTrue succeeds if actual is true
+// BeTrue succeeds if actual is true
+//
+// In general, it's better to use `BeTrueBecause(reason)` to provide a more useful error message if a true check fails.
func BeTrue() types.GomegaMatcher {
return &matchers.BeTrueMatcher{}
}
-//BeFalse succeeds if actual is false
+// BeFalse succeeds if actual is false
+//
+// In general, it's better to use `BeFalseBecause(reason)` to provide a more useful error message if a false check fails.
func BeFalse() types.GomegaMatcher {
return &matchers.BeFalseMatcher{}
}
-//HaveOccurred succeeds if actual is a non-nil error
-//The typical Go error checking pattern looks like:
-// err := SomethingThatMightFail()
-// Expect(err).ShouldNot(HaveOccurred())
+// BeTrueBecause succeeds if actual is true and displays the provided reason if it is false
+// fmt.Sprintf is used to render the reason
+func BeTrueBecause(format string, args ...any) types.GomegaMatcher {
+ return &matchers.BeTrueMatcher{Reason: fmt.Sprintf(format, args...)}
+}
+
+// BeFalseBecause succeeds if actual is false and displays the provided reason if it is true.
+// fmt.Sprintf is used to render the reason
+func BeFalseBecause(format string, args ...any) types.GomegaMatcher {
+ return &matchers.BeFalseMatcher{Reason: fmt.Sprintf(format, args...)}
+}
+
+// HaveOccurred succeeds if actual is a non-nil error
+// The typical Go error checking pattern looks like:
+//
+// err := SomethingThatMightFail()
+// Expect(err).ShouldNot(HaveOccurred())
func HaveOccurred() types.GomegaMatcher {
return &matchers.HaveOccurredMatcher{}
}
-//Succeed passes if actual is a nil error
-//Succeed is intended to be used with functions that return a single error value. Instead of
-// err := SomethingThatMightFail()
-// Expect(err).ShouldNot(HaveOccurred())
+// Succeed passes if actual is a nil error
+// Succeed is intended to be used with functions that return a single error value. Instead of
//
-//You can write:
-// Expect(SomethingThatMightFail()).Should(Succeed())
+// err := SomethingThatMightFail()
+// Expect(err).ShouldNot(HaveOccurred())
//
-//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
-//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
-//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
+// You can write:
+//
+// Expect(SomethingThatMightFail()).Should(Succeed())
+//
+// It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect
+// functions automatically trigger failure if any return values after the first return value are non-zero/non-nil.
+// This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass.
func Succeed() types.GomegaMatcher {
return &matchers.SucceedMatcher{}
}
-//MatchError succeeds if actual is a non-nil error that matches the passed in string/error.
+// MatchError succeeds if actual is a non-nil error that matches the passed in
+// string, error, function, or matcher.
+//
+// These are valid use-cases:
//
-//These are valid use-cases:
-// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error"
-// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual)
+// When passed a string:
//
-//It is an error for err to be nil or an object that does not implement the Error interface
-func MatchError(expected interface{}) types.GomegaMatcher {
+// Expect(err).To(MatchError("an error"))
+//
+// asserts that err.Error() == "an error"
+//
+// When passed an error:
+//
+// Expect(err).To(MatchError(SomeError))
+//
+// First checks if errors.Is(err, SomeError).
+// If that fails then it checks if reflect.DeepEqual(err, SomeError) repeatedly for err and any errors wrapped by err
+//
+// When passed a matcher:
+//
+// Expect(err).To(MatchError(ContainSubstring("sprocket not found")))
+//
+// the matcher is passed err.Error(). In this case it asserts that err.Error() contains substring "sprocket not found"
+//
+// When passed a func(err) bool and a description:
+//
+// Expect(err).To(MatchError(os.IsNotExist, "IsNotExist"))
+//
+// the function is passed err and matches if the return value is true. The description is required to allow Gomega
+// to print a useful error message.
+//
+// It is an error for err to be nil or an object that does not implement the
+// Error interface
+//
+// The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases.
+func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher {
return &matchers.MatchErrorMatcher{
- Expected: expected,
+ Expected: expected,
+ FuncErrDescription: functionErrorDescription,
}
}
-//BeClosed succeeds if actual is a closed channel.
-//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil
+// BeClosed succeeds if actual is a closed channel.
+// It is an error to pass a non-channel to BeClosed, it is also an error to pass nil
//
-//In order to check whether or not the channel is closed, Gomega must try to read from the channel
-//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
-//values coming down the channel.
+// In order to check whether or not the channel is closed, Gomega must try to read from the channel
+// (even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about
+// values coming down the channel.
//
-//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
-//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
+// Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before
+// asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read).
//
-//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
+// Finally, as a corollary: it is an error to check whether or not a send-only channel is closed.
func BeClosed() types.GomegaMatcher {
return &matchers.BeClosedMatcher{}
}
-//Receive succeeds if there is a value to be received on actual.
-//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
+// Receive succeeds if there is a value to be received on actual.
+// Actual must be a channel (and cannot be a send-only channel) -- anything else is an error.
//
-//Receive returns immediately and never blocks:
+// Receive returns immediately and never blocks:
//
-//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+// - If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
-//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
+// - If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass.
//
-//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
+// - If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail.
//
-//If you have a go-routine running in the background that will write to channel `c` you can:
-// Eventually(c).Should(Receive())
+// If you have a go-routine running in the background that will write to channel `c` you can:
//
-//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
+// Eventually(c).Should(Receive())
//
-//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
-// Consistently(c).ShouldNot(Receive())
+// This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`)
//
-//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
-// Expect(c).Should(Receive(Equal("foo")))
+// A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`:
//
-//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
+// Consistently(c).ShouldNot(Receive())
//
-//Passing Receive a matcher is especially useful when paired with Eventually:
+// You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example:
//
-// Eventually(c).Should(Receive(ContainSubstring("bar")))
+// Expect(c).Should(Receive(Equal("foo")))
//
-//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+// When given a matcher, `Receive` will always fail if there is nothing to be received on the channel.
//
-//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
-// var myThing thing
-// Eventually(thingChan).Should(Receive(&myThing))
-// Expect(myThing.Sprocket).Should(Equal("foo"))
-// Expect(myThing.IsValid()).Should(BeTrue())
+// Passing Receive a matcher is especially useful when paired with Eventually:
+//
+// Eventually(c).Should(Receive(ContainSubstring("bar")))
+//
+// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
+//
+// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+//
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing))
+// Expect(myThing.Sprocket).Should(Equal("foo"))
+// Expect(myThing.IsValid()).Should(BeTrue())
+//
+// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received,
+// you can pass a pointer to a variable of the approriate type first, and second a matcher:
+//
+// var myThing thing
+// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar")))
func Receive(args ...interface{}) types.GomegaMatcher {
- var arg interface{}
- if len(args) > 0 {
- arg = args[0]
- }
-
return &matchers.ReceiveMatcher{
- Arg: arg,
+ Args: args,
}
}
-//BeSent succeeds if a value can be sent to actual.
-//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error.
-//In addition, actual must not be closed.
+// BeSent succeeds if a value can be sent to actual.
+// Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error.
+// In addition, actual must not be closed.
//
-//BeSent never blocks:
+// BeSent never blocks:
//
-//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
-//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout
-//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
+// - If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately
+// - If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout
+// - If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately
//
-//Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
-//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
+// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with).
+// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends.
func BeSent(arg interface{}) types.GomegaMatcher {
return &matchers.BeSentMatcher{
Arg: arg,
}
}
-//MatchRegexp succeeds if actual is a string or stringer that matches the
-//passed-in regexp. Optional arguments can be provided to construct a regexp
-//via fmt.Sprintf().
+// MatchRegexp succeeds if actual is a string or stringer that matches the
+// passed-in regexp. Optional arguments can be provided to construct a regexp
+// via fmt.Sprintf().
func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
return &matchers.MatchRegexpMatcher{
Regexp: regexp,
@@ -175,9 +240,9 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher {
}
}
-//ContainSubstring succeeds if actual is a string or stringer that contains the
-//passed-in substring. Optional arguments can be provided to construct the substring
-//via fmt.Sprintf().
+// ContainSubstring succeeds if actual is a string or stringer that contains the
+// passed-in substring. Optional arguments can be provided to construct the substring
+// via fmt.Sprintf().
func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
return &matchers.ContainSubstringMatcher{
Substr: substr,
@@ -185,9 +250,9 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher {
}
}
-//HavePrefix succeeds if actual is a string or stringer that contains the
-//passed-in string as a prefix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
+// HavePrefix succeeds if actual is a string or stringer that contains the
+// passed-in string as a prefix. Optional arguments can be provided to construct
+// via fmt.Sprintf().
func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
return &matchers.HavePrefixMatcher{
Prefix: prefix,
@@ -195,9 +260,9 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher {
}
}
-//HaveSuffix succeeds if actual is a string or stringer that contains the
-//passed-in string as a suffix. Optional arguments can be provided to construct
-//via fmt.Sprintf().
+// HaveSuffix succeeds if actual is a string or stringer that contains the
+// passed-in string as a suffix. Optional arguments can be provided to construct
+// via fmt.Sprintf().
func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
return &matchers.HaveSuffixMatcher{
Suffix: suffix,
@@ -205,122 +270,191 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher {
}
}
-//MatchJSON succeeds if actual is a string or stringer of JSON that matches
-//the expected JSON. The JSONs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+// MatchJSON succeeds if actual is a string or stringer of JSON that matches
+// the expected JSON. The JSONs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
func MatchJSON(json interface{}) types.GomegaMatcher {
return &matchers.MatchJSONMatcher{
JSONToMatch: json,
}
}
-//MatchXML succeeds if actual is a string or stringer of XML that matches
-//the expected XML. The XMLs are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like whitespaces shouldn't matter.
+// MatchXML succeeds if actual is a string or stringer of XML that matches
+// the expected XML. The XMLs are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like whitespaces shouldn't matter.
func MatchXML(xml interface{}) types.GomegaMatcher {
return &matchers.MatchXMLMatcher{
XMLToMatch: xml,
}
}
-//MatchYAML succeeds if actual is a string or stringer of YAML that matches
-//the expected YAML. The YAML's are decoded and the resulting objects are compared via
-//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
+// MatchYAML succeeds if actual is a string or stringer of YAML that matches
+// the expected YAML. The YAML's are decoded and the resulting objects are compared via
+// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter.
func MatchYAML(yaml interface{}) types.GomegaMatcher {
return &matchers.MatchYAMLMatcher{
YAMLToMatch: yaml,
}
}
-//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
+// BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice.
func BeEmpty() types.GomegaMatcher {
return &matchers.BeEmptyMatcher{}
}
-//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
+// HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice.
func HaveLen(count int) types.GomegaMatcher {
return &matchers.HaveLenMatcher{
Count: count,
}
}
-//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
+// HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice.
func HaveCap(count int) types.GomegaMatcher {
return &matchers.HaveCapMatcher{
Count: count,
}
}
-//BeZero succeeds if actual is the zero value for its type or if actual is nil.
+// BeZero succeeds if actual is the zero value for its type or if actual is nil.
func BeZero() types.GomegaMatcher {
return &matchers.BeZeroMatcher{}
}
-//ContainElement succeeds if actual contains the passed in element.
-//By default ContainElement() uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
+// ContainElement succeeds if actual contains the passed in element. By default
+// ContainElement() uses Equal() to perform the match, however a matcher can be
+// passed in instead:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar")))
//
-//Actual must be an array, slice or map.
-//For maps, ContainElement searches through the map's values.
-func ContainElement(element interface{}) types.GomegaMatcher {
+// Actual must be an array, slice or map. For maps, ContainElement searches
+// through the map's values.
+//
+// If you want to have a copy of the matching element(s) found you can pass a
+// pointer to a variable of the appropriate type. If the variable isn't a slice
+// or map, then exactly one match will be expected and returned. If the variable
+// is a slice or map, then at least one match is expected and all matches will be
+// stored in the variable.
+//
+// var findings []string
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings)))
+func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
return &matchers.ContainElementMatcher{
Element: element,
+ Result: result,
}
}
-//BeElementOf succeeds if actual is contained in the passed in elements.
-//BeElementOf() always uses Equal() to perform the match.
-//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
-//as the reverse of ContainElement() that operates with Equal() to perform the match.
-// Expect(2).Should(BeElementOf([]int{1, 2}))
-// Expect(2).Should(BeElementOf([2]int{1, 2}))
-//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
-// Expect(2).Should(BeElementOf(1, 2))
+// BeElementOf succeeds if actual is contained in the passed in elements.
+// BeElementOf() always uses Equal() to perform the match.
+// When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
+// as the reverse of ContainElement() that operates with Equal() to perform the match.
+//
+// Expect(2).Should(BeElementOf([]int{1, 2}))
+// Expect(2).Should(BeElementOf([2]int{1, 2}))
//
-//Actual must be typed.
+// Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
+//
+// Expect(2).Should(BeElementOf(1, 2))
+//
+// Actual must be typed.
func BeElementOf(elements ...interface{}) types.GomegaMatcher {
return &matchers.BeElementOfMatcher{
Elements: elements,
}
}
-//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
-//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// BeKeyOf succeeds if actual is contained in the keys of the passed in map.
+// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
//
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+// Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
+func BeKeyOf(element interface{}) types.GomegaMatcher {
+ return &matchers.BeKeyOfMatcher{
+ Map: element,
+ }
+}
+
+// ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+// By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
-//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+// Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
//
-//You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it
-//is the only element passed in to ConsistOf:
+// You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can provided that it
+// is the only element passed in to ConsistOf:
//
-// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
+// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"}))
//
-//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
+// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule.
func ConsistOf(elements ...interface{}) types.GomegaMatcher {
return &matchers.ConsistOfMatcher{
Elements: elements,
}
}
-//HaveKey succeeds if actual is a map with the passed in key.
-//By default HaveKey uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
+// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter.
+// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar"))
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", ContainSubstring("Bar")))
+// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//
+// Actual must be an array or slice.
+func HaveExactElements(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.HaveExactElementsMatcher{
+ Elements: elements,
+ }
+}
+
+// ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter.
+// By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar"))
+// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo"))
+//
+// Actual must be an array, slice or map.
+// For maps, ContainElements searches through the map's values.
+func ContainElements(elements ...interface{}) types.GomegaMatcher {
+ return &matchers.ContainElementsMatcher{
+ Elements: elements,
+ }
+}
+
+// HaveEach succeeds if actual solely contains elements that match the passed in element.
+// Please note that if actual is empty, HaveEach always will fail.
+// By default HaveEach() uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo")))
+//
+// Actual must be an array, slice or map.
+// For maps, HaveEach searches through the map's values.
+func HaveEach(element interface{}) types.GomegaMatcher {
+ return &matchers.HaveEachMatcher{
+ Element: element,
+ }
+}
+
+// HaveKey succeeds if actual is a map with the passed in key.
+// By default HaveKey uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`)))
func HaveKey(key interface{}) types.GomegaMatcher {
return &matchers.HaveKeyMatcher{
Key: key,
}
}
-//HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
-//By default HaveKeyWithValue uses Equal() to perform the match, however a
-//matcher can be passed in instead:
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
-// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
+// HaveKeyWithValue succeeds if actual is a map with the passed in key and value.
+// By default HaveKeyWithValue uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar"))
+// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar"))
func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
return &matchers.HaveKeyWithValueMatcher{
Key: key,
@@ -328,17 +462,79 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher {
}
}
-//BeNumerically performs numerical assertions in a type-agnostic way.
-//Actual and expected should be numbers, though the specific type of
-//number is irrelevant (float32, float64, uint8, etc...).
+// HaveField succeeds if actual is a struct and the value at the passed in field
+// matches the passed in matcher. By default HaveField used Equal() to perform the match,
+// however a matcher can be passed in in stead.
+//
+// The field must be a string that resolves to the name of a field in the struct. Structs can be traversed
+// using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
+// Such methods must take no arguments and return a single value:
+//
+// type Book struct {
+// Title string
+// Author Person
+// }
+// type Person struct {
+// FirstName string
+// LastName string
+// DOB time.Time
+// }
+// Expect(book).To(HaveField("Title", "Les Miserables"))
+// Expect(book).To(HaveField("Title", ContainSubstring("Les"))
+// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))
+// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))
+func HaveField(field string, expected interface{}) types.GomegaMatcher {
+ return &matchers.HaveFieldMatcher{
+ Field: field,
+ Expected: expected,
+ }
+}
+
+// HaveExistingField succeeds if actual is a struct and the specified field
+// exists.
+//
+// HaveExistingField can be combined with HaveField in order to cover use cases
+// with optional fields. HaveField alone would trigger an error in such situations.
+//
+// Expect(MrHarmless).NotTo(And(HaveExistingField("Title"), HaveField("Title", "Supervillain")))
+func HaveExistingField(field string) types.GomegaMatcher {
+ return &matchers.HaveExistingFieldMatcher{
+ Field: field,
+ }
+}
+
+// HaveValue applies the given matcher to the value of actual, optionally and
+// repeatedly dereferencing pointers or taking the concrete value of interfaces.
+// Thus, the matcher will always be applied to non-pointer and non-interface
+// values only. HaveValue will fail with an error if a pointer or interface is
+// nil. It will also fail for more than 31 pointer or interface dereferences to
+// guard against mistakenly applying it to arbitrarily deep linked pointers.
+//
+// HaveValue differs from gstruct.PointTo in that it does not expect actual to
+// be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer
+// and even interface values.
+//
+// actual := 42
+// Expect(actual).To(HaveValue(42))
+// Expect(&actual).To(HaveValue(42))
+func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher {
+ return &matchers.HaveValueMatcher{
+ Matcher: matcher,
+ }
+}
+
+// BeNumerically performs numerical assertions in a type-agnostic way.
+// Actual and expected should be numbers, though the specific type of
+// number is irrelevant (float32, float64, uint8, etc...).
+//
+// There are six, self-explanatory, supported comparators:
//
-//There are six, self-explanatory, supported comparators:
-// Expect(1.0).Should(BeNumerically("==", 1))
-// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
-// Expect(1.0).Should(BeNumerically(">", 0.9))
-// Expect(1.0).Should(BeNumerically(">=", 1.0))
-// Expect(1.0).Should(BeNumerically("<", 3))
-// Expect(1.0).Should(BeNumerically("<=", 1.0))
+// Expect(1.0).Should(BeNumerically("==", 1))
+// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01))
+// Expect(1.0).Should(BeNumerically(">", 0.9))
+// Expect(1.0).Should(BeNumerically(">=", 1.0))
+// Expect(1.0).Should(BeNumerically("<", 3))
+// Expect(1.0).Should(BeNumerically("<=", 1.0))
func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher {
return &matchers.BeNumericallyMatcher{
Comparator: comparator,
@@ -346,10 +542,11 @@ func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatc
}
}
-//BeTemporally compares time.Time's like BeNumerically
-//Actual and expected must be time.Time. The comparators are the same as for BeNumerically
-// Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
-// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
+// BeTemporally compares time.Time's like BeNumerically
+// Actual and expected must be time.Time. The comparators are the same as for BeNumerically
+//
+// Expect(time.Now()).Should(BeTemporally(">", time.Time{}))
+// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second))
func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher {
return &matchers.BeTemporallyMatcher{
Comparator: comparator,
@@ -358,86 +555,147 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura
}
}
-//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
-//It will return an error when one of the values is nil.
-// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values
-// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
-// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
-// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
+// BeAssignableToTypeOf succeeds if actual is assignable to the type of expected.
+// It will return an error when one of the values is nil.
+//
+// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values
+// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type
+// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type
+// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{}))
func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher {
return &matchers.AssignableToTypeOfMatcher{
Expected: expected,
}
}
-//Panic succeeds if actual is a function that, when invoked, panics.
-//Actual must be a function that takes no arguments and returns no results.
+// Panic succeeds if actual is a function that, when invoked, panics.
+// Actual must be a function that takes no arguments and returns no results.
func Panic() types.GomegaMatcher {
return &matchers.PanicMatcher{}
}
-//BeAnExistingFile succeeds if a file exists.
-//Actual must be a string representing the abs path to the file being checked.
+// PanicWith succeeds if actual is a function that, when invoked, panics with a specific value.
+// Actual must be a function that takes no arguments and returns no results.
+//
+// By default PanicWith uses Equal() to perform the match, however a
+// matcher can be passed in instead:
+//
+// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`)))
+func PanicWith(expected interface{}) types.GomegaMatcher {
+ return &matchers.PanicMatcher{Expected: expected}
+}
+
+// BeAnExistingFile succeeds if a file exists.
+// Actual must be a string representing the abs path to the file being checked.
func BeAnExistingFile() types.GomegaMatcher {
return &matchers.BeAnExistingFileMatcher{}
}
-//BeARegularFile succeeds if a file exists and is a regular file.
-//Actual must be a string representing the abs path to the file being checked.
+// BeARegularFile succeeds if a file exists and is a regular file.
+// Actual must be a string representing the abs path to the file being checked.
func BeARegularFile() types.GomegaMatcher {
return &matchers.BeARegularFileMatcher{}
}
-//BeADirectory succeeds if a file exists and is a directory.
-//Actual must be a string representing the abs path to the file being checked.
+// BeADirectory succeeds if a file exists and is a directory.
+// Actual must be a string representing the abs path to the file being checked.
func BeADirectory() types.GomegaMatcher {
return &matchers.BeADirectoryMatcher{}
}
-//And succeeds only if all of the given matchers succeed.
-//The matchers are tried in order, and will fail-fast if one doesn't succeed.
-// Expect("hi").To(And(HaveLen(2), Equal("hi"))
+// HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either an int or a string.
+//
+// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200
+// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found"
+// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204
+func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPStatusMatcher{Expected: expected}
+}
+
+// HaveHTTPHeaderWithValue succeeds if the header is found and the value matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be a string header name, followed by a header value which
+// can be a string, or another matcher.
+func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPHeaderWithValueMatcher{
+ Header: header,
+ Value: value,
+ }
+}
+
+// HaveHTTPBody matches if the body matches.
+// Actual must be either a *http.Response or *httptest.ResponseRecorder.
+// Expected must be either a string, []byte, or other matcher
+func HaveHTTPBody(expected interface{}) types.GomegaMatcher {
+ return &matchers.HaveHTTPBodyMatcher{Expected: expected}
+}
+
+// And succeeds only if all of the given matchers succeed.
+// The matchers are tried in order, and will fail-fast if one doesn't succeed.
+//
+// Expect("hi").To(And(HaveLen(2), Equal("hi"))
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func And(ms ...types.GomegaMatcher) types.GomegaMatcher {
return &matchers.AndMatcher{Matchers: ms}
}
-//SatisfyAll is an alias for And().
-// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
+// SatisfyAll is an alias for And().
+//
+// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi")))
func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher {
return And(matchers...)
}
-//Or succeeds if any of the given matchers succeed.
-//The matchers are tried in order and will return immediately upon the first successful match.
-// Expect("hi").To(Or(HaveLen(3), HaveLen(2))
+// Or succeeds if any of the given matchers succeed.
+// The matchers are tried in order and will return immediately upon the first successful match.
+//
+// Expect("hi").To(Or(HaveLen(3), HaveLen(2))
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func Or(ms ...types.GomegaMatcher) types.GomegaMatcher {
return &matchers.OrMatcher{Matchers: ms}
}
-//SatisfyAny is an alias for Or().
-// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
+// SatisfyAny is an alias for Or().
+//
+// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2))
func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher {
return Or(matchers...)
}
-//Not negates the given matcher; it succeeds if the given matcher fails.
-// Expect(1).To(Not(Equal(2))
+// Not negates the given matcher; it succeeds if the given matcher fails.
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// Expect(1).To(Not(Equal(2))
+//
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func Not(matcher types.GomegaMatcher) types.GomegaMatcher {
return &matchers.NotMatcher{Matcher: matcher}
}
-//WithTransform applies the `transform` to the actual value and matches it against `matcher`.
-//The given transform must be a function of one parameter that returns one value.
-// var plus1 = func(i int) int { return i + 1 }
-// Expect(1).To(WithTransform(plus1, Equal(2))
+// WithTransform applies the `transform` to the actual value and matches it against `matcher`.
+// The given transform must be either a function of one parameter that returns one value or a
+// function of one parameter that returns two values, where the second value must be of the
+// error type.
+//
+// var plus1 = func(i int) int { return i + 1 }
+// Expect(1).To(WithTransform(plus1, Equal(2))
+//
+// var failingplus1 = func(i int) (int, error) { return 42, "this does not compute" }
+// Expect(1).To(WithTransform(failingplus1, Equal(2)))
//
-//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
+// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher {
return matchers.NewWithTransformMatcher(transform, matcher)
}
+
+// Satisfy matches the actual value against the `predicate` function.
+// The given predicate must be a function of one parameter that returns bool.
+//
+// var isEven = func(i int) bool { return i%2 == 0 }
+// Expect(2).To(Satisfy(isEven))
+func Satisfy(predicate interface{}) types.GomegaMatcher {
+ return matchers.NewSatisfyMatcher(predicate)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/and.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/and.go
index d83a29164c6..6bd826adc5c 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/and.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/and.go
@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/onsi/gomega/format"
- "github.com/onsi/gomega/internal/oraclematcher"
"github.com/onsi/gomega/types"
)
@@ -52,12 +51,12 @@ func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
if m.firstFailedMatcher == nil {
// so all matchers succeeded.. Any one of them changing would change the result.
for _, matcher := range m.Matchers {
- if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+ if types.MatchMayChangeInTheFuture(matcher, actual) {
return true
}
}
return false // none of were going to change
}
// one of the matchers failed.. it must be able to change in order to affect the result
- return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
+ return types.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual)
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
index acffc8570f9..93d4497c705 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_directory.go
@@ -52,5 +52,5 @@ func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message
}
func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("not be a directory"))
+ return format.Message(actual, "not be a directory")
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
index 89441c80036..8fefc4deb7a 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go
@@ -52,5 +52,5 @@ func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (messag
}
func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("not be a regular file"))
+ return format.Message(actual, "not be a regular file")
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
index ec6506b001e..e2bdd28113b 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go
@@ -32,9 +32,9 @@ func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool,
}
func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("to exist"))
+ return format.Message(actual, "to exist")
}
func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("not to exist"))
+ return format.Message(actual, "not to exist")
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
new file mode 100644
index 00000000000..4e3897858c7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go
@@ -0,0 +1,49 @@
+package matchers
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/onsi/gomega/format"
+)
+
+type BeComparableToMatcher struct {
+ Expected interface{}
+ Options cmp.Options
+}
+
+func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) {
+ if actual == nil && matcher.Expected == nil {
+ return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
+ }
+ // Shortcut for byte slices.
+ // Comparing long byte slices with reflect.DeepEqual is very slow,
+ // so use bytes.Equal if actual and expected are both byte slices.
+ if actualByteSlice, ok := actual.([]byte); ok {
+ if expectedByteSlice, ok := matcher.Expected.([]byte); ok {
+ return bytes.Equal(actualByteSlice, expectedByteSlice), nil
+ }
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ success = false
+ if err, ok := r.(error); ok {
+ matchErr = err
+ } else if errMsg, ok := r.(string); ok {
+ matchErr = fmt.Errorf(errMsg)
+ }
+ }
+ }()
+
+ return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil
+}
+
+func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...))
+}
+
+func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be comparable to", matcher.Expected)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
index 1f9d7a8e620..9ee75a5d511 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go
@@ -18,23 +18,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err
return false, fmt.Errorf("BeElement matcher expects actual to be typed")
}
- length := len(matcher.Elements)
- valueAt := func(i int) interface{} {
- return matcher.Elements[i]
- }
- // Special handling of a single element of type Array or Slice
- if length == 1 && isArrayOrSlice(valueAt(0)) {
- element := valueAt(0)
- value := reflect.ValueOf(element)
- length = value.Len()
- valueAt = func(i int) interface{} {
- return value.Index(i).Interface()
- }
- }
-
var lastError error
- for i := 0; i < length; i++ {
- matcher := &EqualMatcher{Expected: valueAt(i)}
+ for _, m := range flatten(matcher.Elements) {
+ matcher := &EqualMatcher{Expected: m}
success, err := matcher.Match(actual)
if err != nil {
lastError = err
@@ -49,9 +35,9 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err
}
func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to be an element of", matcher.Elements)
+ return format.Message(actual, "to be an element of", presentable(matcher.Elements))
}
func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to be an element of", matcher.Elements)
+ return format.Message(actual, "not to be an element of", presentable(matcher.Elements))
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
index e326c015774..8ee2b1c51e7 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go
@@ -9,6 +9,7 @@ import (
)
type BeFalseMatcher struct {
+ Reason string
}
func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) {
@@ -20,9 +21,17 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro
}
func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to be false")
+ if matcher.Reason == "" {
+ return format.Message(actual, "to be false")
+ } else {
+ return matcher.Reason
+ }
}
func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to be false")
+ if matcher.Reason == "" {
+ return format.Message(actual, "not to be false")
+ } else {
+ return fmt.Sprintf(`Expected not false but got false\nNegation of "%s" failed`, matcher.Reason)
+ }
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
new file mode 100644
index 00000000000..449a291ef9a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go
@@ -0,0 +1,45 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type BeKeyOfMatcher struct {
+ Map interface{}
+}
+
+func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isMap(matcher.Map) {
+ return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type")
+ }
+
+ if reflect.TypeOf(actual) == nil {
+ return false, fmt.Errorf("BeKeyOf matcher expects actual to be typed")
+ }
+
+ var lastError error
+ for _, key := range reflect.ValueOf(matcher.Map).MapKeys() {
+ matcher := &EqualMatcher{Expected: key.Interface()}
+ success, err := matcher.Match(actual)
+ if err != nil {
+ lastError = err
+ continue
+ }
+ if success {
+ return true, nil
+ }
+ }
+
+ return false, lastError
+}
+
+func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map)))
+}
+
+func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map)))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
index f72591a1a8d..100735de325 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go
@@ -45,7 +45,7 @@ func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, er
return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1))
}
if len(matcher.CompareTo) == 2 && !isNumber(matcher.CompareTo[1]) {
- return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[0], 1))
+ return false, fmt.Errorf("Expected a number. Got:\n%s", format.Object(matcher.CompareTo[1], 1))
}
switch matcher.Comparator {
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
index 60bc1e3fa7e..3576aac884e 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go
@@ -9,6 +9,7 @@ import (
)
type BeTrueMatcher struct {
+ Reason string
}
func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) {
@@ -20,9 +21,17 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error
}
func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to be true")
+ if matcher.Reason == "" {
+ return format.Message(actual, "to be true")
+ } else {
+ return matcher.Reason
+ }
}
func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to be true")
+ if matcher.Reason == "" {
+ return format.Message(actual, "not to be true")
+ } else {
+ return fmt.Sprintf(`Expected not true but got true\nNegation of "%s" failed`, matcher.Reason)
+ }
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/consist_of.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/consist_of.go
index cbbf6180299..f69037a4f0e 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/consist_of.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/consist_of.go
@@ -11,7 +11,9 @@ import (
)
type ConsistOfMatcher struct {
- Elements []interface{}
+ Elements []interface{}
+ missingElements []interface{}
+ extraElements []interface{}
}
func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) {
@@ -19,44 +21,99 @@ func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err er
return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
}
- elements := matcher.Elements
- if len(matcher.Elements) == 1 && isArrayOrSlice(matcher.Elements[0]) {
- elements = []interface{}{}
- value := reflect.ValueOf(matcher.Elements[0])
- for i := 0; i < value.Len(); i++ {
- elements = append(elements, value.Index(i).Interface())
+ matchers := matchers(matcher.Elements)
+ values := valuesOf(actual)
+
+ bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
+ if err != nil {
+ return false, err
+ }
+
+ edges := bipartiteGraph.LargestMatching()
+ if len(edges) == len(values) && len(edges) == len(matchers) {
+ return true, nil
+ }
+
+ var missingMatchers []interface{}
+ matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges)
+ matcher.missingElements = equalMatchersToElements(missingMatchers)
+
+ return false, nil
+}
+
+func neighbours(value, matcher interface{}) (bool, error) {
+ match, err := matcher.(omegaMatcher).Match(value)
+ return match && err == nil, nil
+}
+
+func equalMatchersToElements(matchers []interface{}) (elements []interface{}) {
+ for _, matcher := range matchers {
+ if equalMatcher, ok := matcher.(*EqualMatcher); ok {
+ elements = append(elements, equalMatcher.Expected)
+ } else if _, ok := matcher.(*BeNilMatcher); ok {
+ elements = append(elements, nil)
+ } else {
+ elements = append(elements, matcher)
}
}
+ return
+}
+
+func flatten(elems []interface{}) []interface{} {
+ if len(elems) != 1 || !isArrayOrSlice(elems[0]) {
+ return elems
+ }
+
+ value := reflect.ValueOf(elems[0])
+ flattened := make([]interface{}, value.Len())
+ for i := 0; i < value.Len(); i++ {
+ flattened[i] = value.Index(i).Interface()
+ }
+ return flattened
+}
- matchers := []interface{}{}
- for _, element := range elements {
- matcher, isMatcher := element.(omegaMatcher)
- if !isMatcher {
- matcher = &EqualMatcher{Expected: element}
+func matchers(expectedElems []interface{}) (matchers []interface{}) {
+ for _, e := range flatten(expectedElems) {
+ if e == nil {
+ matchers = append(matchers, &BeNilMatcher{})
+ } else if matcher, isMatcher := e.(omegaMatcher); isMatcher {
+ matchers = append(matchers, matcher)
+ } else {
+ matchers = append(matchers, &EqualMatcher{Expected: e})
}
- matchers = append(matchers, matcher)
}
+ return
+}
- values := matcher.valuesOf(actual)
+func presentable(elems []interface{}) interface{} {
+ elems = flatten(elems)
- if len(values) != len(matchers) {
- return false, nil
+ if len(elems) == 0 {
+ return []interface{}{}
}
- neighbours := func(v, m interface{}) (bool, error) {
- match, err := m.(omegaMatcher).Match(v)
- return match && err == nil, nil
+ sv := reflect.ValueOf(elems)
+ firstEl := sv.Index(0)
+ if firstEl.IsNil() {
+ return elems
+ }
+ tt := firstEl.Elem().Type()
+ for i := 1; i < sv.Len(); i++ {
+ el := sv.Index(i)
+ if el.IsNil() || (sv.Index(i).Elem().Type() != tt) {
+ return elems
+ }
}
- bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(values, matchers, neighbours)
- if err != nil {
- return false, err
+ ss := reflect.MakeSlice(reflect.SliceOf(tt), sv.Len(), sv.Len())
+ for i := 0; i < sv.Len(); i++ {
+ ss.Index(i).Set(sv.Index(i).Elem())
}
- return len(bipartiteGraph.LargestMatching()) == len(values), nil
+ return ss.Interface()
}
-func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} {
+func valuesOf(actual interface{}) []interface{} {
value := reflect.ValueOf(actual)
values := []interface{}{}
if isMap(actual) {
@@ -74,9 +131,23 @@ func (matcher *ConsistOfMatcher) valuesOf(actual interface{}) []interface{} {
}
func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to consist of", matcher.Elements)
+ message = format.Message(actual, "to consist of", presentable(matcher.Elements))
+ message = appendMissingElements(message, matcher.missingElements)
+ if len(matcher.extraElements) > 0 {
+ message = fmt.Sprintf("%s\nthe extra elements were\n%s", message,
+ format.Object(presentable(matcher.extraElements), 1))
+ }
+ return
+}
+
+func appendMissingElements(message string, missingElements []interface{}) string {
+ if len(missingElements) == 0 {
+ return message
+ }
+ return fmt.Sprintf("%s\nthe missing elements were\n%s", message,
+ format.Object(presentable(missingElements), 1))
}
func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "not to consist of", matcher.Elements)
+ return format.Message(actual, "not to consist of", presentable(matcher.Elements))
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 8d6c44c7a10..3d45c9ebc69 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
@@ -3,6 +3,7 @@
package matchers
import (
+ "errors"
"fmt"
"reflect"
@@ -11,6 +12,7 @@ import (
type ContainElementMatcher struct {
Element interface{}
+ Result []interface{}
}
func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) {
@@ -18,6 +20,49 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
}
+ var actualT reflect.Type
+ var result reflect.Value
+ switch l := len(matcher.Result); {
+ case l > 1:
+ return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at")
+ case l == 1:
+ if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr {
+ return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s",
+ format.Object(matcher.Result[0], 1))
+ }
+ actualT = reflect.TypeOf(actual)
+ resultReference := matcher.Result[0]
+ result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings
+ switch result.Kind() {
+ case reflect.Array:
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.SliceOf(actualT.Elem()).String(), result.Type().String())
+ case reflect.Slice:
+ if !isArrayOrSlice(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String())
+ }
+ if !actualT.Elem().AssignableTo(result.Type().Elem()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ case reflect.Map:
+ if !isMap(actual) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ if !actualT.AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.String(), result.Type().String())
+ }
+ default:
+ if !actualT.Elem().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s",
+ actualT.Elem().String(), result.Type().String())
+ }
+ }
+ }
+
elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
if !elementIsMatcher {
elemMatcher = &EqualMatcher{Expected: matcher.Element}
@@ -25,30 +70,99 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e
value := reflect.ValueOf(actual)
var valueAt func(int) interface{}
+
+ var getFindings func() reflect.Value
+ var foundAt func(int)
+
if isMap(actual) {
keys := value.MapKeys()
valueAt = func(i int) interface{} {
return value.MapIndex(keys[i]).Interface()
}
+ if result.Kind() != reflect.Invalid {
+ fm := reflect.MakeMap(actualT)
+ getFindings = func() reflect.Value {
+ return fm
+ }
+ foundAt = func(i int) {
+ fm.SetMapIndex(keys[i], value.MapIndex(keys[i]))
+ }
+ }
} else {
valueAt = func(i int) interface{} {
return value.Index(i).Interface()
}
+ if result.Kind() != reflect.Invalid {
+ var f reflect.Value
+ if result.Kind() == reflect.Slice {
+ f = reflect.MakeSlice(result.Type(), 0, 0)
+ } else {
+ f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0)
+ }
+ getFindings = func() reflect.Value {
+ return f
+ }
+ foundAt = func(i int) {
+ f = reflect.Append(f, value.Index(i))
+ }
+ }
}
var lastError error
for i := 0; i < value.Len(); i++ {
- success, err := elemMatcher.Match(valueAt(i))
+ elem := valueAt(i)
+ success, err := elemMatcher.Match(elem)
if err != nil {
lastError = err
continue
}
if success {
- return true, nil
+ if result.Kind() == reflect.Invalid {
+ return true, nil
+ }
+ foundAt(i)
}
}
- return false, lastError
+ // when the expectation isn't interested in the findings except for success
+ // or non-success, then we're done here and return the last matcher error
+ // seen, if any, as well as non-success.
+ if result.Kind() == reflect.Invalid {
+ return false, lastError
+ }
+
+ // pick up any findings the test is interested in as it specified a non-nil
+ // result reference. However, the expectation is that there is at least
+ // one finding. So, if a result is expected, but we had no findings, then
+ // this is an error.
+ findings := getFindings()
+ if findings.Len() == 0 {
+ return false, lastError
+ }
+
+ // there's just a single finding and the result is neither a slice nor a map
+ // (so it's a scalar): pick the one and only finding and return it in the
+ // place the reference points to.
+ if findings.Len() == 1 && !isArrayOrSlice(result.Interface()) && !isMap(result.Interface()) {
+ if isMap(actual) {
+ miter := findings.MapRange()
+ miter.Next()
+ result.Set(miter.Value())
+ } else {
+ result.Set(findings.Index(0))
+ }
+ return true, nil
+ }
+
+ // at least one or even multiple findings and the result references a
+ // slice or a map, so all we need to do is to store our findings where the
+ // reference points to.
+ if !findings.Type().AssignableTo(result.Type()) {
+ return false, fmt.Errorf("ContainElement cannot return multiple findings. Need *%s, got *%s",
+ findings.Type().String(), result.Type().String())
+ }
+ result.Set(findings)
+ return true, nil
}
func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) {
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
new file mode 100644
index 00000000000..946cd8bea52
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go
@@ -0,0 +1,44 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/matchers/support/goraph/bipartitegraph"
+)
+
+type ContainElementsMatcher struct {
+ Elements []interface{}
+ missingElements []interface{}
+}
+
+func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("ContainElements matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1))
+ }
+
+ matchers := matchers(matcher.Elements)
+ bipartiteGraph, err := bipartitegraph.NewBipartiteGraph(valuesOf(actual), matchers, neighbours)
+ if err != nil {
+ return false, err
+ }
+
+ edges := bipartiteGraph.LargestMatching()
+ if len(edges) == len(matchers) {
+ return true, nil
+ }
+
+ _, missingMatchers := bipartiteGraph.FreeLeftRight(edges)
+ matcher.missingElements = equalMatchersToElements(missingMatchers)
+
+ return false, nil
+}
+
+func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) {
+ message = format.Message(actual, "to contain elements", presentable(matcher.Elements))
+ return appendMissingElements(message, matcher.missingElements)
+}
+
+func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
new file mode 100644
index 00000000000..025b6e1ac2c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -0,0 +1,65 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveEachMatcher struct {
+ Element interface{}
+}
+
+func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+ if !isArrayOrSlice(actual) && !isMap(actual) {
+ return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher)
+ if !elementIsMatcher {
+ elemMatcher = &EqualMatcher{Expected: matcher.Element}
+ }
+
+ value := reflect.ValueOf(actual)
+ if value.Len() == 0 {
+ return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s",
+ format.Object(actual, 1))
+ }
+
+ var valueAt func(int) interface{}
+ if isMap(actual) {
+ keys := value.MapKeys()
+ valueAt = func(i int) interface{} {
+ return value.MapIndex(keys[i]).Interface()
+ }
+ } else {
+ valueAt = func(i int) interface{} {
+ return value.Index(i).Interface()
+ }
+ }
+
+ // check every element against the matcher; empty collections were already rejected above.
+ for i := 0; i < value.Len(); i++ {
+ success, err := elemMatcher.Match(valueAt(i))
+ if err != nil {
+ return false, err
+ }
+ if !success {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// FailureMessage returns a suitable failure message.
+func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to contain element matching", matcher.Element)
+}
+
+// NegatedFailureMessage returns a suitable negated failure message.
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain element matching", matcher.Element)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
new file mode 100644
index 00000000000..dca5b944672
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -0,0 +1,88 @@
+package matchers
+
+import (
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type mismatchFailure struct {
+ failure string
+ index int
+}
+
+type HaveExactElementsMatcher struct {
+ Elements []interface{}
+ mismatchFailures []mismatchFailure
+ missingIndex int
+ extraIndex int
+}
+
+func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+ matcher.resetState()
+
+ if isMap(actual) {
+ return false, fmt.Errorf("error")
+ }
+
+ matchers := matchers(matcher.Elements)
+ values := valuesOf(actual)
+
+ lenMatchers := len(matchers)
+ lenValues := len(values)
+
+ for i := 0; i < lenMatchers || i < lenValues; i++ {
+ if i >= lenMatchers {
+ matcher.extraIndex = i
+ continue
+ }
+
+ if i >= lenValues {
+ matcher.missingIndex = i
+ return
+ }
+
+ elemMatcher := matchers[i].(omegaMatcher)
+ match, err := elemMatcher.Match(values[i])
+ if err != nil {
+ matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+ index: i,
+ failure: err.Error(),
+ })
+ } else if !match {
+ matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{
+ index: i,
+ failure: elemMatcher.FailureMessage(values[i]),
+ })
+ }
+ }
+
+ return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil
+}
+
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+ message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
+ if matcher.missingIndex > 0 {
+ message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
+ }
+ if matcher.extraIndex > 0 {
+ message = fmt.Sprintf("%s\nthe extra elements start from index %d", message, matcher.extraIndex)
+ }
+ if len(matcher.mismatchFailures) != 0 {
+ message = fmt.Sprintf("%s\nthe mismatch indexes were:", message)
+ }
+ for _, mismatch := range matcher.mismatchFailures {
+ message = fmt.Sprintf("%s\n%d: %s", message, mismatch.index, mismatch.failure)
+ }
+ return
+}
+
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
+}
+
+func (matcher *HaveExactElementsMatcher) resetState() {
+ matcher.mismatchFailures = nil
+ matcher.missingIndex = 0
+ matcher.extraIndex = 0
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
new file mode 100644
index 00000000000..b57018745fa
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -0,0 +1,36 @@
+package matchers
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/onsi/gomega/format"
+)
+
+type HaveExistingFieldMatcher struct {
+ Field string
+}
+
+func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+ // we don't care about the field's actual value, just about any error in
+ // trying to find the field (or method).
+ _, err = extractField(actual, matcher.Field, "HaveExistingField")
+ if err == nil {
+ return true, nil
+ }
+ var mferr missingFieldError
+ if errors.As(err, &mferr) {
+ // missing field errors aren't errors in this context, but instead
+ // unsuccessful matches.
+ return false, nil
+ }
+ return false, err
+}
+
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
+}
+
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_field.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_field.go
new file mode 100644
index 00000000000..6989f78c4b4
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -0,0 +1,99 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+)
+
+// missingFieldError represents a missing field extraction error that
+// HaveExistingFieldMatcher can ignore, as opposed to other, severe field
+// extraction errors, such as nil pointers, et cetera.
+type missingFieldError string
+
+func (e missingFieldError) Error() string {
+ return string(e)
+}
+
+func extractField(actual interface{}, field string, matchername string) (interface{}, error) {
+ fields := strings.SplitN(field, ".", 2)
+ actualValue := reflect.ValueOf(actual)
+
+ if actualValue.Kind() == reflect.Ptr {
+ actualValue = actualValue.Elem()
+ }
+ if actualValue == (reflect.Value{}) {
+ return nil, fmt.Errorf("%s encountered nil while dereferencing a pointer of type %T.", matchername, actual)
+ }
+
+ if actualValue.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("%s encountered:\n%s\nWhich is not a struct.", matchername, format.Object(actual, 1))
+ }
+
+ var extractedValue reflect.Value
+
+ if strings.HasSuffix(fields[0], "()") {
+ extractedValue = actualValue.MethodByName(strings.TrimSuffix(fields[0], "()"))
+ if extractedValue == (reflect.Value{}) && actualValue.CanAddr() {
+ extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()"))
+ }
+ if extractedValue == (reflect.Value{}) {
+ return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual))
+ }
+ t := extractedValue.Type()
+ if t.NumIn() != 0 || t.NumOut() != 1 {
+ return nil, fmt.Errorf("%s found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", matchername, fields[0], actual)
+ }
+ extractedValue = extractedValue.Call([]reflect.Value{})[0]
+ } else {
+ extractedValue = actualValue.FieldByName(fields[0])
+ if extractedValue == (reflect.Value{}) {
+ return nil, missingFieldError(fmt.Sprintf("%s could not find field named '%s' in struct:\n%s", matchername, fields[0], format.Object(actual, 1)))
+ }
+ }
+
+ if len(fields) == 1 {
+ return extractedValue.Interface(), nil
+ } else {
+ return extractField(extractedValue.Interface(), fields[1], matchername)
+ }
+}
+
+type HaveFieldMatcher struct {
+ Field string
+ Expected interface{}
+
+ extractedField interface{}
+ expectedMatcher omegaMatcher
+}
+
+func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
+ matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField")
+ if err != nil {
+ return false, err
+ }
+
+ var isMatcher bool
+ matcher.expectedMatcher, isMatcher = matcher.Expected.(omegaMatcher)
+ if !isMatcher {
+ matcher.expectedMatcher = &EqualMatcher{Expected: matcher.Expected}
+ }
+
+ return matcher.expectedMatcher.Match(matcher.extractedField)
+}
+
+func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' failed to satisfy matcher.\n", matcher.Field)
+ message += matcher.expectedMatcher.FailureMessage(matcher.extractedField)
+
+ return message
+}
+
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ message = fmt.Sprintf("Value for field '%s' satisfied matcher, but should not have.\n", matcher.Field)
+ message += matcher.expectedMatcher.NegatedFailureMessage(matcher.extractedField)
+
+ return message
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
new file mode 100644
index 00000000000..d14d9e5fc60
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -0,0 +1,104 @@
+package matchers
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
+ "github.com/onsi/gomega/types"
+)
+
+type HaveHTTPBodyMatcher struct {
+ Expected interface{}
+ cachedResponse interface{}
+ cachedBody []byte
+}
+
+func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return false, err
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).Match(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).Match(body)
+ case types.GomegaMatcher:
+ return e.Match(body)
+ default:
+ return false, fmt.Errorf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return fmt.Sprintf("failed to read body: %s", err)
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).FailureMessage(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).FailureMessage(body)
+ case types.GomegaMatcher:
+ return e.FailureMessage(body)
+ default:
+ return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ body, err := matcher.body(actual)
+ if err != nil {
+ return fmt.Sprintf("failed to read body: %s", err)
+ }
+
+ switch e := matcher.Expected.(type) {
+ case string:
+ return (&EqualMatcher{Expected: e}).NegatedFailureMessage(string(body))
+ case []byte:
+ return (&EqualMatcher{Expected: e}).NegatedFailureMessage(body)
+ case types.GomegaMatcher:
+ return e.NegatedFailureMessage(body)
+ default:
+ return fmt.Sprintf("HaveHTTPBody matcher expects string, []byte, or GomegaMatcher. Got:\n%s", format.Object(matcher.Expected, 1))
+ }
+}
+
+// body returns the body. It is cached because once we read it in Match()
+// the Reader is closed and it is not readable again in FailureMessage()
+// or NegatedFailureMessage()
+func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
+ if matcher.cachedResponse == actual && matcher.cachedBody != nil {
+ return matcher.cachedBody, nil
+ }
+
+ body := func(a *http.Response) ([]byte, error) {
+ if a.Body != nil {
+ defer a.Body.Close()
+ var err error
+ matcher.cachedBody, err = gutil.ReadAll(a.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error reading response body: %w", err)
+ }
+ }
+ return matcher.cachedBody, nil
+ }
+
+ switch a := actual.(type) {
+ case *http.Response:
+ matcher.cachedResponse = a
+ return body(a)
+ case *httptest.ResponseRecorder:
+ matcher.cachedResponse = a
+ return body(a.Result())
+ default:
+ return nil, fmt.Errorf("HaveHTTPBody matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
new file mode 100644
index 00000000000..c256f452e84
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
@@ -0,0 +1,81 @@
+package matchers
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+type HaveHTTPHeaderWithValueMatcher struct {
+ Header string
+ Value interface{}
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ return false, err
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ return false, err
+ }
+
+ return headerMatcher.Match(headerValue)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ diff := format.IndentString(headerMatcher.FailureMessage(headerValue), 1)
+ return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ headerValue, err := matcher.extractHeader(actual)
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ headerMatcher, err := matcher.getSubMatcher()
+ if err != nil {
+ panic(err) // protected by Match()
+ }
+
+ diff := format.IndentString(headerMatcher.NegatedFailureMessage(headerValue), 1)
+ return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatcher, error) {
+ switch m := matcher.Value.(type) {
+ case string:
+ return &EqualMatcher{Expected: matcher.Value}, nil
+ case types.GomegaMatcher:
+ return m, nil
+ default:
+ return nil, fmt.Errorf("HaveHTTPHeaderWithValue matcher must be passed a string or a GomegaMatcher. Got:\n%s", format.Object(matcher.Value, 1))
+ }
+}
+
+func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) {
+ switch r := actual.(type) {
+ case *http.Response:
+ return r.Header.Get(matcher.Header), nil
+ case *httptest.ResponseRecorder:
+ return r.Result().Header.Get(matcher.Header), nil
+ default:
+ return "", fmt.Errorf("HaveHTTPHeaderWithValue matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
new file mode 100644
index 00000000000..0f66e46ece4
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -0,0 +1,96 @@
+package matchers
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "strings"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/internal/gutil"
+)
+
+type HaveHTTPStatusMatcher struct {
+ Expected []interface{}
+}
+
+func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) {
+ var resp *http.Response
+ switch a := actual.(type) {
+ case *http.Response:
+ resp = a
+ case *httptest.ResponseRecorder:
+ resp = a.Result()
+ default:
+ return false, fmt.Errorf("HaveHTTPStatus matcher expects *http.Response or *httptest.ResponseRecorder. Got:\n%s", format.Object(actual, 1))
+ }
+
+ if len(matcher.Expected) == 0 {
+ return false, fmt.Errorf("HaveHTTPStatus matcher must be passed an int or a string. Got nothing")
+ }
+
+ for _, expected := range matcher.Expected {
+ switch e := expected.(type) {
+ case int:
+ if resp.StatusCode == e {
+ return true, nil
+ }
+ case string:
+ if resp.Status == e {
+ return true, nil
+ }
+ default:
+ return false, fmt.Errorf("HaveHTTPStatus matcher must be passed int or string types. Got:\n%s", format.Object(expected, 1))
+ }
+ }
+
+ return false, nil
+}
+
+func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString())
+}
+
+func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString())
+}
+
+func (matcher *HaveHTTPStatusMatcher) expectedString() string {
+ var lines []string
+ for _, expected := range matcher.Expected {
+ lines = append(lines, format.Object(expected, 1))
+ }
+ return strings.Join(lines, "\n")
+}
+
+func formatHttpResponse(input interface{}) string {
+ var resp *http.Response
+ switch r := input.(type) {
+ case *http.Response:
+ resp = r
+ case *httptest.ResponseRecorder:
+ resp = r.Result()
+ default:
+ return "cannot format invalid HTTP response"
+ }
+
+ body := ""
+ if resp.Body != nil {
+ defer resp.Body.Close()
+ data, err := gutil.ReadAll(resp.Body)
+ if err != nil {
+ data = []byte("")
+ }
+ body = format.Object(string(data), 0)
+ }
+
+ var s strings.Builder
+ s.WriteString(fmt.Sprintf("%s<%s>: {\n", format.Indent, reflect.TypeOf(input)))
+ s.WriteString(fmt.Sprintf("%s%sStatus: %s\n", format.Indent, format.Indent, format.Object(resp.Status, 0)))
+ s.WriteString(fmt.Sprintf("%s%sStatusCode: %s\n", format.Indent, format.Indent, format.Object(resp.StatusCode, 0)))
+ s.WriteString(fmt.Sprintf("%s%sBody: %s\n", format.Indent, format.Indent, body))
+ s.WriteString(fmt.Sprintf("%s}", format.Indent))
+
+ return s.String()
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index 5bcfdd2ade9..22a1b67306f 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -31,5 +31,5 @@ func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message
}
func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred")
+ return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_value.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_value.go
new file mode 100644
index 00000000000..f6725283570
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -0,0 +1,54 @@
+package matchers
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
+)
+
+const maxIndirections = 31
+
+type HaveValueMatcher struct {
+ Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
+ resolvedActual interface{} // the ("resolved") value.
+}
+
+func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+ val := reflect.ValueOf(actual)
+ for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
+ // return an error if value isn't valid. Please note that we cannot
+ // check for nil here, as we might not deal with a pointer or interface
+ // at this point.
+ if !val.IsValid() {
+ return false, errors.New(format.Message(
+ actual, "not to be "))
+ }
+ switch val.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ // resolve pointers and interfaces to their values, then rinse and
+ // repeat.
+ if val.IsNil() {
+ return false, errors.New(format.Message(
+ actual, "not to be "))
+ }
+ val = val.Elem()
+ continue
+ default:
+ // forward the final value to the specified matcher.
+ m.resolvedActual = val.Interface()
+ return m.Matcher.Match(m.resolvedActual)
+ }
+ }
+ // too many indirections: extreme star gazing, indeed...?
+ return false, errors.New(format.Message(actual, "too many indirections"))
+}
+
+func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+ return m.Matcher.FailureMessage(m.resolvedActual)
+}
+
+func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+ return m.Matcher.NegatedFailureMessage(m.resolvedActual)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index 4e09239fffe..c539dd389c8 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -1,18 +1,22 @@
package matchers
import (
+ "errors"
"fmt"
"reflect"
"github.com/onsi/gomega/format"
- "golang.org/x/xerrors"
)
type MatchErrorMatcher struct {
- Expected interface{}
+ Expected any
+ FuncErrDescription []any
+ isFunc bool
}
-func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) {
+ matcher.isFunc = false
+
if isNil(actual) {
return false, fmt.Errorf("Expected an error, got nil")
}
@@ -25,13 +29,34 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e
expected := matcher.Expected
if isError(expected) {
- return reflect.DeepEqual(actualErr, expected) || xerrors.Is(actualErr, expected.(error)), nil
+ // first try the built-in errors.Is
+ if errors.Is(actualErr, expected.(error)) {
+ return true, nil
+ }
+ // if not, try DeepEqual along the error chain
+ for unwrapped := actualErr; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) {
+ if reflect.DeepEqual(unwrapped, expected) {
+ return true, nil
+ }
+ }
+ return false, nil
}
if isString(expected) {
return actualErr.Error() == expected, nil
}
+ v := reflect.ValueOf(expected)
+ t := v.Type()
+ errorInterface := reflect.TypeOf((*error)(nil)).Elem()
+ if t.Kind() == reflect.Func && t.NumIn() == 1 && t.In(0).Implements(errorInterface) && t.NumOut() == 1 && t.Out(0).Kind() == reflect.Bool {
+ if len(matcher.FuncErrDescription) == 0 {
+ return false, fmt.Errorf("MatchError requires an additional description when passed a function")
+ }
+ matcher.isFunc = true
+ return v.Call([]reflect.Value{reflect.ValueOf(actualErr)})[0].Bool(), nil
+ }
+
var subMatcher omegaMatcher
var hasSubMatcher bool
if expected != nil {
@@ -47,9 +72,15 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e
}
func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+ if matcher.isFunc {
+ return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0]))
+ }
return format.Message(actual, "to match error", matcher.Expected)
}
func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ if matcher.isFunc {
+ return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0]))
+ }
return format.Message(actual, "not to match error", matcher.Expected)
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
index 0c83c2b638a..2cb6b47db95 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -5,7 +5,7 @@ import (
"strings"
"github.com/onsi/gomega/format"
- "gopkg.in/yaml.v2"
+ "gopkg.in/yaml.v3"
)
type MatchYAMLMatcher struct {
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/not.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/not.go
index 2c91670bd9b..78b71910d12 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/not.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/not.go
@@ -1,7 +1,6 @@
package matchers
import (
- "github.com/onsi/gomega/internal/oraclematcher"
"github.com/onsi/gomega/types"
)
@@ -26,5 +25,5 @@ func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string)
}
func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
- return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
+ return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/or.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/or.go
index 3bf7998001d..841ae26ab07 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/or.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/or.go
@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/onsi/gomega/format"
- "github.com/onsi/gomega/internal/oraclematcher"
"github.com/onsi/gomega/types"
)
@@ -54,11 +53,11 @@ func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
if m.firstSuccessfulMatcher != nil {
// one of the matchers succeeded.. it must be able to change in order to affect the result
- return oraclematcher.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual)
+ return types.MatchMayChangeInTheFuture(m.firstSuccessfulMatcher, actual)
} else {
// so all matchers failed.. Any one of them changing would change the result.
for _, matcher := range m.Matchers {
- if oraclematcher.MatchMayChangeInTheFuture(matcher, actual) {
+ if types.MatchMayChangeInTheFuture(matcher, actual) {
return true
}
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
index 640f4db1a35..adc8cee6306 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
@@ -8,7 +8,8 @@ import (
)
type PanicMatcher struct {
- object interface{}
+ Expected interface{}
+ object interface{}
}
func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
@@ -28,7 +29,21 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error)
defer func() {
if e := recover(); e != nil {
matcher.object = e
- success = true
+
+ if matcher.Expected == nil {
+ success = true
+ return
+ }
+
+ valueMatcher, valueIsMatcher := matcher.Expected.(omegaMatcher)
+ if !valueIsMatcher {
+ valueMatcher = &EqualMatcher{Expected: matcher.Expected}
+ }
+
+ success, err = valueMatcher.Match(e)
+ if err != nil {
+ err = fmt.Errorf("PanicMatcher's value matcher failed with:\n%s%s", format.Indent, err.Error())
+ }
}
}()
@@ -38,9 +53,62 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error)
}
func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
- return format.Message(actual, "to panic")
+ if matcher.Expected == nil {
+ // We wanted any panic to occur, but none did.
+ return format.Message(actual, "to panic")
+ }
+
+ if matcher.object == nil {
+ // We wanted a panic with a specific value to occur, but none did.
+ switch matcher.Expected.(type) {
+ case omegaMatcher:
+ return format.Message(actual, "to panic with a value matching", matcher.Expected)
+ default:
+ return format.Message(actual, "to panic with", matcher.Expected)
+ }
+ }
+
+ // We got a panic, but the value isn't what we expected.
+ switch matcher.Expected.(type) {
+ case omegaMatcher:
+ return format.Message(
+ actual,
+ fmt.Sprintf(
+ "to panic with a value matching\n%s\nbut panicked with\n%s",
+ format.Object(matcher.Expected, 1),
+ format.Object(matcher.object, 1),
+ ),
+ )
+ default:
+ return format.Message(
+ actual,
+ fmt.Sprintf(
+ "to panic with\n%s\nbut panicked with\n%s",
+ format.Object(matcher.Expected, 1),
+ format.Object(matcher.object, 1),
+ ),
+ )
+ }
}
func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1)))
+ if matcher.Expected == nil {
+ // We didn't want any panic to occur, but one did.
+ return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1)))
+ }
+
+ // We wanted a to ensure a panic with a specific value did not occur, but it did.
+ switch matcher.Expected.(type) {
+ case omegaMatcher:
+ return format.Message(
+ actual,
+ fmt.Sprintf(
+ "not to panic with a value matching\n%s\nbut panicked with\n%s",
+ format.Object(matcher.Expected, 1),
+ format.Object(matcher.object, 1),
+ ),
+ )
+ default:
+ return format.Message(actual, "not to panic with", matcher.Expected)
+ }
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
index 1936a2ba52f..948164eaf88 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -3,6 +3,7 @@
package matchers
import (
+ "errors"
"fmt"
"reflect"
@@ -10,7 +11,7 @@ import (
)
type ReceiveMatcher struct {
- Arg interface{}
+ Args []interface{}
receivedValue reflect.Value
channelClosed bool
}
@@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
var subMatcher omegaMatcher
var hasSubMatcher bool
-
- if matcher.Arg != nil {
- subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher)
+ var resultReference interface{}
+
+ // Valid arg formats are as follows, always with optional POINTER before
+ // optional MATCHER:
+ // - Receive()
+ // - Receive(POINTER)
+ // - Receive(MATCHER)
+ // - Receive(POINTER, MATCHER)
+ args := matcher.Args
+ if len(args) > 0 {
+ arg := args[0]
+ _, isSubMatcher := arg.(omegaMatcher)
+ if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr {
+ // Consume optional POINTER arg first, if it ain't no matcher ;)
+ resultReference = arg
+ args = args[1:]
+ }
+ }
+ if len(args) > 0 {
+ arg := args[0]
+ subMatcher, hasSubMatcher = arg.(omegaMatcher)
if !hasSubMatcher {
- argType := reflect.TypeOf(matcher.Arg)
- if argType.Kind() != reflect.Ptr {
- return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1))
- }
+ // At this point we assume the dev user wanted to assign a received
+ // value, so [POINTER,]MATCHER.
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1))
}
+ // Consume optional MATCHER arg.
+ args = args[1:]
+ }
+ if len(args) > 0 {
+ // If there are still args present, reject all.
+ return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher")
}
winnerIndex, value, open := reflect.Select([]reflect.SelectCase{
@@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
}
if hasSubMatcher {
- if didReceive {
- matcher.receivedValue = value
- return subMatcher.Match(matcher.receivedValue.Interface())
+ if !didReceive {
+ return false, nil
}
- return false, nil
+ matcher.receivedValue = value
+ if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match {
+ return match, err
+ }
+ // if we received a match, then fall through in order to handle an
+ // optional assignment of the received value to the specified reference.
}
if didReceive {
- if matcher.Arg != nil {
- outValue := reflect.ValueOf(matcher.Arg)
+ if resultReference != nil {
+ outValue := reflect.ValueOf(resultReference)
if value.Type().AssignableTo(outValue.Elem().Type()) {
outValue.Elem().Set(value)
@@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
outValue.Elem().Set(value.Elem())
return true, nil
} else {
- return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1))
+ return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1))
}
}
@@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
}
func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
- subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+ var matcherArg interface{}
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
closedAddendum := ""
if matcher.channelClosed {
@@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin
}
func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+ var matcherArg interface{}
+ if len(matcher.Args) > 0 {
+ matcherArg = matcher.Args[len(matcher.Args)-1]
+ }
+ subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
closedAddendum := ""
if matcher.channelClosed {
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
new file mode 100644
index 00000000000..ec68fe8b62c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
@@ -0,0 +1,66 @@
+package matchers
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/onsi/gomega/format"
+)
+
+type SatisfyMatcher struct {
+ Predicate interface{}
+
+ // cached type
+ predicateArgType reflect.Type
+}
+
+func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
+ if predicate == nil {
+ panic("predicate cannot be nil")
+ }
+ predicateType := reflect.TypeOf(predicate)
+ if predicateType.Kind() != reflect.Func {
+ panic("predicate must be a function")
+ }
+ if predicateType.NumIn() != 1 {
+ panic("predicate must have 1 argument")
+ }
+ if predicateType.NumOut() != 1 || predicateType.Out(0).Kind() != reflect.Bool {
+ panic("predicate must return bool")
+ }
+
+ return &SatisfyMatcher{
+ Predicate: predicate,
+ predicateArgType: predicateType.In(0),
+ }
+}
+
+func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
+ // prepare a parameter to pass to the predicate
+ var param reflect.Value
+ if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) {
+ // The dynamic type of actual is compatible with the predicate argument.
+ param = reflect.ValueOf(actual)
+
+ } else if actual == nil && m.predicateArgType.Kind() == reflect.Interface {
+ // The dynamic type of actual is unknown, so there's no way to make its
+ // reflect.Value. Create a nil of the predicate argument, which is known.
+ param = reflect.Zero(m.predicateArgType)
+
+ } else {
+ return false, fmt.Errorf("predicate expects '%s' but we have '%T'", m.predicateArgType, actual)
+ }
+
+ // call the predicate with `actual`
+ fn := reflect.ValueOf(m.Predicate)
+ result := fn.Call([]reflect.Value{param})
+ return result[0].Bool(), nil
+}
+
+func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to satisfy predicate", m.Predicate)
+}
+
+func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+ return format.Message(actual, "to not satisfy predicate", m.Predicate)
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
index 721ed5529bc..327350f7b71 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -1,11 +1,16 @@
package matchers
import (
+ "errors"
"fmt"
"github.com/onsi/gomega/format"
)
+type formattedGomegaError interface {
+ FormattedGomegaError() string
+}
+
type SucceedMatcher struct {
}
@@ -25,7 +30,11 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
}
func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
- return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1))
+ var fgErr formattedGomegaError
+ if errors.As(actual.(error), &fgErr) {
+ return fgErr.FormattedGomegaError()
+ }
+ return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
}
func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
index 108f285866b..830e308274e 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -13,13 +13,13 @@ type BipartiteGraph struct {
func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
left := NodeOrderedSet{}
- for i := range leftValues {
- left = append(left, Node{Id: i})
+ for i, v := range leftValues {
+ left = append(left, Node{ID: i, Value: v})
}
right := NodeOrderedSet{}
- for j := range rightValues {
- right = append(right, Node{Id: j + len(left)})
+ for j, v := range rightValues {
+ right = append(right, Node{ID: j + len(left), Value: v})
}
edges := EdgeSet{}
@@ -31,10 +31,26 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
}
if neighbours {
- edges = append(edges, Edge{Node1: left[i], Node2: right[j]})
+ edges = append(edges, Edge{Node1: left[i].ID, Node2: right[j].ID})
}
}
}
return &BipartiteGraph{left, right, edges}, nil
}
+
+// FreeLeftRight returns left node values and right node values
+// of the BipartiteGraph's nodes which are not part of the given edges.
+func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) {
+ for _, node := range bg.Left {
+ if edges.Free(node) {
+ leftValues = append(leftValues, node.Value)
+ }
+ }
+ for _, node := range bg.Right {
+ if edges.Free(node) {
+ rightValues = append(rightValues, node.Value)
+ }
+ }
+ return
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
index 8181f43a40d..1c54edd8f17 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go
@@ -1,9 +1,14 @@
package bipartitegraph
-import . "github.com/onsi/gomega/matchers/support/goraph/node"
-import . "github.com/onsi/gomega/matchers/support/goraph/edge"
-import "github.com/onsi/gomega/matchers/support/goraph/util"
-
+import (
+ . "github.com/onsi/gomega/matchers/support/goraph/edge"
+ . "github.com/onsi/gomega/matchers/support/goraph/node"
+ "github.com/onsi/gomega/matchers/support/goraph/util"
+)
+
+// LargestMatching implements the Hopcroft–Karp algorithm taking as input a bipartite graph
+// and outputting a maximum cardinality matching, i.e. a set of as many edges as possible
+// with the property that no two edges share an endpoint.
func (bg *BipartiteGraph) LargestMatching() (matching EdgeSet) {
paths := bg.maximalDisjointSLAPCollection(matching)
@@ -23,7 +28,7 @@ func (bg *BipartiteGraph) maximalDisjointSLAPCollection(matching EdgeSet) (resul
return
}
- used := make(map[Node]bool)
+ used := make(map[int]bool)
for _, u := range guideLayers[len(guideLayers)-1] {
slap, found := bg.findDisjointSLAP(u, matching, guideLayers, used)
@@ -43,7 +48,7 @@ func (bg *BipartiteGraph) findDisjointSLAP(
start Node,
matching EdgeSet,
guideLayers []NodeOrderedSet,
- used map[Node]bool,
+ used map[int]bool,
) ([]Edge, bool) {
return bg.findDisjointSLAPHelper(start, EdgeSet{}, len(guideLayers)-1, matching, guideLayers, used)
}
@@ -54,16 +59,16 @@ func (bg *BipartiteGraph) findDisjointSLAPHelper(
currentLevel int,
matching EdgeSet,
guideLayers []NodeOrderedSet,
- used map[Node]bool,
+ used map[int]bool,
) (EdgeSet, bool) {
- used[currentNode] = true
+ used[currentNode.ID] = true
if currentLevel == 0 {
return currentSLAP, true
}
for _, nextNode := range guideLayers[currentLevel-1] {
- if used[nextNode] {
+ if used[nextNode.ID] {
continue
}
@@ -84,17 +89,17 @@ func (bg *BipartiteGraph) findDisjointSLAPHelper(
currentSLAP = currentSLAP[:len(currentSLAP)-1]
}
- used[currentNode] = false
+ used[currentNode.ID] = false
return nil, false
}
func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers []NodeOrderedSet) {
- used := make(map[Node]bool)
+ used := make(map[int]bool)
currentLayer := NodeOrderedSet{}
for _, node := range bg.Left {
if matching.Free(node) {
- used[node] = true
+ used[node.ID] = true
currentLayer = append(currentLayer, node)
}
}
@@ -113,7 +118,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [
if util.Odd(len(guideLayers)) {
for _, leftNode := range lastLayer {
for _, rightNode := range bg.Right {
- if used[rightNode] {
+ if used[rightNode.ID] {
continue
}
@@ -123,7 +128,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [
}
currentLayer = append(currentLayer, rightNode)
- used[rightNode] = true
+ used[rightNode.ID] = true
if matching.Free(rightNode) {
done = true
@@ -133,7 +138,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [
} else {
for _, rightNode := range lastLayer {
for _, leftNode := range bg.Left {
- if used[leftNode] {
+ if used[leftNode.ID] {
continue
}
@@ -143,7 +148,7 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [
}
currentLayer = append(currentLayer, leftNode)
- used[leftNode] = true
+ used[leftNode.ID] = true
}
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
index 4fd15cc0694..8c38411b283 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/edge/edge.go
@@ -3,15 +3,15 @@ package edge
import . "github.com/onsi/gomega/matchers/support/goraph/node"
type Edge struct {
- Node1 Node
- Node2 Node
+ Node1 int
+ Node2 int
}
type EdgeSet []Edge
func (ec EdgeSet) Free(node Node) bool {
for _, e := range ec {
- if e.Node1 == node || e.Node2 == node {
+ if e.Node1 == node.ID || e.Node2 == node.ID {
return false
}
}
@@ -31,7 +31,7 @@ func (ec EdgeSet) Contains(edge Edge) bool {
func (ec EdgeSet) FindByNodes(node1, node2 Node) (Edge, bool) {
for _, e := range ec {
- if (e.Node1 == node1 && e.Node2 == node2) || (e.Node1 == node2 && e.Node2 == node1) {
+ if (e.Node1 == node1.ID && e.Node2 == node2.ID) || (e.Node1 == node2.ID && e.Node2 == node1.ID) {
return e, true
}
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
index 800c2ea8caf..cd597a2f220 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
@@ -1,7 +1,8 @@
package node
type Node struct {
- Id int
+ ID int
+ Value interface{}
}
type NodeOrderedSet []Node
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/with_transform.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 8e58d8a0fb7..6f743b1b32d 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -4,13 +4,12 @@ import (
"fmt"
"reflect"
- "github.com/onsi/gomega/internal/oraclematcher"
"github.com/onsi/gomega/types"
)
type WithTransformMatcher struct {
// input
- Transform interface{} // must be a function of one parameter that returns one value
+ Transform interface{} // must be a function of one parameter that returns one value and an optional error
Matcher types.GomegaMatcher
// cached value
@@ -20,6 +19,9 @@ type WithTransformMatcher struct {
transformedValue interface{}
}
+// reflect.Type for error
+var errorT = reflect.TypeOf((*error)(nil)).Elem()
+
func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
if transform == nil {
panic("transform function cannot be nil")
@@ -28,8 +30,10 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
if txType.NumIn() != 1 {
panic("transform function must have 1 argument")
}
- if txType.NumOut() != 1 {
- panic("transform function must have 1 return value")
+ if numout := txType.NumOut(); numout != 1 {
+ if numout != 2 || !txType.Out(1).AssignableTo(errorT) {
+ panic("transform function must either have 1 return value, or 1 return value plus 1 error value")
+ }
}
return &WithTransformMatcher{
@@ -40,15 +44,29 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
}
func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
- // return error if actual's type is incompatible with Transform function's argument type
- actualType := reflect.TypeOf(actual)
- if !actualType.AssignableTo(m.transformArgType) {
- return false, fmt.Errorf("Transform function expects '%s' but we have '%s'", m.transformArgType, actualType)
+ // prepare a parameter to pass to the Transform function
+ var param reflect.Value
+ if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) {
+ // The dynamic type of actual is compatible with the transform argument.
+ param = reflect.ValueOf(actual)
+
+ } else if actual == nil && m.transformArgType.Kind() == reflect.Interface {
+ // The dynamic type of actual is unknown, so there's no way to make its
+ // reflect.Value. Create a nil of the transform argument, which is known.
+ param = reflect.Zero(m.transformArgType)
+
+ } else {
+ return false, fmt.Errorf("Transform function expects '%s' but we have '%T'", m.transformArgType, actual)
}
// call the Transform function with `actual`
fn := reflect.ValueOf(m.Transform)
- result := fn.Call([]reflect.Value{reflect.ValueOf(actual)})
+ result := fn.Call([]reflect.Value{param})
+ if len(result) == 2 {
+ if !result[1].IsNil() {
+ return false, fmt.Errorf("Transform function failed: %s", result[1].Interface().(error).Error())
+ }
+ }
m.transformedValue = result[0].Interface() // expect exactly one value
return m.Matcher.Match(m.transformedValue)
@@ -68,5 +86,5 @@ func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
// Querying the next matcher is fine if the transformer always will return the same value.
// But if the transformer is non-deterministic and returns a different value each time, then there
// is no point in querying the next matcher, since it can only comment on the last transformed value.
- return oraclematcher.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue)
+ return types.MatchMayChangeInTheFuture(m.Matcher, m.transformedValue)
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/types/types.go b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/types/types.go
index ac59a3a5a48..7c7adb9415d 100644
--- a/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/types/types.go
+++ b/integration/assets/hydrabroker/vendor/github.com/onsi/gomega/types/types.go
@@ -1,26 +1,93 @@
package types
-type TWithHelper interface {
- Helper()
-}
+import (
+ "context"
+ "time"
+)
type GomegaFailHandler func(message string, callerSkip ...int)
-type GomegaFailWrapper struct {
- Fail GomegaFailHandler
- TWithHelper TWithHelper
-}
-
-//A simple *testing.T interface wrapper
+// A simple *testing.T interface wrapper
type GomegaTestingT interface {
+ Helper()
Fatalf(format string, args ...interface{})
}
-//All Gomega matchers must implement the GomegaMatcher interface
+// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers
+type Gomega interface {
+ Ω(actual interface{}, extra ...interface{}) Assertion
+ Expect(actual interface{}, extra ...interface{}) Assertion
+ ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
+
+ Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+
+ Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+
+ SetDefaultEventuallyTimeout(time.Duration)
+ SetDefaultEventuallyPollingInterval(time.Duration)
+ SetDefaultConsistentlyDuration(time.Duration)
+ SetDefaultConsistentlyPollingInterval(time.Duration)
+}
+
+// All Gomega matchers must implement the GomegaMatcher interface
//
-//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
+// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
type GomegaMatcher interface {
Match(actual interface{}) (success bool, err error)
FailureMessage(actual interface{}) (message string)
NegatedFailureMessage(actual interface{}) (message string)
}
+
+/*
+GomegaMatchers that also match the OracleMatcher interface can convey information about
+whether or not their result will change upon future attempts.
+
+This allows `Eventually` and `Consistently` to short circuit if success becomes impossible.
+
+For example, a process' exit code can never change. So, gexec's Exit matcher returns `true`
+for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
+*/
+type OracleMatcher interface {
+ MatchMayChangeInTheFuture(actual interface{}) bool
+}
+
+func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
+ oracleMatcher, ok := matcher.(OracleMatcher)
+ if !ok {
+ return true
+ }
+
+ return oracleMatcher.MatchMayChangeInTheFuture(value)
+}
+
+// AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure
+// they are eventually satisfied
+type AsyncAssertion interface {
+ Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) AsyncAssertion
+ WithTimeout(interval time.Duration) AsyncAssertion
+ WithPolling(interval time.Duration) AsyncAssertion
+ Within(timeout time.Duration) AsyncAssertion
+ ProbeEvery(interval time.Duration) AsyncAssertion
+ WithContext(ctx context.Context) AsyncAssertion
+ WithArguments(argsToForward ...interface{}) AsyncAssertion
+ MustPassRepeatedly(count int) AsyncAssertion
+}
+
+// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
+type Assertion interface {
+ Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+
+ WithOffset(offset int) Assertion
+
+ Error() Assertion
+}
diff --git a/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/apiresponses/responses.go b/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/apiresponses/responses.go
index 3b4655c568e..2c8c6c81833 100644
--- a/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/apiresponses/responses.go
+++ b/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/apiresponses/responses.go
@@ -29,8 +29,9 @@ type CatalogResponse struct {
}
type ProvisioningResponse struct {
- DashboardURL string `json:"dashboard_url,omitempty"`
- OperationData string `json:"operation,omitempty"`
+ DashboardURL string `json:"dashboard_url,omitempty"`
+ OperationData string `json:"operation,omitempty"`
+ Metadata interface{} `json:"metadata,omitempty"`
}
type GetInstanceResponse struct {
diff --git a/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_broker.go b/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_broker.go
index 9aff08e19a3..ad186ad3c36 100644
--- a/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_broker.go
+++ b/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_broker.go
@@ -94,6 +94,12 @@ type ProvisionedServiceSpec struct {
AlreadyExists bool
DashboardURL string
OperationData string
+ Metadata InstanceMetadata
+}
+
+type InstanceMetadata struct {
+ Labels map[string]string `json:"labels,omitempty"`
+ Attributes map[string]string `json:"attributes,omitempty"`
}
type DeprovisionDetails struct {
@@ -205,6 +211,10 @@ func (d BindDetails) GetRawParameters() json.RawMessage {
return d.RawParameters
}
+func (m InstanceMetadata) IsEmpty() bool {
+ return len(m.Attributes) == 0 && len(m.Labels) == 0
+}
+
func (d UpdateDetails) GetRawContext() json.RawMessage {
return d.RawContext
}
diff --git a/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_catalog.go b/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_catalog.go
index ad048f5ed1c..d36aa154597 100644
--- a/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_catalog.go
+++ b/integration/assets/hydrabroker/vendor/github.com/pivotal-cf/brokerapi/v7/domain/service_catalog.go
@@ -21,17 +21,19 @@ type Service struct {
Requires []RequiredPermission `json:"requires,omitempty"`
Metadata *ServiceMetadata `json:"metadata,omitempty"`
DashboardClient *ServiceDashboardClient `json:"dashboard_client,omitempty"`
+ AllowContextUpdates bool `json:"allow_context_updates,omitempty"`
}
type ServicePlan struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Description string `json:"description"`
- Free *bool `json:"free,omitempty"`
- Bindable *bool `json:"bindable,omitempty"`
- Metadata *ServicePlanMetadata `json:"metadata,omitempty"`
- Schemas *ServiceSchemas `json:"schemas,omitempty"`
- MaintenanceInfo *MaintenanceInfo `json:"maintenance_info,omitempty"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ Free *bool `json:"free,omitempty"`
+ Bindable *bool `json:"bindable,omitempty"`
+ Metadata *ServicePlanMetadata `json:"metadata,omitempty"`
+ Schemas *ServiceSchemas `json:"schemas,omitempty"`
+ MaximumPollingDuration *int `json:"maximum_polling_duration,omitempty"`
+ MaintenanceInfo *MaintenanceInfo `json:"maintenance_info,omitempty"`
}
type ServiceSchemas struct {
diff --git a/integration/assets/hydrabroker/vendor/github.com/pkg/errors/go113.go b/integration/assets/hydrabroker/vendor/github.com/pkg/errors/go113.go
index 2c83c724532..be0d10d0c79 100644
--- a/integration/assets/hydrabroker/vendor/github.com/pkg/errors/go113.go
+++ b/integration/assets/hydrabroker/vendor/github.com/pkg/errors/go113.go
@@ -1,4 +1,3 @@
-//go:build go1.13
// +build go1.13
package errors
diff --git a/integration/assets/hydrabroker/vendor/gopkg.in/fsnotify.v1/LICENSE b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/LICENSE
similarity index 92%
rename from integration/assets/hydrabroker/vendor/gopkg.in/fsnotify.v1/LICENSE
rename to integration/assets/hydrabroker/vendor/golang.org/x/crypto/LICENSE
index f21e5408009..6a66aea5eaf 100644
--- a/integration/assets/hydrabroker/vendor/gopkg.in/fsnotify.v1/LICENSE
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/LICENSE
@@ -1,5 +1,4 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2012 fsnotify Authors. All rights reserved.
+Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/xerrors/PATENTS b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/PATENTS
similarity index 100%
rename from integration/assets/hydrabroker/vendor/golang.org/x/xerrors/PATENTS
rename to integration/assets/hydrabroker/vendor/golang.org/x/crypto/PATENTS
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/doc.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/doc.go
new file mode 100644
index 00000000000..decd8cf9bf7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/doc.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha3 implements the SHA-3 fixed-output-length hash functions and
+// the SHAKE variable-output-length hash functions defined by FIPS-202.
+//
+// Both types of hash function use the "sponge" construction and the Keccak
+// permutation. For a detailed specification see http://keccak.noekeon.org/
+//
+// # Guidance
+//
+// If you aren't sure what function you need, use SHAKE256 with at least 64
+// bytes of output. The SHAKE instances are faster than the SHA3 instances;
+// the latter have to allocate memory to conform to the hash.Hash interface.
+//
+// If you need a secret-key MAC (message authentication code), prepend the
+// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
+// output.
+//
+// # Security strengths
+//
+// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
+// strength against preimage attacks of x bits. Since they only produce "x"
+// bits of output, their collision-resistance is only "x/2" bits.
+//
+// The SHAKE-256 and -128 functions have a generic security strength of 256 and
+// 128 bits against all attacks, provided that at least 2x bits of their output
+// is used. Requesting more than 64 or 32 bytes of output, respectively, does
+// not increase the collision-resistance of the SHAKE functions.
+//
+// # The sponge construction
+//
+// A sponge builds a pseudo-random function from a public pseudo-random
+// permutation, by applying the permutation to a state of "rate + capacity"
+// bytes, but hiding "capacity" of the bytes.
+//
+// A sponge starts out with a zero state. To hash an input using a sponge, up
+// to "rate" bytes of the input are XORed into the sponge's state. The sponge
+// is then "full" and the permutation is applied to "empty" it. This process is
+// repeated until all the input has been "absorbed". The input is then padded.
+// The digest is "squeezed" from the sponge in the same way, except that output
+// is copied out instead of input being XORed in.
+//
+// A sponge is parameterized by its generic security strength, which is equal
+// to half its capacity; capacity + rate is equal to the permutation's width.
+// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
+// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
+//
+// # Recommendations
+//
+// The SHAKE functions are recommended for most new uses. They can produce
+// output of arbitrary length. SHAKE256, with an output length of at least
+// 64 bytes, provides 256-bit security against all attacks. The Keccak team
+// recommends it for most applications upgrading from SHA2-512. (NIST chose a
+// much stronger, but much slower, sponge instance for SHA3-512.)
+//
+// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
+// They produce output of the same length, with the same security strengths
+// against all attacks. This means, in particular, that SHA3-256 only has
+// 128-bit collision resistance, because its output length is 32 bytes.
+package sha3 // import "golang.org/x/crypto/sha3"
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/hashes.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/hashes.go
new file mode 100644
index 00000000000..0d8043fd2a1
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/hashes.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file provides functions for creating instances of the SHA-3
+// and SHAKE hash functions, as well as utility functions for hashing
+// bytes.
+
+import (
+ "hash"
+)
+
+// New224 creates a new SHA3-224 hash.
+// Its generic security strength is 224 bits against preimage attacks,
+// and 112 bits against collision attacks.
+func New224() hash.Hash {
+ if h := new224Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
+}
+
+// New256 creates a new SHA3-256 hash.
+// Its generic security strength is 256 bits against preimage attacks,
+// and 128 bits against collision attacks.
+func New256() hash.Hash {
+ if h := new256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
+}
+
+// New384 creates a new SHA3-384 hash.
+// Its generic security strength is 384 bits against preimage attacks,
+// and 192 bits against collision attacks.
+func New384() hash.Hash {
+ if h := new384Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
+}
+
+// New512 creates a new SHA3-512 hash.
+// Its generic security strength is 512 bits against preimage attacks,
+// and 256 bits against collision attacks.
+func New512() hash.Hash {
+ if h := new512Asm(); h != nil {
+ return h
+ }
+ return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
+}
+
+// NewLegacyKeccak256 creates a new Keccak-256 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New256 instead.
+func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
+
+// NewLegacyKeccak512 creates a new Keccak-512 hash.
+//
+// Only use this function if you require compatibility with an existing cryptosystem
+// that uses non-standard padding. All other users should use New512 instead.
+func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
+
+// Sum224 returns the SHA3-224 digest of the data.
+func Sum224(data []byte) (digest [28]byte) {
+ h := New224()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum256 returns the SHA3-256 digest of the data.
+func Sum256(data []byte) (digest [32]byte) {
+ h := New256()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum384 returns the SHA3-384 digest of the data.
+func Sum384(data []byte) (digest [48]byte) {
+ h := New384()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum512 returns the SHA3-512 digest of the data.
+func Sum512(data []byte) (digest [64]byte) {
+ h := New512()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/hashes_generic.go
new file mode 100644
index 00000000000..fe8c84793c0
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/hashes_generic.go
@@ -0,0 +1,27 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gc || purego || !s390x
+
+package sha3
+
+import (
+ "hash"
+)
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash { return nil }
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash { return nil }
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash { return nil }
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash { return nil }
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf.go
new file mode 100644
index 00000000000..ce48b1dd3ed
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf.go
@@ -0,0 +1,414 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !amd64 || purego || !gc
+
+package sha3
+
+import "math/bits"
+
+// rc stores the round constants for use in the ι step.
+var rc = [24]uint64{
+ 0x0000000000000001,
+ 0x0000000000008082,
+ 0x800000000000808A,
+ 0x8000000080008000,
+ 0x000000000000808B,
+ 0x0000000080000001,
+ 0x8000000080008081,
+ 0x8000000000008009,
+ 0x000000000000008A,
+ 0x0000000000000088,
+ 0x0000000080008009,
+ 0x000000008000000A,
+ 0x000000008000808B,
+ 0x800000000000008B,
+ 0x8000000000008089,
+ 0x8000000000008003,
+ 0x8000000000008002,
+ 0x8000000000000080,
+ 0x000000000000800A,
+ 0x800000008000000A,
+ 0x8000000080008081,
+ 0x8000000000008080,
+ 0x0000000080000001,
+ 0x8000000080008008,
+}
+
+// keccakF1600 applies the Keccak permutation to a 1600b-wide
+// state represented as a slice of 25 uint64s.
+func keccakF1600(a *[25]uint64) {
+ // Implementation translated from Keccak-inplace.c
+ // in the keccak reference code.
+ var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
+
+ for i := 0; i < 24; i += 4 {
+ // Combines the 5 steps in each round into 2 steps.
+ // Unrolls 4 rounds per loop and spreads some steps across rounds.
+
+ // Round 1
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[6] ^ d1
+ bc1 = bits.RotateLeft64(t, 44)
+ t = a[12] ^ d2
+ bc2 = bits.RotateLeft64(t, 43)
+ t = a[18] ^ d3
+ bc3 = bits.RotateLeft64(t, 21)
+ t = a[24] ^ d4
+ bc4 = bits.RotateLeft64(t, 14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc2 = bits.RotateLeft64(t, 3)
+ t = a[16] ^ d1
+ bc3 = bits.RotateLeft64(t, 45)
+ t = a[22] ^ d2
+ bc4 = bits.RotateLeft64(t, 61)
+ t = a[3] ^ d3
+ bc0 = bits.RotateLeft64(t, 28)
+ t = a[9] ^ d4
+ bc1 = bits.RotateLeft64(t, 20)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc4 = bits.RotateLeft64(t, 18)
+ t = a[1] ^ d1
+ bc0 = bits.RotateLeft64(t, 1)
+ t = a[7] ^ d2
+ bc1 = bits.RotateLeft64(t, 6)
+ t = a[13] ^ d3
+ bc2 = bits.RotateLeft64(t, 25)
+ t = a[19] ^ d4
+ bc3 = bits.RotateLeft64(t, 8)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc1 = bits.RotateLeft64(t, 36)
+ t = a[11] ^ d1
+ bc2 = bits.RotateLeft64(t, 10)
+ t = a[17] ^ d2
+ bc3 = bits.RotateLeft64(t, 15)
+ t = a[23] ^ d3
+ bc4 = bits.RotateLeft64(t, 56)
+ t = a[4] ^ d4
+ bc0 = bits.RotateLeft64(t, 27)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc3 = bits.RotateLeft64(t, 41)
+ t = a[21] ^ d1
+ bc4 = bits.RotateLeft64(t, 2)
+ t = a[2] ^ d2
+ bc0 = bits.RotateLeft64(t, 62)
+ t = a[8] ^ d3
+ bc1 = bits.RotateLeft64(t, 55)
+ t = a[14] ^ d4
+ bc2 = bits.RotateLeft64(t, 39)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 2
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[16] ^ d1
+ bc1 = bits.RotateLeft64(t, 44)
+ t = a[7] ^ d2
+ bc2 = bits.RotateLeft64(t, 43)
+ t = a[23] ^ d3
+ bc3 = bits.RotateLeft64(t, 21)
+ t = a[14] ^ d4
+ bc4 = bits.RotateLeft64(t, 14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc2 = bits.RotateLeft64(t, 3)
+ t = a[11] ^ d1
+ bc3 = bits.RotateLeft64(t, 45)
+ t = a[2] ^ d2
+ bc4 = bits.RotateLeft64(t, 61)
+ t = a[18] ^ d3
+ bc0 = bits.RotateLeft64(t, 28)
+ t = a[9] ^ d4
+ bc1 = bits.RotateLeft64(t, 20)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc4 = bits.RotateLeft64(t, 18)
+ t = a[6] ^ d1
+ bc0 = bits.RotateLeft64(t, 1)
+ t = a[22] ^ d2
+ bc1 = bits.RotateLeft64(t, 6)
+ t = a[13] ^ d3
+ bc2 = bits.RotateLeft64(t, 25)
+ t = a[4] ^ d4
+ bc3 = bits.RotateLeft64(t, 8)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc1 = bits.RotateLeft64(t, 36)
+ t = a[1] ^ d1
+ bc2 = bits.RotateLeft64(t, 10)
+ t = a[17] ^ d2
+ bc3 = bits.RotateLeft64(t, 15)
+ t = a[8] ^ d3
+ bc4 = bits.RotateLeft64(t, 56)
+ t = a[24] ^ d4
+ bc0 = bits.RotateLeft64(t, 27)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc3 = bits.RotateLeft64(t, 41)
+ t = a[21] ^ d1
+ bc4 = bits.RotateLeft64(t, 2)
+ t = a[12] ^ d2
+ bc0 = bits.RotateLeft64(t, 62)
+ t = a[3] ^ d3
+ bc1 = bits.RotateLeft64(t, 55)
+ t = a[19] ^ d4
+ bc2 = bits.RotateLeft64(t, 39)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 3
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[11] ^ d1
+ bc1 = bits.RotateLeft64(t, 44)
+ t = a[22] ^ d2
+ bc2 = bits.RotateLeft64(t, 43)
+ t = a[8] ^ d3
+ bc3 = bits.RotateLeft64(t, 21)
+ t = a[19] ^ d4
+ bc4 = bits.RotateLeft64(t, 14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc2 = bits.RotateLeft64(t, 3)
+ t = a[1] ^ d1
+ bc3 = bits.RotateLeft64(t, 45)
+ t = a[12] ^ d2
+ bc4 = bits.RotateLeft64(t, 61)
+ t = a[23] ^ d3
+ bc0 = bits.RotateLeft64(t, 28)
+ t = a[9] ^ d4
+ bc1 = bits.RotateLeft64(t, 20)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc4 = bits.RotateLeft64(t, 18)
+ t = a[16] ^ d1
+ bc0 = bits.RotateLeft64(t, 1)
+ t = a[2] ^ d2
+ bc1 = bits.RotateLeft64(t, 6)
+ t = a[13] ^ d3
+ bc2 = bits.RotateLeft64(t, 25)
+ t = a[24] ^ d4
+ bc3 = bits.RotateLeft64(t, 8)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc1 = bits.RotateLeft64(t, 36)
+ t = a[6] ^ d1
+ bc2 = bits.RotateLeft64(t, 10)
+ t = a[17] ^ d2
+ bc3 = bits.RotateLeft64(t, 15)
+ t = a[3] ^ d3
+ bc4 = bits.RotateLeft64(t, 56)
+ t = a[14] ^ d4
+ bc0 = bits.RotateLeft64(t, 27)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc3 = bits.RotateLeft64(t, 41)
+ t = a[21] ^ d1
+ bc4 = bits.RotateLeft64(t, 2)
+ t = a[7] ^ d2
+ bc0 = bits.RotateLeft64(t, 62)
+ t = a[18] ^ d3
+ bc1 = bits.RotateLeft64(t, 55)
+ t = a[4] ^ d4
+ bc2 = bits.RotateLeft64(t, 39)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 4
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[1] ^ d1
+ bc1 = bits.RotateLeft64(t, 44)
+ t = a[2] ^ d2
+ bc2 = bits.RotateLeft64(t, 43)
+ t = a[3] ^ d3
+ bc3 = bits.RotateLeft64(t, 21)
+ t = a[4] ^ d4
+ bc4 = bits.RotateLeft64(t, 14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc2 = bits.RotateLeft64(t, 3)
+ t = a[6] ^ d1
+ bc3 = bits.RotateLeft64(t, 45)
+ t = a[7] ^ d2
+ bc4 = bits.RotateLeft64(t, 61)
+ t = a[8] ^ d3
+ bc0 = bits.RotateLeft64(t, 28)
+ t = a[9] ^ d4
+ bc1 = bits.RotateLeft64(t, 20)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc4 = bits.RotateLeft64(t, 18)
+ t = a[11] ^ d1
+ bc0 = bits.RotateLeft64(t, 1)
+ t = a[12] ^ d2
+ bc1 = bits.RotateLeft64(t, 6)
+ t = a[13] ^ d3
+ bc2 = bits.RotateLeft64(t, 25)
+ t = a[14] ^ d4
+ bc3 = bits.RotateLeft64(t, 8)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc1 = bits.RotateLeft64(t, 36)
+ t = a[16] ^ d1
+ bc2 = bits.RotateLeft64(t, 10)
+ t = a[17] ^ d2
+ bc3 = bits.RotateLeft64(t, 15)
+ t = a[18] ^ d3
+ bc4 = bits.RotateLeft64(t, 56)
+ t = a[19] ^ d4
+ bc0 = bits.RotateLeft64(t, 27)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc3 = bits.RotateLeft64(t, 41)
+ t = a[21] ^ d1
+ bc4 = bits.RotateLeft64(t, 2)
+ t = a[22] ^ d2
+ bc0 = bits.RotateLeft64(t, 62)
+ t = a[23] ^ d3
+ bc1 = bits.RotateLeft64(t, 55)
+ t = a[24] ^ d4
+ bc2 = bits.RotateLeft64(t, 39)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
new file mode 100644
index 00000000000..b908696be58
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf_amd64.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+
+package sha3
+
+// This function is implemented in keccakf_amd64.s.
+
+//go:noescape
+
+func keccakF1600(a *[25]uint64)
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
new file mode 100644
index 00000000000..1f539388619
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
@@ -0,0 +1,390 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 && !purego && gc
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources at https://github.com/gvanas/KeccakCodePackage
+
+// Offsets in state
+#define _ba (0*8)
+#define _be (1*8)
+#define _bi (2*8)
+#define _bo (3*8)
+#define _bu (4*8)
+#define _ga (5*8)
+#define _ge (6*8)
+#define _gi (7*8)
+#define _go (8*8)
+#define _gu (9*8)
+#define _ka (10*8)
+#define _ke (11*8)
+#define _ki (12*8)
+#define _ko (13*8)
+#define _ku (14*8)
+#define _ma (15*8)
+#define _me (16*8)
+#define _mi (17*8)
+#define _mo (18*8)
+#define _mu (19*8)
+#define _sa (20*8)
+#define _se (21*8)
+#define _si (22*8)
+#define _so (23*8)
+#define _su (24*8)
+
+// Temporary registers
+#define rT1 AX
+
+// Round vars
+#define rpState DI
+#define rpStack SP
+
+#define rDa BX
+#define rDe CX
+#define rDi DX
+#define rDo R8
+#define rDu R9
+
+#define rBa R10
+#define rBe R11
+#define rBi R12
+#define rBo R13
+#define rBu R14
+
+#define rCa SI
+#define rCe BP
+#define rCi rBi
+#define rCo rBo
+#define rCu R15
+
+#define MOVQ_RBI_RCE MOVQ rBi, rCe
+#define XORQ_RT1_RCA XORQ rT1, rCa
+#define XORQ_RT1_RCE XORQ rT1, rCe
+#define XORQ_RBA_RCU XORQ rBa, rCu
+#define XORQ_RBE_RCU XORQ rBe, rCu
+#define XORQ_RDU_RCU XORQ rDu, rCu
+#define XORQ_RDA_RCA XORQ rDa, rCa
+#define XORQ_RDE_RCE XORQ rDe, rCe
+
+#define mKeccakRound(iState, oState, rc, B_RBI_RCE, G_RT1_RCA, G_RT1_RCE, G_RBA_RCU, K_RT1_RCA, K_RT1_RCE, K_RBA_RCU, M_RT1_RCA, M_RT1_RCE, M_RBE_RCU, S_RDU_RCU, S_RDA_RCA, S_RDE_RCE) \
+ /* Prepare round */ \
+ MOVQ rCe, rDa; \
+ ROLQ $1, rDa; \
+ \
+ MOVQ _bi(iState), rCi; \
+ XORQ _gi(iState), rDi; \
+ XORQ rCu, rDa; \
+ XORQ _ki(iState), rCi; \
+ XORQ _mi(iState), rDi; \
+ XORQ rDi, rCi; \
+ \
+ MOVQ rCi, rDe; \
+ ROLQ $1, rDe; \
+ \
+ MOVQ _bo(iState), rCo; \
+ XORQ _go(iState), rDo; \
+ XORQ rCa, rDe; \
+ XORQ _ko(iState), rCo; \
+ XORQ _mo(iState), rDo; \
+ XORQ rDo, rCo; \
+ \
+ MOVQ rCo, rDi; \
+ ROLQ $1, rDi; \
+ \
+ MOVQ rCu, rDo; \
+ XORQ rCe, rDi; \
+ ROLQ $1, rDo; \
+ \
+ MOVQ rCa, rDu; \
+ XORQ rCi, rDo; \
+ ROLQ $1, rDu; \
+ \
+ /* Result b */ \
+ MOVQ _ba(iState), rBa; \
+ MOVQ _ge(iState), rBe; \
+ XORQ rCo, rDu; \
+ MOVQ _ki(iState), rBi; \
+ MOVQ _mo(iState), rBo; \
+ MOVQ _su(iState), rBu; \
+ XORQ rDe, rBe; \
+ ROLQ $44, rBe; \
+ XORQ rDi, rBi; \
+ XORQ rDa, rBa; \
+ ROLQ $43, rBi; \
+ \
+ MOVQ rBe, rCa; \
+ MOVQ rc, rT1; \
+ ORQ rBi, rCa; \
+ XORQ rBa, rT1; \
+ XORQ rT1, rCa; \
+ MOVQ rCa, _ba(oState); \
+ \
+ XORQ rDu, rBu; \
+ ROLQ $14, rBu; \
+ MOVQ rBa, rCu; \
+ ANDQ rBe, rCu; \
+ XORQ rBu, rCu; \
+ MOVQ rCu, _bu(oState); \
+ \
+ XORQ rDo, rBo; \
+ ROLQ $21, rBo; \
+ MOVQ rBo, rT1; \
+ ANDQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _bi(oState); \
+ \
+ NOTQ rBi; \
+ ORQ rBa, rBu; \
+ ORQ rBo, rBi; \
+ XORQ rBo, rBu; \
+ XORQ rBe, rBi; \
+ MOVQ rBu, _bo(oState); \
+ MOVQ rBi, _be(oState); \
+ B_RBI_RCE; \
+ \
+ /* Result g */ \
+ MOVQ _gu(iState), rBe; \
+ XORQ rDu, rBe; \
+ MOVQ _ka(iState), rBi; \
+ ROLQ $20, rBe; \
+ XORQ rDa, rBi; \
+ ROLQ $3, rBi; \
+ MOVQ _bo(iState), rBa; \
+ MOVQ rBe, rT1; \
+ ORQ rBi, rT1; \
+ XORQ rDo, rBa; \
+ MOVQ _me(iState), rBo; \
+ MOVQ _si(iState), rBu; \
+ ROLQ $28, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ga(oState); \
+ G_RT1_RCA; \
+ \
+ XORQ rDe, rBo; \
+ ROLQ $45, rBo; \
+ MOVQ rBi, rT1; \
+ ANDQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _ge(oState); \
+ G_RT1_RCE; \
+ \
+ XORQ rDi, rBu; \
+ ROLQ $61, rBu; \
+ MOVQ rBu, rT1; \
+ ORQ rBa, rT1; \
+ XORQ rBo, rT1; \
+ MOVQ rT1, _go(oState); \
+ \
+ ANDQ rBe, rBa; \
+ XORQ rBu, rBa; \
+ MOVQ rBa, _gu(oState); \
+ NOTQ rBu; \
+ G_RBA_RCU; \
+ \
+ ORQ rBu, rBo; \
+ XORQ rBi, rBo; \
+ MOVQ rBo, _gi(oState); \
+ \
+ /* Result k */ \
+ MOVQ _be(iState), rBa; \
+ MOVQ _gi(iState), rBe; \
+ MOVQ _ko(iState), rBi; \
+ MOVQ _mu(iState), rBo; \
+ MOVQ _sa(iState), rBu; \
+ XORQ rDi, rBe; \
+ ROLQ $6, rBe; \
+ XORQ rDo, rBi; \
+ ROLQ $25, rBi; \
+ MOVQ rBe, rT1; \
+ ORQ rBi, rT1; \
+ XORQ rDe, rBa; \
+ ROLQ $1, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ka(oState); \
+ K_RT1_RCA; \
+ \
+ XORQ rDu, rBo; \
+ ROLQ $8, rBo; \
+ MOVQ rBi, rT1; \
+ ANDQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _ke(oState); \
+ K_RT1_RCE; \
+ \
+ XORQ rDa, rBu; \
+ ROLQ $18, rBu; \
+ NOTQ rBo; \
+ MOVQ rBo, rT1; \
+ ANDQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _ki(oState); \
+ \
+ MOVQ rBu, rT1; \
+ ORQ rBa, rT1; \
+ XORQ rBo, rT1; \
+ MOVQ rT1, _ko(oState); \
+ \
+ ANDQ rBe, rBa; \
+ XORQ rBu, rBa; \
+ MOVQ rBa, _ku(oState); \
+ K_RBA_RCU; \
+ \
+ /* Result m */ \
+ MOVQ _ga(iState), rBe; \
+ XORQ rDa, rBe; \
+ MOVQ _ke(iState), rBi; \
+ ROLQ $36, rBe; \
+ XORQ rDe, rBi; \
+ MOVQ _bu(iState), rBa; \
+ ROLQ $10, rBi; \
+ MOVQ rBe, rT1; \
+ MOVQ _mi(iState), rBo; \
+ ANDQ rBi, rT1; \
+ XORQ rDu, rBa; \
+ MOVQ _so(iState), rBu; \
+ ROLQ $27, rBa; \
+ XORQ rBa, rT1; \
+ MOVQ rT1, _ma(oState); \
+ M_RT1_RCA; \
+ \
+ XORQ rDi, rBo; \
+ ROLQ $15, rBo; \
+ MOVQ rBi, rT1; \
+ ORQ rBo, rT1; \
+ XORQ rBe, rT1; \
+ MOVQ rT1, _me(oState); \
+ M_RT1_RCE; \
+ \
+ XORQ rDo, rBu; \
+ ROLQ $56, rBu; \
+ NOTQ rBo; \
+ MOVQ rBo, rT1; \
+ ORQ rBu, rT1; \
+ XORQ rBi, rT1; \
+ MOVQ rT1, _mi(oState); \
+ \
+ ORQ rBa, rBe; \
+ XORQ rBu, rBe; \
+ MOVQ rBe, _mu(oState); \
+ \
+ ANDQ rBa, rBu; \
+ XORQ rBo, rBu; \
+ MOVQ rBu, _mo(oState); \
+ M_RBE_RCU; \
+ \
+ /* Result s */ \
+ MOVQ _bi(iState), rBa; \
+ MOVQ _go(iState), rBe; \
+ MOVQ _ku(iState), rBi; \
+ XORQ rDi, rBa; \
+ MOVQ _ma(iState), rBo; \
+ ROLQ $62, rBa; \
+ XORQ rDo, rBe; \
+ MOVQ _se(iState), rBu; \
+ ROLQ $55, rBe; \
+ \
+ XORQ rDu, rBi; \
+ MOVQ rBa, rDu; \
+ XORQ rDe, rBu; \
+ ROLQ $2, rBu; \
+ ANDQ rBe, rDu; \
+ XORQ rBu, rDu; \
+ MOVQ rDu, _su(oState); \
+ \
+ ROLQ $39, rBi; \
+ S_RDU_RCU; \
+ NOTQ rBe; \
+ XORQ rDa, rBo; \
+ MOVQ rBe, rDa; \
+ ANDQ rBi, rDa; \
+ XORQ rBa, rDa; \
+ MOVQ rDa, _sa(oState); \
+ S_RDA_RCA; \
+ \
+ ROLQ $41, rBo; \
+ MOVQ rBi, rDe; \
+ ORQ rBo, rDe; \
+ XORQ rBe, rDe; \
+ MOVQ rDe, _se(oState); \
+ S_RDE_RCE; \
+ \
+ MOVQ rBo, rDi; \
+ MOVQ rBu, rDo; \
+ ANDQ rBu, rDi; \
+ ORQ rBa, rDo; \
+ XORQ rBi, rDi; \
+ XORQ rBo, rDo; \
+ MOVQ rDi, _si(oState); \
+ MOVQ rDo, _so(oState) \
+
+// func keccakF1600(a *[25]uint64)
+TEXT ·keccakF1600(SB), 0, $200-8
+ MOVQ a+0(FP), rpState
+
+ // Convert the user state into an internal state
+ NOTQ _be(rpState)
+ NOTQ _bi(rpState)
+ NOTQ _go(rpState)
+ NOTQ _ki(rpState)
+ NOTQ _mi(rpState)
+ NOTQ _sa(rpState)
+
+ // Execute the KeccakF permutation
+ MOVQ _ba(rpState), rCa
+ MOVQ _be(rpState), rCe
+ MOVQ _bu(rpState), rCu
+
+ XORQ _ga(rpState), rCa
+ XORQ _ge(rpState), rCe
+ XORQ _gu(rpState), rCu
+
+ XORQ _ka(rpState), rCa
+ XORQ _ke(rpState), rCe
+ XORQ _ku(rpState), rCu
+
+ XORQ _ma(rpState), rCa
+ XORQ _me(rpState), rCe
+ XORQ _mu(rpState), rCu
+
+ XORQ _sa(rpState), rCa
+ XORQ _se(rpState), rCe
+ MOVQ _si(rpState), rDi
+ MOVQ _so(rpState), rDo
+ XORQ _su(rpState), rCu
+
+ mKeccakRound(rpState, rpStack, $0x0000000000000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000000008082, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x800000000000808a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000080008000, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000008a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x0000000000000088, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x0000000080008009, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x000000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000008000808b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x800000000000008b, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000000008089, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008003, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000000008002, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000000080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x000000000000800a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x800000008000000a, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x8000000080008081, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000000008080, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpState, rpStack, $0x0000000080000001, MOVQ_RBI_RCE, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBA_RCU, XORQ_RT1_RCA, XORQ_RT1_RCE, XORQ_RBE_RCU, XORQ_RDU_RCU, XORQ_RDA_RCA, XORQ_RDE_RCE)
+ mKeccakRound(rpStack, rpState, $0x8000000080008008, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP, NOP)
+
+ // Revert the internal state to the user state
+ NOTQ _be(rpState)
+ NOTQ _bi(rpState)
+ NOTQ _go(rpState)
+ NOTQ _ki(rpState)
+ NOTQ _mi(rpState)
+ NOTQ _sa(rpState)
+
+ RET
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/register.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/register.go
new file mode 100644
index 00000000000..addfd5049bb
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/register.go
@@ -0,0 +1,18 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.4
+
+package sha3
+
+import (
+ "crypto"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA3_224, New224)
+ crypto.RegisterHash(crypto.SHA3_256, New256)
+ crypto.RegisterHash(crypto.SHA3_384, New384)
+ crypto.RegisterHash(crypto.SHA3_512, New512)
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3.go
new file mode 100644
index 00000000000..4884d172a49
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3.go
@@ -0,0 +1,197 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// spongeDirection indicates the direction bytes are flowing through the sponge.
+type spongeDirection int
+
+const (
+ // spongeAbsorbing indicates that the sponge is absorbing input.
+ spongeAbsorbing spongeDirection = iota
+ // spongeSqueezing indicates that the sponge is being squeezed.
+ spongeSqueezing
+)
+
+const (
+ // maxRate is the maximum size of the internal buffer. SHAKE-256
+ // currently needs the largest buffer.
+ maxRate = 168
+)
+
+type state struct {
+ // Generic sponge components.
+ a [25]uint64 // main state of the hash
+ buf []byte // points into storage
+ rate int // the number of bytes of state to use
+
+ // dsbyte contains the "domain separation" bits and the first bit of
+ // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
+ // SHA-3 and SHAKE functions by appending bitstrings to the message.
+ // Using a little-endian bit-ordering convention, these are "01" for SHA-3
+ // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
+ // padding rule from section 5.1 is applied to pad the message to a multiple
+ // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
+ // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
+ // giving 00000110b (0x06) and 00011111b (0x1f).
+ // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
+ // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
+ // Extendable-Output Functions (May 2014)"
+ dsbyte byte
+
+ storage storageBuf
+
+ // Specific to SHA-3 and SHAKE.
+ outputLen int // the default output size in bytes
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
+// BlockSize returns the rate of sponge underlying this hash function.
+func (d *state) BlockSize() int { return d.rate }
+
+// Size returns the output size of the hash function in bytes.
+func (d *state) Size() int { return d.outputLen }
+
+// Reset clears the internal state by zeroing the sponge state and
+// the byte buffer, and setting Sponge.state to absorbing.
+func (d *state) Reset() {
+ // Zero the permutation's state.
+ for i := range d.a {
+ d.a[i] = 0
+ }
+ d.state = spongeAbsorbing
+ d.buf = d.storage.asBytes()[:0]
+}
+
+func (d *state) clone() *state {
+ ret := *d
+ if ret.state == spongeAbsorbing {
+ ret.buf = ret.storage.asBytes()[:len(ret.buf)]
+ } else {
+ ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate]
+ }
+
+ return &ret
+}
+
+// permute applies the KeccakF-1600 permutation. It handles
+// any input-output buffering.
+func (d *state) permute() {
+ switch d.state {
+ case spongeAbsorbing:
+ // If we're absorbing, we need to xor the input into the state
+ // before applying the permutation.
+ xorIn(d, d.buf)
+ d.buf = d.storage.asBytes()[:0]
+ keccakF1600(&d.a)
+ case spongeSqueezing:
+ // If we're squeezing, we need to apply the permutation before
+ // copying more output.
+ keccakF1600(&d.a)
+ d.buf = d.storage.asBytes()[:d.rate]
+ copyOut(d, d.buf)
+ }
+}
+
+// pads appends the domain separation bits in dsbyte, applies
+// the multi-bitrate 10..1 padding rule, and permutes the state.
+func (d *state) padAndPermute(dsbyte byte) {
+ if d.buf == nil {
+ d.buf = d.storage.asBytes()[:0]
+ }
+ // Pad with this instance's domain-separator bits. We know that there's
+ // at least one byte of space in d.buf because, if it were full,
+ // permute would have been called to empty it. dsbyte also contains the
+ // first one bit for the padding. See the comment in the state struct.
+ d.buf = append(d.buf, dsbyte)
+ zerosStart := len(d.buf)
+ d.buf = d.storage.asBytes()[:d.rate]
+ for i := zerosStart; i < d.rate; i++ {
+ d.buf[i] = 0
+ }
+ // This adds the final one bit for the padding. Because of the way that
+ // bits are numbered from the LSB upwards, the final bit is the MSB of
+ // the last byte.
+ d.buf[d.rate-1] ^= 0x80
+ // Apply the permutation
+ d.permute()
+ d.state = spongeSqueezing
+ d.buf = d.storage.asBytes()[:d.rate]
+ copyOut(d, d.buf)
+}
+
+// Write absorbs more data into the hash's state. It panics if any
+// output has already been read.
+func (d *state) Write(p []byte) (written int, err error) {
+ if d.state != spongeAbsorbing {
+ panic("sha3: Write after Read")
+ }
+ if d.buf == nil {
+ d.buf = d.storage.asBytes()[:0]
+ }
+ written = len(p)
+
+ for len(p) > 0 {
+ if len(d.buf) == 0 && len(p) >= d.rate {
+ // The fast path; absorb a full "rate" bytes of input and apply the permutation.
+ xorIn(d, p[:d.rate])
+ p = p[d.rate:]
+ keccakF1600(&d.a)
+ } else {
+ // The slow path; buffer the input until we can fill the sponge, and then xor it in.
+ todo := d.rate - len(d.buf)
+ if todo > len(p) {
+ todo = len(p)
+ }
+ d.buf = append(d.buf, p[:todo]...)
+ p = p[todo:]
+
+ // If the sponge is full, apply the permutation.
+ if len(d.buf) == d.rate {
+ d.permute()
+ }
+ }
+ }
+
+ return
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (d *state) Read(out []byte) (n int, err error) {
+ // If we're still absorbing, pad and apply the permutation.
+ if d.state == spongeAbsorbing {
+ d.padAndPermute(d.dsbyte)
+ }
+
+ n = len(out)
+
+ // Now, do the squeezing.
+ for len(out) > 0 {
+ n := copy(out, d.buf)
+ d.buf = d.buf[n:]
+ out = out[n:]
+
+ // Apply the permutation if we've squeezed the sponge dry.
+ if len(d.buf) == 0 {
+ d.permute()
+ }
+ }
+
+ return
+}
+
+// Sum applies padding to the hash state and then squeezes out the desired
+// number of output bytes. It panics if any output has already been read.
+func (d *state) Sum(in []byte) []byte {
+ if d.state != spongeAbsorbing {
+ panic("sha3: Sum after Read")
+ }
+
+ // Make a copy of the original hash so that caller can keep writing
+ // and summing.
+ dup := d.clone()
+ hash := make([]byte, dup.outputLen, 64) // explicit cap to allow stack allocation
+ dup.Read(hash)
+ return append(in, hash...)
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
new file mode 100644
index 00000000000..b4fbbf8695c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3_s390x.go
@@ -0,0 +1,303 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+package sha3
+
+// This file contains code for using the 'compute intermediate
+// message digest' (KIMD) and 'compute last message digest' (KLMD)
+// instructions to compute SHA-3 and SHAKE hashes on IBM Z.
+
+import (
+ "hash"
+
+ "golang.org/x/sys/cpu"
+)
+
+// codes represent 7-bit KIMD/KLMD function codes as defined in
+// the Principles of Operation.
+type code uint64
+
+const (
+ // function codes for KIMD/KLMD
+ sha3_224 code = 32
+ sha3_256 = 33
+ sha3_384 = 34
+ sha3_512 = 35
+ shake_128 = 36
+ shake_256 = 37
+ nopad = 0x100
+)
+
+// kimd is a wrapper for the 'compute intermediate message digest' instruction.
+// src must be a multiple of the rate for the given function code.
+//
+//go:noescape
+func kimd(function code, chain *[200]byte, src []byte)
+
+// klmd is a wrapper for the 'compute last message digest' instruction.
+// src padding is handled by the instruction.
+//
+//go:noescape
+func klmd(function code, chain *[200]byte, dst, src []byte)
+
+type asmState struct {
+ a [200]byte // 1600 bit state
+ buf []byte // care must be taken to ensure cap(buf) is a multiple of rate
+ rate int // equivalent to block size
+ storage [3072]byte // underlying storage for buf
+ outputLen int // output length for full security
+ function code // KIMD/KLMD function code
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
+func newAsmState(function code) *asmState {
+ var s asmState
+ s.function = function
+ switch function {
+ case sha3_224:
+ s.rate = 144
+ s.outputLen = 28
+ case sha3_256:
+ s.rate = 136
+ s.outputLen = 32
+ case sha3_384:
+ s.rate = 104
+ s.outputLen = 48
+ case sha3_512:
+ s.rate = 72
+ s.outputLen = 64
+ case shake_128:
+ s.rate = 168
+ s.outputLen = 32
+ case shake_256:
+ s.rate = 136
+ s.outputLen = 64
+ default:
+ panic("sha3: unrecognized function code")
+ }
+
+ // limit s.buf size to a multiple of s.rate
+ s.resetBuf()
+ return &s
+}
+
+func (s *asmState) clone() *asmState {
+ c := *s
+ c.buf = c.storage[:len(s.buf):cap(s.buf)]
+ return &c
+}
+
+// copyIntoBuf copies b into buf. It will panic if there is not enough space to
+// store all of b.
+func (s *asmState) copyIntoBuf(b []byte) {
+ bufLen := len(s.buf)
+ s.buf = s.buf[:len(s.buf)+len(b)]
+ copy(s.buf[bufLen:], b)
+}
+
+// resetBuf points buf at storage, sets the length to 0 and sets cap to be a
+// multiple of the rate.
+func (s *asmState) resetBuf() {
+ max := (cap(s.storage) / s.rate) * s.rate
+ s.buf = s.storage[:0:max]
+}
+
+// Write (via the embedded io.Writer interface) adds more data to the running hash.
+// It never returns an error.
+func (s *asmState) Write(b []byte) (int, error) {
+ if s.state != spongeAbsorbing {
+ panic("sha3: Write after Read")
+ }
+ length := len(b)
+ for len(b) > 0 {
+ if len(s.buf) == 0 && len(b) >= cap(s.buf) {
+ // Hash the data directly and push any remaining bytes
+ // into the buffer.
+ remainder := len(b) % s.rate
+ kimd(s.function, &s.a, b[:len(b)-remainder])
+ if remainder != 0 {
+ s.copyIntoBuf(b[len(b)-remainder:])
+ }
+ return length, nil
+ }
+
+ if len(s.buf) == cap(s.buf) {
+ // flush the buffer
+ kimd(s.function, &s.a, s.buf)
+ s.buf = s.buf[:0]
+ }
+
+ // copy as much as we can into the buffer
+ n := len(b)
+ if len(b) > cap(s.buf)-len(s.buf) {
+ n = cap(s.buf) - len(s.buf)
+ }
+ s.copyIntoBuf(b[:n])
+ b = b[n:]
+ }
+ return length, nil
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (s *asmState) Read(out []byte) (n int, err error) {
+ // The 'compute last message digest' instruction only stores the digest
+ // at the first operand (dst) for SHAKE functions.
+ if s.function != shake_128 && s.function != shake_256 {
+ panic("sha3: can only call Read for SHAKE functions")
+ }
+
+ n = len(out)
+
+ // need to pad if we were absorbing
+ if s.state == spongeAbsorbing {
+ s.state = spongeSqueezing
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function, &s.a, out, s.buf) // len(out) may be 0
+ s.buf = s.buf[:0]
+ return
+ }
+
+ // write hash into buffer
+ max := cap(s.buf)
+ if max > len(out) {
+ max = (len(out)/s.rate)*s.rate + s.rate
+ }
+ klmd(s.function, &s.a, s.buf[:max], s.buf)
+ s.buf = s.buf[:max]
+ }
+
+ for len(out) > 0 {
+ // flush the buffer
+ if len(s.buf) != 0 {
+ c := copy(out, s.buf)
+ out = out[c:]
+ s.buf = s.buf[c:]
+ continue
+ }
+
+ // write hash directly into out if possible
+ if len(out)%s.rate == 0 {
+ klmd(s.function|nopad, &s.a, out, nil)
+ return
+ }
+
+ // write hash into buffer
+ s.resetBuf()
+ if cap(s.buf) > len(out) {
+ s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate]
+ }
+ klmd(s.function|nopad, &s.a, s.buf, nil)
+ }
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (s *asmState) Sum(b []byte) []byte {
+ if s.state != spongeAbsorbing {
+ panic("sha3: Sum after Read")
+ }
+
+ // Copy the state to preserve the original.
+ a := s.a
+
+ // Hash the buffer. Note that we don't clear it because we
+ // aren't updating the state.
+ switch s.function {
+ case sha3_224, sha3_256, sha3_384, sha3_512:
+ klmd(s.function, &a, nil, s.buf)
+ return append(b, a[:s.outputLen]...)
+ case shake_128, shake_256:
+ d := make([]byte, s.outputLen, 64)
+ klmd(s.function, &a, d, s.buf)
+ return append(b, d[:s.outputLen]...)
+ default:
+ panic("sha3: unknown function")
+ }
+}
+
+// Reset resets the Hash to its initial state.
+func (s *asmState) Reset() {
+ for i := range s.a {
+ s.a[i] = 0
+ }
+ s.resetBuf()
+ s.state = spongeAbsorbing
+}
+
+// Size returns the number of bytes Sum will return.
+func (s *asmState) Size() int {
+ return s.outputLen
+}
+
+// BlockSize returns the hash's underlying block size.
+// The Write method must be able to accept any amount
+// of data, but it may operate more efficiently if all writes
+// are a multiple of the block size.
+func (s *asmState) BlockSize() int {
+ return s.rate
+}
+
+// Clone returns a copy of the ShakeHash in its current state.
+func (s *asmState) Clone() ShakeHash {
+ return s.clone()
+}
+
+// new224Asm returns an assembly implementation of SHA3-224 if available,
+// otherwise it returns nil.
+func new224Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_224)
+ }
+ return nil
+}
+
+// new256Asm returns an assembly implementation of SHA3-256 if available,
+// otherwise it returns nil.
+func new256Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_256)
+ }
+ return nil
+}
+
+// new384Asm returns an assembly implementation of SHA3-384 if available,
+// otherwise it returns nil.
+func new384Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_384)
+ }
+ return nil
+}
+
+// new512Asm returns an assembly implementation of SHA3-512 if available,
+// otherwise it returns nil.
+func new512Asm() hash.Hash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(sha3_512)
+ }
+ return nil
+}
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(shake_128)
+ }
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ if cpu.S390X.HasSHA3 {
+ return newAsmState(shake_256)
+ }
+ return nil
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
new file mode 100644
index 00000000000..826b862c779
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/sha3_s390x.s
@@ -0,0 +1,33 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+#include "textflag.h"
+
+// func kimd(function code, chain *[200]byte, src []byte)
+TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
+ MOVD function+0(FP), R0
+ MOVD chain+8(FP), R1
+ LMG src+16(FP), R2, R3 // R2=base, R3=len
+
+continue:
+ WORD $0xB93E0002 // KIMD --, R2
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
+
+// func klmd(function code, chain *[200]byte, dst, src []byte)
+TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
+ // TODO: SHAKE support
+ MOVD function+0(FP), R0
+ MOVD chain+8(FP), R1
+ LMG dst+16(FP), R2, R3 // R2=base, R3=len
+ LMG src+40(FP), R4, R5 // R4=base, R5=len
+
+continue:
+ WORD $0xB93F0024 // KLMD R2, R4
+ BVS continue // continue if interrupted
+ MOVD $0, R0 // reset R0 for pre-go1.8 compilers
+ RET
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/shake.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/shake.go
new file mode 100644
index 00000000000..bb69984027f
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/shake.go
@@ -0,0 +1,172 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file defines the ShakeHash interface, and provides
+// functions for creating SHAKE and cSHAKE instances, as well as utility
+// functions for hashing bytes to arbitrary-length output.
+//
+//
+// SHAKE implementation is based on FIPS PUB 202 [1]
+// cSHAKE implementations is based on NIST SP 800-185 [2]
+//
+// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+// [2] https://doi.org/10.6028/NIST.SP.800-185
+
+import (
+ "encoding/binary"
+ "hash"
+ "io"
+)
+
+// ShakeHash defines the interface to hash functions that support
+// arbitrary-length output. When used as a plain [hash.Hash], it
+// produces minimum-length outputs that provide full-strength generic
+// security.
+type ShakeHash interface {
+ hash.Hash
+
+ // Read reads more output from the hash; reading affects the hash's
+ // state. (ShakeHash.Read is thus very different from Hash.Sum)
+ // It never returns an error, but subsequent calls to Write or Sum
+ // will panic.
+ io.Reader
+
+ // Clone returns a copy of the ShakeHash in its current state.
+ Clone() ShakeHash
+}
+
+// cSHAKE specific context
+type cshakeState struct {
+ *state // SHA-3 state context and Read/Write operations
+
+ // initBlock is the cSHAKE specific initialization set of bytes. It is initialized
+ // by newCShake function and stores concatenation of N followed by S, encoded
+ // by the method specified in 3.3 of [1].
+ // It is stored here in order for Reset() to be able to put context into
+ // initial state.
+ initBlock []byte
+}
+
+// Consts for configuring initial SHA-3 state
+const (
+ dsbyteShake = 0x1f
+ dsbyteCShake = 0x04
+ rate128 = 168
+ rate256 = 136
+)
+
+func bytepad(input []byte, w int) []byte {
+ // leftEncode always returns max 9 bytes
+ buf := make([]byte, 0, 9+len(input)+w)
+ buf = append(buf, leftEncode(uint64(w))...)
+ buf = append(buf, input...)
+ padlen := w - (len(buf) % w)
+ return append(buf, make([]byte, padlen)...)
+}
+
+func leftEncode(value uint64) []byte {
+ var b [9]byte
+ binary.BigEndian.PutUint64(b[1:], value)
+ // Trim all but last leading zero bytes
+ i := byte(1)
+ for i < 8 && b[i] == 0 {
+ i++
+ }
+ // Prepend number of encoded bytes
+ b[i-1] = 9 - i
+ return b[i-1:]
+}
+
+func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash {
+ c := cshakeState{state: &state{rate: rate, outputLen: outputLen, dsbyte: dsbyte}}
+
+ // leftEncode returns max 9 bytes
+ c.initBlock = make([]byte, 0, 9*2+len(N)+len(S))
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...)
+ c.initBlock = append(c.initBlock, N...)
+ c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...)
+ c.initBlock = append(c.initBlock, S...)
+ c.Write(bytepad(c.initBlock, c.rate))
+ return &c
+}
+
+// Reset resets the hash to initial state.
+func (c *cshakeState) Reset() {
+ c.state.Reset()
+ c.Write(bytepad(c.initBlock, c.rate))
+}
+
+// Clone returns copy of a cSHAKE context within its current state.
+func (c *cshakeState) Clone() ShakeHash {
+ b := make([]byte, len(c.initBlock))
+ copy(b, c.initBlock)
+ return &cshakeState{state: c.clone(), initBlock: b}
+}
+
+// Clone returns copy of SHAKE context within its current state.
+func (c *state) Clone() ShakeHash {
+ return c.clone()
+}
+
+// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
+// Its generic security strength is 128 bits against all attacks if at
+// least 32 bytes of its output are used.
+func NewShake128() ShakeHash {
+ if h := newShake128Asm(); h != nil {
+ return h
+ }
+ return &state{rate: rate128, outputLen: 32, dsbyte: dsbyteShake}
+}
+
+// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
+// Its generic security strength is 256 bits against all attacks if
+// at least 64 bytes of its output are used.
+func NewShake256() ShakeHash {
+ if h := newShake256Asm(); h != nil {
+ return h
+ }
+ return &state{rate: rate256, outputLen: 64, dsbyte: dsbyteShake}
+}
+
+// NewCShake128 creates a new instance of cSHAKE128 variable-output-length ShakeHash,
+// a customizable variant of SHAKE128.
+// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is
+// desired. S is a customization byte string used for domain separation - two cSHAKE
+// computations on same input with different S yield unrelated outputs.
+// When N and S are both empty, this is equivalent to NewShake128.
+func NewCShake128(N, S []byte) ShakeHash {
+ if len(N) == 0 && len(S) == 0 {
+ return NewShake128()
+ }
+ return newCShake(N, S, rate128, 32, dsbyteCShake)
+}
+
+// NewCShake256 creates a new instance of cSHAKE256 variable-output-length ShakeHash,
+// a customizable variant of SHAKE256.
+// N is used to define functions based on cSHAKE, it can be empty when plain cSHAKE is
+// desired. S is a customization byte string used for domain separation - two cSHAKE
+// computations on same input with different S yield unrelated outputs.
+// When N and S are both empty, this is equivalent to NewShake256.
+func NewCShake256(N, S []byte) ShakeHash {
+ if len(N) == 0 && len(S) == 0 {
+ return NewShake256()
+ }
+ return newCShake(N, S, rate256, 64, dsbyteCShake)
+}
+
+// ShakeSum128 writes an arbitrary-length digest of data into hash.
+func ShakeSum128(hash, data []byte) {
+ h := NewShake128()
+ h.Write(data)
+ h.Read(hash)
+}
+
+// ShakeSum256 writes an arbitrary-length digest of data into hash.
+func ShakeSum256(hash, data []byte) {
+ h := NewShake256()
+ h.Write(data)
+ h.Read(hash)
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/shake_generic.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/shake_generic.go
new file mode 100644
index 00000000000..8d31cf5be2d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/shake_generic.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !gc || purego || !s390x
+
+package sha3
+
+// newShake128Asm returns an assembly implementation of SHAKE-128 if available,
+// otherwise it returns nil.
+func newShake128Asm() ShakeHash {
+ return nil
+}
+
+// newShake256Asm returns an assembly implementation of SHAKE-256 if available,
+// otherwise it returns nil.
+func newShake256Asm() ShakeHash {
+ return nil
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor.go
new file mode 100644
index 00000000000..7337cca88ed
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (!amd64 && !386 && !ppc64le) || purego
+
+package sha3
+
+// A storageBuf is an aligned array of maxRate bytes.
+type storageBuf [maxRate]byte
+
+func (b *storageBuf) asBytes() *[maxRate]byte {
+ return (*[maxRate]byte)(b)
+}
+
+var (
+ xorIn = xorInGeneric
+ copyOut = copyOutGeneric
+ xorInUnaligned = xorInGeneric
+ copyOutUnaligned = copyOutGeneric
+)
+
+const xorImplementationUnaligned = "generic"
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor_generic.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor_generic.go
new file mode 100644
index 00000000000..8d947711272
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor_generic.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+import "encoding/binary"
+
+// xorInGeneric xors the bytes in buf into the state; it
+// makes no non-portable assumptions about memory layout
+// or alignment.
+func xorInGeneric(d *state, buf []byte) {
+ n := len(buf) / 8
+
+ for i := 0; i < n; i++ {
+ a := binary.LittleEndian.Uint64(buf)
+ d.a[i] ^= a
+ buf = buf[8:]
+ }
+}
+
+// copyOutGeneric copies uint64s to a byte buffer.
+func copyOutGeneric(d *state, b []byte) {
+ for i := 0; len(b) >= 8; i++ {
+ binary.LittleEndian.PutUint64(b, d.a[i])
+ b = b[8:]
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
new file mode 100644
index 00000000000..870e2d16e07
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/crypto/sha3/xor_unaligned.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (amd64 || 386 || ppc64le) && !purego
+
+package sha3
+
+import "unsafe"
+
+// A storageBuf is an aligned array of maxRate bytes.
+type storageBuf [maxRate / 8]uint64
+
+func (b *storageBuf) asBytes() *[maxRate]byte {
+ return (*[maxRate]byte)(unsafe.Pointer(b))
+}
+
+// xorInUnaligned uses unaligned reads and writes to update d.a to contain d.a
+// XOR buf.
+func xorInUnaligned(d *state, buf []byte) {
+ n := len(buf)
+ bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
+ if n >= 72 {
+ d.a[0] ^= bw[0]
+ d.a[1] ^= bw[1]
+ d.a[2] ^= bw[2]
+ d.a[3] ^= bw[3]
+ d.a[4] ^= bw[4]
+ d.a[5] ^= bw[5]
+ d.a[6] ^= bw[6]
+ d.a[7] ^= bw[7]
+ d.a[8] ^= bw[8]
+ }
+ if n >= 104 {
+ d.a[9] ^= bw[9]
+ d.a[10] ^= bw[10]
+ d.a[11] ^= bw[11]
+ d.a[12] ^= bw[12]
+ }
+ if n >= 136 {
+ d.a[13] ^= bw[13]
+ d.a[14] ^= bw[14]
+ d.a[15] ^= bw[15]
+ d.a[16] ^= bw[16]
+ }
+ if n >= 144 {
+ d.a[17] ^= bw[17]
+ }
+ if n >= 168 {
+ d.a[18] ^= bw[18]
+ d.a[19] ^= bw[19]
+ d.a[20] ^= bw[20]
+ }
+}
+
+func copyOutUnaligned(d *state, buf []byte) {
+ ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
+ copy(buf, ab[:])
+}
+
+var (
+ xorIn = xorInUnaligned
+ copyOut = copyOutUnaligned
+)
+
+const xorImplementationUnaligned = "unaligned"
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/AUTHORS b/integration/assets/hydrabroker/vendor/golang.org/x/net/AUTHORS
deleted file mode 100644
index 15167cd746c..00000000000
--- a/integration/assets/hydrabroker/vendor/golang.org/x/net/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/CONTRIBUTORS b/integration/assets/hydrabroker/vendor/golang.org/x/net/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9680..00000000000
--- a/integration/assets/hydrabroker/vendor/golang.org/x/net/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/context/context.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 00000000000..cf66309c4a8
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context. https://golang.org/pkg/context.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/context/go17.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 00000000000..0c1b8679376
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+
+package context
+
+import (
+ "context" // standard library's context, as of Go 1.7
+ "time"
+)
+
+var (
+ todo = context.TODO()
+ background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ ctx, f := context.WithCancel(parent)
+ return ctx, f
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ ctx, f := context.WithDeadline(parent, deadline)
+ return ctx, f
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/context/go19.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/go19.go
new file mode 100644
index 00000000000..e31e35a9045
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/go19.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/context/pre_go17.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 00000000000..065ff3dfa52
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, c)
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+ return &cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ *cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/context/pre_go19.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 00000000000..ec5a6380335
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stores using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/const.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/const.go
index a3a918f0b38..ff7acf2d5b4 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/const.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/const.go
@@ -52,8 +52,7 @@ var isSpecialElementMap = map[string]bool{
"iframe": true,
"img": true,
"input": true,
- "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility.
- "keygen": true,
+ "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
"li": true,
"link": true,
"listing": true,
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/doc.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/doc.go
index 822ed42a04c..3a7e5ab1765 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/doc.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/doc.go
@@ -92,6 +92,27 @@ example, to process each anchor node in depth-first order:
The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
https://html.spec.whatwg.org/multipage/syntax.html#tokenization
+
+# Security Considerations
+
+Care should be taken when parsing and interpreting HTML, whether full documents
+or fragments, within the framework of the HTML specification, especially with
+regard to untrusted inputs.
+
+This package provides both a tokenizer and a parser, which implement the
+tokenization, and tokenization and tree construction stages of the WHATWG HTML
+parsing specification respectively. While the tokenizer parses and normalizes
+individual HTML tokens, only the parser constructs the DOM tree from the
+tokenized HTML, as described in the tree construction stage of the
+specification, dynamically modifying or extending the document's DOM tree.
+
+If your use case requires semantically well-formed HTML documents, as defined by
+the WHATWG specification, the parser should be used rather than the tokenizer.
+
+In security contexts, if trust decisions are being made using the tokenized or
+parsed content, the input must be re-serialized (for instance by using Render or
+Token.String) in order for those trust decisions to hold, as the process of
+tokenization or parsing may alter the content.
*/
package html // import "golang.org/x/net/html"
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/escape.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/escape.go
index d8561396200..04c6bec2107 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/escape.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/escape.go
@@ -193,6 +193,87 @@ func lower(b []byte) []byte {
return b
}
+// escapeComment is like func escape but escapes its input bytes less often.
+// Per https://github.com/golang/go/issues/58246 some HTML comments are (1)
+// meaningful and (2) contain angle brackets that we'd like to avoid escaping
+// unless we have to.
+//
+// "We have to" includes the '&' byte, since that introduces other escapes.
+//
+// It also includes those bytes (not including EOF) that would otherwise end
+// the comment. Per the summary table at the bottom of comment_test.go, this is
+// the '>' byte that, per above, we'd like to avoid escaping unless we have to.
+//
+// Studying the summary table (and T actions in its '>' column) closely, we
+// only need to escape in states 43, 44, 49, 51 and 52. State 43 is at the
+// start of the comment data. State 52 is after a '!'. The other three states
+// are after a '-'.
+//
+// Our algorithm is thus to escape every '&' and to escape '>' if and only if:
+// - The '>' is after a '!' or '-' (in the unescaped data) or
+// - The '>' is at the start of the comment data (after the opening ""); err != nil {
@@ -96,7 +96,7 @@ func render1(w writer, n *Node) error {
if _, err := w.WriteString("')
+ case RawNode:
+ _, err := w.WriteString(n.Data)
+ return err
default:
return errors.New("html: unknown node type")
}
@@ -191,9 +194,8 @@ func render1(w writer, n *Node) error {
}
}
- // Render any child nodes.
- switch n.Data {
- case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+ // Render any child nodes
+ if childTextNodesAreLiteral(n) {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if c.Type == TextNode {
if _, err := w.WriteString(c.Data); err != nil {
@@ -210,7 +212,7 @@ func render1(w writer, n *Node) error {
// last element in the file, with no closing tag.
return plaintextAbort
}
- default:
+ } else {
for c := n.FirstChild; c != nil; c = c.NextSibling {
if err := render1(w, c); err != nil {
return err
@@ -228,6 +230,27 @@ func render1(w writer, n *Node) error {
return w.WriteByte('>')
}
+func childTextNodesAreLiteral(n *Node) bool {
+ // Per WHATWG HTML 13.3, if the parent of the current node is a style,
+ // script, xmp, iframe, noembed, noframes, or plaintext element, and the
+ // current node is a text node, append the value of the node's data
+ // literally. The specification is not explicit about it, but we only
+ // enforce this if we are in the HTML namespace (i.e. when the namespace is
+ // "").
+ // NOTE: we also always include noscript elements, although the
+ // specification states that they should only be rendered as such if
+ // scripting is enabled for the node (which is not something we track).
+ if n.Namespace != "" {
+ return false
+ }
+ switch n.Data {
+ case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "xmp":
+ return true
+ default:
+ return false
+ }
+}
+
// writeQuoted writes s to w surrounded by quotes. Normally it will use double
// quotes, but if s contains a double quote, it will use single quotes.
// It is used for writing the identifiers in a doctype declaration.
@@ -252,20 +275,19 @@ func writeQuoted(w writer, s string) error {
// Section 12.1.2, "Elements", gives this list of void elements. Void elements
// are those that can't have any contents.
var voidElements = map[string]bool{
- "area": true,
- "base": true,
- "br": true,
- "col": true,
- "command": true,
- "embed": true,
- "hr": true,
- "img": true,
- "input": true,
- "keygen": true,
- "link": true,
- "meta": true,
- "param": true,
- "source": true,
- "track": true,
- "wbr": true,
+ "area": true,
+ "base": true,
+ "br": true,
+ "col": true,
+ "embed": true,
+ "hr": true,
+ "img": true,
+ "input": true,
+ "keygen": true, // "keygen" has been removed from the spec, but are kept here for backwards compatibility.
+ "link": true,
+ "meta": true,
+ "param": true,
+ "source": true,
+ "track": true,
+ "wbr": true,
}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/token.go b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/token.go
index e3c01d7c906..3c57880d697 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/net/html/token.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/net/html/token.go
@@ -110,9 +110,9 @@ func (t Token) String() string {
case SelfClosingTagToken:
return "<" + t.tagString() + "/>"
case CommentToken:
- return ""
+ return ""
case DoctypeToken:
- return ""
+ return ""
}
return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}
@@ -296,8 +296,7 @@ func (z *Tokenizer) Buffered() []byte {
// too many times in succession.
func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
for i := 0; i < 100; i++ {
- n, err := r.Read(b)
- if n != 0 || err != nil {
+ if n, err := r.Read(b); n != 0 || err != nil {
return n, err
}
}
@@ -347,6 +346,7 @@ loop:
break loop
}
if c != '/' {
+ z.raw.end--
continue loop
}
if z.readRawEndTag() || z.err != nil {
@@ -598,6 +598,11 @@ scriptDataDoubleEscapeEnd:
// readComment reads the next comment token starting with "")
return
}
@@ -628,19 +632,52 @@ func (z *Tokenizer) readComment() {
if dashCount >= 2 {
c = z.readByte()
if z.err != nil {
- z.data.end = z.raw.end
+ z.data.end = z.calculateAbruptCommentDataEnd()
return
- }
- if c == '>' {
+ } else if c == '>' {
z.data.end = z.raw.end - len("--!>")
return
+ } else if c == '-' {
+ dashCount = 1
+ beginning = false
+ continue
}
}
}
dashCount = 0
+ beginning = false
}
}
+func (z *Tokenizer) calculateAbruptCommentDataEnd() int {
+ raw := z.Raw()
+ const prefixLen = len(" R1
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ MOVD 0(R7), R1
+
+ // arg 2 ---> R2
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R2
+
+ // arg 3 --> R3
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R3
+
+ CMP R8, $0
+ BEQ docall
+ MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
+
+repeat:
+ ADD $8, R7
+ MOVD 0(R7), R0 // advance arg pointer by 8 byte
+ ADD $8, R6 // advance LE argument address by 8 byte
+ MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
+ SUB $1, R8
+ CMP R8, $0
+ BNE repeat
+
+docall:
+ MOVD funcdesc+0(FP), R8 // R8-> function descriptor
+ LMG 0(R8), R5, R6
+ MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
+ LE_CALL // balr R7, R6 (return #1)
+ NOPH
+ MOVD R3, ret+32(FP)
+ CMP R3, $-1 // compare result to -1
+ BNE done
+
+ // retrieve errno and errno2
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__errno), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __errno (return #3)
+ NOPH
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__err2ad), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __err2ad (return #2)
+ NOPH
+ MOVW (R3), R2 // retrieve errno2
+ MOVD R2, errno2+40(FP) // store in return area
+
+done:
+ MOVD R4, 0(R9) // Save stack pointer.
+ RET
+
+//
+// Call LE function, if the return is 0
+// errno and errno2 is retrieved
+//
+TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0
+ MOVW PSALAA, R8
+ MOVD LCA64(R8), R8
+ MOVD CAA(R8), R9
+ MOVD g, GOCB(R9)
+
+ // Restore LE stack.
+ MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address
+ MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer
+
+ MOVD parms_base+8(FP), R7 // R7 -> argument array
+ MOVD parms_len+16(FP), R8 // R8 number of arguments
+
+ // arg 1 ---> R1
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ MOVD 0(R7), R1
+
+ // arg 2 ---> R2
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R2
+
+ // arg 3 --> R3
+ CMP R8, $0
+ BEQ docall
+ SUB $1, R8
+ ADD $8, R7
+ MOVD 0(R7), R3
+
+ CMP R8, $0
+ BEQ docall
+ MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument
+
+repeat:
+ ADD $8, R7
+ MOVD 0(R7), R0 // advance arg pointer by 8 byte
+ ADD $8, R6 // advance LE argument address by 8 byte
+ MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame
+ SUB $1, R8
+ CMP R8, $0
+ BNE repeat
+
+docall:
+ MOVD funcdesc+0(FP), R8 // R8-> function descriptor
+ LMG 0(R8), R5, R6
+ MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC
+ LE_CALL // balr R7, R6 (return #1)
+ NOPH
+ MOVD R3, ret+32(FP)
+ CMP R3, $0 // compare result to 0
+ BNE done
+
+ // retrieve errno and errno2
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__errno), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __errno (return #3)
+ NOPH
+ MOVWZ 0(R3), R3
+ MOVD R3, err+48(FP)
+ MOVD zosLibVec<>(SB), R8
+ ADD $(__err2ad), R8
+ LMG 0(R8), R5, R6
+ LE_CALL // balr R7, R6 __err2ad (return #2)
+ NOPH
+ MOVW (R3), R2 // retrieve errno2
+ MOVD R2, errno2+40(FP) // store in return area
+ XOR R2, R2
+ MOVWZ R2, (R3) // clear errno2
+
+done:
+ MOVD R4, 0(R9) // Save stack pointer.
+ RET
+
+//
+// function to test if a pointer can be safely dereferenced (content read)
+// return 0 for succces
+//
+TEXT ·ptrtest(SB), NOSPLIT, $0-16
+ MOVD arg+0(FP), R10 // test pointer in R10
+
+ // set up R2 to point to CEECAADMC
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+
+ // set up R5 to point to the "shunt" path which set 1 to R3 (failure)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+
+ // if r3 is not zero (failed) then branch to finish
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+
+ // stomic store shunt address in R5 into CEECAADMC
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+
+ // now try reading from the test pointer in R10, if it fails it branches to the "lghi" instruction above
+ BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10)
+
+ // finish here, restore 0 into CEECAADMC
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R3, ret+8(FP) // result in R3
+ RET
+
+//
+// function to test if a untptr can be loaded from a pointer
+// return 1: the 8-byte content
+// 2: 0 for success, 1 for failure
+//
+// func safeload(ptr uintptr) ( value uintptr, error uintptr)
+TEXT ·safeload(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R10 // test pointer in R10
+ MOVD $0x0, R6
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+ BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R6, value+8(FP) // result in R6
+ MOVD R3, error+16(FP) // error in R3
+ RET
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bluetooth_linux.go
index 6e322969706..a178a6149bb 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bluetooth_linux.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bluetooth_linux.go
@@ -23,6 +23,7 @@ const (
HCI_CHANNEL_USER = 1
HCI_CHANNEL_MONITOR = 2
HCI_CHANNEL_CONTROL = 3
+ HCI_CHANNEL_LOGGING = 4
)
// Socketoption Level
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
new file mode 100644
index 00000000000..39d647d863a
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
@@ -0,0 +1,657 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos
+
+package unix
+
+import (
+ "bytes"
+ "fmt"
+ "unsafe"
+)
+
+//go:noescape
+func bpxcall(plist []unsafe.Pointer, bpx_offset int64)
+
+//go:noescape
+func A2e([]byte)
+
+//go:noescape
+func E2a([]byte)
+
+const (
+ BPX4STA = 192 // stat
+ BPX4FST = 104 // fstat
+ BPX4LST = 132 // lstat
+ BPX4OPN = 156 // open
+ BPX4CLO = 72 // close
+ BPX4CHR = 500 // chattr
+ BPX4FCR = 504 // fchattr
+ BPX4LCR = 1180 // lchattr
+ BPX4CTW = 492 // cond_timed_wait
+ BPX4GTH = 1056 // __getthent
+ BPX4PTQ = 412 // pthread_quiesc
+ BPX4PTR = 320 // ptrace
+)
+
+const (
+ //options
+ //byte1
+ BPX_OPNFHIGH = 0x80
+ //byte2
+ BPX_OPNFEXEC = 0x80
+ //byte3
+ BPX_O_NOLARGEFILE = 0x08
+ BPX_O_LARGEFILE = 0x04
+ BPX_O_ASYNCSIG = 0x02
+ BPX_O_SYNC = 0x01
+ //byte4
+ BPX_O_CREXCL = 0xc0
+ BPX_O_CREAT = 0x80
+ BPX_O_EXCL = 0x40
+ BPX_O_NOCTTY = 0x20
+ BPX_O_TRUNC = 0x10
+ BPX_O_APPEND = 0x08
+ BPX_O_NONBLOCK = 0x04
+ BPX_FNDELAY = 0x04
+ BPX_O_RDWR = 0x03
+ BPX_O_RDONLY = 0x02
+ BPX_O_WRONLY = 0x01
+ BPX_O_ACCMODE = 0x03
+ BPX_O_GETFL = 0x0f
+
+ //mode
+ // byte1 (file type)
+ BPX_FT_DIR = 1
+ BPX_FT_CHARSPEC = 2
+ BPX_FT_REGFILE = 3
+ BPX_FT_FIFO = 4
+ BPX_FT_SYMLINK = 5
+ BPX_FT_SOCKET = 6
+ //byte3
+ BPX_S_ISUID = 0x08
+ BPX_S_ISGID = 0x04
+ BPX_S_ISVTX = 0x02
+ BPX_S_IRWXU1 = 0x01
+ BPX_S_IRUSR = 0x01
+ //byte4
+ BPX_S_IRWXU2 = 0xc0
+ BPX_S_IWUSR = 0x80
+ BPX_S_IXUSR = 0x40
+ BPX_S_IRWXG = 0x38
+ BPX_S_IRGRP = 0x20
+ BPX_S_IWGRP = 0x10
+ BPX_S_IXGRP = 0x08
+ BPX_S_IRWXOX = 0x07
+ BPX_S_IROTH = 0x04
+ BPX_S_IWOTH = 0x02
+ BPX_S_IXOTH = 0x01
+
+ CW_INTRPT = 1
+ CW_CONDVAR = 32
+ CW_TIMEOUT = 64
+
+ PGTHA_NEXT = 2
+ PGTHA_CURRENT = 1
+ PGTHA_FIRST = 0
+ PGTHA_LAST = 3
+ PGTHA_PROCESS = 0x80
+ PGTHA_CONTTY = 0x40
+ PGTHA_PATH = 0x20
+ PGTHA_COMMAND = 0x10
+ PGTHA_FILEDATA = 0x08
+ PGTHA_THREAD = 0x04
+ PGTHA_PTAG = 0x02
+ PGTHA_COMMANDLONG = 0x01
+ PGTHA_THREADFAST = 0x80
+ PGTHA_FILEPATH = 0x40
+ PGTHA_THDSIGMASK = 0x20
+ // thread quiece mode
+ QUIESCE_TERM int32 = 1
+ QUIESCE_FORCE int32 = 2
+ QUIESCE_QUERY int32 = 3
+ QUIESCE_FREEZE int32 = 4
+ QUIESCE_UNFREEZE int32 = 5
+ FREEZE_THIS_THREAD int32 = 6
+ FREEZE_EXIT int32 = 8
+ QUIESCE_SRB int32 = 9
+)
+
+type Pgtha struct {
+ Pid uint32 // 0
+ Tid0 uint32 // 4
+ Tid1 uint32
+ Accesspid byte // C
+ Accesstid byte // D
+ Accessasid uint16 // E
+ Loginname [8]byte // 10
+ Flag1 byte // 18
+ Flag1b2 byte // 19
+}
+
+type Bpxystat_t struct { // DSECT BPXYSTAT
+ St_id [4]uint8 // 0
+ St_length uint16 // 0x4
+ St_version uint16 // 0x6
+ St_mode uint32 // 0x8
+ St_ino uint32 // 0xc
+ St_dev uint32 // 0x10
+ St_nlink uint32 // 0x14
+ St_uid uint32 // 0x18
+ St_gid uint32 // 0x1c
+ St_size uint64 // 0x20
+ St_atime uint32 // 0x28
+ St_mtime uint32 // 0x2c
+ St_ctime uint32 // 0x30
+ St_rdev uint32 // 0x34
+ St_auditoraudit uint32 // 0x38
+ St_useraudit uint32 // 0x3c
+ St_blksize uint32 // 0x40
+ St_createtime uint32 // 0x44
+ St_auditid [4]uint32 // 0x48
+ St_res01 uint32 // 0x58
+ Ft_ccsid uint16 // 0x5c
+ Ft_flags uint16 // 0x5e
+ St_res01a [2]uint32 // 0x60
+ St_res02 uint32 // 0x68
+ St_blocks uint32 // 0x6c
+ St_opaque [3]uint8 // 0x70
+ St_visible uint8 // 0x73
+ St_reftime uint32 // 0x74
+ St_fid uint64 // 0x78
+ St_filefmt uint8 // 0x80
+ St_fspflag2 uint8 // 0x81
+ St_res03 [2]uint8 // 0x82
+ St_ctimemsec uint32 // 0x84
+ St_seclabel [8]uint8 // 0x88
+ St_res04 [4]uint8 // 0x90
+ // end of version 1
+ _ uint32 // 0x94
+ St_atime64 uint64 // 0x98
+ St_mtime64 uint64 // 0xa0
+ St_ctime64 uint64 // 0xa8
+ St_createtime64 uint64 // 0xb0
+ St_reftime64 uint64 // 0xb8
+ _ uint64 // 0xc0
+ St_res05 [16]uint8 // 0xc8
+ // end of version 2
+}
+
+type BpxFilestatus struct {
+ Oflag1 byte
+ Oflag2 byte
+ Oflag3 byte
+ Oflag4 byte
+}
+
+type BpxMode struct {
+ Ftype byte
+ Mode1 byte
+ Mode2 byte
+ Mode3 byte
+}
+
+// Thr attribute structure for extended attributes
+type Bpxyatt_t struct { // DSECT BPXYATT
+ Att_id [4]uint8
+ Att_version uint16
+ Att_res01 [2]uint8
+ Att_setflags1 uint8
+ Att_setflags2 uint8
+ Att_setflags3 uint8
+ Att_setflags4 uint8
+ Att_mode uint32
+ Att_uid uint32
+ Att_gid uint32
+ Att_opaquemask [3]uint8
+ Att_visblmaskres uint8
+ Att_opaque [3]uint8
+ Att_visibleres uint8
+ Att_size_h uint32
+ Att_size_l uint32
+ Att_atime uint32
+ Att_mtime uint32
+ Att_auditoraudit uint32
+ Att_useraudit uint32
+ Att_ctime uint32
+ Att_reftime uint32
+ // end of version 1
+ Att_filefmt uint8
+ Att_res02 [3]uint8
+ Att_filetag uint32
+ Att_res03 [8]uint8
+ // end of version 2
+ Att_atime64 uint64
+ Att_mtime64 uint64
+ Att_ctime64 uint64
+ Att_reftime64 uint64
+ Att_seclabel [8]uint8
+ Att_ver3res02 [8]uint8
+ // end of version 3
+}
+
+func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) {
+ if len(name) < 1024 {
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], name))
+ A2e(namebuf[:sz])
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(options)
+ parms[3] = unsafe.Pointer(mode)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4OPN)
+ return rv, rc, rn
+ }
+ return -1, -1, -1
+}
+
+func BpxClose(fd int32) (rv int32, rc int32, rn int32) {
+ var parms [4]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&fd)
+ parms[1] = unsafe.Pointer(&rv)
+ parms[2] = unsafe.Pointer(&rc)
+ parms[3] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4CLO)
+ return rv, rc, rn
+}
+
+func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
+ st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
+ st.St_version = 2
+ stat_sz := uint32(unsafe.Sizeof(*st))
+ var parms [6]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&fd)
+ parms[1] = unsafe.Pointer(&stat_sz)
+ parms[2] = unsafe.Pointer(st)
+ parms[3] = unsafe.Pointer(&rv)
+ parms[4] = unsafe.Pointer(&rc)
+ parms[5] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4FST)
+ return rv, rc, rn
+}
+
+func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
+ if len(name) < 1024 {
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], name))
+ A2e(namebuf[:sz])
+ st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
+ st.St_version = 2
+ stat_sz := uint32(unsafe.Sizeof(*st))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&stat_sz)
+ parms[3] = unsafe.Pointer(st)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4STA)
+ return rv, rc, rn
+ }
+ return -1, -1, -1
+}
+
+func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) {
+ if len(name) < 1024 {
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], name))
+ A2e(namebuf[:sz])
+ st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3}
+ st.St_version = 2
+ stat_sz := uint32(unsafe.Sizeof(*st))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&stat_sz)
+ parms[3] = unsafe.Pointer(st)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4LST)
+ return rv, rc, rn
+ }
+ return -1, -1, -1
+}
+
+func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
+ if len(path) >= 1024 {
+ return -1, -1, -1
+ }
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], path))
+ A2e(namebuf[:sz])
+ attr_sz := uint32(unsafe.Sizeof(*attr))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&attr_sz)
+ parms[3] = unsafe.Pointer(attr)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4CHR)
+ return rv, rc, rn
+}
+
+func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
+ if len(path) >= 1024 {
+ return -1, -1, -1
+ }
+ var namebuf [1024]byte
+ sz := int32(copy(namebuf[:], path))
+ A2e(namebuf[:sz])
+ attr_sz := uint32(unsafe.Sizeof(*attr))
+ var parms [7]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sz)
+ parms[1] = unsafe.Pointer(&namebuf[0])
+ parms[2] = unsafe.Pointer(&attr_sz)
+ parms[3] = unsafe.Pointer(attr)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4LCR)
+ return rv, rc, rn
+}
+
+func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) {
+ attr_sz := uint32(unsafe.Sizeof(*attr))
+ var parms [6]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&fd)
+ parms[1] = unsafe.Pointer(&attr_sz)
+ parms[2] = unsafe.Pointer(attr)
+ parms[3] = unsafe.Pointer(&rv)
+ parms[4] = unsafe.Pointer(&rc)
+ parms[5] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4FCR)
+ return rv, rc, rn
+}
+
+func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) {
+ var parms [8]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&sec)
+ parms[1] = unsafe.Pointer(&nsec)
+ parms[2] = unsafe.Pointer(&events)
+ parms[3] = unsafe.Pointer(secrem)
+ parms[4] = unsafe.Pointer(nsecrem)
+ parms[5] = unsafe.Pointer(&rv)
+ parms[6] = unsafe.Pointer(&rc)
+ parms[7] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4CTW)
+ return rv, rc, rn
+}
+func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) {
+ var parms [7]unsafe.Pointer
+ inlen := uint32(26) // nothing else will work. Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte
+ parms[0] = unsafe.Pointer(&inlen)
+ parms[1] = unsafe.Pointer(&in)
+ parms[2] = unsafe.Pointer(outlen)
+ parms[3] = unsafe.Pointer(&out)
+ parms[4] = unsafe.Pointer(&rv)
+ parms[5] = unsafe.Pointer(&rc)
+ parms[6] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4GTH)
+ return rv, rc, rn
+}
+func ZosJobname() (jobname string, err error) {
+ var pgtha Pgtha
+ pgtha.Pid = uint32(Getpid())
+ pgtha.Accesspid = PGTHA_CURRENT
+ pgtha.Flag1 = PGTHA_PROCESS
+ var out [256]byte
+ var outlen uint32
+ outlen = 256
+ rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0]))
+ if rv == 0 {
+ gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic
+ ix := bytes.Index(out[:], gthc)
+ if ix == -1 {
+ err = fmt.Errorf("BPX4GTH: gthc return data not found")
+ return
+ }
+ jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80
+ E2a(jn)
+ jobname = string(bytes.TrimRight(jn, " "))
+
+ } else {
+ err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn)
+ }
+ return
+}
+func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) {
+ var userdata [8]byte
+ var parms [5]unsafe.Pointer
+ copy(userdata[:], data+" ")
+ A2e(userdata[:])
+ parms[0] = unsafe.Pointer(&code)
+ parms[1] = unsafe.Pointer(&userdata[0])
+ parms[2] = unsafe.Pointer(&rv)
+ parms[3] = unsafe.Pointer(&rc)
+ parms[4] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4PTQ)
+ return rv, rc, rn
+}
+
+const (
+ PT_TRACE_ME = 0 // Debug this process
+ PT_READ_I = 1 // Read a full word
+ PT_READ_D = 2 // Read a full word
+ PT_READ_U = 3 // Read control info
+ PT_WRITE_I = 4 //Write a full word
+ PT_WRITE_D = 5 //Write a full word
+ PT_CONTINUE = 7 //Continue the process
+ PT_KILL = 8 //Terminate the process
+ PT_READ_GPR = 11 // Read GPR, CR, PSW
+ PT_READ_FPR = 12 // Read FPR
+ PT_READ_VR = 13 // Read VR
+ PT_WRITE_GPR = 14 // Write GPR, CR, PSW
+ PT_WRITE_FPR = 15 // Write FPR
+ PT_WRITE_VR = 16 // Write VR
+ PT_READ_BLOCK = 17 // Read storage
+ PT_WRITE_BLOCK = 19 // Write storage
+ PT_READ_GPRH = 20 // Read GPRH
+ PT_WRITE_GPRH = 21 // Write GPRH
+ PT_REGHSET = 22 // Read all GPRHs
+ PT_ATTACH = 30 // Attach to a process
+ PT_DETACH = 31 // Detach from a process
+ PT_REGSET = 32 // Read all GPRs
+ PT_REATTACH = 33 // Reattach to a process
+ PT_LDINFO = 34 // Read loader info
+ PT_MULTI = 35 // Multi process mode
+ PT_LD64INFO = 36 // RMODE64 Info Area
+ PT_BLOCKREQ = 40 // Block request
+ PT_THREAD_INFO = 60 // Read thread info
+ PT_THREAD_MODIFY = 61
+ PT_THREAD_READ_FOCUS = 62
+ PT_THREAD_WRITE_FOCUS = 63
+ PT_THREAD_HOLD = 64
+ PT_THREAD_SIGNAL = 65
+ PT_EXPLAIN = 66
+ PT_EVENTS = 67
+ PT_THREAD_INFO_EXTENDED = 68
+ PT_REATTACH2 = 71
+ PT_CAPTURE = 72
+ PT_UNCAPTURE = 73
+ PT_GET_THREAD_TCB = 74
+ PT_GET_ALET = 75
+ PT_SWAPIN = 76
+ PT_EXTENDED_EVENT = 98
+ PT_RECOVER = 99 // Debug a program check
+ PT_GPR0 = 0 // General purpose register 0
+ PT_GPR1 = 1 // General purpose register 1
+ PT_GPR2 = 2 // General purpose register 2
+ PT_GPR3 = 3 // General purpose register 3
+ PT_GPR4 = 4 // General purpose register 4
+ PT_GPR5 = 5 // General purpose register 5
+ PT_GPR6 = 6 // General purpose register 6
+ PT_GPR7 = 7 // General purpose register 7
+ PT_GPR8 = 8 // General purpose register 8
+ PT_GPR9 = 9 // General purpose register 9
+ PT_GPR10 = 10 // General purpose register 10
+ PT_GPR11 = 11 // General purpose register 11
+ PT_GPR12 = 12 // General purpose register 12
+ PT_GPR13 = 13 // General purpose register 13
+ PT_GPR14 = 14 // General purpose register 14
+ PT_GPR15 = 15 // General purpose register 15
+ PT_FPR0 = 16 // Floating point register 0
+ PT_FPR1 = 17 // Floating point register 1
+ PT_FPR2 = 18 // Floating point register 2
+ PT_FPR3 = 19 // Floating point register 3
+ PT_FPR4 = 20 // Floating point register 4
+ PT_FPR5 = 21 // Floating point register 5
+ PT_FPR6 = 22 // Floating point register 6
+ PT_FPR7 = 23 // Floating point register 7
+ PT_FPR8 = 24 // Floating point register 8
+ PT_FPR9 = 25 // Floating point register 9
+ PT_FPR10 = 26 // Floating point register 10
+ PT_FPR11 = 27 // Floating point register 11
+ PT_FPR12 = 28 // Floating point register 12
+ PT_FPR13 = 29 // Floating point register 13
+ PT_FPR14 = 30 // Floating point register 14
+ PT_FPR15 = 31 // Floating point register 15
+ PT_FPC = 32 // Floating point control register
+ PT_PSW = 40 // PSW
+ PT_PSW0 = 40 // Left half of the PSW
+ PT_PSW1 = 41 // Right half of the PSW
+ PT_CR0 = 42 // Control register 0
+ PT_CR1 = 43 // Control register 1
+ PT_CR2 = 44 // Control register 2
+ PT_CR3 = 45 // Control register 3
+ PT_CR4 = 46 // Control register 4
+ PT_CR5 = 47 // Control register 5
+ PT_CR6 = 48 // Control register 6
+ PT_CR7 = 49 // Control register 7
+ PT_CR8 = 50 // Control register 8
+ PT_CR9 = 51 // Control register 9
+ PT_CR10 = 52 // Control register 10
+ PT_CR11 = 53 // Control register 11
+ PT_CR12 = 54 // Control register 12
+ PT_CR13 = 55 // Control register 13
+ PT_CR14 = 56 // Control register 14
+ PT_CR15 = 57 // Control register 15
+ PT_GPRH0 = 58 // GP High register 0
+ PT_GPRH1 = 59 // GP High register 1
+ PT_GPRH2 = 60 // GP High register 2
+ PT_GPRH3 = 61 // GP High register 3
+ PT_GPRH4 = 62 // GP High register 4
+ PT_GPRH5 = 63 // GP High register 5
+ PT_GPRH6 = 64 // GP High register 6
+ PT_GPRH7 = 65 // GP High register 7
+ PT_GPRH8 = 66 // GP High register 8
+ PT_GPRH9 = 67 // GP High register 9
+ PT_GPRH10 = 68 // GP High register 10
+ PT_GPRH11 = 69 // GP High register 11
+ PT_GPRH12 = 70 // GP High register 12
+ PT_GPRH13 = 71 // GP High register 13
+ PT_GPRH14 = 72 // GP High register 14
+ PT_GPRH15 = 73 // GP High register 15
+ PT_VR0 = 74 // Vector register 0
+ PT_VR1 = 75 // Vector register 1
+ PT_VR2 = 76 // Vector register 2
+ PT_VR3 = 77 // Vector register 3
+ PT_VR4 = 78 // Vector register 4
+ PT_VR5 = 79 // Vector register 5
+ PT_VR6 = 80 // Vector register 6
+ PT_VR7 = 81 // Vector register 7
+ PT_VR8 = 82 // Vector register 8
+ PT_VR9 = 83 // Vector register 9
+ PT_VR10 = 84 // Vector register 10
+ PT_VR11 = 85 // Vector register 11
+ PT_VR12 = 86 // Vector register 12
+ PT_VR13 = 87 // Vector register 13
+ PT_VR14 = 88 // Vector register 14
+ PT_VR15 = 89 // Vector register 15
+ PT_VR16 = 90 // Vector register 16
+ PT_VR17 = 91 // Vector register 17
+ PT_VR18 = 92 // Vector register 18
+ PT_VR19 = 93 // Vector register 19
+ PT_VR20 = 94 // Vector register 20
+ PT_VR21 = 95 // Vector register 21
+ PT_VR22 = 96 // Vector register 22
+ PT_VR23 = 97 // Vector register 23
+ PT_VR24 = 98 // Vector register 24
+ PT_VR25 = 99 // Vector register 25
+ PT_VR26 = 100 // Vector register 26
+ PT_VR27 = 101 // Vector register 27
+ PT_VR28 = 102 // Vector register 28
+ PT_VR29 = 103 // Vector register 29
+ PT_VR30 = 104 // Vector register 30
+ PT_VR31 = 105 // Vector register 31
+ PT_PSWG = 106 // PSWG
+ PT_PSWG0 = 106 // Bytes 0-3
+ PT_PSWG1 = 107 // Bytes 4-7
+ PT_PSWG2 = 108 // Bytes 8-11 (IA high word)
+ PT_PSWG3 = 109 // Bytes 12-15 (IA low word)
+)
+
+func Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) {
+ var parms [8]unsafe.Pointer
+ parms[0] = unsafe.Pointer(&request)
+ parms[1] = unsafe.Pointer(&pid)
+ parms[2] = unsafe.Pointer(&addr)
+ parms[3] = unsafe.Pointer(&data)
+ parms[4] = unsafe.Pointer(&buffer)
+ parms[5] = unsafe.Pointer(&rv)
+ parms[6] = unsafe.Pointer(&rc)
+ parms[7] = unsafe.Pointer(&rn)
+ bpxcall(parms[:], BPX4PTR)
+ return rv, rc, rn
+}
+
+func copyU8(val uint8, dest []uint8) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
+
+func copyU8Arr(src, dest []uint8) int {
+ if len(dest) < len(src) {
+ return 0
+ }
+ for i, v := range src {
+ dest[i] = v
+ }
+ return len(src)
+}
+
+func copyU16(val uint16, dest []uint16) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
+
+func copyU32(val uint32, dest []uint32) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
+
+func copyU32Arr(src, dest []uint32) int {
+ if len(dest) < len(src) {
+ return 0
+ }
+ for i, v := range src {
+ dest[i] = v
+ }
+ return len(src)
+}
+
+func copyU64(val uint64, dest []uint64) int {
+ if len(dest) < 1 {
+ return 0
+ }
+ dest[0] = val
+ return 1
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bpxsvc_zos.s
new file mode 100644
index 00000000000..4bd4a179821
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/bpxsvc_zos.s
@@ -0,0 +1,192 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// function to call USS assembly language services
+//
+// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm
+//
+// arg1 unsafe.Pointer array that ressembles an OS PLIST
+//
+// arg2 function offset as in
+// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm
+//
+// func bpxcall(plist []unsafe.Pointer, bpx_offset int64)
+
+TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0
+ MOVD plist_base+0(FP), R1 // r1 points to plist
+ MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table
+ MOVD R14, R7 // save r14
+ MOVD R15, R8 // save r15
+ MOVWZ 16(R0), R9
+ MOVWZ 544(R9), R9
+ MOVWZ 24(R9), R9 // call vector in r9
+ ADD R2, R9 // add offset to vector table
+ MOVWZ (R9), R9 // r9 points to entry point
+ BYTE $0x0D // BL R14,R9 --> basr r14,r9
+ BYTE $0xE9 // clobbers 0,1,14,15
+ MOVD R8, R15 // restore 15
+ JMP R7 // return via saved return address
+
+// func A2e(arr [] byte)
+// code page conversion from 819 to 1047
+TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0
+ MOVD arg_base+0(FP), R2 // pointer to arry of characters
+ MOVD arg_len+8(FP), R3 // count
+ XOR R0, R0
+ XOR R1, R1
+ BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2))
+
+ // ASCII -> EBCDIC conversion table:
+ BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03
+ BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f
+ BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b
+ BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f
+ BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13
+ BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26
+ BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27
+ BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f
+ BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b
+ BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d
+ BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e
+ BYTE $0x6b; BYTE $0x60; BYTE $0x4b; BYTE $0x61
+ BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3
+ BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7
+ BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e
+ BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f
+ BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3
+ BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7
+ BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2
+ BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6
+ BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2
+ BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6
+ BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad
+ BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d
+ BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83
+ BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87
+ BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92
+ BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96
+ BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2
+ BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6
+ BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0
+ BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07
+ BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23
+ BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17
+ BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b
+ BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b
+ BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33
+ BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08
+ BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b
+ BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff
+ BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1
+ BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5
+ BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a
+ BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc
+ BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa
+ BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3
+ BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b
+ BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab
+ BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66
+ BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68
+ BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73
+ BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77
+ BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee
+ BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf
+ BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb
+ BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59
+ BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46
+ BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48
+ BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53
+ BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57
+ BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce
+ BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1
+ BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb
+ BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf
+
+retry:
+ WORD $0xB9931022 // TROO 2,2,b'0001'
+ BVS retry
+ RET
+
+// func e2a(arr [] byte)
+// code page conversion from 1047 to 819
+TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0
+ MOVD arg_base+0(FP), R2 // pointer to arry of characters
+ MOVD arg_len+8(FP), R3 // count
+ XOR R0, R0
+ XOR R1, R1
+ BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2))
+
+ // EBCDIC -> ASCII conversion table:
+ BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03
+ BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f
+ BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b
+ BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f
+ BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13
+ BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87
+ BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f
+ BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f
+ BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83
+ BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b
+ BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b
+ BYTE $0x8c; BYTE $0x05; BYTE $0x06; BYTE $0x07
+ BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93
+ BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04
+ BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b
+ BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a
+ BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4
+ BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5
+ BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e
+ BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c
+ BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb
+ BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef
+ BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24
+ BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e
+ BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4
+ BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5
+ BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c
+ BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f
+ BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb
+ BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf
+ BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23
+ BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22
+ BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63
+ BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67
+ BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb
+ BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1
+ BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c
+ BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70
+ BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba
+ BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4
+ BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74
+ BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78
+ BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf
+ BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae
+ BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7
+ BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc
+ BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8
+ BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7
+ BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43
+ BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47
+ BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4
+ BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5
+ BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c
+ BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50
+ BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb
+ BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff
+ BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54
+ BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58
+ BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4
+ BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5
+ BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33
+ BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37
+ BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb
+ BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f
+
+retry:
+ WORD $0xB9931022 // TROO 2,2,b'0001'
+ BVS retry
+ RET
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/cap_freebsd.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/cap_freebsd.go
index 0b7c6adb866..a08657890f3 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/cap_freebsd.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build freebsd
-// +build freebsd
package unix
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/constants.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/constants.go
index c0cd2ff9041..6fb7cb77d0a 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/constants.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/constants.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
package unix
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
index 65a998508db..d7851346177 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix && ppc
-// +build aix,ppc
// Functions to access/create device major and minor numbers matching the
// encoding used by AIX.
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
index 8fc08ad0aae..623a5e6973a 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build aix && ppc64
-// +build aix,ppc64
// Functions to access/create device major and minor numbers matching the
// encoding used AIX.
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_zos.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_zos.go
new file mode 100644
index 00000000000..bb6a64fe92d
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dev_zos.go
@@ -0,0 +1,28 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by z/OS.
+//
+// The information below is extracted and adapted from macros.
+
+package unix
+
+// Major returns the major component of a z/OS device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0x0000FFFF)
+}
+
+// Minor returns the minor component of a z/OS device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0x0000FFFF)
+}
+
+// Mkdev returns a z/OS device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return (uint64(major) << 16) | uint64(minor)
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dirent.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dirent.go
index 3017473779c..1ebf1178269 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dirent.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/dirent.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build aix || darwin || dragonfly || freebsd || linux || nacl || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
package unix
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_big.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_big.go
index 79806db9a41..1095fd31d68 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_big.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_big.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
-//go:build ppc64 || s390x || mips || mips64
-// +build ppc64 s390x mips mips64
+//go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
package unix
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_little.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_little.go
index d562bd36c77..b9f0e277b14 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_little.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/endian_little.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
-//go:build 386 || amd64 || amd64p32 || arm || arm64 || ppc64le || mipsle || mips64le
-// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le
+//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
package unix
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/env_unix.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/env_unix.go
index cc210c7ea6b..a96da71f473 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/env_unix.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/env_unix.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// Unix environment variables.
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
deleted file mode 100644
index c56bc8b05e0..00000000000
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_386.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
-// them here for backwards compatibility.
-
-package unix
-
-const (
- IFF_SMART = 0x20
- IFT_1822 = 0x2
- IFT_A12MPPSWITCH = 0x82
- IFT_AAL2 = 0xbb
- IFT_AAL5 = 0x31
- IFT_ADSL = 0x5e
- IFT_AFLANE8023 = 0x3b
- IFT_AFLANE8025 = 0x3c
- IFT_ARAP = 0x58
- IFT_ARCNET = 0x23
- IFT_ARCNETPLUS = 0x24
- IFT_ASYNC = 0x54
- IFT_ATM = 0x25
- IFT_ATMDXI = 0x69
- IFT_ATMFUNI = 0x6a
- IFT_ATMIMA = 0x6b
- IFT_ATMLOGICAL = 0x50
- IFT_ATMRADIO = 0xbd
- IFT_ATMSUBINTERFACE = 0x86
- IFT_ATMVCIENDPT = 0xc2
- IFT_ATMVIRTUAL = 0x95
- IFT_BGPPOLICYACCOUNTING = 0xa2
- IFT_BSC = 0x53
- IFT_CCTEMUL = 0x3d
- IFT_CEPT = 0x13
- IFT_CES = 0x85
- IFT_CHANNEL = 0x46
- IFT_CNR = 0x55
- IFT_COFFEE = 0x84
- IFT_COMPOSITELINK = 0x9b
- IFT_DCN = 0x8d
- IFT_DIGITALPOWERLINE = 0x8a
- IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
- IFT_DLSW = 0x4a
- IFT_DOCSCABLEDOWNSTREAM = 0x80
- IFT_DOCSCABLEMACLAYER = 0x7f
- IFT_DOCSCABLEUPSTREAM = 0x81
- IFT_DS0 = 0x51
- IFT_DS0BUNDLE = 0x52
- IFT_DS1FDL = 0xaa
- IFT_DS3 = 0x1e
- IFT_DTM = 0x8c
- IFT_DVBASILN = 0xac
- IFT_DVBASIOUT = 0xad
- IFT_DVBRCCDOWNSTREAM = 0x93
- IFT_DVBRCCMACLAYER = 0x92
- IFT_DVBRCCUPSTREAM = 0x94
- IFT_ENC = 0xf4
- IFT_EON = 0x19
- IFT_EPLRS = 0x57
- IFT_ESCON = 0x49
- IFT_ETHER = 0x6
- IFT_FAITH = 0xf2
- IFT_FAST = 0x7d
- IFT_FASTETHER = 0x3e
- IFT_FASTETHERFX = 0x45
- IFT_FDDI = 0xf
- IFT_FIBRECHANNEL = 0x38
- IFT_FRAMERELAYINTERCONNECT = 0x3a
- IFT_FRAMERELAYMPI = 0x5c
- IFT_FRDLCIENDPT = 0xc1
- IFT_FRELAY = 0x20
- IFT_FRELAYDCE = 0x2c
- IFT_FRF16MFRBUNDLE = 0xa3
- IFT_FRFORWARD = 0x9e
- IFT_G703AT2MB = 0x43
- IFT_G703AT64K = 0x42
- IFT_GIF = 0xf0
- IFT_GIGABITETHERNET = 0x75
- IFT_GR303IDT = 0xb2
- IFT_GR303RDT = 0xb1
- IFT_H323GATEKEEPER = 0xa4
- IFT_H323PROXY = 0xa5
- IFT_HDH1822 = 0x3
- IFT_HDLC = 0x76
- IFT_HDSL2 = 0xa8
- IFT_HIPERLAN2 = 0xb7
- IFT_HIPPI = 0x2f
- IFT_HIPPIINTERFACE = 0x39
- IFT_HOSTPAD = 0x5a
- IFT_HSSI = 0x2e
- IFT_HY = 0xe
- IFT_IBM370PARCHAN = 0x48
- IFT_IDSL = 0x9a
- IFT_IEEE80211 = 0x47
- IFT_IEEE80212 = 0x37
- IFT_IEEE8023ADLAG = 0xa1
- IFT_IFGSN = 0x91
- IFT_IMT = 0xbe
- IFT_INTERLEAVE = 0x7c
- IFT_IP = 0x7e
- IFT_IPFORWARD = 0x8e
- IFT_IPOVERATM = 0x72
- IFT_IPOVERCDLC = 0x6d
- IFT_IPOVERCLAW = 0x6e
- IFT_IPSWITCH = 0x4e
- IFT_IPXIP = 0xf9
- IFT_ISDN = 0x3f
- IFT_ISDNBASIC = 0x14
- IFT_ISDNPRIMARY = 0x15
- IFT_ISDNS = 0x4b
- IFT_ISDNU = 0x4c
- IFT_ISO88022LLC = 0x29
- IFT_ISO88023 = 0x7
- IFT_ISO88024 = 0x8
- IFT_ISO88025 = 0x9
- IFT_ISO88025CRFPINT = 0x62
- IFT_ISO88025DTR = 0x56
- IFT_ISO88025FIBER = 0x73
- IFT_ISO88026 = 0xa
- IFT_ISUP = 0xb3
- IFT_L3IPXVLAN = 0x89
- IFT_LAPB = 0x10
- IFT_LAPD = 0x4d
- IFT_LAPF = 0x77
- IFT_LOCALTALK = 0x2a
- IFT_LOOP = 0x18
- IFT_MEDIAMAILOVERIP = 0x8b
- IFT_MFSIGLINK = 0xa7
- IFT_MIOX25 = 0x26
- IFT_MODEM = 0x30
- IFT_MPC = 0x71
- IFT_MPLS = 0xa6
- IFT_MPLSTUNNEL = 0x96
- IFT_MSDSL = 0x8f
- IFT_MVL = 0xbf
- IFT_MYRINET = 0x63
- IFT_NFAS = 0xaf
- IFT_NSIP = 0x1b
- IFT_OPTICALCHANNEL = 0xc3
- IFT_OPTICALTRANSPORT = 0xc4
- IFT_OTHER = 0x1
- IFT_P10 = 0xc
- IFT_P80 = 0xd
- IFT_PARA = 0x22
- IFT_PFLOG = 0xf6
- IFT_PFSYNC = 0xf7
- IFT_PLC = 0xae
- IFT_POS = 0xab
- IFT_PPPMULTILINKBUNDLE = 0x6c
- IFT_PROPBWAP2MP = 0xb8
- IFT_PROPCNLS = 0x59
- IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
- IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
- IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
- IFT_PROPMUX = 0x36
- IFT_PROPWIRELESSP2P = 0x9d
- IFT_PTPSERIAL = 0x16
- IFT_PVC = 0xf1
- IFT_QLLC = 0x44
- IFT_RADIOMAC = 0xbc
- IFT_RADSL = 0x5f
- IFT_REACHDSL = 0xc0
- IFT_RFC1483 = 0x9f
- IFT_RS232 = 0x21
- IFT_RSRB = 0x4f
- IFT_SDLC = 0x11
- IFT_SDSL = 0x60
- IFT_SHDSL = 0xa9
- IFT_SIP = 0x1f
- IFT_SLIP = 0x1c
- IFT_SMDSDXI = 0x2b
- IFT_SMDSICIP = 0x34
- IFT_SONET = 0x27
- IFT_SONETOVERHEADCHANNEL = 0xb9
- IFT_SONETPATH = 0x32
- IFT_SONETVT = 0x33
- IFT_SRP = 0x97
- IFT_SS7SIGLINK = 0x9c
- IFT_STACKTOSTACK = 0x6f
- IFT_STARLAN = 0xb
- IFT_STF = 0xd7
- IFT_T1 = 0x12
- IFT_TDLC = 0x74
- IFT_TERMPAD = 0x5b
- IFT_TR008 = 0xb0
- IFT_TRANSPHDLC = 0x7b
- IFT_TUNNEL = 0x83
- IFT_ULTRA = 0x1d
- IFT_USB = 0xa0
- IFT_V11 = 0x40
- IFT_V35 = 0x2d
- IFT_V36 = 0x41
- IFT_V37 = 0x78
- IFT_VDSL = 0x61
- IFT_VIRTUALIPADDRESS = 0x70
- IFT_VOICEEM = 0x64
- IFT_VOICEENCAP = 0x67
- IFT_VOICEFXO = 0x65
- IFT_VOICEFXS = 0x66
- IFT_VOICEOVERATM = 0x98
- IFT_VOICEOVERFRAMERELAY = 0x99
- IFT_VOICEOVERIP = 0x68
- IFT_X213 = 0x5d
- IFT_X25 = 0x5
- IFT_X25DDN = 0x4
- IFT_X25HUNTGROUP = 0x7a
- IFT_X25MLP = 0x79
- IFT_X25PLE = 0x28
- IFT_XETHER = 0x1a
- IPPROTO_MAXID = 0x34
- IPV6_FAITH = 0x1d
- IP_FAITH = 0x16
- MAP_NORESERVE = 0x40
- MAP_RENAME = 0x20
- NET_RT_MAXID = 0x6
- RTF_PRCLONING = 0x10000
- RTM_OLDADD = 0x9
- RTM_OLDDEL = 0xa
- SIOCADDRT = 0x8030720a
- SIOCALIFADDR = 0x8118691b
- SIOCDELRT = 0x8030720b
- SIOCDLIFADDR = 0x8118691d
- SIOCGLIFADDR = 0xc118691c
- SIOCGLIFPHYADDR = 0xc118694b
- SIOCSLIFPHYADDR = 0x8118694a
-)
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
deleted file mode 100644
index 3e9771175ab..00000000000
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep
-// them here for backwards compatibility.
-
-package unix
-
-const (
- IFF_SMART = 0x20
- IFT_1822 = 0x2
- IFT_A12MPPSWITCH = 0x82
- IFT_AAL2 = 0xbb
- IFT_AAL5 = 0x31
- IFT_ADSL = 0x5e
- IFT_AFLANE8023 = 0x3b
- IFT_AFLANE8025 = 0x3c
- IFT_ARAP = 0x58
- IFT_ARCNET = 0x23
- IFT_ARCNETPLUS = 0x24
- IFT_ASYNC = 0x54
- IFT_ATM = 0x25
- IFT_ATMDXI = 0x69
- IFT_ATMFUNI = 0x6a
- IFT_ATMIMA = 0x6b
- IFT_ATMLOGICAL = 0x50
- IFT_ATMRADIO = 0xbd
- IFT_ATMSUBINTERFACE = 0x86
- IFT_ATMVCIENDPT = 0xc2
- IFT_ATMVIRTUAL = 0x95
- IFT_BGPPOLICYACCOUNTING = 0xa2
- IFT_BSC = 0x53
- IFT_CCTEMUL = 0x3d
- IFT_CEPT = 0x13
- IFT_CES = 0x85
- IFT_CHANNEL = 0x46
- IFT_CNR = 0x55
- IFT_COFFEE = 0x84
- IFT_COMPOSITELINK = 0x9b
- IFT_DCN = 0x8d
- IFT_DIGITALPOWERLINE = 0x8a
- IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
- IFT_DLSW = 0x4a
- IFT_DOCSCABLEDOWNSTREAM = 0x80
- IFT_DOCSCABLEMACLAYER = 0x7f
- IFT_DOCSCABLEUPSTREAM = 0x81
- IFT_DS0 = 0x51
- IFT_DS0BUNDLE = 0x52
- IFT_DS1FDL = 0xaa
- IFT_DS3 = 0x1e
- IFT_DTM = 0x8c
- IFT_DVBASILN = 0xac
- IFT_DVBASIOUT = 0xad
- IFT_DVBRCCDOWNSTREAM = 0x93
- IFT_DVBRCCMACLAYER = 0x92
- IFT_DVBRCCUPSTREAM = 0x94
- IFT_ENC = 0xf4
- IFT_EON = 0x19
- IFT_EPLRS = 0x57
- IFT_ESCON = 0x49
- IFT_ETHER = 0x6
- IFT_FAITH = 0xf2
- IFT_FAST = 0x7d
- IFT_FASTETHER = 0x3e
- IFT_FASTETHERFX = 0x45
- IFT_FDDI = 0xf
- IFT_FIBRECHANNEL = 0x38
- IFT_FRAMERELAYINTERCONNECT = 0x3a
- IFT_FRAMERELAYMPI = 0x5c
- IFT_FRDLCIENDPT = 0xc1
- IFT_FRELAY = 0x20
- IFT_FRELAYDCE = 0x2c
- IFT_FRF16MFRBUNDLE = 0xa3
- IFT_FRFORWARD = 0x9e
- IFT_G703AT2MB = 0x43
- IFT_G703AT64K = 0x42
- IFT_GIF = 0xf0
- IFT_GIGABITETHERNET = 0x75
- IFT_GR303IDT = 0xb2
- IFT_GR303RDT = 0xb1
- IFT_H323GATEKEEPER = 0xa4
- IFT_H323PROXY = 0xa5
- IFT_HDH1822 = 0x3
- IFT_HDLC = 0x76
- IFT_HDSL2 = 0xa8
- IFT_HIPERLAN2 = 0xb7
- IFT_HIPPI = 0x2f
- IFT_HIPPIINTERFACE = 0x39
- IFT_HOSTPAD = 0x5a
- IFT_HSSI = 0x2e
- IFT_HY = 0xe
- IFT_IBM370PARCHAN = 0x48
- IFT_IDSL = 0x9a
- IFT_IEEE80211 = 0x47
- IFT_IEEE80212 = 0x37
- IFT_IEEE8023ADLAG = 0xa1
- IFT_IFGSN = 0x91
- IFT_IMT = 0xbe
- IFT_INTERLEAVE = 0x7c
- IFT_IP = 0x7e
- IFT_IPFORWARD = 0x8e
- IFT_IPOVERATM = 0x72
- IFT_IPOVERCDLC = 0x6d
- IFT_IPOVERCLAW = 0x6e
- IFT_IPSWITCH = 0x4e
- IFT_IPXIP = 0xf9
- IFT_ISDN = 0x3f
- IFT_ISDNBASIC = 0x14
- IFT_ISDNPRIMARY = 0x15
- IFT_ISDNS = 0x4b
- IFT_ISDNU = 0x4c
- IFT_ISO88022LLC = 0x29
- IFT_ISO88023 = 0x7
- IFT_ISO88024 = 0x8
- IFT_ISO88025 = 0x9
- IFT_ISO88025CRFPINT = 0x62
- IFT_ISO88025DTR = 0x56
- IFT_ISO88025FIBER = 0x73
- IFT_ISO88026 = 0xa
- IFT_ISUP = 0xb3
- IFT_L3IPXVLAN = 0x89
- IFT_LAPB = 0x10
- IFT_LAPD = 0x4d
- IFT_LAPF = 0x77
- IFT_LOCALTALK = 0x2a
- IFT_LOOP = 0x18
- IFT_MEDIAMAILOVERIP = 0x8b
- IFT_MFSIGLINK = 0xa7
- IFT_MIOX25 = 0x26
- IFT_MODEM = 0x30
- IFT_MPC = 0x71
- IFT_MPLS = 0xa6
- IFT_MPLSTUNNEL = 0x96
- IFT_MSDSL = 0x8f
- IFT_MVL = 0xbf
- IFT_MYRINET = 0x63
- IFT_NFAS = 0xaf
- IFT_NSIP = 0x1b
- IFT_OPTICALCHANNEL = 0xc3
- IFT_OPTICALTRANSPORT = 0xc4
- IFT_OTHER = 0x1
- IFT_P10 = 0xc
- IFT_P80 = 0xd
- IFT_PARA = 0x22
- IFT_PFLOG = 0xf6
- IFT_PFSYNC = 0xf7
- IFT_PLC = 0xae
- IFT_POS = 0xab
- IFT_PPPMULTILINKBUNDLE = 0x6c
- IFT_PROPBWAP2MP = 0xb8
- IFT_PROPCNLS = 0x59
- IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
- IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
- IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
- IFT_PROPMUX = 0x36
- IFT_PROPWIRELESSP2P = 0x9d
- IFT_PTPSERIAL = 0x16
- IFT_PVC = 0xf1
- IFT_QLLC = 0x44
- IFT_RADIOMAC = 0xbc
- IFT_RADSL = 0x5f
- IFT_REACHDSL = 0xc0
- IFT_RFC1483 = 0x9f
- IFT_RS232 = 0x21
- IFT_RSRB = 0x4f
- IFT_SDLC = 0x11
- IFT_SDSL = 0x60
- IFT_SHDSL = 0xa9
- IFT_SIP = 0x1f
- IFT_SLIP = 0x1c
- IFT_SMDSDXI = 0x2b
- IFT_SMDSICIP = 0x34
- IFT_SONET = 0x27
- IFT_SONETOVERHEADCHANNEL = 0xb9
- IFT_SONETPATH = 0x32
- IFT_SONETVT = 0x33
- IFT_SRP = 0x97
- IFT_SS7SIGLINK = 0x9c
- IFT_STACKTOSTACK = 0x6f
- IFT_STARLAN = 0xb
- IFT_STF = 0xd7
- IFT_T1 = 0x12
- IFT_TDLC = 0x74
- IFT_TERMPAD = 0x5b
- IFT_TR008 = 0xb0
- IFT_TRANSPHDLC = 0x7b
- IFT_TUNNEL = 0x83
- IFT_ULTRA = 0x1d
- IFT_USB = 0xa0
- IFT_V11 = 0x40
- IFT_V35 = 0x2d
- IFT_V36 = 0x41
- IFT_V37 = 0x78
- IFT_VDSL = 0x61
- IFT_VIRTUALIPADDRESS = 0x70
- IFT_VOICEEM = 0x64
- IFT_VOICEENCAP = 0x67
- IFT_VOICEFXO = 0x65
- IFT_VOICEFXS = 0x66
- IFT_VOICEOVERATM = 0x98
- IFT_VOICEOVERFRAMERELAY = 0x99
- IFT_VOICEOVERIP = 0x68
- IFT_X213 = 0x5d
- IFT_X25 = 0x5
- IFT_X25DDN = 0x4
- IFT_X25HUNTGROUP = 0x7a
- IFT_X25MLP = 0x79
- IFT_X25PLE = 0x28
- IFT_XETHER = 0x1a
- IPPROTO_MAXID = 0x34
- IPV6_FAITH = 0x1d
- IP_FAITH = 0x16
- MAP_NORESERVE = 0x40
- MAP_RENAME = 0x20
- NET_RT_MAXID = 0x6
- RTF_PRCLONING = 0x10000
- RTM_OLDADD = 0x9
- RTM_OLDDEL = 0xa
- SIOCADDRT = 0x8040720a
- SIOCALIFADDR = 0x8118691b
- SIOCDELRT = 0x8040720b
- SIOCDLIFADDR = 0x8118691d
- SIOCGLIFADDR = 0xc118691c
- SIOCGLIFPHYADDR = 0xc118694b
- SIOCSLIFPHYADDR = 0x8118694a
-)
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
deleted file mode 100644
index 856dca32543..00000000000
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package unix
-
-const (
- IFT_1822 = 0x2
- IFT_A12MPPSWITCH = 0x82
- IFT_AAL2 = 0xbb
- IFT_AAL5 = 0x31
- IFT_ADSL = 0x5e
- IFT_AFLANE8023 = 0x3b
- IFT_AFLANE8025 = 0x3c
- IFT_ARAP = 0x58
- IFT_ARCNET = 0x23
- IFT_ARCNETPLUS = 0x24
- IFT_ASYNC = 0x54
- IFT_ATM = 0x25
- IFT_ATMDXI = 0x69
- IFT_ATMFUNI = 0x6a
- IFT_ATMIMA = 0x6b
- IFT_ATMLOGICAL = 0x50
- IFT_ATMRADIO = 0xbd
- IFT_ATMSUBINTERFACE = 0x86
- IFT_ATMVCIENDPT = 0xc2
- IFT_ATMVIRTUAL = 0x95
- IFT_BGPPOLICYACCOUNTING = 0xa2
- IFT_BSC = 0x53
- IFT_CCTEMUL = 0x3d
- IFT_CEPT = 0x13
- IFT_CES = 0x85
- IFT_CHANNEL = 0x46
- IFT_CNR = 0x55
- IFT_COFFEE = 0x84
- IFT_COMPOSITELINK = 0x9b
- IFT_DCN = 0x8d
- IFT_DIGITALPOWERLINE = 0x8a
- IFT_DIGITALWRAPPEROVERHEADCHANNEL = 0xba
- IFT_DLSW = 0x4a
- IFT_DOCSCABLEDOWNSTREAM = 0x80
- IFT_DOCSCABLEMACLAYER = 0x7f
- IFT_DOCSCABLEUPSTREAM = 0x81
- IFT_DS0 = 0x51
- IFT_DS0BUNDLE = 0x52
- IFT_DS1FDL = 0xaa
- IFT_DS3 = 0x1e
- IFT_DTM = 0x8c
- IFT_DVBASILN = 0xac
- IFT_DVBASIOUT = 0xad
- IFT_DVBRCCDOWNSTREAM = 0x93
- IFT_DVBRCCMACLAYER = 0x92
- IFT_DVBRCCUPSTREAM = 0x94
- IFT_ENC = 0xf4
- IFT_EON = 0x19
- IFT_EPLRS = 0x57
- IFT_ESCON = 0x49
- IFT_ETHER = 0x6
- IFT_FAST = 0x7d
- IFT_FASTETHER = 0x3e
- IFT_FASTETHERFX = 0x45
- IFT_FDDI = 0xf
- IFT_FIBRECHANNEL = 0x38
- IFT_FRAMERELAYINTERCONNECT = 0x3a
- IFT_FRAMERELAYMPI = 0x5c
- IFT_FRDLCIENDPT = 0xc1
- IFT_FRELAY = 0x20
- IFT_FRELAYDCE = 0x2c
- IFT_FRF16MFRBUNDLE = 0xa3
- IFT_FRFORWARD = 0x9e
- IFT_G703AT2MB = 0x43
- IFT_G703AT64K = 0x42
- IFT_GIF = 0xf0
- IFT_GIGABITETHERNET = 0x75
- IFT_GR303IDT = 0xb2
- IFT_GR303RDT = 0xb1
- IFT_H323GATEKEEPER = 0xa4
- IFT_H323PROXY = 0xa5
- IFT_HDH1822 = 0x3
- IFT_HDLC = 0x76
- IFT_HDSL2 = 0xa8
- IFT_HIPERLAN2 = 0xb7
- IFT_HIPPI = 0x2f
- IFT_HIPPIINTERFACE = 0x39
- IFT_HOSTPAD = 0x5a
- IFT_HSSI = 0x2e
- IFT_HY = 0xe
- IFT_IBM370PARCHAN = 0x48
- IFT_IDSL = 0x9a
- IFT_IEEE80211 = 0x47
- IFT_IEEE80212 = 0x37
- IFT_IEEE8023ADLAG = 0xa1
- IFT_IFGSN = 0x91
- IFT_IMT = 0xbe
- IFT_INTERLEAVE = 0x7c
- IFT_IP = 0x7e
- IFT_IPFORWARD = 0x8e
- IFT_IPOVERATM = 0x72
- IFT_IPOVERCDLC = 0x6d
- IFT_IPOVERCLAW = 0x6e
- IFT_IPSWITCH = 0x4e
- IFT_ISDN = 0x3f
- IFT_ISDNBASIC = 0x14
- IFT_ISDNPRIMARY = 0x15
- IFT_ISDNS = 0x4b
- IFT_ISDNU = 0x4c
- IFT_ISO88022LLC = 0x29
- IFT_ISO88023 = 0x7
- IFT_ISO88024 = 0x8
- IFT_ISO88025 = 0x9
- IFT_ISO88025CRFPINT = 0x62
- IFT_ISO88025DTR = 0x56
- IFT_ISO88025FIBER = 0x73
- IFT_ISO88026 = 0xa
- IFT_ISUP = 0xb3
- IFT_L3IPXVLAN = 0x89
- IFT_LAPB = 0x10
- IFT_LAPD = 0x4d
- IFT_LAPF = 0x77
- IFT_LOCALTALK = 0x2a
- IFT_LOOP = 0x18
- IFT_MEDIAMAILOVERIP = 0x8b
- IFT_MFSIGLINK = 0xa7
- IFT_MIOX25 = 0x26
- IFT_MODEM = 0x30
- IFT_MPC = 0x71
- IFT_MPLS = 0xa6
- IFT_MPLSTUNNEL = 0x96
- IFT_MSDSL = 0x8f
- IFT_MVL = 0xbf
- IFT_MYRINET = 0x63
- IFT_NFAS = 0xaf
- IFT_NSIP = 0x1b
- IFT_OPTICALCHANNEL = 0xc3
- IFT_OPTICALTRANSPORT = 0xc4
- IFT_OTHER = 0x1
- IFT_P10 = 0xc
- IFT_P80 = 0xd
- IFT_PARA = 0x22
- IFT_PFLOG = 0xf6
- IFT_PFSYNC = 0xf7
- IFT_PLC = 0xae
- IFT_POS = 0xab
- IFT_PPPMULTILINKBUNDLE = 0x6c
- IFT_PROPBWAP2MP = 0xb8
- IFT_PROPCNLS = 0x59
- IFT_PROPDOCSWIRELESSDOWNSTREAM = 0xb5
- IFT_PROPDOCSWIRELESSMACLAYER = 0xb4
- IFT_PROPDOCSWIRELESSUPSTREAM = 0xb6
- IFT_PROPMUX = 0x36
- IFT_PROPWIRELESSP2P = 0x9d
- IFT_PTPSERIAL = 0x16
- IFT_PVC = 0xf1
- IFT_QLLC = 0x44
- IFT_RADIOMAC = 0xbc
- IFT_RADSL = 0x5f
- IFT_REACHDSL = 0xc0
- IFT_RFC1483 = 0x9f
- IFT_RS232 = 0x21
- IFT_RSRB = 0x4f
- IFT_SDLC = 0x11
- IFT_SDSL = 0x60
- IFT_SHDSL = 0xa9
- IFT_SIP = 0x1f
- IFT_SLIP = 0x1c
- IFT_SMDSDXI = 0x2b
- IFT_SMDSICIP = 0x34
- IFT_SONET = 0x27
- IFT_SONETOVERHEADCHANNEL = 0xb9
- IFT_SONETPATH = 0x32
- IFT_SONETVT = 0x33
- IFT_SRP = 0x97
- IFT_SS7SIGLINK = 0x9c
- IFT_STACKTOSTACK = 0x6f
- IFT_STARLAN = 0xb
- IFT_STF = 0xd7
- IFT_T1 = 0x12
- IFT_TDLC = 0x74
- IFT_TERMPAD = 0x5b
- IFT_TR008 = 0xb0
- IFT_TRANSPHDLC = 0x7b
- IFT_TUNNEL = 0x83
- IFT_ULTRA = 0x1d
- IFT_USB = 0xa0
- IFT_V11 = 0x40
- IFT_V35 = 0x2d
- IFT_V36 = 0x41
- IFT_V37 = 0x78
- IFT_VDSL = 0x61
- IFT_VIRTUALIPADDRESS = 0x70
- IFT_VOICEEM = 0x64
- IFT_VOICEENCAP = 0x67
- IFT_VOICEFXO = 0x65
- IFT_VOICEFXS = 0x66
- IFT_VOICEOVERATM = 0x98
- IFT_VOICEOVERFRAMERELAY = 0x99
- IFT_VOICEOVERIP = 0x68
- IFT_X213 = 0x5d
- IFT_X25 = 0x5
- IFT_X25DDN = 0x4
- IFT_X25HUNTGROUP = 0x7a
- IFT_X25MLP = 0x79
- IFT_X25PLE = 0x28
- IFT_XETHER = 0x1a
-
- // missing constants on FreeBSD-11.1-RELEASE, copied from old values in ztypes_freebsd_arm.go
- IFF_SMART = 0x20
- IFT_FAITH = 0xf2
- IFT_IPXIP = 0xf9
- IPPROTO_MAXID = 0x34
- IPV6_FAITH = 0x1d
- IP_FAITH = 0x16
- MAP_NORESERVE = 0x40
- MAP_RENAME = 0x20
- NET_RT_MAXID = 0x6
- RTF_PRCLONING = 0x10000
- RTM_OLDADD = 0x9
- RTM_OLDDEL = 0xa
- SIOCADDRT = 0x8030720a
- SIOCALIFADDR = 0x8118691b
- SIOCDELRT = 0x8030720b
- SIOCDLIFADDR = 0x8118691d
- SIOCGLIFADDR = 0xc118691c
- SIOCGLIFPHYADDR = 0xc118694b
- SIOCSLIFPHYADDR = 0x8118694a
-)
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl.go
index d3b1208ac73..6200876fb28 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl.go
@@ -2,20 +2,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build dragonfly || freebsd || linux || netbsd || openbsd
-// +build dragonfly freebsd linux netbsd openbsd
+//go:build dragonfly || freebsd || linux || netbsd
package unix
import "unsafe"
// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
-// systems by flock_linux_32bit.go to be SYS_FCNTL64.
+// systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
var fcntl64Syscall uintptr = SYS_FCNTL
-// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
-func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
- valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg))
+func fcntl(fd int, cmd, arg int) (int, error) {
+ valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
var err error
if errno != 0 {
err = errno
@@ -23,6 +21,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return int(valptr), err
}
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_darwin.go
index 5868a4a47b4..a9911c7c1d8 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_darwin.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_darwin.go
@@ -16,3 +16,9 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
_, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
return err
}
+
+// FcntlFstore performs a fcntl syscall for the F_PREALLOCATE command.
+func FcntlFstore(fd uintptr, cmd int, fstore *Fstore_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(fstore))))
+ return err
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
index a31e0edc7f0..13b4acd5c69 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
@@ -1,10 +1,9 @@
-//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle)
-// +build linux,386 linux,arm linux,mips linux,mipsle
-
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc)
+
package unix
func init() {
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fdset.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fdset.go
new file mode 100644
index 00000000000..9e83d18cd04
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/fdset.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+
+package unix
+
+// Set adds fd to the set fds.
+func (fds *FdSet) Set(fd int) {
+ fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// Clear removes fd from the set fds.
+func (fds *FdSet) Clear(fd int) {
+ fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS))
+}
+
+// IsSet returns whether fd is in the set fds.
+func (fds *FdSet) IsSet(fd int) bool {
+ return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0
+}
+
+// Zero clears the set fds.
+func (fds *FdSet) Zero() {
+ for i := range fds.Bits {
+ fds.Bits[i] = 0
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo.go
index 411fe0f89a5..aca5721ddcc 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo.go
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build gccgo && !aix
-// +build gccgo,!aix
+//go:build gccgo && !aix && !hurd
package unix
@@ -12,10 +11,8 @@ import "syscall"
// We can't use the gc-syntax .s files for gccgo. On the plus side
// much of the functionality can be written directly in Go.
-//extern gccgoRealSyscallNoError
func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
-//extern gccgoRealSyscall
func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_c.c b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_c.c
index c44730c5e99..d468b7b47f1 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build gccgo
-// +build !aix
+//go:build gccgo && !aix && !hurd
#include
#include
@@ -21,6 +20,9 @@ struct ret {
uintptr_t err;
};
+struct ret gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscall");
+
struct ret
gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
@@ -32,6 +34,9 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp
return r;
}
+uintptr_t gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
+ __asm__(GOSYM_PREFIX GOPKGPATH ".realSyscallNoError");
+
uintptr_t
gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
index e60e49a3d9c..972d61bd754 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gccgo && linux && amd64
-// +build gccgo,linux,amd64
package unix
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ifreq_linux.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ifreq_linux.go
new file mode 100644
index 00000000000..848840ae4c7
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -0,0 +1,141 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// Helpers for dealing with ifreq since it contains a union and thus requires a
+// lot of unsafe.Pointer casts to use properly.
+
+// An Ifreq is a type-safe wrapper around the raw ifreq struct. An Ifreq
+// contains an interface name and a union of arbitrary data which can be
+// accessed using the Ifreq's methods. To create an Ifreq, use the NewIfreq
+// function.
+//
+// Use the Name method to access the stored interface name. The union data
+// fields can be get and set using the following methods:
+// - Uint16/SetUint16: flags
+// - Uint32/SetUint32: ifindex, metric, mtu
+type Ifreq struct{ raw ifreq }
+
+// NewIfreq creates an Ifreq with the input network interface name after
+// validating the name does not exceed IFNAMSIZ-1 (trailing NULL required)
+// bytes.
+func NewIfreq(name string) (*Ifreq, error) {
+ // Leave room for terminating NULL byte.
+ if len(name) >= IFNAMSIZ {
+ return nil, EINVAL
+ }
+
+ var ifr ifreq
+ copy(ifr.Ifrn[:], name)
+
+ return &Ifreq{raw: ifr}, nil
+}
+
+// TODO(mdlayher): get/set methods for hardware address sockaddr, char array, etc.
+
+// Name returns the interface name associated with the Ifreq.
+func (ifr *Ifreq) Name() string {
+ return ByteSliceToString(ifr.raw.Ifrn[:])
+}
+
+// According to netdevice(7), only AF_INET addresses are returned for numerous
+// sockaddr ioctls. For convenience, we expose these as Inet4Addr since the Port
+// field and other data is always empty.
+
+// Inet4Addr returns the Ifreq union data from an embedded sockaddr as a C
+// in_addr/Go []byte (4-byte IPv4 address) value. If the sockaddr family is not
+// AF_INET, an error is returned.
+func (ifr *Ifreq) Inet4Addr() ([]byte, error) {
+ raw := *(*RawSockaddrInet4)(unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]))
+ if raw.Family != AF_INET {
+ // Cannot safely interpret raw.Addr bytes as an IPv4 address.
+ return nil, EINVAL
+ }
+
+ return raw.Addr[:], nil
+}
+
+// SetInet4Addr sets a C in_addr/Go []byte (4-byte IPv4 address) value in an
+// embedded sockaddr within the Ifreq's union data. v must be 4 bytes in length
+// or an error will be returned.
+func (ifr *Ifreq) SetInet4Addr(v []byte) error {
+ if len(v) != 4 {
+ return EINVAL
+ }
+
+ var addr [4]byte
+ copy(addr[:], v)
+
+ ifr.clear()
+ *(*RawSockaddrInet4)(
+ unsafe.Pointer(&ifr.raw.Ifru[:SizeofSockaddrInet4][0]),
+ ) = RawSockaddrInet4{
+ // Always set IP family as ioctls would require it anyway.
+ Family: AF_INET,
+ Addr: addr,
+ }
+
+ return nil
+}
+
+// Uint16 returns the Ifreq union data as a C short/Go uint16 value.
+func (ifr *Ifreq) Uint16() uint16 {
+ return *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0]))
+}
+
+// SetUint16 sets a C short/Go uint16 value as the Ifreq's union data.
+func (ifr *Ifreq) SetUint16(v uint16) {
+ ifr.clear()
+ *(*uint16)(unsafe.Pointer(&ifr.raw.Ifru[:2][0])) = v
+}
+
+// Uint32 returns the Ifreq union data as a C int/Go uint32 value.
+func (ifr *Ifreq) Uint32() uint32 {
+ return *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0]))
+}
+
+// SetUint32 sets a C int/Go uint32 value as the Ifreq's union data.
+func (ifr *Ifreq) SetUint32(v uint32) {
+ ifr.clear()
+ *(*uint32)(unsafe.Pointer(&ifr.raw.Ifru[:4][0])) = v
+}
+
+// clear zeroes the ifreq's union field to prevent trailing garbage data from
+// being sent to the kernel if an ifreq is reused.
+func (ifr *Ifreq) clear() {
+ for i := range ifr.raw.Ifru {
+ ifr.raw.Ifru[i] = 0
+ }
+}
+
+// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
+// IoctlGetEthtoolDrvinfo which use these APIs under the hood.
+
+// An ifreqData is an Ifreq which carries pointer data. To produce an ifreqData,
+// use the Ifreq.withData method.
+type ifreqData struct {
+ name [IFNAMSIZ]byte
+ // A type separate from ifreq is required in order to comply with the
+ // unsafe.Pointer rules since the "pointer-ness" of data would not be
+ // preserved if it were cast into the byte array of a raw ifreq.
+ data unsafe.Pointer
+ // Pad to the same size as ifreq.
+ _ [len(ifreq{}.Ifru) - SizeofPtr]byte
+}
+
+// withData produces an ifreqData with the pointer p set for ioctls which require
+// arbitrary pointer data.
+func (ifr Ifreq) withData(p unsafe.Pointer) ifreqData {
+ return ifreqData{
+ name: ifr.raw.Ifrn,
+ data: p,
+ }
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl.go
deleted file mode 100644
index 8906fce5c36..00000000000
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package unix
-
-import "runtime"
-
-// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
-//
-// To change fd's window size, the req argument should be TIOCSWINSZ.
-func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
- // TODO: if we get the chance, remove the req parameter and
- // hardcode TIOCSWINSZ.
- err := ioctlSetWinsize(fd, req, value)
- runtime.KeepAlive(value)
- return err
-}
-
-// IoctlSetTermios performs an ioctl on fd with a *Termios.
-//
-// The req value will usually be TCSETA or TIOCSETA.
-func IoctlSetTermios(fd int, req uint, value *Termios) error {
- // TODO: if we get the chance, remove the req parameter.
- err := ioctlSetTermios(fd, req, value)
- runtime.KeepAlive(value)
- return err
-}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_linux.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_linux.go
new file mode 100644
index 00000000000..dbe680eab88
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -0,0 +1,238 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "unsafe"
+
+// IoctlRetInt performs an ioctl operation specified by req on a device
+// associated with opened file descriptor fd, and returns a non-negative
+// integer that is returned by the ioctl syscall.
+func IoctlRetInt(fd int, req uint) (int, error) {
+ ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0)
+ if err != 0 {
+ return 0, err
+ }
+ return int(ret), nil
+}
+
+func IoctlGetUint32(fd int, req uint) (uint32, error) {
+ var value uint32
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetRTCTime(fd int) (*RTCTime, error) {
+ var value RTCTime
+ err := ioctlPtr(fd, RTC_RD_TIME, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlSetRTCTime(fd int, value *RTCTime) error {
+ return ioctlPtr(fd, RTC_SET_TIME, unsafe.Pointer(value))
+}
+
+func IoctlGetRTCWkAlrm(fd int) (*RTCWkAlrm, error) {
+ var value RTCWkAlrm
+ err := ioctlPtr(fd, RTC_WKALM_RD, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlSetRTCWkAlrm(fd int, value *RTCWkAlrm) error {
+ return ioctlPtr(fd, RTC_WKALM_SET, unsafe.Pointer(value))
+}
+
+// IoctlGetEthtoolDrvinfo fetches ethtool driver information for the network
+// device specified by ifname.
+func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := EthtoolDrvinfo{Cmd: ETHTOOL_GDRVINFO}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+ return &value, err
+}
+
+// IoctlGetWatchdogInfo fetches information about a watchdog device from the
+// Linux watchdog API. For more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlGetWatchdogInfo(fd int) (*WatchdogInfo, error) {
+ var value WatchdogInfo
+ err := ioctlPtr(fd, WDIOC_GETSUPPORT, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlWatchdogKeepalive issues a keepalive ioctl to a watchdog device. For
+// more information, see:
+// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
+func IoctlWatchdogKeepalive(fd int) error {
+ // arg is ignored and not a pointer, so ioctl is fine instead of ioctlPtr.
+ return ioctl(fd, WDIOC_KEEPALIVE, 0)
+}
+
+// IoctlFileCloneRange performs an FICLONERANGE ioctl operation to clone the
+// range of data conveyed in value to the file associated with the file
+// descriptor destFd. See the ioctl_ficlonerange(2) man page for details.
+func IoctlFileCloneRange(destFd int, value *FileCloneRange) error {
+ return ioctlPtr(destFd, FICLONERANGE, unsafe.Pointer(value))
+}
+
+// IoctlFileClone performs an FICLONE ioctl operation to clone the entire file
+// associated with the file description srcFd to the file associated with the
+// file descriptor destFd. See the ioctl_ficlone(2) man page for details.
+func IoctlFileClone(destFd, srcFd int) error {
+ return ioctl(destFd, FICLONE, uintptr(srcFd))
+}
+
+type FileDedupeRange struct {
+ Src_offset uint64
+ Src_length uint64
+ Reserved1 uint16
+ Reserved2 uint32
+ Info []FileDedupeRangeInfo
+}
+
+type FileDedupeRangeInfo struct {
+ Dest_fd int64
+ Dest_offset uint64
+ Bytes_deduped uint64
+ Status int32
+ Reserved uint32
+}
+
+// IoctlFileDedupeRange performs an FIDEDUPERANGE ioctl operation to share the
+// range of data conveyed in value from the file associated with the file
+// descriptor srcFd to the value.Info destinations. See the
+// ioctl_fideduperange(2) man page for details.
+func IoctlFileDedupeRange(srcFd int, value *FileDedupeRange) error {
+ buf := make([]byte, SizeofRawFileDedupeRange+
+ len(value.Info)*SizeofRawFileDedupeRangeInfo)
+ rawrange := (*RawFileDedupeRange)(unsafe.Pointer(&buf[0]))
+ rawrange.Src_offset = value.Src_offset
+ rawrange.Src_length = value.Src_length
+ rawrange.Dest_count = uint16(len(value.Info))
+ rawrange.Reserved1 = value.Reserved1
+ rawrange.Reserved2 = value.Reserved2
+
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ rawinfo.Dest_fd = value.Info[i].Dest_fd
+ rawinfo.Dest_offset = value.Info[i].Dest_offset
+ rawinfo.Bytes_deduped = value.Info[i].Bytes_deduped
+ rawinfo.Status = value.Info[i].Status
+ rawinfo.Reserved = value.Info[i].Reserved
+ }
+
+ err := ioctlPtr(srcFd, FIDEDUPERANGE, unsafe.Pointer(&buf[0]))
+
+ // Output
+ for i := range value.Info {
+ rawinfo := (*RawFileDedupeRangeInfo)(unsafe.Pointer(
+ uintptr(unsafe.Pointer(&buf[0])) + uintptr(SizeofRawFileDedupeRange) +
+ uintptr(i*SizeofRawFileDedupeRangeInfo)))
+ value.Info[i].Dest_fd = rawinfo.Dest_fd
+ value.Info[i].Dest_offset = rawinfo.Dest_offset
+ value.Info[i].Bytes_deduped = rawinfo.Bytes_deduped
+ value.Info[i].Status = rawinfo.Status
+ value.Info[i].Reserved = rawinfo.Reserved
+ }
+
+ return err
+}
+
+func IoctlHIDGetDesc(fd int, value *HIDRawReportDescriptor) error {
+ return ioctlPtr(fd, HIDIOCGRDESC, unsafe.Pointer(value))
+}
+
+func IoctlHIDGetRawInfo(fd int) (*HIDRawDevInfo, error) {
+ var value HIDRawDevInfo
+ err := ioctlPtr(fd, HIDIOCGRAWINFO, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlHIDGetRawName(fd int) (string, error) {
+ var value [_HIDIOCGRAWNAME_LEN]byte
+ err := ioctlPtr(fd, _HIDIOCGRAWNAME, unsafe.Pointer(&value[0]))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawPhys(fd int) (string, error) {
+ var value [_HIDIOCGRAWPHYS_LEN]byte
+ err := ioctlPtr(fd, _HIDIOCGRAWPHYS, unsafe.Pointer(&value[0]))
+ return ByteSliceToString(value[:]), err
+}
+
+func IoctlHIDGetRawUniq(fd int) (string, error) {
+ var value [_HIDIOCGRAWUNIQ_LEN]byte
+ err := ioctlPtr(fd, _HIDIOCGRAWUNIQ, unsafe.Pointer(&value[0]))
+ return ByteSliceToString(value[:]), err
+}
+
+// IoctlIfreq performs an ioctl using an Ifreq structure for input and/or
+// output. See the netdevice(7) man page for details.
+func IoctlIfreq(fd int, req uint, value *Ifreq) error {
+ // It is possible we will add more fields to *Ifreq itself later to prevent
+ // misuse, so pass the raw *ifreq directly.
+ return ioctlPtr(fd, req, unsafe.Pointer(&value.raw))
+}
+
+// TODO(mdlayher): export if and when IfreqData is exported.
+
+// ioctlIfreqData performs an ioctl using an ifreqData structure for input
+// and/or output. See the netdevice(7) man page for details.
+func ioctlIfreqData(fd int, req uint, value *ifreqData) error {
+ // The memory layout of IfreqData (type-safe) and ifreq (not type-safe) are
+ // identical so pass *IfreqData directly.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlKCMClone attaches a new file descriptor to a multiplexor by cloning an
+// existing KCM socket, returning a structure containing the file descriptor of
+// the new socket.
+func IoctlKCMClone(fd int) (*KCMClone, error) {
+ var info KCMClone
+ if err := ioctlPtr(fd, SIOCKCMCLONE, unsafe.Pointer(&info)); err != nil {
+ return nil, err
+ }
+
+ return &info, nil
+}
+
+// IoctlKCMAttach attaches a TCP socket and associated BPF program file
+// descriptor to a multiplexor.
+func IoctlKCMAttach(fd int, info KCMAttach) error {
+ return ioctlPtr(fd, SIOCKCMATTACH, unsafe.Pointer(&info))
+}
+
+// IoctlKCMUnattach unattaches a TCP socket file descriptor from a multiplexor.
+func IoctlKCMUnattach(fd int, info KCMUnattach) error {
+ return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info))
+}
+
+// IoctlLoopGetStatus64 gets the status of the loop device associated with the
+// file descriptor fd using the LOOP_GET_STATUS64 operation.
+func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
+ var value LoopInfo64
+ if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil {
+ return nil, err
+ }
+ return &value, nil
+}
+
+// IoctlLoopSetStatus64 sets the status of the loop device associated with the
+// file descriptor fd using the LOOP_SET_STATUS64 operation.
+func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
+ return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
+}
+
+// IoctlLoopConfigure configures all loop device parameters in a single step
+func IoctlLoopConfigure(fd int, value *LoopConfig) error {
+ return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value))
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_signed.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_signed.go
new file mode 100644
index 00000000000..5b0759bd865
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_signed.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build aix || solaris
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req int, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetPointerInt performs an ioctl operation which sets an
+// integer value on fd, using the specified request number. The ioctl
+// argument is called with a pointer to the integer value, rather than
+// passing the integer value directly.
+func IoctlSetPointerInt(fd int, req int, value int) error {
+ v := int32(value)
+ return ioctlPtr(fd, req, unsafe.Pointer(&v))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req int, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req int, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req int) (int, error) {
+ var value int
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
+ var value Winsize
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req int) (*Termios, error) {
+ var value Termios
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
new file mode 100644
index 00000000000..20f470b9d09
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_unsigned.go
@@ -0,0 +1,69 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd
+
+package unix
+
+import (
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req uint, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetPointerInt performs an ioctl operation which sets an
+// integer value on fd, using the specified request number. The ioctl
+// argument is called with a pointer to the integer value, rather than
+// passing the integer value directly.
+func IoctlSetPointerInt(fd int, req uint, value int) error {
+ v := int32(value)
+ return ioctlPtr(fd, req, unsafe.Pointer(&v))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req uint) (int, error) {
+ var value int
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
+ var value Winsize
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
+
+func IoctlGetTermios(fd int, req uint) (*Termios, error) {
+ var value Termios
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_zos.go b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_zos.go
new file mode 100644
index 00000000000..c8b2a750f8c
--- /dev/null
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/ioctl_zos.go
@@ -0,0 +1,71 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build zos && s390x
+
+package unix
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// ioctl itself should not be exposed directly, but additional get/set
+// functions for specific types are permissible.
+
+// IoctlSetInt performs an ioctl operation which sets an integer value
+// on fd, using the specified request number.
+func IoctlSetInt(fd int, req int, value int) error {
+ return ioctl(fd, req, uintptr(value))
+}
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req int, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ return ioctlPtr(fd, req, unsafe.Pointer(value))
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCSETS, TCSETSW, or TCSETSF
+func IoctlSetTermios(fd int, req int, value *Termios) error {
+ if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) {
+ return ENOSYS
+ }
+ err := Tcsetattr(fd, int(req), value)
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlGetInt performs an ioctl operation which gets an integer value
+// from fd, using the specified request number.
+//
+// A few ioctl requests use the return value as an output parameter;
+// for those, IoctlRetInt should be used instead of this function.
+func IoctlGetInt(fd int, req int) (int, error) {
+ var value int
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return value, err
+}
+
+func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
+ var value Winsize
+ err := ioctlPtr(fd, req, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlGetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value is expected to be TCGETS
+func IoctlGetTermios(fd int, req int) (*Termios, error) {
+ var value Termios
+ if req != TCGETS {
+ return &value, ENOSYS
+ }
+ err := Tcgetattr(fd, &value)
+ return &value, err
+}
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkall.sh b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkall.sh
index 5a22eca9676..e6f31d374df 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkall.sh
@@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
$cmd docker build --tag generate:$GOOS $GOOS
- $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
+ $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
fi
@@ -70,31 +70,15 @@ aix_ppc64)
mksyscall="go run mksyscall_aix_ppc64.go -aix"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
-darwin_386)
- mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm_darwin.go"
- ;;
darwin_amd64)
mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm_darwin.go"
- ;;
-darwin_arm)
- mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm_darwin.go"
+ mkasm="go run mkasm.go"
;;
darwin_arm64)
mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
- mkasm="go run mkasm_darwin.go"
+ mkasm="go run mkasm.go"
;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
@@ -105,26 +89,31 @@ dragonfly_amd64)
freebsd_386)
mkerrors="$mkerrors -m32"
mksyscall="go run mksyscall.go -l32"
- mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_amd64)
mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_arm)
mkerrors="$mkerrors"
mksyscall="go run mksyscall.go -l32 -arm"
- mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
freebsd_arm64)
mkerrors="$mkerrors -m64"
- mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'"
- mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+freebsd_riscv64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="go run mksysnum.go 'https://cgit.freebsd.org/src/plain/sys/kern/syscalls.master?h=stable/12'"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
netbsd_386)
mkerrors="$mkerrors -m32"
@@ -153,33 +142,60 @@ netbsd_arm64)
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_386)
+ mkasm="go run mkasm.go"
mkerrors="$mkerrors -m32"
- mksyscall="go run mksyscall.go -l32 -openbsd"
+ mksyscall="go run mksyscall.go -l32 -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
- mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
+ mkasm="go run mkasm.go"
mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd"
+ mksyscall="go run mksyscall.go -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
- mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
+ mkasm="go run mkasm.go"
mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -openbsd -arm"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc"
mksysctl="go run mksysctl_openbsd.go"
- mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_arm64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_mips64)
+ mkasm="go run mkasm.go"
mkerrors="$mkerrors -m64"
- mksyscall="go run mksyscall.go -openbsd"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_ppc64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
+ mksysctl="go run mksysctl_openbsd.go"
+ # Let the type of C char be signed for making the bare syscall
+ # API consistent across platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+openbsd_riscv64)
+ mkasm="go run mkasm.go"
+ mkerrors="$mkerrors -m64"
+ mksyscall="go run mksyscall.go -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
- mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
@@ -190,6 +206,12 @@ solaris_amd64)
mksysnum=
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
+illumos_amd64)
+ mksyscall="go run mksyscall_solaris.go"
+ mkerrors=
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
*)
echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
exit 1
@@ -210,11 +232,11 @@ esac
if [ "$GOOSARCH" == "aix_ppc64" ]; then
# aix/ppc64 script generates files instead of writing to stdin.
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
- elif [ "$GOOS" == "darwin" ]; then
- # pre-1.12, direct syscalls
- echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go";
- # 1.12 and later, syscalls via libSystem
- echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ elif [ "$GOOS" == "illumos" ]; then
+ # illumos code generation requires a --illumos switch
+ echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
+ # illumos implies solaris, so solaris code generation is also required
+ echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go";
else
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
fi
@@ -223,5 +245,5 @@ esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
- if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
+ if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi
) | $run
diff --git a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkerrors.sh b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkerrors.sh
index 3d85f2795e2..fdcaa974d23 100644
--- a/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/integration/assets/hydrabroker/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -44,6 +44,7 @@ includes_AIX='
#include
#include
#include
+#include
#include
#include
#include
@@ -53,21 +54,29 @@ includes_AIX='
includes_Darwin='
#define _DARWIN_C_SOURCE
-#define KERNEL
+#define KERNEL 1
#define _DARWIN_USE_64_BIT_INODE
+#define __APPLE_USE_RFC_3542
#include
#include
+#include
+#include
#include
#include
#include
+#include
#include
+#include
+#include
#include
+#include
#include
#include
#include
#include
#include
#include
+#include
#include
#include
#include
@@ -75,11 +84,15 @@ includes_Darwin='
#include
#include
#include
+
+// for backwards compatibility because moved TIOCREMOTE to Kernel.framework after MacOSX12.0.sdk.
+#define TIOCREMOTE 0x80047469
'
includes_DragonFly='
#include
#include
+#include
#include
#include
#include
@@ -90,6 +103,7 @@ includes_DragonFly='
#include
#include
#include
+#include
#include
#include
#include
@@ -102,8 +116,12 @@ includes_FreeBSD='
#include
#include
#include
+#include
#include
+#include
+#include
#include
+#include
#include
#include
#include
@@ -111,6 +129,7 @@ includes_FreeBSD='
#include
#include
#include
+#include
#include
#include
#include
@@ -179,53 +198,81 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
+#include
+#include
#include
+#include
+#include
#include
+#include
+#include
+#include
+#include
#include
+#include
+#include
+#include
#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
#include
+#include
#include
#include
#include
#include
#include
#include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
#include
#include
+#include
+#include
+#include
#include
#include
#include
+#include
#include
+#include
#include
#include
+#include
+#include
#include
+#include
+#include
#include
#include
+#include
#include
-#include
#include
#include
-#include
-#include
-#include
#include
-#include
-#include
+#include
#include
-#include
+#include
+#include
+#include
#include
-#include
-#include
-#include
-#include
+#include
+
#include
+#include
#include
#if defined(__sparc__)
@@ -237,10 +284,6 @@ struct ltchars {
#include
#endif
-#ifndef MSG_FASTOPEN
-#define MSG_FASTOPEN 0x20000000
-#endif
-
#ifndef PTRACE_GETREGS
#define PTRACE_GETREGS 0xc
#endif
@@ -249,10 +292,6 @@ struct ltchars {
#define PTRACE_SETREGS 0xd
#endif
-#ifndef SOL_NETLINK
-#define SOL_NETLINK 270
-#endif
-
#ifdef SOL_BLUETOOTH
// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
// but it is already in bluetooth_linux.go
@@ -263,6 +302,40 @@ struct ltchars {
#define FS_KEY_DESC_PREFIX "fscrypt:"
#define FS_KEY_DESC_PREFIX_SIZE 8
#define FS_MAX_KEY_SIZE 64
+
+// The code generator produces -0x1 for (~0), but an unsigned value is necessary
+// for the tipc_subscr timeout __u32 field.
+#undef TIPC_WAIT_FOREVER
+#define TIPC_WAIT_FOREVER 0xffffffff
+
+// Copied from linux/netfilter/nf_nat.h
+// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h
+// and netinet/in.h.
+#define NF_NAT_RANGE_MAP_IPS (1 << 0)
+#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1)
+#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2)
+#define NF_NAT_RANGE_PERSISTENT (1 << 3)
+#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4)
+#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5)
+#define NF_NAT_RANGE_NETMAP (1 << 6)
+#define NF_NAT_RANGE_PROTO_RANDOM_ALL \
+ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+#define NF_NAT_RANGE_MASK \
+ (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \
+ NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \
+ NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \
+ NF_NAT_RANGE_NETMAP)
+
+// Copied from linux/hid.h.
+// Keep in sync with the size of the referenced fields.
+#define _HIDIOCGRAWNAME_LEN 128 // sizeof_field(struct hid_device, name)
+#define _HIDIOCGRAWPHYS_LEN 64 // sizeof_field(struct hid_device, phys)
+#define _HIDIOCGRAWUNIQ_LEN 64 // sizeof_field(struct hid_device, uniq)
+
+#define _HIDIOCGRAWNAME HIDIOCGRAWNAME(_HIDIOCGRAWNAME_LEN)
+#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
+#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
+
'
includes_NetBSD='
@@ -272,6 +345,8 @@ includes_NetBSD='
#include
#include
#include
+#include
+#include
#include
#include
#include
@@ -298,6 +373,8 @@ includes_OpenBSD='
#include
#include
#include
+#include
+#include
#include
#include