diff --git a/go.mod b/go.mod index c1cdb0376e..1cba0286a9 100644 --- a/go.mod +++ b/go.mod @@ -11,15 +11,15 @@ replace ( require ( github.com/coreos/prometheus-operator v0.36.0 github.com/knative/pkg v0.0.0-20190817231834-12ee58e32cc8 - github.com/pkg/errors v0.8.1 + github.com/pkg/errors v0.9.1 github.com/rancher/norman v0.0.0-20200520181341-ab75acb55410 - github.com/rancher/wrangler v0.5.4-0.20200520040055-b8d49179cfc8 + github.com/rancher/wrangler v0.7.4-security1 github.com/rancher/wrangler-api v0.5.1-0.20200326194427-c13310506d04 github.com/sirupsen/logrus v1.4.2 golang.org/x/tools v0.0.0-20200624223020-7a9acb0a45bb // indirect - k8s.io/api v0.18.0 + k8s.io/api v0.18.8 k8s.io/apiextensions-apiserver v0.18.0 - k8s.io/apimachinery v0.18.0 + k8s.io/apimachinery v0.18.8 k8s.io/apiserver v0.18.0 k8s.io/client-go v12.0.0+incompatible k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 diff --git a/go.sum b/go.sum index ad1395a361..eb42884322 100644 --- a/go.sum +++ b/go.sum @@ -14,6 +14,7 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-sdk-for-go v23.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -37,18 +38,16 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= +github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -60,6 +59,7 @@ github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -72,9 +72,7 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -92,12 +90,17 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= 
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -108,16 +111,21 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/prometheus-operator v0.36.0 h1:ayzkQoqfbnEne+ZMOZl30ZzSymY6KFpzA58QLz+4wlw= github.com/coreos/prometheus-operator v0.36.0/go.mod h1:b1ydz/Rg9TqDtHu2MDXKfiX/Hv0Bogy1iY82fRbhEFg= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -125,6 +133,7 @@ github.com/docker/spdystream 
v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= @@ -135,19 +144,22 @@ github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -155,12 +167,14 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -171,23 +185,17 @@ github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQH github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1 h1:wSt/4CYxs70xbATrGXhokKF1i0tZjENLOo1ioIO13zk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= github.com/go-openapi/jsonpointer v0.19.2/go.mod 
h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -199,50 +207,44 @@ github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6 github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod 
h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= 
github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache 
v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= @@ -251,25 +253,25 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod 
h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367 h1:ScAXWS+TR6MZKex+7Z8rneuSJH+FSDqd6ocQyl+ZHo4= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -288,7 +290,6 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic 
v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= @@ -304,10 +305,12 @@ github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= @@ -334,7 +337,6 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod 
h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -345,8 +347,10 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= @@ -357,11 +361,8 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= 
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -374,7 +375,6 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -388,18 +388,17 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= 
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a h1:TpvdAwDAt1K4ANVOfcihouRdvP+MgAfDWwBuct4l6ZY= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/maruel/panicparse v0.0.0-20171209025017-c0182c169410/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI= github.com/maruel/ut v1.0.0/go.mod h1:I68ffiAt5qre9obEVTy7S2/fj2dJku2NYLvzPuY0gqE= @@ -439,6 +438,7 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 
h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -453,17 +453,23 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/openshift/prom-label-proxy v0.1.1-0.20191016113035-b8153a7f39f1/go.mod h1:p5MuxzsYP1JPsNGwtjtcgRHHlGziCJJfztff91nNixw= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -472,14 +478,15 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -488,35 +495,32 @@ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prY github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model 
v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= 
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -526,6 +530,10 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA= +github.com/rancher/lasso v0.0.0-20200905045615-7fcb07d6a20b h1:DQLpu44dsR+2qYvg1wyYadk108MWMHUOqvb2z4274Vs= +github.com/rancher/lasso v0.0.0-20200905045615-7fcb07d6a20b/go.mod h1:OhBBBO1pBwYp0hacWdnvSGOj+XE9yMLOLnaypIlic18= github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009 h1:Xsxh7fX3+2wAUJtPy8g2lZh0cYuyifqhBL0vxCIYojs= github.com/rancher/moq v0.0.0-20190404221404-ee5226d43009/go.mod h1:wpITyDPTi/Na5h73XkbuEf2AP9fbgrIGqqxVzFhYD6U= github.com/rancher/norman v0.0.0-20200520181341-ab75acb55410 h1:iPheUCT5yzEWhMsILMUeFfbcwkhKur5BsuMTL/pGC5s= @@ -533,8 +541,9 @@ github.com/rancher/norman 
v0.0.0-20200520181341-ab75acb55410/go.mod h1:j0ffXOmIn github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e h1:j6+HqCET/NLPBtew2m5apL7jWw/PStQ7iGwXjgAqdvo= github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e/go.mod h1:XbYHTPaXuw8ZY9bylhYKQh/nJxDaTKk3YhAxPl4Qy/k= github.com/rancher/wrangler v0.5.4-0.20200326191509-4054411d9736/go.mod h1:L4HtjPeX8iqLgsxfJgz+JjKMcX2q3qbRXSeTlC/CSd4= -github.com/rancher/wrangler v0.5.4-0.20200520040055-b8d49179cfc8 h1:gyboq5L4bx+iWyz4xHHFRqt7wyoou8+akRczPgVlFaQ= github.com/rancher/wrangler v0.5.4-0.20200520040055-b8d49179cfc8/go.mod h1:L4HtjPeX8iqLgsxfJgz+JjKMcX2q3qbRXSeTlC/CSd4= +github.com/rancher/wrangler v0.7.4-security1 h1:RQR56EZVQ/ow96pK9otKjSqs1jEQeVl2Mb2yElPbkHA= +github.com/rancher/wrangler v0.7.4-security1/go.mod h1:goezjesEKwMxHLfltdjg9DW0xWV7txQee6vOuSDqXAI= github.com/rancher/wrangler-api v0.5.1-0.20200326194427-c13310506d04 h1:y55e+kUaz/UswjN/oJdqHWMuoCG1FxwZJkxJEUONZZE= github.com/rancher/wrangler-api v0.5.1-0.20200326194427-c13310506d04/go.mod h1:R3nemXoECcrDqXDSHdY7yJay4j42TeEkU79Hep0rdJ8= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -544,6 +553,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= 
@@ -553,8 +563,10 @@ github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -573,43 +585,47 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff h1:VARhShG49tiji6mdRNp7JTNDtJ0FhuprF93GBQ37xGU= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
-github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/thanos-io/thanos v0.10.1 h1:rVYzLyJcjV2cmvBzIw1kx96msKQ4uwyi+6edTHVUPwA= github.com/thanos-io/thanos v0.10.1/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix 
v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -620,6 +636,8 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -628,6 +646,7 @@ go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADb go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -637,14 +656,12 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -671,6 +688,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -679,19 +698,16 @@ golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -701,9 +717,7 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -713,14 +727,13 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7O golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -737,22 +750,20 @@ golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -761,22 +772,20 @@ 
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180805044716-cb6730876b98/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09 h1:6Cq5LXQ/D2J5E7sYJemWSQApczOzY1rxSp8TWloyxIY= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -791,7 +800,6 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b h1:mSUCVIwDx4hfXJfWsOPfdzEHxzb2Xjl6BQ8YgPnazQA= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -804,11 +812,9 @@ golang.org/x/tools v0.0.0-20190918214516-5a1a30219888/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191017205301-920acffc3e65/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c h1:IGkKhmfzcztjm6gYkykvu/NiS8kaqbCWAEWWAyf8J5U= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200624223020-7a9acb0a45bb h1:NyaeMQ3XiyZSnTS7YjPk1ksNbe4EUlrse3jBvmVU2l8= @@ -817,6 +823,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= @@ -828,10 +835,8 @@ google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEn google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine 
v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -861,14 +866,13 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -879,20 +883,22 @@ gopkg.in/ini.v1 v1.51.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -904,32 +910,45 @@ howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCU k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= -k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ= +k8s.io/api v0.0.0-20191214185829-ca1d04f8b0d3/go.mod h1:itOjKREfmUTvcjantxOsyYU5mbFsU7qUnyUuRfF5+5M= +k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= +k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4= +k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= +k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q= k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= -k8s.io/apimachinery v0.18.0 
h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE= +k8s.io/apimachinery v0.0.0-20191214185652-442f8fb2f03a/go.mod h1:Ng1IY8TS7sC44KJxT/WUR6qFRfWwahYYYpNXyYRKOCY= +k8s.io/apimachinery v0.0.0-20191216025728-0ee8b4573e3a/go.mod h1:Ng1IY8TS7sC44KJxT/WUR6qFRfWwahYYYpNXyYRKOCY= +k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0= +k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= +k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/apiserver v0.18.0 h1:ELAWpGWC6XdbRLi5lwAbEbvksD7hkXxPdxaJsdpist4= k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw= +k8s.io/cli-runtime v0.0.0-20191214191754-e6dc6d5c8724/go.mod h1:wzlq80lvjgHW9if6MlE4OIGC86MDKsy5jtl9nxz/IYY= +k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4= k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= +k8s.io/code-generator v0.0.0-20191214185510-0b9b3c99f9f2/go.mod h1:BjGKcoq1MRUmcssvHiSxodCco1T6nVIt4YeCT5CMSao= +k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= +k8s.io/component-base v0.0.0-20191214190519-d868452632e2/go.mod h1:wupxkh1T/oUDqyTtcIjiEfpbmIHGm8By/vqpSKC6z8c= +k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= 
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -939,11 +958,14 @@ k8s.io/kube-aggregator v0.18.0 h1:J+wa9FDQ3SbgyA8wQBNg2m2FMSm+mMQfs2A58500hs0= k8s.io/kube-aggregator v0.18.0/go.mod h1:ateewQ5QbjMZF/dihEFXwaEwoA4v/mayRvzfmvb6eqI= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kubectl v0.0.0-20191219154910-1528d4eea6dd/go.mod 
h1:9ehGcuUGjXVZh0qbYSB0vvofQw2JQe6c6cO0k4wu/Oo= +k8s.io/metrics v0.0.0-20191214191643-6b1944c9f765/go.mod h1:5V7rewilItwK0cz4nomU0b3XCcees2Ka5EBYWS1HBeM= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= @@ -954,14 +976,21 @@ modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/cli-utils v0.16.0 h1:Wr32m1oxjIqc9G9l+igr13PeIM9LCyq8jQ8KjXKelvg= +sigs.k8s.io/cli-utils v0.16.0/go.mod h1:9Jqm9K2W6ShhCxsEuaz6HSRKKOXigPUx3ZfypGgxBLY= +sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/kustomize/kyaml v0.4.0/go.mod h1:XJL84E6sOFeNrQ7CADiemc1B0EjIxHo3OhW4o1aJYNw= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca h1:6dsH6AYQWbyZmtttJNe8Gq1cXOeS1BdV3eW37zHilAQ= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod 
h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 0000000000..2092c72c46 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.8 + - 1.7 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - go get github.com/jessevdk/go-flags + +script: + - go get + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 0000000000..0eb9b72d84 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 0000000000..9c7f87f7ce --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,297 @@ +# JSON-Patch +`jsonpatch` is a library which provides functionallity for both applying +[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as +well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). 
+ +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) +[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) + +# Get It! + +**Latest and greatest**: +```bash +go get -u github.com/evanphx/json-patch +``` + +**Stable Versions**: +* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` + +(previous versions below `v3` are unavailable) + +# Use It! +* [Create and apply a merge patch](#create-and-apply-a-merge-patch) +* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) +* [Comparing JSON documents](#comparing-json-documents) +* [Combine merge patches](#combine-merge-patches) + + +# Configuration + +* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. + This defaults to `true` and enables the non-standard practice of allowing + negative indices to mean indices starting at the end of an array. This + functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = + false`. + +* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, + which limits the total size increase in bytes caused by "copy" operations in a + patch. It defaults to 0, which means there is no limit. + +## Create and apply a merge patch +Given both an original JSON document and a modified JSON document, you can create +a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. + +It can describe the changes needed to convert from the original to the +modified JSON document. + +Once you have a merge patch, you can apply it to other JSON documents using the +`jsonpatch.MergePatch(document, patch)` function. 
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + // Let's create a merge patch from these two documents... + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + target := []byte(`{"name": "Jane", "age": 24}`) + + patch, err := jsonpatch.CreateMergePatch(original, target) + if err != nil { + panic(err) + } + + // Now lets apply the patch against a different JSON document... + + alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) + modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) + + fmt.Printf("patch document: %s\n", patch) + fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +patch document: {"height":null,"name":"Jane"} +updated tina doc: {"age":28,"name":"Jane"} +``` + +## Create and apply a JSON Patch +You can create patch objects using `DecodePatch([]byte)`, which can then +be applied against JSON documents. + +The following is an example of creating a patch from two operations, and +applying it against a JSON document. 
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + patchJSON := []byte(`[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} + ]`) + + patch, err := jsonpatch.DecodePatch(patchJSON) + if err != nil { + panic(err) + } + + modified, err := patch.Apply(original) + if err != nil { + panic(err) + } + + fmt.Printf("Original document: %s\n", original) + fmt.Printf("Modified document: %s\n", modified) +} +``` + +When ran, you get the following output: + +```bash +$ go run main.go +Original document: {"name": "John", "age": 24, "height": 3.21} +Modified document: {"age":24,"name":"Jane"} +``` + +## Comparing JSON documents +Due to potential whitespace and ordering differences, one cannot simply compare +JSON strings or byte-arrays directly. + +As such, you can instead use `jsonpatch.Equal(document1, document2)` to +determine if two JSON documents are _structurally_ equal. This ignores +whitespace differences, and key-value ordering. 
+ +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + similar := []byte(` + { + "age": 24, + "height": 3.21, + "name": "John" + } + `) + different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) + + if jsonpatch.Equal(original, similar) { + fmt.Println(`"original" is structurally equal to "similar"`) + } + + if !jsonpatch.Equal(original, different) { + fmt.Println(`"original" is _not_ structurally equal to "similar"`) + } +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +"original" is structurally equal to "similar" +"original" is _not_ structurally equal to "similar" +``` + +## Combine merge patches +Given two JSON merge patch documents, it is possible to combine them into a +single merge patch which can describe both set of changes. + +The resulting merge patch can be used such that applying it results in a +document structurally similar as merging each merge patch to the document +in succession. + +```go +package main + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" +) + +func main() { + original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) + + nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) + ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) + + // Let's combine these merge patch documents... 
+ combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply each patch individual against the original document + withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) + if err != nil { + panic(err) + } + + withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) + if err != nil { + panic(err) + } + + // Apply the combined patch against the original document + + withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) + if err != nil { + panic(err) + } + + // Do both result in the same thing? They should! + if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { + fmt.Println("Both JSON documents are structurally the same!") + } + + fmt.Printf("combined merge patch: %s", combinedPatch) +} +``` + +When ran, you get the following output: +```bash +$ go run main.go +Both JSON documents are structurally the same! +combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} +``` + +# CLI for comparing JSON documents +You can install the commandline program `json-patch`. + +This program can take multiple JSON patch documents as arguments, +and fed a JSON document from `stdin`. It will apply the patch(es) against +the document and output the modified doc. + +**patch.1.json** +```json +[ + {"op": "replace", "path": "/name", "value": "Jane"}, + {"op": "remove", "path": "/height"} +] +``` + +**patch.2.json** +```json +[ + {"op": "add", "path": "/address", "value": "123 Main St"}, + {"op": "replace", "path": "/age", "value": "21"} +] +``` + +**document.json** +```json +{ + "name": "John", + "age": 24, + "height": 3.21 +} +``` + +You can then run: + +```bash +$ go install github.com/evanphx/json-patch/cmd/json-patch +$ cat document.json | json-patch -p patch.1.json -p patch.2.json +{"address":"123 Main St","age":"21","name":"Jane"} +``` + +# Help It! +Contributions are welcomed! 
Leave [an issue](https://github.com/evanphx/json-patch/issues) +or [create a PR](https://github.com/evanphx/json-patch/compare). + + +Before creating a pull request, we'd ask that you make sure tests are passing +and that you have added new tests when applicable. + +Contributors can run tests using: + +```bash +go test -cover ./... +``` + +Builds for pull requests are tested automatically +using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go new file mode 100644 index 0000000000..75304b4437 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. 
+func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 0000000000..6806c4c200 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,383 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range *patch { + if v == nil { + if mergeMerge { + (*doc)[k] = nil + } else { + delete(*doc, k) + } + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v, mergeMerge) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging 
each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. +func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. 
+func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices are not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. 
+func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 0000000000..1b5f95e611 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,776 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. 
+ AccumulatedCopySizeLimit int64 = 0 +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") +) + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. +type Patch []Operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + ra := make(json.RawMessage, sz) + copy(ra, a) + return newLazyNode(&ra), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if 
n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. 
+func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. +func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, 
val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". +func (d *partialArray) set(key string, val *lazyNode) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if SupportNegativeIndices { + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(ary) + } + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if 
SupportNegativeIndices { + if idx < -len(cur) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + idx += len(cur) + } + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value()) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation) error { + from, err := op.From() 
+ if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + con, key := findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from) + + if con == nil { + return 
errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal indicates if 2 JSON documents have the same structural equality. +func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. 
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml index d4b92663ba..9159de03e0 100644 --- a/vendor/github.com/pkg/errors/.travis.yml +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -1,15 +1,10 @@ language: go go_import_path: github.com/pkg/errors go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - 1.11.x + - 1.12.x + - 1.13.x - tip script: - - go test -v ./... 
+ - make check diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 0000000000..ce9d7cded6 --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md index 6483ba2afb..54dfdcb12e 100644 --- a/vendor/github.com/pkg/errors/README.md +++ b/vendor/github.com/pkg/errors/README.md @@ -41,11 +41,18 @@ default: [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + ## Contributing -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. 
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. -Before proposing a change, please discuss your change by raising an issue. +Before sending a PR, please discuss your change by raising an issue. ## License diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 7421f326ff..161aea2582 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -82,7 +82,7 @@ // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) +// fmt.Printf("%+s:%d\n", f, f) // } // } // @@ -159,6 +159,9 @@ type withStack struct { func (w *withStack) Cause() error { return w.error } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -241,6 +244,9 @@ type withMessage struct { func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 0000000000..be0d10d0c7 --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. 
+// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 2874a048cf..779a8348fb 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -5,10 +5,13 @@ import ( "io" "path" "runtime" + "strconv" "strings" ) // Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; @@ -37,6 +40,15 @@ func (f Frame) line() int { return line } +// name returns the name of this function, if known. 
+func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + // Format formats the frame according to the fmt.Formatter interface. // // %s source file @@ -54,22 +66,16 @@ func (f Frame) Format(s fmt.State, verb rune) { case 's': switch { case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': - fmt.Fprintf(s, "%d", f.line()) + io.WriteString(s, strconv.Itoa(f.line())) case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) + io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") @@ -77,6 +83,16 @@ func (f Frame) Format(s fmt.State, verb rune) { } } +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame @@ -94,16 +110,30 @@ func (st StackTrace) Format(s fmt.State, verb rune) { switch { case s.Flag('+'): for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) + io.WriteString(s, "\n") + f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: - fmt.Fprintf(s, "%v", []Frame(st)) + st.formatSlice(s, verb) } case 's': - fmt.Fprintf(s, "%s", []Frame(st)) + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. 
+func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) } + io.WriteString(s, "]") } // stack represents a stack of program counters. diff --git a/vendor/github.com/rancher/lasso/LICENSE b/vendor/github.com/rancher/lasso/LICENSE new file mode 100644 index 0000000000..e454a52586 --- /dev/null +++ b/vendor/github.com/rancher/lasso/LICENSE @@ -0,0 +1,178 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + diff --git a/vendor/github.com/rancher/lasso/pkg/cache/cache.go b/vendor/github.com/rancher/lasso/pkg/cache/cache.go new file mode 100644 index 0000000000..5ad4184947 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/cache/cache.go @@ -0,0 +1,119 @@ +package cache + +import ( + "context" + "fmt" + "time" + + "github.com/rancher/lasso/pkg/client" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +type Options struct { + Namespace string + Resync time.Duration + TweakList TweakListOptionsFunc +} + +func NewCache(obj, listObj runtime.Object, client *client.Client, opts *Options) cache.SharedIndexInformer { + indexers := cache.Indexers{} + + if client.Namespaced { + indexers[cache.NamespaceIndex] = cache.MetaNamespaceIndexFunc + } + + opts = applyDefaultCacheOptions(opts) + + lw := &deferredListWatcher{ + client: client, + tweakList: opts.TweakList, + namespace: opts.Namespace, + listObj: listObj, + } + + return &deferredCache{ + SharedIndexInformer: cache.NewSharedIndexInformer( + lw, + obj, + opts.Resync, + indexers, + ), + deferredListWatcher: lw, + } +} + +func applyDefaultCacheOptions(opts *Options) *Options { + var newOpts Options + if opts != nil { + newOpts = *opts + } + if newOpts.Resync == 0 { + newOpts.Resync = 10 * time.Hour + } + if newOpts.TweakList == nil { + newOpts.TweakList = func(*metav1.ListOptions) {} + } + return &newOpts +} + +type deferredCache struct { + cache.SharedIndexInformer + deferredListWatcher *deferredListWatcher +} + +type deferredListWatcher struct { + lw cache.ListerWatcher + client *client.Client + tweakList TweakListOptionsFunc + namespace string + listObj runtime.Object +} + +func (d *deferredListWatcher) List(options metav1.ListOptions) (runtime.Object, error) { + if d.lw == nil { + return nil, fmt.Errorf("cache not started") + } + return d.lw.List(options) +} + +func (d 
*deferredListWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) { + if d.lw == nil { + return nil, fmt.Errorf("cache not started") + } + return d.lw.Watch(options) +} + +func (d *deferredListWatcher) run(stopCh <-chan struct{}) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-stopCh + cancel() + }() + + d.lw = &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + d.tweakList(&options) + // If ResourceVersion is set to 0 then the Limit is ignored on the API side. Usually + // that's not a problem, but with very large counts of a single object type the client will + // hit its connection timeout + if options.ResourceVersion == "0" { + options.ResourceVersion = "" + } + listObj := d.listObj.DeepCopyObject() + err := d.client.List(ctx, d.namespace, listObj, options) + return listObj, err + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + d.tweakList(&options) + return d.client.Watch(ctx, d.namespace, options) + }, + } +} + +func (d *deferredCache) Run(stopCh <-chan struct{}) { + d.deferredListWatcher.run(stopCh) + d.SharedIndexInformer.Run(stopCh) +} diff --git a/vendor/github.com/rancher/lasso/pkg/cache/sharedinformerfactory.go b/vendor/github.com/rancher/lasso/pkg/cache/sharedinformerfactory.go new file mode 100644 index 0000000000..8041dbe11d --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/cache/sharedinformerfactory.go @@ -0,0 +1,208 @@ +package cache + +import ( + "context" + "sync" + "time" + + "github.com/rancher/lasso/pkg/client" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +type TweakListOptionsFunc func(*v1.ListOptions) + +type SharedCacheFactoryOptions struct { + DefaultResync time.Duration + DefaultNamespace string + DefaultTweakList TweakListOptionsFunc + + KindResync map[schema.GroupVersionKind]time.Duration + 
KindNamespace map[schema.GroupVersionKind]string + KindTweakList map[schema.GroupVersionKind]TweakListOptionsFunc +} + +type sharedCacheFactory struct { + lock sync.Mutex + + tweakList TweakListOptionsFunc + defaultResync time.Duration + defaultNamespace string + customResync map[schema.GroupVersionKind]time.Duration + customNamespaces map[schema.GroupVersionKind]string + customTweakList map[schema.GroupVersionKind]TweakListOptionsFunc + sharedClientFactory client.SharedClientFactory + + caches map[schema.GroupVersionKind]cache.SharedIndexInformer + startedCaches map[schema.GroupVersionKind]bool +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedCachedFactory(sharedClientFactory client.SharedClientFactory, opts *SharedCacheFactoryOptions) SharedCacheFactory { + opts = applyDefaults(opts) + + factory := &sharedCacheFactory{ + lock: sync.Mutex{}, + tweakList: opts.DefaultTweakList, + defaultResync: opts.DefaultResync, + defaultNamespace: opts.DefaultNamespace, + customResync: opts.KindResync, + customNamespaces: opts.KindNamespace, + customTweakList: opts.KindTweakList, + caches: map[schema.GroupVersionKind]cache.SharedIndexInformer{}, + startedCaches: map[schema.GroupVersionKind]bool{}, + sharedClientFactory: sharedClientFactory, + } + + return factory +} + +func applyDefaults(opts *SharedCacheFactoryOptions) *SharedCacheFactoryOptions { + var newOpts SharedCacheFactoryOptions + if opts != nil { + newOpts = *opts + } + + return &newOpts +} + +func (f *sharedCacheFactory) StartGVK(ctx context.Context, gvk schema.GroupVersionKind) error { + f.lock.Lock() + defer f.lock.Unlock() + + informer, ok := f.caches[gvk] + if !ok { + return nil + } + + if !f.startedCaches[gvk] { + go informer.Run(ctx.Done()) + f.startedCaches[gvk] = true + } + + return nil +} + +func (f *sharedCacheFactory) Start(ctx context.Context) error { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, 
informer := range f.caches { + if !f.startedCaches[informerType] { + go informer.Run(ctx.Done()) + f.startedCaches[informerType] = true + } + } + + return nil +} + +func (f *sharedCacheFactory) WaitForCacheSync(ctx context.Context) map[schema.GroupVersionKind]bool { + informers := func() map[schema.GroupVersionKind]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[schema.GroupVersionKind]cache.SharedIndexInformer{} + for informerType, informer := range f.caches { + if f.startedCaches[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[schema.GroupVersionKind]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) + } + return res +} + +func (f *sharedCacheFactory) ForObject(obj runtime.Object) (cache.SharedIndexInformer, error) { + return f.ForKind(obj.GetObjectKind().GroupVersionKind()) +} + +func (f *sharedCacheFactory) ForResource(gvr schema.GroupVersionResource, namespaced bool) (cache.SharedIndexInformer, error) { + return f.ForResourceKind(gvr, "", namespaced) +} + +func (f *sharedCacheFactory) ForKind(gvk schema.GroupVersionKind) (cache.SharedIndexInformer, error) { + gvr, namespaced, err := f.sharedClientFactory.ResourceForGVK(gvk) + if err != nil { + return nil, err + } + return f.ForResourceKind(gvr, gvk.Kind, namespaced) +} + +func (f *sharedCacheFactory) ForResourceKind(gvr schema.GroupVersionResource, kind string, namespaced bool) (cache.SharedIndexInformer, error) { + var ( + gvk schema.GroupVersionKind + err error + ) + + if kind == "" { + gvk, err = f.sharedClientFactory.GVKForResource(gvr) + if err != nil { + return nil, err + } + } else { + gvk = gvr.GroupVersion().WithKind(kind) + } + + f.lock.Lock() + defer f.lock.Unlock() + + informer, ok := f.caches[gvk] + if ok { + return informer, nil + } + + resyncPeriod, ok := f.customResync[gvk] + if !ok { + resyncPeriod = f.defaultResync + } + + 
namespace, ok := f.customNamespaces[gvk] + if !ok { + namespace = f.defaultNamespace + } + + tweakList, ok := f.customTweakList[gvk] + if !ok { + tweakList = f.tweakList + } + + obj, objList, err := f.sharedClientFactory.NewObjects(gvk) + if err != nil { + return nil, err + } + + client := f.sharedClientFactory.ForResourceKind(gvr, kind, namespaced) + + cache := NewCache(obj, objList, client, &Options{ + Namespace: namespace, + Resync: resyncPeriod, + TweakList: tweakList, + }) + f.caches[gvk] = cache + + return cache, nil +} + +func (f *sharedCacheFactory) SharedClientFactory() client.SharedClientFactory { + return f.sharedClientFactory +} + +type SharedCacheFactory interface { + Start(ctx context.Context) error + StartGVK(ctx context.Context, gvk schema.GroupVersionKind) error + ForObject(obj runtime.Object) (cache.SharedIndexInformer, error) + ForKind(gvk schema.GroupVersionKind) (cache.SharedIndexInformer, error) + ForResource(gvr schema.GroupVersionResource, namespaced bool) (cache.SharedIndexInformer, error) + ForResourceKind(gvr schema.GroupVersionResource, kind string, namespaced bool) (cache.SharedIndexInformer, error) + WaitForCacheSync(ctx context.Context) map[schema.GroupVersionKind]bool + SharedClientFactory() client.SharedClientFactory +} diff --git a/vendor/github.com/rancher/lasso/pkg/client/client.go b/vendor/github.com/rancher/lasso/pkg/client/client.go new file mode 100644 index 0000000000..8d1d7b5e5f --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/client/client.go @@ -0,0 +1,280 @@ +package client + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +type Client struct { + RESTClient rest.Interface + timeout time.Duration + Namespaced bool + GVR schema.GroupVersionResource + resource string + 
prefix []string + apiVersion string + kind string +} + +func IsNamespaced(gvr schema.GroupVersionResource, mapper meta.RESTMapper) (bool, error) { + kind, err := mapper.KindFor(gvr) + if err != nil { + return false, err + } + + mapping, err := mapper.RESTMapping(kind.GroupKind(), kind.Version) + if err != nil { + return false, err + } + + return mapping.Scope.Name() == meta.RESTScopeNameNamespace, nil +} + +func NewClient(gvr schema.GroupVersionResource, kind string, namespaced bool, client rest.Interface, defaultTimeout time.Duration) *Client { + var ( + prefix []string + ) + + if gvr.Group == "" { + prefix = []string{ + "api", + gvr.Version, + } + } else { + prefix = []string{ + "apis", + gvr.Group, + gvr.Version, + } + } + + c := &Client{ + RESTClient: client, + timeout: defaultTimeout, + Namespaced: namespaced, + GVR: gvr, + prefix: prefix, + resource: gvr.Resource, + } + c.apiVersion, c.kind = gvr.GroupVersion().WithKind(kind).ToAPIVersionAndKind() + return c +} + +func noop() {} + +func (c *Client) setupCtx(ctx context.Context, minTimeout time.Duration) (context.Context, func()) { + if minTimeout == 0 && c.timeout == 0 { + return ctx, noop + } + + timeout := c.timeout + if minTimeout > 0 && timeout < minTimeout { + timeout = minTimeout + } + + return context.WithTimeout(ctx, timeout) +} + +func (c *Client) Get(ctx context.Context, namespace, name string, result runtime.Object, options metav1.GetOptions) (err error) { + defer c.setKind(result) + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + err = c.RESTClient.Get(). + Prefix(c.prefix...). + NamespaceIfScoped(namespace, c.Namespaced). + Resource(c.resource). + Name(name). + VersionedParams(&options, metav1.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +func (c *Client) List(ctx context.Context, namespace string, result runtime.Object, opts metav1.ListOptions) (err error) { + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + r := c.RESTClient.Get() + if namespace != "" { + r = r.NamespaceIfScoped(namespace, c.Namespaced) + } + err = r.Resource(c.resource). + Prefix(c.prefix...). + VersionedParams(&opts, metav1.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +func (c *Client) Watch(ctx context.Context, namespace string, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.injectKind(c.RESTClient.Get(). + Prefix(c.prefix...). + NamespaceIfScoped(namespace, c.Namespaced). + Resource(c.resource). + VersionedParams(&opts, metav1.ParameterCodec). + Timeout(timeout). + Watch(ctx)) +} + +func (c *Client) Create(ctx context.Context, namespace string, obj, result runtime.Object, opts metav1.CreateOptions) (err error) { + defer c.setKind(result) + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + err = c.RESTClient.Post(). + Prefix(c.prefix...). + NamespaceIfScoped(namespace, c.Namespaced). + Resource(c.resource). + VersionedParams(&opts, metav1.ParameterCodec). + Body(obj). + Do(ctx). + Into(result) + return +} + +func (c *Client) Update(ctx context.Context, namespace string, obj, result runtime.Object, opts metav1.UpdateOptions) (err error) { + defer c.setKind(result) + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + m, err := meta.Accessor(obj) + if err != nil { + return err + } + err = c.RESTClient.Put(). + Prefix(c.prefix...). + NamespaceIfScoped(namespace, c.Namespaced). + Resource(c.resource). + Name(m.GetName()). + VersionedParams(&opts, metav1.ParameterCodec). 
+ Body(obj). + Do(ctx). + Into(result) + return +} + +func (c *Client) UpdateStatus(ctx context.Context, namespace string, obj, result runtime.Object, opts metav1.UpdateOptions) (err error) { + defer c.setKind(result) + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + m, err := meta.Accessor(obj) + if err != nil { + return err + } + err = c.RESTClient.Put(). + Prefix(c.prefix...). + NamespaceIfScoped(namespace, c.Namespaced). + Resource(c.resource). + Name(m.GetName()). + SubResource("status"). + VersionedParams(&opts, metav1.ParameterCodec). + Body(obj). + Do(ctx). + Into(result) + return +} + +func (c *Client) Delete(ctx context.Context, namespace, name string, opts metav1.DeleteOptions) error { + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + return c.RESTClient.Delete(). + Prefix(c.prefix...). + NamespaceIfScoped(namespace, c.Namespaced). + Resource(c.resource). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +func (c *Client) DeleteCollection(ctx context.Context, namespace string, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.RESTClient.Delete(). + Prefix(c.prefix...). + NamespaceIfScoped(namespace, c.Namespaced). + Resource(c.resource). + VersionedParams(&listOpts, metav1.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +func (c *Client) Patch(ctx context.Context, namespace, name string, pt types.PatchType, data []byte, result runtime.Object, opts metav1.PatchOptions, subresources ...string) (err error) { + defer c.setKind(result) + ctx, cancel := c.setupCtx(ctx, 0) + defer cancel() + err = c.RESTClient.Patch(pt). + Prefix(c.prefix...). + Namespace(namespace). + Resource(c.resource). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, metav1.ParameterCodec). + Body(data). 
+ Do(ctx). + Into(result) + return +} + +func (c *Client) setKind(obj runtime.Object) { + if c.kind == "" { + return + } + if _, ok := obj.(*metav1.Status); !ok { + if meta, err := meta.TypeAccessor(obj); err == nil { + meta.SetKind(c.kind) + meta.SetAPIVersion(c.apiVersion) + } + } +} + +func (c *Client) injectKind(w watch.Interface, err error) (watch.Interface, error) { + if c.kind == "" || err != nil { + return w, err + } + + eventChan := make(chan watch.Event) + + go func() { + defer close(eventChan) + for event := range w.ResultChan() { + c.setKind(event.Object) + eventChan <- event + } + }() + + return &watcher{ + Interface: w, + eventChan: eventChan, + }, nil +} + +type watcher struct { + watch.Interface + eventChan chan watch.Event +} + +func (w *watcher) ResultChan() <-chan watch.Event { + return w.eventChan +} diff --git a/vendor/github.com/rancher/lasso/pkg/client/sharedclientfactory.go b/vendor/github.com/rancher/lasso/pkg/client/sharedclientfactory.go new file mode 100644 index 0000000000..9b68783d39 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/client/sharedclientfactory.go @@ -0,0 +1,183 @@ +package client + +import ( + "fmt" + "sync" + "time" + + "github.com/rancher/lasso/pkg/mapper" + "github.com/rancher/lasso/pkg/scheme" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" +) + +type SharedClientFactoryOptions struct { + Mapper meta.RESTMapper + Scheme *runtime.Scheme +} + +type SharedClientFactory interface { + ForKind(gvk schema.GroupVersionKind) (*Client, error) + ForResource(gvr schema.GroupVersionResource, namespaced bool) (*Client, error) + ForResourceKind(gvr schema.GroupVersionResource, kind string, namespaced bool) *Client + NewObjects(gvk schema.GroupVersionKind) (runtime.Object, runtime.Object, error) + GVKForObject(obj runtime.Object) (schema.GroupVersionKind, error) + GVKForResource(gvr 
schema.GroupVersionResource) (schema.GroupVersionKind, error) + ResourceForGVK(gvk schema.GroupVersionKind) (schema.GroupVersionResource, bool, error) +} + +type sharedClientFactory struct { + createLock sync.RWMutex + clients map[schema.GroupVersionResource]*Client + timeout time.Duration + rest rest.Interface + + Mapper meta.RESTMapper + Scheme *runtime.Scheme +} + +func NewSharedClientFactoryForConfig(config *rest.Config) (SharedClientFactory, error) { + return NewSharedClientFactory(config, nil) +} + +func NewSharedClientFactory(config *rest.Config, opts *SharedClientFactoryOptions) (_ SharedClientFactory, err error) { + opts, err = applyDefaults(config, opts) + if err != nil { + return nil, err + } + + config, timeout := populateConfig(opts.Scheme, config) + rest, err := rest.UnversionedRESTClientFor(config) + if err != nil { + return nil, err + } + + return &sharedClientFactory{ + timeout: timeout, + clients: map[schema.GroupVersionResource]*Client{}, + Scheme: opts.Scheme, + Mapper: opts.Mapper, + rest: rest, + }, nil +} + +func applyDefaults(config *rest.Config, opts *SharedClientFactoryOptions) (*SharedClientFactoryOptions, error) { + var newOpts SharedClientFactoryOptions + if opts != nil { + newOpts = *opts + } + + if newOpts.Scheme == nil { + newOpts.Scheme = scheme.All + } + + if newOpts.Mapper == nil { + mapperOpt, err := mapper.New(config) + if err != nil { + return nil, err + } + newOpts.Mapper = mapperOpt + } + + return &newOpts, nil +} + +func (s *sharedClientFactory) GVKForResource(gvr schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return s.Mapper.KindFor(gvr) +} + +func (s *sharedClientFactory) ResourceForGVK(gvk schema.GroupVersionKind) (schema.GroupVersionResource, bool, error) { + mapping, err := s.Mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return schema.GroupVersionResource{}, false, err + } + + nsed, err := IsNamespaced(mapping.Resource, s.Mapper) + if err != nil { + return 
schema.GroupVersionResource{}, false, err + } + + return mapping.Resource, nsed, nil +} + +func (s *sharedClientFactory) GVKForObject(obj runtime.Object) (schema.GroupVersionKind, error) { + gvks, _, err := s.Scheme.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(gvks) == 0 { + return schema.GroupVersionKind{}, fmt.Errorf("failed to find schema.GroupVersionKind for %T", obj) + } + return gvks[0], nil +} + +func (s *sharedClientFactory) NewObjects(gvk schema.GroupVersionKind) (runtime.Object, runtime.Object, error) { + obj, err := s.Scheme.New(gvk) + if err != nil { + return nil, nil, err + } + + objList, err := s.Scheme.New(schema.GroupVersionKind{ + Group: gvk.Group, + Version: gvk.Version, + Kind: gvk.Kind + "List", + }) + return obj, objList, err +} + +func (s *sharedClientFactory) ForKind(gvk schema.GroupVersionKind) (*Client, error) { + gvr, nsed, err := s.ResourceForGVK(gvk) + if err != nil { + return nil, err + } + + return s.ForResourceKind(gvr, gvk.Kind, nsed), nil +} + +func (s *sharedClientFactory) ForResource(gvr schema.GroupVersionResource, namespaced bool) (*Client, error) { + gvk, err := s.GVKForResource(gvr) + if err != nil { + return nil, err + } + return s.ForResourceKind(gvr, gvk.Kind, namespaced), nil +} + +func (s *sharedClientFactory) ForResourceKind(gvr schema.GroupVersionResource, kind string, namespaced bool) *Client { + client := s.getClient(gvr) + if client != nil { + return client + } + + s.createLock.Lock() + defer s.createLock.Unlock() + + client = s.clients[gvr] + if client != nil { + return client + } + + client = NewClient(gvr, kind, namespaced, s.rest, s.timeout) + + s.clients[gvr] = client + return client +} + +func (s *sharedClientFactory) getClient(gvr schema.GroupVersionResource) *Client { + s.createLock.RLock() + defer s.createLock.RUnlock() + return s.clients[gvr] +} + +func populateConfig(scheme *runtime.Scheme, config *rest.Config) (*rest.Config, time.Duration) { + config = 
rest.CopyConfig(config) + config.NegotiatedSerializer = serializer.NewCodecFactory(scheme).WithoutConversion() + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + timeout := config.Timeout + config.Timeout = 0 + return config, timeout +} diff --git a/vendor/github.com/rancher/lasso/pkg/controller/controller.go b/vendor/github.com/rancher/lasso/pkg/controller/controller.go new file mode 100644 index 0000000000..9dc2786778 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/controller/controller.go @@ -0,0 +1,287 @@ +package controller + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/rancher/lasso/pkg/log" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +type Handler interface { + OnChange(key string, obj runtime.Object) error +} + +type HandlerFunc func(key string, obj runtime.Object) error + +func (h HandlerFunc) OnChange(key string, obj runtime.Object) error { + return h(key, obj) +} + +type Controller interface { + Enqueue(namespace, name string) + EnqueueAfter(namespace, name string, delay time.Duration) + EnqueueKey(key string) + Informer() cache.SharedIndexInformer + Start(ctx context.Context, workers int) error +} + +type controller struct { + startLock sync.Mutex + + name string + workqueue workqueue.RateLimitingInterface + rateLimiter workqueue.RateLimiter + informer cache.SharedIndexInformer + handler Handler + gvk schema.GroupVersionKind + startKeys []startKey + started bool + startCache func(context.Context) error +} + +type startKey struct { + key string + after time.Duration +} + +type Options struct { + RateLimiter workqueue.RateLimiter +} + +func New(name string, informer cache.SharedIndexInformer, startCache func(context.Context) error, 
handler Handler, opts *Options) Controller { + opts = applyDefaultOptions(opts) + + controller := &controller{ + name: name, + handler: handler, + informer: informer, + rateLimiter: opts.RateLimiter, + startCache: startCache, + } + + informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: controller.handleObject, + UpdateFunc: func(old, new interface{}) { + controller.handleObject(new) + }, + DeleteFunc: controller.handleObject, + }) + + return controller +} + +func applyDefaultOptions(opts *Options) *Options { + var newOpts Options + if opts != nil { + newOpts = *opts + } + if newOpts.RateLimiter == nil { + newOpts.RateLimiter = workqueue.DefaultControllerRateLimiter() + } + return &newOpts +} + +func (c *controller) Informer() cache.SharedIndexInformer { + return c.informer +} + +func (c *controller) GroupVersionKind() schema.GroupVersionKind { + return c.gvk +} + +func (c *controller) run(workers int, stopCh <-chan struct{}) { + c.startLock.Lock() + // we have to defer queue creation until we have a stopCh available because a workqueue + // will create a goroutine under the hood. If we instantiate a workqueue we must have + // a mechanism to shut it down. 
Without the stopCh we don't know when to shutdown + // the queue and release the goroutine + c.workqueue = workqueue.NewNamedRateLimitingQueue(c.rateLimiter, c.name) + for _, start := range c.startKeys { + if start.after == 0 { + c.workqueue.Add(start.key) + } else { + c.workqueue.AddAfter(start.key, start.after) + } + } + c.startKeys = nil + c.startLock.Unlock() + + defer utilruntime.HandleCrash() + defer func() { + c.workqueue.ShutDown() + }() + + // Start the informer factories to begin populating the informer caches + log.Infof("Starting %s controller", c.name) + + // Launch two workers to process Foo resources + for i := 0; i < workers; i++ { + go wait.Until(c.runWorker, time.Second, stopCh) + } + + <-stopCh + log.Infof("Shutting down %s workers", c.name) +} + +func (c *controller) Start(ctx context.Context, workers int) error { + c.startLock.Lock() + defer c.startLock.Unlock() + + if c.started { + return nil + } + + if err := c.startCache(ctx); err != nil { + return err + } + + if ok := cache.WaitForCacheSync(ctx.Done(), c.informer.HasSynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + go c.run(workers, ctx.Done()) + c.started = true + return nil +} + +func (c *controller) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *controller) processNextWorkItem() bool { + obj, shutdown := c.workqueue.Get() + + if shutdown { + return false + } + + if err := c.processSingleItem(obj); err != nil { + if !strings.Contains(err.Error(), "please apply your changes to the latest version and try again") { + log.Errorf("%v", err) + } + return true + } + + return true +} + +func (c *controller) processSingleItem(obj interface{}) error { + var ( + key string + ok bool + ) + + defer c.workqueue.Done(obj) + + if key, ok = obj.(string); !ok { + c.workqueue.Forget(obj) + log.Errorf("expected string in workqueue but got %#v", obj) + return nil + } + if err := c.syncHandler(key); err != nil { + c.workqueue.AddRateLimited(key) + return 
fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) + } + + c.workqueue.Forget(obj) + return nil +} + +func (c *controller) syncHandler(key string) error { + obj, exists, err := c.informer.GetStore().GetByKey(key) + if err != nil { + return err + } + if !exists { + return c.handler.OnChange(key, nil) + } + + return c.handler.OnChange(key, obj.(runtime.Object)) +} + +func (c *controller) EnqueueKey(key string) { + c.startLock.Lock() + defer c.startLock.Unlock() + + if c.workqueue == nil { + c.startKeys = append(c.startKeys, startKey{key: key}) + } else { + c.workqueue.AddRateLimited(key) + } +} + +func (c *controller) Enqueue(namespace, name string) { + key := keyFunc(namespace, name) + + c.startLock.Lock() + defer c.startLock.Unlock() + + if c.workqueue == nil { + c.startKeys = append(c.startKeys, startKey{key: key}) + } else { + c.workqueue.AddRateLimited(key) + } +} + +func (c *controller) EnqueueAfter(namespace, name string, duration time.Duration) { + key := keyFunc(namespace, name) + + c.startLock.Lock() + defer c.startLock.Unlock() + + if c.workqueue == nil { + c.startKeys = append(c.startKeys, startKey{key: key, after: duration}) + } else { + c.workqueue.AddAfter(key, duration) + } +} + +func keyFunc(namespace, name string) string { + if namespace == "" { + return name + } + return namespace + "/" + name +} + +func (c *controller) enqueue(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + log.Errorf("%v", err) + return + } + c.startLock.Lock() + if c.workqueue == nil { + c.startKeys = append(c.startKeys, startKey{key: key}) + } else { + c.workqueue.Add(key) + } + c.startLock.Unlock() +} + +func (c *controller) handleObject(obj interface{}) { + if _, ok := obj.(metav1.Object); !ok { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + log.Errorf("error decoding object, invalid type") + return + } + _, ok = tombstone.Obj.(metav1.Object) + if !ok { + log.Errorf("error 
decoding object tombstone, invalid type") + return + } + } + c.enqueue(obj) +} diff --git a/vendor/github.com/rancher/lasso/pkg/controller/errorcontroller.go b/vendor/github.com/rancher/lasso/pkg/controller/errorcontroller.go new file mode 100644 index 0000000000..f868eb1f32 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/controller/errorcontroller.go @@ -0,0 +1,35 @@ +package controller + +import ( + "context" + "time" + + "k8s.io/client-go/tools/cache" +) + +type errorController struct { + informer cache.SharedIndexInformer +} + +func newErrorController() *errorController { + return &errorController{ + informer: cache.NewSharedIndexInformer(nil, nil, 0, cache.Indexers{}), + } +} + +func (n *errorController) Enqueue(namespace, name string) { +} + +func (n *errorController) EnqueueAfter(namespace, name string, delay time.Duration) { +} + +func (n *errorController) EnqueueKey(key string) { +} + +func (n *errorController) Informer() cache.SharedIndexInformer { + return n.informer +} + +func (n *errorController) Start(ctx context.Context, workers int) error { + return nil +} diff --git a/vendor/github.com/rancher/lasso/pkg/controller/sharedcontroller.go b/vendor/github.com/rancher/lasso/pkg/controller/sharedcontroller.go new file mode 100644 index 0000000000..9f90b88b93 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/controller/sharedcontroller.go @@ -0,0 +1,122 @@ +package controller + +import ( + "context" + "sync" + "time" + + "github.com/rancher/lasso/pkg/cache" + "github.com/rancher/lasso/pkg/client" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + cachetools "k8s.io/client-go/tools/cache" +) + +type SharedControllerHandler interface { + OnChange(key string, obj runtime.Object) (runtime.Object, error) +} + +type SharedController interface { + Controller + + RegisterHandler(ctx context.Context, name string, handler SharedControllerHandler) + Client() *client.Client +} + +type SharedControllerHandlerFunc func(key 
string, obj runtime.Object) (runtime.Object, error) + +func (s SharedControllerHandlerFunc) OnChange(key string, obj runtime.Object) (runtime.Object, error) { + return s(key, obj) +} + +type sharedController struct { + // this allows one to create a sharedcontroller but it will not actually be started + // unless some aspect of the controllers informer is accessed or needed to be used + deferredController func() (Controller, error) + sharedCacheFactory cache.SharedCacheFactory + controller Controller + gvk schema.GroupVersionKind + handler *sharedHandler + startLock sync.Mutex + started bool + startError error + client *client.Client +} + +func (s *sharedController) Enqueue(namespace, name string) { + s.initController().Enqueue(namespace, name) +} + +func (s *sharedController) EnqueueAfter(namespace, name string, delay time.Duration) { + s.initController().EnqueueAfter(namespace, name, delay) +} + +func (s *sharedController) EnqueueKey(key string) { + s.initController().EnqueueKey(key) +} + +func (s *sharedController) Informer() cachetools.SharedIndexInformer { + return s.initController().Informer() +} + +func (s *sharedController) Client() *client.Client { + return s.client +} + +func (s *sharedController) initController() Controller { + s.startLock.Lock() + defer s.startLock.Unlock() + + if s.controller != nil { + return s.controller + } + + controller, err := s.deferredController() + if err != nil { + controller = newErrorController() + } + + s.startError = err + s.controller = controller + return s.controller +} + +func (s *sharedController) Start(ctx context.Context, workers int) error { + s.startLock.Lock() + defer s.startLock.Unlock() + + if s.startError != nil || s.controller == nil { + return s.startError + } + + if err := s.controller.Start(ctx, workers); err != nil { + return err + } + s.started = true + + go func() { + <-ctx.Done() + s.startLock.Lock() + defer s.startLock.Unlock() + s.started = false + }() + + return nil +} + +func (s *sharedController) 
RegisterHandler(ctx context.Context, name string, handler SharedControllerHandler) { + // Ensure that controller is initialized + c := s.initController() + + getHandlerTransaction(ctx).do(func() { + s.handler.Register(ctx, name, handler) + + s.startLock.Lock() + defer s.startLock.Unlock() + if s.started { + for _, key := range c.Informer().GetStore().ListKeys() { + c.EnqueueKey(key) + } + } + }) +} diff --git a/vendor/github.com/rancher/lasso/pkg/controller/sharedcontrollerfactory.go b/vendor/github.com/rancher/lasso/pkg/controller/sharedcontrollerfactory.go new file mode 100644 index 0000000000..cef64f503d --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/controller/sharedcontrollerfactory.go @@ -0,0 +1,210 @@ +package controller + +import ( + "context" + "sync" + + "github.com/rancher/lasso/pkg/cache" + "github.com/rancher/lasso/pkg/client" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" +) + +type SharedControllerFactory interface { + ForObject(obj runtime.Object) (SharedController, error) + ForKind(gvk schema.GroupVersionKind) (SharedController, error) + ForResource(gvr schema.GroupVersionResource, namespaced bool) SharedController + ForResourceKind(gvr schema.GroupVersionResource, kind string, namespaced bool) SharedController + SharedCacheFactory() cache.SharedCacheFactory + Start(ctx context.Context, workers int) error +} + +type SharedControllerFactoryOptions struct { + DefaultRateLimiter workqueue.RateLimiter + DefaultWorkers int + + KindRateLimiter map[schema.GroupVersionKind]workqueue.RateLimiter + KindWorkers map[schema.GroupVersionKind]int +} + +type sharedControllerFactory struct { + controllerLock sync.RWMutex + + sharedCacheFactory cache.SharedCacheFactory + controllers map[schema.GroupVersionResource]*sharedController + + rateLimiter workqueue.RateLimiter + workers int + kindRateLimiter 
map[schema.GroupVersionKind]workqueue.RateLimiter + kindWorkers map[schema.GroupVersionKind]int +} + +func NewSharedControllerFactoryFromConfig(config *rest.Config, scheme *runtime.Scheme) (SharedControllerFactory, error) { + cf, err := client.NewSharedClientFactory(config, &client.SharedClientFactoryOptions{ + Scheme: scheme, + }) + if err != nil { + return nil, err + } + return NewSharedControllerFactory(cache.NewSharedCachedFactory(cf, nil), nil), nil +} + +func NewSharedControllerFactory(cacheFactory cache.SharedCacheFactory, opts *SharedControllerFactoryOptions) SharedControllerFactory { + opts = applyDefaultSharedOptions(opts) + return &sharedControllerFactory{ + sharedCacheFactory: cacheFactory, + controllers: map[schema.GroupVersionResource]*sharedController{}, + workers: opts.DefaultWorkers, + kindWorkers: opts.KindWorkers, + rateLimiter: opts.DefaultRateLimiter, + kindRateLimiter: opts.KindRateLimiter, + } +} + +func applyDefaultSharedOptions(opts *SharedControllerFactoryOptions) *SharedControllerFactoryOptions { + var newOpts SharedControllerFactoryOptions + if opts != nil { + newOpts = *opts + } + if newOpts.DefaultWorkers == 0 { + newOpts.DefaultWorkers = 5 + } + return &newOpts +} + +func (s *sharedControllerFactory) Start(ctx context.Context, defaultWorkers int) error { + s.controllerLock.Lock() + defer s.controllerLock.Unlock() + + if err := s.sharedCacheFactory.Start(ctx); err != nil { + return err + } + + s.sharedCacheFactory.WaitForCacheSync(ctx) + + for gvr, controller := range s.controllers { + w, err := s.getWorkers(gvr, defaultWorkers) + if err != nil { + return err + } + if err := controller.Start(ctx, w); err != nil { + return err + } + } + + return nil +} + +func (s *sharedControllerFactory) ForObject(obj runtime.Object) (SharedController, error) { + gvk, err := s.sharedCacheFactory.SharedClientFactory().GVKForObject(obj) + if err != nil { + return nil, err + } + return s.ForKind(gvk) +} + +func (s *sharedControllerFactory) ForKind(gvk 
schema.GroupVersionKind) (SharedController, error) { + gvr, nsed, err := s.sharedCacheFactory.SharedClientFactory().ResourceForGVK(gvk) + if err != nil { + return nil, err + } + + return s.ForResourceKind(gvr, gvk.Kind, nsed), nil +} + +func (s *sharedControllerFactory) ForResource(gvr schema.GroupVersionResource, namespaced bool) SharedController { + return s.ForResourceKind(gvr, "", namespaced) +} + +func (s *sharedControllerFactory) ForResourceKind(gvr schema.GroupVersionResource, kind string, namespaced bool) SharedController { + controllerResult := s.byResource(gvr) + if controllerResult != nil { + return controllerResult + } + + s.controllerLock.Lock() + defer s.controllerLock.Unlock() + + controllerResult = s.controllers[gvr] + if controllerResult != nil { + return controllerResult + } + + client := s.sharedCacheFactory.SharedClientFactory().ForResourceKind(gvr, kind, namespaced) + + handler := &sharedHandler{} + + controllerResult = &sharedController{ + deferredController: func() (Controller, error) { + var ( + gvk schema.GroupVersionKind + err error + ) + + if kind == "" { + gvk, err = s.sharedCacheFactory.SharedClientFactory().GVKForResource(gvr) + if err != nil { + return nil, err + } + } else { + gvk = gvr.GroupVersion().WithKind(kind) + } + + cache, err := s.sharedCacheFactory.ForResourceKind(gvr, kind, namespaced) + if err != nil { + return nil, err + } + + rateLimiter, ok := s.kindRateLimiter[gvk] + if !ok { + rateLimiter = s.rateLimiter + } + + starter := func(ctx context.Context) error { + return s.sharedCacheFactory.StartGVK(ctx, gvk) + } + + c := New(gvk.String(), cache, starter, handler, &Options{ + RateLimiter: rateLimiter, + }) + + return c, err + }, + handler: handler, + client: client, + } + + s.controllers[gvr] = controllerResult + return controllerResult +} + +func (s *sharedControllerFactory) getWorkers(gvr schema.GroupVersionResource, workers int) (int, error) { + gvk, err := s.sharedCacheFactory.SharedClientFactory().GVKForResource(gvr) 
+ if meta.IsNoMatchError(err) { + return workers, nil + } else if err != nil { + return 0, err + } + + w, ok := s.kindWorkers[gvk] + if ok { + return w, nil + } + if workers > 0 { + return workers, nil + } + return s.workers, nil +} + +func (s *sharedControllerFactory) byResource(gvr schema.GroupVersionResource) *sharedController { + s.controllerLock.RLock() + defer s.controllerLock.RUnlock() + return s.controllers[gvr] +} + +func (s *sharedControllerFactory) SharedCacheFactory() cache.SharedCacheFactory { + return s.sharedCacheFactory +} diff --git a/vendor/github.com/rancher/lasso/pkg/controller/sharedhandler.go b/vendor/github.com/rancher/lasso/pkg/controller/sharedhandler.go new file mode 100644 index 0000000000..a365eccf81 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/controller/sharedhandler.go @@ -0,0 +1,122 @@ +package controller + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + ErrIgnore = errors.New("ignore handler error") +) + +type handlerEntry struct { + id int64 + name string + handler SharedControllerHandler +} + +type sharedHandler struct { + // keep first because arm32 needs atomic.AddInt64 target to be mem aligned + idCounter int64 + + lock sync.Mutex + handlers []handlerEntry +} + +func (h *sharedHandler) Register(ctx context.Context, name string, handler SharedControllerHandler) { + h.lock.Lock() + defer h.lock.Unlock() + + id := atomic.AddInt64(&h.idCounter, 1) + h.handlers = append(h.handlers, handlerEntry{ + id: id, + name: name, + handler: handler, + }) + + go func() { + <-ctx.Done() + + h.lock.Lock() + defer h.lock.Unlock() + + for i := range h.handlers { + if h.handlers[i].id == id { + h.handlers = append(h.handlers[:i], h.handlers[i+1:]...) 
+ break + } + } + }() +} + +func (h *sharedHandler) OnChange(key string, obj runtime.Object) error { + var ( + errs errorList + ) + + for _, handler := range h.handlers { + newObj, err := handler.handler.OnChange(key, obj) + if err != nil && !errors.Is(err, ErrIgnore) { + errs = append(errs, &handlerError{ + HandlerName: handler.name, + Err: err, + }) + } + if newObj != nil && !reflect.ValueOf(newObj).IsNil() { + obj = newObj + } + } + + return errs.ToErr() +} + +type errorList []error + +func (e errorList) Error() string { + buf := strings.Builder{} + for _, err := range e { + if buf.Len() > 0 { + buf.WriteString(", ") + } + buf.WriteString(err.Error()) + } + return buf.String() +} + +func (e errorList) ToErr() error { + switch len(e) { + case 0: + return nil + case 1: + return e[0] + default: + return e + } +} + +func (e errorList) Cause() error { + if len(e) > 0 { + return e[0] + } + return nil +} + +type handlerError struct { + HandlerName string + Err error +} + +func (h handlerError) Error() string { + return fmt.Sprintf("handler %s: %v", h.HandlerName, h.Err) +} + +func (h handlerError) Cause() error { + return h.Err +} diff --git a/vendor/github.com/rancher/wrangler/pkg/generic/transaction.go b/vendor/github.com/rancher/lasso/pkg/controller/transaction.go similarity index 62% rename from vendor/github.com/rancher/wrangler/pkg/generic/transaction.go rename to vendor/github.com/rancher/lasso/pkg/controller/transaction.go index 23b9847d8d..e00f28a5e7 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generic/transaction.go +++ b/vendor/github.com/rancher/lasso/pkg/controller/transaction.go @@ -1,40 +1,52 @@ -package generic +package controller import ( "context" + "sync" ) type hTransactionKey struct{} type HandlerTransaction struct { context.Context + + lock sync.Mutex parent context.Context - done chan struct{} - result bool + todo []func() } -func (h *HandlerTransaction) shouldContinue() bool { - select { - case <-h.parent.Done(): - return false - case 
<-h.done: - return h.result +func (h *HandlerTransaction) do(f func()) { + if h == nil { + f() + return } + + h.lock.Lock() + defer h.lock.Unlock() + + h.todo = append(h.todo, f) } func (h *HandlerTransaction) Commit() { - h.result = true - close(h.done) + h.lock.Lock() + fs := h.todo + h.todo = nil + h.lock.Unlock() + + for _, f := range fs { + f() + } } func (h *HandlerTransaction) Rollback() { - close(h.done) + h.lock.Lock() + h.todo = nil + h.lock.Unlock() } func NewHandlerTransaction(ctx context.Context) *HandlerTransaction { ht := &HandlerTransaction{ parent: ctx, - done: make(chan struct{}), } ctx = context.WithValue(ctx, hTransactionKey{}, ht) ht.Context = ctx diff --git a/vendor/github.com/rancher/lasso/pkg/log/log.go b/vendor/github.com/rancher/lasso/pkg/log/log.go new file mode 100644 index 0000000000..a8725e4532 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/log/log.go @@ -0,0 +1,13 @@ +package log + +import "log" + +var ( + // Stupid log abstraction + Infof = func(message string, obj ...interface{}) { + log.Printf("INFO: "+message+"\n", obj...) + } + Errorf = func(message string, obj ...interface{}) { + log.Printf("ERROR: "+message+"\n", obj...) 
+ } +) diff --git a/vendor/github.com/rancher/lasso/pkg/mapper/mapper.go b/vendor/github.com/rancher/lasso/pkg/mapper/mapper.go new file mode 100644 index 0000000000..3466582484 --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/mapper/mapper.go @@ -0,0 +1,90 @@ +package mapper + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/discovery/cached/memory" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +func New(config *rest.Config) (meta.RESTMapper, error) { + d, err := discovery.NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + cached := memory.NewMemCacheClient(d) + return &retryMapper{ + target: restmapper.NewDeferredDiscoveryRESTMapper(cached), + cache: cached, + }, nil +} + +type retryMapper struct { + target meta.RESTMapper + cache discovery.CachedDiscoveryInterface +} + +func (r *retryMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + result, err := r.target.KindFor(resource) + if err != nil { + r.cache.Invalidate() + return r.target.KindFor(resource) + } + return result, err +} + +func (r *retryMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + result, err := r.target.KindsFor(resource) + if err != nil { + r.cache.Invalidate() + return r.target.KindsFor(resource) + } + return result, err +} + +func (r *retryMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + result, err := r.target.ResourceFor(input) + if err != nil { + r.cache.Invalidate() + return r.target.ResourceFor(input) + } + return result, err +} + +func (r *retryMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + result, err := r.target.ResourcesFor(input) + if err != nil { + r.cache.Invalidate() + return r.target.ResourcesFor(input) + } + return result, err +} + +func (r *retryMapper) 
RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + result, err := r.target.RESTMapping(gk, versions...) + if err != nil { + r.cache.Invalidate() + return r.target.RESTMapping(gk, versions...) + } + return result, err +} + +func (r *retryMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + result, err := r.target.RESTMappings(gk, versions...) + if err != nil { + r.cache.Invalidate() + return r.target.RESTMappings(gk, versions...) + } + return result, err +} + +func (r *retryMapper) ResourceSingularizer(resource string) (singular string, err error) { + result, err := r.target.ResourceSingularizer(resource) + if err != nil { + r.cache.Invalidate() + return r.target.ResourceSingularizer(resource) + } + return result, err +} diff --git a/vendor/github.com/rancher/lasso/pkg/scheme/all.go b/vendor/github.com/rancher/lasso/pkg/scheme/all.go new file mode 100644 index 0000000000..119766c4ce --- /dev/null +++ b/vendor/github.com/rancher/lasso/pkg/scheme/all.go @@ -0,0 +1,5 @@ +package scheme + +import "k8s.io/apimachinery/pkg/runtime" + +var All = runtime.NewScheme() diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/apply.go b/vendor/github.com/rancher/wrangler/pkg/apply/apply.go new file mode 100644 index 0000000000..3fec8dcad3 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/apply.go @@ -0,0 +1,293 @@ +package apply + +import ( + "context" + "fmt" + "sync" + + "github.com/rancher/wrangler/pkg/apply/injectors" + "github.com/rancher/wrangler/pkg/objectset" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const ( + defaultNamespace = "default" +) + +type Patcher func(namespace, name string, pt types.PatchType, data []byte) (runtime.Object, error) + +// return 
false if the Reconciler did not handler this object +type Reconciler func(oldObj runtime.Object, newObj runtime.Object) (bool, error) + +type ClientFactory func(gvr schema.GroupVersionResource) (dynamic.NamespaceableResourceInterface, error) + +type InformerFactory interface { + Get(gvk schema.GroupVersionKind, gvr schema.GroupVersionResource) (cache.SharedIndexInformer, error) +} + +type InformerGetter interface { + Informer() cache.SharedIndexInformer + GroupVersionKind() schema.GroupVersionKind +} + +type PatchByGVK map[schema.GroupVersionKind]map[objectset.ObjectKey]string + +func (p PatchByGVK) Add(gvk schema.GroupVersionKind, namespace, name, patch string) { + d, ok := p[gvk] + if !ok { + d = map[objectset.ObjectKey]string{} + p[gvk] = d + } + d[objectset.ObjectKey{ + Name: name, + Namespace: namespace, + }] = patch +} + +type Plan struct { + Create objectset.ObjectKeyByGVK + Delete objectset.ObjectKeyByGVK + Update PatchByGVK + Objects []runtime.Object +} + +type Apply interface { + Apply(set *objectset.ObjectSet) error + ApplyObjects(objs ...runtime.Object) error + WithContext(ctx context.Context) Apply + WithCacheTypes(igs ...InformerGetter) Apply + WithCacheTypeFactory(factory InformerFactory) Apply + WithSetID(id string) Apply + WithOwner(obj runtime.Object) Apply + WithOwnerKey(key string, gvk schema.GroupVersionKind) Apply + WithInjector(injs ...injectors.ConfigInjector) Apply + WithInjectorName(injs ...string) Apply + WithPatcher(gvk schema.GroupVersionKind, patchers Patcher) Apply + WithReconciler(gvk schema.GroupVersionKind, reconciler Reconciler) Apply + WithStrictCaching() Apply + WithDynamicLookup() Apply + WithRestrictClusterScoped() Apply + WithDefaultNamespace(ns string) Apply + WithListerNamespace(ns string) Apply + WithRateLimiting(ratelimitingQps float32) Apply + WithNoDelete() Apply + WithGVK(gvks ...schema.GroupVersionKind) Apply + WithSetOwnerReference(controller, block bool) Apply + WithIgnorePreviousApplied() Apply + WithDiffPatch(gvk 
schema.GroupVersionKind, namespace, name string, patch []byte) Apply + + FindOwner(obj runtime.Object) (runtime.Object, error) + PurgeOrphan(obj runtime.Object) error + DryRun(objs ...runtime.Object) (Plan, error) +} + +func NewForConfig(cfg *rest.Config) (Apply, error) { + k8s, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, err + } + + return New(k8s.Discovery(), NewClientFactory(cfg)), nil +} + +func New(discovery discovery.DiscoveryInterface, cf ClientFactory, igs ...InformerGetter) Apply { + a := &apply{ + clients: &clients{ + clientFactory: cf, + discovery: discovery, + namespaced: map[schema.GroupVersionKind]bool{}, + gvkToGVR: map[schema.GroupVersionKind]schema.GroupVersionResource{}, + clients: map[schema.GroupVersionKind]dynamic.NamespaceableResourceInterface{}, + }, + informers: map[schema.GroupVersionKind]cache.SharedIndexInformer{}, + } + + for _, ig := range igs { + a.informers[ig.GroupVersionKind()] = ig.Informer() + } + + return a +} + +type apply struct { + clients *clients + informers map[schema.GroupVersionKind]cache.SharedIndexInformer +} + +type clients struct { + sync.Mutex + + clientFactory ClientFactory + discovery discovery.DiscoveryInterface + namespaced map[schema.GroupVersionKind]bool + gvkToGVR map[schema.GroupVersionKind]schema.GroupVersionResource + clients map[schema.GroupVersionKind]dynamic.NamespaceableResourceInterface +} + +func (c *clients) IsNamespaced(gvk schema.GroupVersionKind) bool { + c.Lock() + defer c.Unlock() + return c.namespaced[gvk] +} + +func (c *clients) gvr(gvk schema.GroupVersionKind) schema.GroupVersionResource { + c.Lock() + defer c.Unlock() + return c.gvkToGVR[gvk] +} + +func (c *clients) client(gvk schema.GroupVersionKind) (dynamic.NamespaceableResourceInterface, error) { + c.Lock() + defer c.Unlock() + + if client, ok := c.clients[gvk]; ok { + return client, nil + } + + resources, err := c.discovery.ServerResourcesForGroupVersion(gvk.GroupVersion().String()) + if err != nil { + return 
nil, err + } + + for _, resource := range resources.APIResources { + if resource.Kind != gvk.Kind { + continue + } + + client, err := c.clientFactory(gvk.GroupVersion().WithResource(resource.Name)) + if err != nil { + return nil, err + } + + c.namespaced[gvk] = resource.Namespaced + c.clients[gvk] = client + c.gvkToGVR[gvk] = gvk.GroupVersion().WithResource(resource.Name) + return client, nil + } + + return nil, fmt.Errorf("failed to discover client for %s", gvk) +} + +func (a *apply) newDesiredSet() desiredSet { + return desiredSet{ + a: a, + defaultNamespace: defaultNamespace, + ctx: context.Background(), + ratelimitingQps: 1, + reconcilers: defaultReconcilers, + strictCaching: true, + } +} + +func (a *apply) DryRun(objs ...runtime.Object) (Plan, error) { + return a.newDesiredSet().DryRun(objs...) +} + +func (a *apply) Apply(set *objectset.ObjectSet) error { + return a.newDesiredSet().Apply(set) +} + +func (a *apply) ApplyObjects(objs ...runtime.Object) error { + os := objectset.NewObjectSet() + os.Add(objs...) + return a.newDesiredSet().Apply(os) +} + +func (a *apply) WithSetID(id string) Apply { + return a.newDesiredSet().WithSetID(id) +} + +func (a *apply) WithOwner(obj runtime.Object) Apply { + return a.newDesiredSet().WithOwner(obj) +} + +func (a *apply) WithOwnerKey(key string, gvk schema.GroupVersionKind) Apply { + return a.newDesiredSet().WithOwnerKey(key, gvk) +} + +func (a *apply) WithInjector(injs ...injectors.ConfigInjector) Apply { + return a.newDesiredSet().WithInjector(injs...) +} + +func (a *apply) WithInjectorName(injs ...string) Apply { + return a.newDesiredSet().WithInjectorName(injs...) +} + +func (a *apply) WithCacheTypes(igs ...InformerGetter) Apply { + return a.newDesiredSet().WithCacheTypes(igs...) 
+} + +func (a *apply) WithCacheTypeFactory(factory InformerFactory) Apply { + return a.newDesiredSet().WithCacheTypeFactory(factory) +} + +func (a *apply) WithGVK(gvks ...schema.GroupVersionKind) Apply { + return a.newDesiredSet().WithGVK(gvks...) +} + +func (a *apply) WithPatcher(gvk schema.GroupVersionKind, patcher Patcher) Apply { + return a.newDesiredSet().WithPatcher(gvk, patcher) +} + +func (a *apply) WithReconciler(gvk schema.GroupVersionKind, reconciler Reconciler) Apply { + return a.newDesiredSet().WithReconciler(gvk, reconciler) +} + +func (a *apply) WithStrictCaching() Apply { + return a.newDesiredSet().WithStrictCaching() +} + +func (a *apply) WithDynamicLookup() Apply { + return a.newDesiredSet().WithDynamicLookup() +} + +func (a *apply) WithRestrictClusterScoped() Apply { + return a.newDesiredSet().WithRestrictClusterScoped() +} + +func (a *apply) WithDefaultNamespace(ns string) Apply { + return a.newDesiredSet().WithDefaultNamespace(ns) +} + +func (a *apply) WithListerNamespace(ns string) Apply { + return a.newDesiredSet().WithListerNamespace(ns) +} + +func (a *apply) WithRateLimiting(ratelimitingQps float32) Apply { + return a.newDesiredSet().WithRateLimiting(ratelimitingQps) +} + +func (a *apply) WithNoDelete() Apply { + return a.newDesiredSet().WithNoDelete() +} + +func (a *apply) WithSetOwnerReference(controller, block bool) Apply { + return a.newDesiredSet().WithSetOwnerReference(controller, block) +} + +func (a *apply) WithContext(ctx context.Context) Apply { + return a.newDesiredSet().WithContext(ctx) +} + +func (a *apply) WithIgnorePreviousApplied() Apply { + return a.newDesiredSet().WithIgnorePreviousApplied() +} + +func (a *apply) FindOwner(obj runtime.Object) (runtime.Object, error) { + return a.newDesiredSet().FindOwner(obj) +} + +func (a *apply) PurgeOrphan(obj runtime.Object) error { + return a.newDesiredSet().PurgeOrphan(obj) +} + +func (a *apply) WithDiffPatch(gvk schema.GroupVersionKind, namespace, name string, patch []byte) Apply { 
+ return a.newDesiredSet().WithDiffPatch(gvk, namespace, name, patch) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/client_factory.go b/vendor/github.com/rancher/wrangler/pkg/apply/client_factory.go new file mode 100644 index 0000000000..0bde5b7887 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/client_factory.go @@ -0,0 +1,18 @@ +package apply + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +func NewClientFactory(config *rest.Config) ClientFactory { + return func(gvr schema.GroupVersionResource) (dynamic.NamespaceableResourceInterface, error) { + client, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + + return client.Resource(gvr), nil + } +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go new file mode 100644 index 0000000000..0e68f45e2a --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset.go @@ -0,0 +1,231 @@ +package apply + +import ( + "context" + + "github.com/rancher/wrangler/pkg/apply/injectors" + "github.com/rancher/wrangler/pkg/kv" + "github.com/rancher/wrangler/pkg/merr" + "github.com/rancher/wrangler/pkg/objectset" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +type patchKey struct { + schema.GroupVersionKind + objectset.ObjectKey +} + +type desiredSet struct { + a *apply + ctx context.Context + defaultNamespace string + listerNamespace string + ignorePreviousApplied bool + setOwnerReference bool + ownerReferenceController bool + ownerReferenceBlock bool + strictCaching bool + restrictClusterScoped bool + pruneTypes map[schema.GroupVersionKind]cache.SharedIndexInformer + patchers map[schema.GroupVersionKind]Patcher + reconcilers map[schema.GroupVersionKind]Reconciler + diffPatches map[patchKey][][]byte + 
informerFactory InformerFactory + remove bool + noDelete bool + setID string + objs *objectset.ObjectSet + codeVersion string + owner runtime.Object + injectors []injectors.ConfigInjector + ratelimitingQps float32 + injectorNames []string + errs []error + + createPlan bool + plan Plan +} + +func (o *desiredSet) err(err error) error { + o.errs = append(o.errs, err) + return o.Err() +} + +func (o desiredSet) Err() error { + return merr.NewErrors(append(o.errs, o.objs.Err())...) +} + +func (o desiredSet) DryRun(objs ...runtime.Object) (Plan, error) { + o.objs = objectset.NewObjectSet() + o.objs.Add(objs...) + return o.dryRun() +} + +func (o desiredSet) Apply(set *objectset.ObjectSet) error { + if set == nil { + set = objectset.NewObjectSet() + } + o.objs = set + return o.apply() +} + +func (o desiredSet) ApplyObjects(objs ...runtime.Object) error { + os := objectset.NewObjectSet() + os.Add(objs...) + return o.Apply(os) +} + +func (o desiredSet) WithDiffPatch(gvk schema.GroupVersionKind, namespace, name string, patch []byte) Apply { + patches := map[patchKey][][]byte{} + for k, v := range o.diffPatches { + patches[k] = v + } + key := patchKey{ + GroupVersionKind: gvk, + ObjectKey: objectset.ObjectKey{ + Name: name, + Namespace: namespace, + }, + } + patches[key] = append(patches[key], patch) + o.diffPatches = patches + return o +} + +// WithGVK uses a known listing of existing gvks to modify the the prune types to allow for deletion of objects +func (o desiredSet) WithGVK(gvks ...schema.GroupVersionKind) Apply { + pruneTypes := make(map[schema.GroupVersionKind]cache.SharedIndexInformer, len(gvks)) + for k, v := range o.pruneTypes { + pruneTypes[k] = v + } + for _, gvk := range gvks { + pruneTypes[gvk] = nil + } + o.pruneTypes = pruneTypes + return o +} + +func (o desiredSet) WithSetID(id string) Apply { + o.setID = id + return o +} + +func (o desiredSet) WithOwnerKey(key string, gvk schema.GroupVersionKind) Apply { + obj := &v1.PartialObjectMetadata{} + obj.Namespace, 
obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(gvk) + o.owner = obj + return o +} + +func (o desiredSet) WithOwner(obj runtime.Object) Apply { + o.owner = obj + return o +} + +func (o desiredSet) WithSetOwnerReference(controller, block bool) Apply { + o.setOwnerReference = true + o.ownerReferenceController = controller + o.ownerReferenceBlock = block + return o +} + +func (o desiredSet) WithInjector(injs ...injectors.ConfigInjector) Apply { + o.injectors = append(o.injectors, injs...) + return o +} + +func (o desiredSet) WithInjectorName(injs ...string) Apply { + o.injectorNames = append(o.injectorNames, injs...) + return o +} + +func (o desiredSet) WithCacheTypeFactory(factory InformerFactory) Apply { + o.informerFactory = factory + return o +} + +func (o desiredSet) WithIgnorePreviousApplied() Apply { + o.ignorePreviousApplied = true + return o +} + +func (o desiredSet) WithCacheTypes(igs ...InformerGetter) Apply { + pruneTypes := make(map[schema.GroupVersionKind]cache.SharedIndexInformer, len(igs)) + for k, v := range o.pruneTypes { + pruneTypes[k] = v + } + + for _, ig := range igs { + pruneTypes[ig.GroupVersionKind()] = ig.Informer() + } + + o.pruneTypes = pruneTypes + return o +} + +func (o desiredSet) WithPatcher(gvk schema.GroupVersionKind, patcher Patcher) Apply { + patchers := map[schema.GroupVersionKind]Patcher{} + for k, v := range o.patchers { + patchers[k] = v + } + patchers[gvk] = patcher + o.patchers = patchers + return o +} + +func (o desiredSet) WithReconciler(gvk schema.GroupVersionKind, reconciler Reconciler) Apply { + reconcilers := map[schema.GroupVersionKind]Reconciler{} + for k, v := range o.reconcilers { + reconcilers[k] = v + } + reconcilers[gvk] = reconciler + o.reconcilers = reconcilers + return o +} + +func (o desiredSet) WithStrictCaching() Apply { + o.strictCaching = true + return o +} +func (o desiredSet) WithDynamicLookup() Apply { + o.strictCaching = false + return o +} + +func (o desiredSet) WithRestrictClusterScoped() 
Apply { + o.restrictClusterScoped = true + return o +} + +func (o desiredSet) WithDefaultNamespace(ns string) Apply { + if ns == "" { + o.defaultNamespace = defaultNamespace + } else { + o.defaultNamespace = ns + } + return o +} + +func (o desiredSet) WithListerNamespace(ns string) Apply { + o.listerNamespace = ns + return o +} + +func (o desiredSet) WithRateLimiting(ratelimitingQps float32) Apply { + o.ratelimitingQps = ratelimitingQps + return o +} + +func (o desiredSet) WithNoDelete() Apply { + o.noDelete = true + return o +} + +func (o desiredSet) WithContext(ctx context.Context) Apply { + o.ctx = ctx + return o +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go new file mode 100644 index 0000000000..155946c852 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_apply.go @@ -0,0 +1,268 @@ +package apply + +import ( + "crypto/sha1" + "encoding/hex" + "fmt" + "sync" + "time" + + "github.com/sirupsen/logrus" + + gvk2 "github.com/rancher/wrangler/pkg/gvk" + + "github.com/pkg/errors" + "github.com/rancher/wrangler/pkg/apply/injectors" + "github.com/rancher/wrangler/pkg/objectset" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + LabelID = "objectset.rio.cattle.io/id" + LabelGVK = "objectset.rio.cattle.io/owner-gvk" + LabelName = "objectset.rio.cattle.io/owner-name" + LabelNamespace = "objectset.rio.cattle.io/owner-namespace" + LabelHash = "objectset.rio.cattle.io/hash" + LabelPrefix = "objectset.rio.cattle.io/" +) + +var ( + hashOrder = []string{ + LabelID, + LabelGVK, + LabelName, + LabelNamespace, + } + rls = map[string]flowcontrol.RateLimiter{} + rlsLock sync.Mutex +) + +func (o *desiredSet) 
getRateLimit(labelHash string) flowcontrol.RateLimiter { + var rl flowcontrol.RateLimiter + + rlsLock.Lock() + defer rlsLock.Unlock() + if o.remove { + delete(rls, labelHash) + } else { + rl = rls[labelHash] + if rl == nil { + rl = flowcontrol.NewTokenBucketRateLimiter(o.ratelimitingQps, 10) + rls[labelHash] = rl + } + } + + return rl +} + +func (o *desiredSet) dryRun() (Plan, error) { + o.createPlan = true + o.plan.Create = objectset.ObjectKeyByGVK{} + o.plan.Update = PatchByGVK{} + o.plan.Delete = objectset.ObjectKeyByGVK{} + err := o.apply() + return o.plan, err +} + +func (o *desiredSet) apply() error { + if o.objs == nil || o.objs.Len() == 0 { + o.remove = true + } + + if err := o.Err(); err != nil { + return err + } + + labelSet, annotationSet, err := GetLabelsAndAnnotations(o.setID, o.owner) + if err != nil { + return o.err(err) + } + + rl := o.getRateLimit(labelSet[LabelHash]) + if rl != nil { + t := time.Now() + rl.Accept() + if d := time.Now().Sub(t); d.Seconds() > 1 { + logrus.Infof("rate limited %s(%s) %s", o.setID, labelSet, d) + } + } + + objList, err := o.injectLabelsAndAnnotations(labelSet, annotationSet) + if err != nil { + return o.err(err) + } + + objList, err = o.runInjectors(objList) + if err != nil { + return o.err(err) + } + + objs := o.collect(objList) + + debugID := o.debugID() + sel, err := GetSelector(labelSet) + if err != nil { + return o.err(err) + } + + for _, gvk := range o.objs.GVKOrder(o.knownGVK()...) 
{ + o.process(debugID, sel, gvk, objs[gvk]) + } + + return o.Err() +} + +func (o *desiredSet) knownGVK() (ret []schema.GroupVersionKind) { + for k := range o.pruneTypes { + ret = append(ret, k) + } + return +} + +func (o *desiredSet) debugID() string { + if o.owner == nil { + return o.setID + } + metadata, err := meta.Accessor(o.owner) + if err != nil { + return o.setID + } + + return fmt.Sprintf("%s %s", o.setID, objectset.ObjectKey{ + Namespace: metadata.GetNamespace(), + Name: metadata.GetName(), + }) +} + +func (o *desiredSet) collect(objList []runtime.Object) objectset.ObjectByGVK { + result := objectset.ObjectByGVK{} + for _, obj := range objList { + result.Add(obj) + } + return result +} + +func (o *desiredSet) runInjectors(objList []runtime.Object) ([]runtime.Object, error) { + var err error + + for _, inj := range o.injectors { + if inj == nil { + continue + } + + objList, err = inj(objList) + if err != nil { + return nil, err + } + } + + for _, name := range o.injectorNames { + inj := injectors.Get(name) + if inj == nil { + continue + } + + objList, err = inj(objList) + if err != nil { + return nil, err + } + } + + return objList, nil +} + +func GetSelector(labelSet map[string]string) (labels.Selector, error) { + req, err := labels.NewRequirement(LabelHash, selection.Equals, []string{labelSet[LabelHash]}) + if err != nil { + return nil, err + } + return labels.NewSelector().Add(*req), nil +} + +func GetLabelsAndAnnotations(setID string, owner runtime.Object) (map[string]string, map[string]string, error) { + if setID == "" && owner == nil { + return nil, nil, fmt.Errorf("set ID or owner must be set") + } + + annotations := map[string]string{ + LabelID: setID, + } + + if owner != nil { + gvk, err := gvk2.Get(owner) + if err != nil { + return nil, nil, err + } + annotations[LabelGVK] = gvk.String() + metadata, err := meta.Accessor(owner) + if err != nil { + return nil, nil, fmt.Errorf("failed to get metadata for %s", gvk) + } + annotations[LabelName] = 
metadata.GetName() + annotations[LabelNamespace] = metadata.GetNamespace() + } + + labels := map[string]string{ + LabelHash: objectSetHash(annotations), + } + + return labels, annotations, nil +} + +func (o *desiredSet) injectLabelsAndAnnotations(labels, annotations map[string]string) ([]runtime.Object, error) { + var result []runtime.Object + + for _, objMap := range o.objs.ObjectsByGVK() { + for key, obj := range objMap { + obj = obj.DeepCopyObject() + meta, err := meta.Accessor(obj) + if err != nil { + return nil, errors.Wrapf(err, "failed to get metadata for %s", key) + } + + setLabels(meta, labels) + setAnnotations(meta, annotations) + + result = append(result, obj) + } + } + + return result, nil +} + +func setAnnotations(meta metav1.Object, annotations map[string]string) { + objAnn := meta.GetAnnotations() + if objAnn == nil { + objAnn = map[string]string{} + } + delete(objAnn, LabelApplied) + for k, v := range annotations { + objAnn[k] = v + } + meta.SetAnnotations(objAnn) +} + +func setLabels(meta metav1.Object, labels map[string]string) { + objLabels := meta.GetLabels() + if objLabels == nil { + objLabels = map[string]string{} + } + for k, v := range labels { + objLabels[k] = v + } + meta.SetLabels(objLabels) +} + +func objectSetHash(labels map[string]string) string { + dig := sha1.New() + for _, key := range hashOrder { + dig.Write([]byte(labels[key])) + } + return hex.EncodeToString(dig.Sum(nil)) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_compare.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_compare.go new file mode 100644 index 0000000000..5533aba876 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_compare.go @@ -0,0 +1,389 @@ +package apply + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "io/ioutil" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/pkg/errors" + data2 "github.com/rancher/wrangler/pkg/data" + 
"github.com/rancher/wrangler/pkg/data/convert" + "github.com/rancher/wrangler/pkg/objectset" + patch2 "github.com/rancher/wrangler/pkg/patch" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/meta" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/jsonmergepatch" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/dynamic" +) + +const ( + LabelApplied = "objectset.rio.cattle.io/applied" +) + +func prepareObjectForCreate(gvk schema.GroupVersionKind, obj runtime.Object) (runtime.Object, error) { + serialized, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + obj = obj.DeepCopyObject() + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + annotations := m.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + + annotations[LabelApplied] = appliedToAnnotation(serialized) + m.SetAnnotations(annotations) + + typed, err := meta.TypeAccessor(obj) + if err != nil { + return nil, err + } + + apiVersion, kind := gvk.ToAPIVersionAndKind() + typed.SetAPIVersion(apiVersion) + typed.SetKind(kind) + + return obj, nil +} + +func originalAndModified(gvk schema.GroupVersionKind, oldMetadata v1.Object, newObject runtime.Object) ([]byte, []byte, error) { + original, err := getOriginalBytes(gvk, oldMetadata) + if err != nil { + return nil, nil, err + } + + newObject, err = prepareObjectForCreate(gvk, newObject) + if err != nil { + return nil, nil, err + } + + modified, err := json.Marshal(newObject) + + return original, modified, err +} + +func emptyMaps(data map[string]interface{}, keys ...string) bool { + for _, key := range append(keys, "__invalid_key__") { + if len(data) == 0 { + // map is empty so all children are empty too + return true + } else if len(data) > 
1 { + // map has more than one key so not empty + return false + } + + value, ok := data[key] + if !ok { + // map has one key but not what we are expecting so not considered empty + return false + } + + data = convert.ToMapInterface(value) + } + + return true +} + +func sanitizePatch(patch []byte, removeObjectSetAnnotation bool) ([]byte, error) { + mod := false + data := map[string]interface{}{} + err := json.Unmarshal(patch, &data) + if err != nil { + return nil, err + } + + if _, ok := data["kind"]; ok { + mod = true + delete(data, "kind") + } + + if _, ok := data["apiVersion"]; ok { + mod = true + delete(data, "apiVersion") + } + + if _, ok := data["status"]; ok { + mod = true + delete(data, "status") + } + + if deleted := removeCreationTimestamp(data); deleted { + mod = true + } + + if removeObjectSetAnnotation { + metadata := convert.ToMapInterface(data2.GetValueN(data, "metadata")) + annotations := convert.ToMapInterface(data2.GetValueN(data, "metadata", "annotations")) + for k := range annotations { + if strings.HasPrefix(k, LabelPrefix) { + mod = true + delete(annotations, k) + } + } + if mod && len(annotations) == 0 { + delete(metadata, "annotations") + if len(metadata) == 0 { + delete(data, "metadata") + } + } + } + + if emptyMaps(data, "metadata", "annotations") { + return []byte("{}"), nil + } + + if !mod { + return patch, nil + } + + return json.Marshal(data) +} + +func applyPatch(gvk schema.GroupVersionKind, reconciler Reconciler, patcher Patcher, debugID string, ignoreOriginal bool, oldObject, newObject runtime.Object, diffPatches [][]byte) (bool, error) { + oldMetadata, err := meta.Accessor(oldObject) + if err != nil { + return false, err + } + + original, modified, err := originalAndModified(gvk, oldMetadata, newObject) + if err != nil { + return false, err + } + + if ignoreOriginal { + original = nil + } + + current, err := json.Marshal(oldObject) + if err != nil { + return false, err + } + + patchType, patch, err := doPatch(gvk, original, 
modified, current, diffPatches) + if err != nil { + return false, errors.Wrap(err, "patch generation") + } + + if string(patch) == "{}" { + return false, nil + } + + patch, err = sanitizePatch(patch, false) + if err != nil { + return false, err + } + + if string(patch) == "{}" { + return false, nil + } + + logrus.Debugf("DesiredSet - Patch %s %s/%s for %s -- [%s, %s, %s, %s]", gvk, oldMetadata.GetNamespace(), oldMetadata.GetName(), debugID, patch, original, modified, current) + + if reconciler != nil { + newObject, err := prepareObjectForCreate(gvk, newObject) + if err != nil { + return false, err + } + originalObject, err := getOriginalObject(gvk, oldMetadata) + if err != nil { + return false, err + } + if originalObject == nil { + originalObject = oldObject + } + handled, err := reconciler(originalObject, newObject) + if err != nil { + return false, err + } + if handled { + return true, nil + } + } + + logrus.Debugf("DesiredSet - Updated %s %s/%s for %s -- %s %s", gvk, oldMetadata.GetNamespace(), oldMetadata.GetName(), debugID, patchType, patch) + _, err = patcher(oldMetadata.GetNamespace(), oldMetadata.GetName(), patchType, patch) + + return true, err +} + +func (o *desiredSet) compareObjects(gvk schema.GroupVersionKind, reconciler Reconciler, patcher Patcher, client dynamic.NamespaceableResourceInterface, debugID string, oldObject, newObject runtime.Object, force bool) error { + oldMetadata, err := meta.Accessor(oldObject) + if err != nil { + return err + } + + if o.createPlan { + o.plan.Objects = append(o.plan.Objects, oldObject) + } + + diffPatches := o.diffPatches[patchKey{ + GroupVersionKind: gvk, + ObjectKey: objectset.ObjectKey{ + Namespace: oldMetadata.GetNamespace(), + Name: oldMetadata.GetName(), + }, + }] + diffPatches = append(diffPatches, o.diffPatches[patchKey{ + GroupVersionKind: gvk, + }]...) 
+ + if ran, err := applyPatch(gvk, reconciler, patcher, debugID, o.ignorePreviousApplied, oldObject, newObject, diffPatches); err != nil { + return err + } else if !ran { + logrus.Debugf("DesiredSet - No change(2) %s %s/%s for %s", gvk, oldMetadata.GetNamespace(), oldMetadata.GetName(), debugID) + } + + return nil +} + +func removeCreationTimestamp(data map[string]interface{}) bool { + metadata, ok := data["metadata"] + if !ok { + return false + } + + data = convert.ToMapInterface(metadata) + if _, ok := data["creationTimestamp"]; ok { + delete(data, "creationTimestamp") + return true + } + + return false +} + +func getOriginalObject(gvk schema.GroupVersionKind, obj v1.Object) (runtime.Object, error) { + original := appliedFromAnnotation(obj.GetAnnotations()[LabelApplied]) + if len(original) == 0 { + return nil, nil + } + + mapObj := map[string]interface{}{} + err := json.Unmarshal(original, &mapObj) + if err != nil { + return nil, err + } + + removeCreationTimestamp(mapObj) + return prepareObjectForCreate(gvk, &unstructured.Unstructured{ + Object: mapObj, + }) +} + +func getOriginalBytes(gvk schema.GroupVersionKind, obj v1.Object) ([]byte, error) { + objCopy, err := getOriginalObject(gvk, obj) + if err != nil { + return nil, err + } + if objCopy == nil { + return []byte("{}"), nil + } + return json.Marshal(objCopy) +} + +func appliedFromAnnotation(str string) []byte { + if len(str) == 0 || str[0] == '{' { + return []byte(str) + } + + b, err := base64.RawStdEncoding.DecodeString(str) + if err != nil { + return nil + } + + r, err := gzip.NewReader(bytes.NewBuffer(b)) + if err != nil { + return nil + } + + b, err = ioutil.ReadAll(r) + if err != nil { + return nil + } + + return b +} + +func appliedToAnnotation(b []byte) string { + if len(b) < 1024 { + return string(b) + } + buf := &bytes.Buffer{} + w := gzip.NewWriter(buf) + if _, err := w.Write(b); err != nil { + return string(b) + } + if err := w.Close(); err != nil { + return string(b) + } + return 
base64.RawStdEncoding.EncodeToString(buf.Bytes()) +} + +func stripIgnores(original, modified, current []byte, patches [][]byte) ([]byte, []byte, []byte, error) { + for _, patch := range patches { + patch, err := jsonpatch.DecodePatch(patch) + if err != nil { + return nil, nil, nil, err + } + if len(original) > 0 { + b, err := patch.Apply(original) + if err == nil { + original = b + } + } + b, err := patch.Apply(modified) + if err == nil { + modified = b + } + b, err = patch.Apply(current) + if err == nil { + current = b + } + } + + return original, modified, current, nil +} + +// doPatch is adapted from "kubectl apply" +func doPatch(gvk schema.GroupVersionKind, original, modified, current []byte, diffPatch [][]byte) (types.PatchType, []byte, error) { + var ( + patchType types.PatchType + patch []byte + ) + + original, modified, current, err := stripIgnores(original, modified, current, diffPatch) + if err != nil { + return patchType, nil, err + } + + patchType, lookupPatchMeta, err := patch2.GetMergeStyle(gvk) + if err != nil { + return patchType, nil, err + } + + if patchType == types.StrategicMergePatchType { + patch, err = strategicpatch.CreateThreeWayMergePatch(original, modified, current, lookupPatchMeta, true) + } else { + patch, err = jsonmergepatch.CreateThreeWayJSONMergePatch(original, modified, current) + } + + if err != nil { + logrus.Errorf("Failed to calcuated patch: %v", err) + } + + return patchType, patch, err +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_crud.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_crud.go new file mode 100644 index 0000000000..7d74bcc1d4 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_crud.go @@ -0,0 +1,66 @@ +package apply + +import ( + "bytes" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/dynamic" +) + +var ( + 
deletePolicy = v1.DeletePropagationBackground +) + +func (o *desiredSet) toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { + unstruct, ok := obj.(*unstructured.Unstructured) + if ok { + return unstruct, nil + } + + buf := &bytes.Buffer{} + if err := json.NewEncoder(buf).Encode(obj); err != nil { + return nil, err + } + + unstruct = &unstructured.Unstructured{ + Object: map[string]interface{}{}, + } + + return unstruct, json.Unmarshal(buf.Bytes(), &unstruct.Object) +} + +func (o *desiredSet) create(nsed bool, namespace string, client dynamic.NamespaceableResourceInterface, obj runtime.Object) (runtime.Object, error) { + unstr, err := o.toUnstructured(obj) + if err != nil { + return nil, err + } + + if nsed { + return client.Namespace(namespace).Create(o.ctx, unstr, v1.CreateOptions{}) + } + return client.Create(o.ctx, unstr, v1.CreateOptions{}) +} + +func (o *desiredSet) get(nsed bool, namespace, name string, client dynamic.NamespaceableResourceInterface) (runtime.Object, error) { + if nsed { + return client.Namespace(namespace).Get(o.ctx, name, v1.GetOptions{}) + } + return client.Get(o.ctx, name, v1.GetOptions{}) +} + +func (o *desiredSet) delete(nsed bool, namespace, name string, client dynamic.NamespaceableResourceInterface, force bool) error { + if o.noDelete && !force { + return nil + } + opts := v1.DeleteOptions{ + PropagationPolicy: &deletePolicy, + } + if nsed { + return client.Namespace(namespace).Delete(o.ctx, name, opts) + } + + return client.Delete(o.ctx, name, opts) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_owner.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_owner.go new file mode 100644 index 0000000000..126fe02be5 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_owner.go @@ -0,0 +1,152 @@ +package apply + +import ( + "fmt" + "strings" + + "github.com/rancher/wrangler/pkg/gvk" + + "github.com/rancher/wrangler/pkg/kv" + + "github.com/pkg/errors" + namer 
"github.com/rancher/wrangler/pkg/name" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/cache" +) + +var ( + ErrOwnerNotFound = errors.New("owner not found") +) + +func notFound(name string, gvk schema.GroupVersionKind) error { + // this is not proper, but does it really matter that much? If you find this + // line while researching a bug, then the answer is probably yes. + resource := namer.GuessPluralName(strings.ToLower(gvk.Kind)) + return apierrors.NewNotFound(schema.GroupResource{ + Group: gvk.Group, + Resource: resource, + }, name) +} + +func getGVK(gvkLabel string, gvk *schema.GroupVersionKind) error { + parts := strings.Split(gvkLabel, ", Kind=") + if len(parts) != 2 { + return fmt.Errorf("invalid GVK format: %s", gvkLabel) + } + gvk.Group, gvk.Version = kv.Split(parts[0], "/") + gvk.Kind = parts[1] + return nil +} + +func (o desiredSet) FindOwner(obj runtime.Object) (runtime.Object, error) { + if obj == nil { + return nil, ErrOwnerNotFound + } + meta, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + + var ( + debugID = fmt.Sprintf("%s/%s", meta.GetNamespace(), meta.GetName()) + gvkLabel = meta.GetAnnotations()[LabelGVK] + namespace = meta.GetAnnotations()[LabelNamespace] + name = meta.GetAnnotations()[LabelName] + gvk schema.GroupVersionKind + ) + + if gvkLabel == "" { + return nil, ErrOwnerNotFound + } + + if err := getGVK(gvkLabel, &gvk); err != nil { + return nil, err + } + + cache, client, err := o.getControllerAndClient(debugID, gvk) + if err != nil { + return nil, err + } + + if cache != nil { + return o.fromCache(cache, namespace, name, gvk) + } + + return o.fromClient(client, namespace, name, gvk) +} + +func (o *desiredSet) fromClient(client dynamic.NamespaceableResourceInterface, namespace, name string, gvk 
schema.GroupVersionKind) (runtime.Object, error) { + var ( + err error + obj interface{} + ) + if namespace == "" { + obj, err = client.Get(o.ctx, name, metav1.GetOptions{}) + } else { + obj, err = client.Namespace(namespace).Get(o.ctx, name, metav1.GetOptions{}) + } + if err != nil { + return nil, err + } + if ro, ok := obj.(runtime.Object); ok { + return ro, nil + } + return nil, notFound(name, gvk) +} + +func (o *desiredSet) fromCache(cache cache.SharedInformer, namespace, name string, gvk schema.GroupVersionKind) (runtime.Object, error) { + var key string + if namespace == "" { + key = name + } else { + key = namespace + "/" + name + } + item, ok, err := cache.GetStore().GetByKey(key) + if err != nil { + return nil, err + } else if !ok { + return nil, notFound(name, gvk) + } else if ro, ok := item.(runtime.Object); ok { + return ro, nil + } + return nil, notFound(name, gvk) +} + +func (o desiredSet) PurgeOrphan(obj runtime.Object) error { + if obj == nil { + return nil + } + + meta, err := meta.Accessor(obj) + if err != nil { + return err + } + + if _, err := o.FindOwner(obj); apierrors.IsNotFound(err) { + gvk, err := gvk.Get(obj) + if err != nil { + return err + } + + o.strictCaching = false + _, client, err := o.getControllerAndClient(meta.GetName(), gvk) + if err != nil { + return err + } + if meta.GetNamespace() == "" { + return client.Delete(o.ctx, meta.GetName(), metav1.DeleteOptions{}) + } else { + return client.Namespace(meta.GetNamespace()).Delete(o.ctx, meta.GetName(), metav1.DeleteOptions{}) + } + } else if err == ErrOwnerNotFound { + return nil + } else if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go new file mode 100644 index 0000000000..0d6d18b265 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/desiredset_process.go @@ -0,0 +1,404 @@ +package apply + +import ( + "fmt" + "sort" + + 
"github.com/pkg/errors" + gvk2 "github.com/rancher/wrangler/pkg/gvk" + "github.com/rancher/wrangler/pkg/merr" + "github.com/rancher/wrangler/pkg/objectset" + "github.com/sirupsen/logrus" + errors2 "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + types2 "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/cache" +) + +var ( + ErrReplace = errors.New("replace object with changes") + ReplaceOnChange = func(name string, o runtime.Object, patchType types2.PatchType, data []byte, subresources ...string) (runtime.Object, error) { + return nil, ErrReplace + } +) + +func (o *desiredSet) getControllerAndClient(debugID string, gvk schema.GroupVersionKind) (cache.SharedIndexInformer, dynamic.NamespaceableResourceInterface, error) { + // client needs to be accessed first so that the gvk->gvr mapping gets cached + client, err := o.a.clients.client(gvk) + if err != nil { + return nil, nil, err + } + + informer, ok := o.pruneTypes[gvk] + if !ok { + informer = o.a.informers[gvk] + } + if informer == nil && o.informerFactory != nil { + newInformer, err := o.informerFactory.Get(gvk, o.a.clients.gvr(gvk)) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to construct informer for %v for %s", gvk, debugID) + } + informer = newInformer + } + if informer == nil && o.strictCaching { + return nil, nil, fmt.Errorf("failed to find informer for %s for %s", gvk, debugID) + } + + return informer, client, nil +} + +func (o *desiredSet) assignOwnerReference(gvk schema.GroupVersionKind, objs map[objectset.ObjectKey]runtime.Object) error { + if o.owner == nil { + return fmt.Errorf("no owner set to assign owner reference") + } + ownerMeta, err := meta.Accessor(o.owner) + if err != nil { + return err + } + ownerGVK, err := gvk2.Get(o.owner) + ownerNSed := 
o.a.clients.IsNamespaced(ownerGVK) + + for k, v := range objs { + // can't set owners across boundaries + if ownerNSed && !o.a.clients.IsNamespaced(gvk) { + continue + } + + assignNS := false + assignOwner := true + if o.a.clients.IsNamespaced(gvk) { + if k.Namespace == "" { + assignNS = true + } else if k.Namespace != ownerMeta.GetNamespace() && ownerNSed { + assignOwner = false + } + } + + if !assignOwner { + continue + } + + v = v.DeepCopyObject() + meta, err := meta.Accessor(v) + if err != nil { + return err + } + + if assignNS { + meta.SetNamespace(ownerMeta.GetNamespace()) + } + + shouldSet := true + for _, of := range meta.GetOwnerReferences() { + if ownerMeta.GetUID() == of.UID { + shouldSet = false + break + } + } + + if shouldSet { + meta.SetOwnerReferences(append(meta.GetOwnerReferences(), v1.OwnerReference{ + APIVersion: ownerGVK.GroupVersion().String(), + Kind: ownerGVK.Kind, + Name: ownerMeta.GetName(), + UID: ownerMeta.GetUID(), + Controller: &o.ownerReferenceController, + BlockOwnerDeletion: &o.ownerReferenceBlock, + })) + } + + objs[k] = v + + if assignNS { + delete(objs, k) + k.Namespace = ownerMeta.GetNamespace() + objs[k] = v + } + } + + return nil +} + +func (o *desiredSet) adjustNamespace(gvk schema.GroupVersionKind, objs map[objectset.ObjectKey]runtime.Object) error { + for k, v := range objs { + if k.Namespace != "" { + continue + } + + v = v.DeepCopyObject() + meta, err := meta.Accessor(v) + if err != nil { + return err + } + + meta.SetNamespace(o.defaultNamespace) + delete(objs, k) + k.Namespace = o.defaultNamespace + objs[k] = v + } + + return nil +} + +func (o *desiredSet) clearNamespace(objs map[objectset.ObjectKey]runtime.Object) error { + for k, v := range objs { + if k.Namespace == "" { + continue + } + + v = v.DeepCopyObject() + meta, err := meta.Accessor(v) + if err != nil { + return err + } + + meta.SetNamespace("") + + delete(objs, k) + k.Namespace = "" + objs[k] = v + } + + return nil +} + +func (o *desiredSet) 
createPatcher(client dynamic.NamespaceableResourceInterface) Patcher { + return func(namespace, name string, pt types2.PatchType, data []byte) (object runtime.Object, e error) { + if namespace != "" { + return client.Namespace(namespace).Patch(o.ctx, name, pt, data, v1.PatchOptions{}) + } + return client.Patch(o.ctx, name, pt, data, v1.PatchOptions{}) + } +} + +func (o *desiredSet) filterCrossVersion(gvk schema.GroupVersionKind, keys []objectset.ObjectKey) []objectset.ObjectKey { + result := make([]objectset.ObjectKey, 0, len(keys)) + gk := gvk.GroupKind() + for _, key := range keys { + if o.objs.Contains(gk, key) { + continue + } + if key.Namespace == o.defaultNamespace && o.objs.Contains(gk, objectset.ObjectKey{Name: key.Name}) { + continue + } + result = append(result, key) + } + return result +} + +func (o *desiredSet) process(debugID string, set labels.Selector, gvk schema.GroupVersionKind, objs map[objectset.ObjectKey]runtime.Object) { + controller, client, err := o.getControllerAndClient(debugID, gvk) + if err != nil { + o.err(err) + return + } + + nsed := o.a.clients.IsNamespaced(gvk) + + if !nsed && o.restrictClusterScoped { + o.err(fmt.Errorf("invalid cluster scoped gvk: %v", gvk)) + return + } + + if o.setOwnerReference && o.owner != nil { + if err := o.assignOwnerReference(gvk, objs); err != nil { + o.err(err) + return + } + } + + if nsed { + if err := o.adjustNamespace(gvk, objs); err != nil { + o.err(err) + return + } + } else { + if err := o.clearNamespace(objs); err != nil { + o.err(err) + return + } + } + + patcher, ok := o.patchers[gvk] + if !ok { + patcher = o.createPatcher(client) + } + + reconciler := o.reconcilers[gvk] + + existing, err := o.list(controller, client, set) + if err != nil { + o.err(errors.Wrapf(err, "failed to list %s for %s", gvk, debugID)) + return + } + + toCreate, toDelete, toUpdate := compareSets(existing, objs) + + // check for resources in the objectset but under a different version of the same group/kind + toDelete = 
o.filterCrossVersion(gvk, toDelete) + + if o.createPlan { + o.plan.Create[gvk] = toCreate + o.plan.Delete[gvk] = toDelete + + reconciler = nil + patcher = func(namespace, name string, pt types2.PatchType, data []byte) (runtime.Object, error) { + data, err := sanitizePatch(data, true) + if err != nil { + return nil, err + } + if string(data) != "{}" { + o.plan.Update.Add(gvk, namespace, name, string(data)) + } + return nil, nil + } + + toCreate = nil + toDelete = nil + } + + createF := func(k objectset.ObjectKey) { + obj := objs[k] + obj, err := prepareObjectForCreate(gvk, obj) + if err != nil { + o.err(errors.Wrapf(err, "failed to prepare create %s %s for %s", k, gvk, debugID)) + return + } + + _, err = o.create(nsed, k.Namespace, client, obj) + if errors2.IsAlreadyExists(err) { + // Taking over an object that wasn't previously managed by us + existingObj, err := o.get(nsed, k.Namespace, k.Name, client) + if err == nil { + toUpdate = append(toUpdate, k) + existing[k] = existingObj + return + } + } + if err != nil { + o.err(errors.Wrapf(err, "failed to create %s %s for %s", k, gvk, debugID)) + return + } + logrus.Debugf("DesiredSet - Created %s %s for %s", gvk, k, debugID) + } + + deleteF := func(k objectset.ObjectKey, force bool) { + if err := o.delete(nsed, k.Namespace, k.Name, client, force); err != nil { + o.err(errors.Wrapf(err, "failed to delete %s %s for %s", k, gvk, debugID)) + return + } + logrus.Debugf("DesiredSet - Delete %s %s for %s", gvk, k, debugID) + } + + updateF := func(k objectset.ObjectKey) { + err := o.compareObjects(gvk, reconciler, patcher, client, debugID, existing[k], objs[k], len(toCreate) > 0 || len(toDelete) > 0) + if err == ErrReplace { + deleteF(k, true) + o.err(fmt.Errorf("DesiredSet - Replace Wait %s %s for %s", gvk, k, debugID)) + } else if err != nil { + o.err(errors.Wrapf(err, "failed to update %s %s for %s", k, gvk, debugID)) + } + } + + for _, k := range toCreate { + createF(k) + } + + for _, k := range toUpdate { + updateF(k) + 
} + + for _, k := range toDelete { + deleteF(k, false) + } +} + +func (o *desiredSet) list(informer cache.SharedIndexInformer, client dynamic.NamespaceableResourceInterface, selector labels.Selector) (map[objectset.ObjectKey]runtime.Object, error) { + var ( + errs []error + objs = map[objectset.ObjectKey]runtime.Object{} + ) + + if informer == nil { + var c dynamic.ResourceInterface + if o.listerNamespace != "" { + c = client.Namespace(o.listerNamespace) + } else { + c = client + } + + list, err := c.List(o.ctx, v1.ListOptions{ + LabelSelector: selector.String(), + }) + if err != nil { + return nil, err + } + + for _, obj := range list.Items { + copy := obj + if err := addObjectToMap(objs, ©); err != nil { + errs = append(errs, err) + } + } + + return objs, merr.NewErrors(errs...) + } + + err := cache.ListAllByNamespace(informer.GetIndexer(), o.listerNamespace, selector, func(obj interface{}) { + if err := addObjectToMap(objs, obj); err != nil { + errs = append(errs, err) + } + }) + if err != nil { + errs = append(errs, err) + } + + return objs, merr.NewErrors(errs...) 
+} + +func compareSets(existingSet, newSet map[objectset.ObjectKey]runtime.Object) (toCreate, toDelete, toUpdate []objectset.ObjectKey) { + for k := range newSet { + if _, ok := existingSet[k]; ok { + toUpdate = append(toUpdate, k) + } else { + toCreate = append(toCreate, k) + } + } + + for k := range existingSet { + if _, ok := newSet[k]; !ok { + toDelete = append(toDelete, k) + } + } + + sortObjectKeys(toCreate) + sortObjectKeys(toDelete) + sortObjectKeys(toUpdate) + + return +} + +func sortObjectKeys(keys []objectset.ObjectKey) { + sort.Slice(keys, func(i, j int) bool { + return keys[i].String() < keys[j].String() + }) +} + +func addObjectToMap(objs map[objectset.ObjectKey]runtime.Object, obj interface{}) error { + metadata, err := meta.Accessor(obj) + if err != nil { + return err + } + + objs[objectset.ObjectKey{ + Namespace: metadata.GetNamespace(), + Name: metadata.GetName(), + }] = obj.(runtime.Object) + + return nil +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/injectors/registry.go b/vendor/github.com/rancher/wrangler/pkg/apply/injectors/registry.go new file mode 100644 index 0000000000..d9fd143050 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/injectors/registry.go @@ -0,0 +1,21 @@ +package injectors + +import "k8s.io/apimachinery/pkg/runtime" + +var ( + injectors = map[string]ConfigInjector{} + order []string +) + +type ConfigInjector func(config []runtime.Object) ([]runtime.Object, error) + +func Register(name string, injector ConfigInjector) { + if _, ok := injectors[name]; !ok { + order = append(order, name) + } + injectors[name] = injector +} + +func Get(name string) ConfigInjector { + return injectors[name] +} diff --git a/vendor/github.com/rancher/wrangler/pkg/apply/reconcilers.go b/vendor/github.com/rancher/wrangler/pkg/apply/reconcilers.go new file mode 100644 index 0000000000..2fba99a75f --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/apply/reconcilers.go @@ -0,0 +1,154 @@ +package apply + +import ( + 
"encoding/json" + "fmt" + "reflect" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + defaultReconcilers = map[schema.GroupVersionKind]Reconciler{ + v1.SchemeGroupVersion.WithKind("Secret"): reconcileSecret, + v1.SchemeGroupVersion.WithKind("Service"): reconcileService, + batchv1.SchemeGroupVersion.WithKind("Job"): reconcileJob, + appsv1.SchemeGroupVersion.WithKind("Deployment"): reconcileDeployment, + appsv1.SchemeGroupVersion.WithKind("DaemonSet"): reconcileDaemonSet, + } +) + +func reconcileDaemonSet(oldObj, newObj runtime.Object) (bool, error) { + oldSvc, ok := oldObj.(*appsv1.DaemonSet) + if !ok { + oldSvc = &appsv1.DaemonSet{} + if err := convertObj(oldObj, oldSvc); err != nil { + return false, err + } + } + newSvc, ok := newObj.(*appsv1.DaemonSet) + if !ok { + newSvc = &appsv1.DaemonSet{} + if err := convertObj(newObj, newSvc); err != nil { + return false, err + } + } + + if !equality.Semantic.DeepEqual(oldSvc.Spec.Selector, newSvc.Spec.Selector) { + return false, ErrReplace + } + + return false, nil +} + +func reconcileDeployment(oldObj, newObj runtime.Object) (bool, error) { + oldSvc, ok := oldObj.(*appsv1.Deployment) + if !ok { + oldSvc = &appsv1.Deployment{} + if err := convertObj(oldObj, oldSvc); err != nil { + return false, err + } + } + newSvc, ok := newObj.(*appsv1.Deployment) + if !ok { + newSvc = &appsv1.Deployment{} + if err := convertObj(newObj, newSvc); err != nil { + return false, err + } + } + + if !equality.Semantic.DeepEqual(oldSvc.Spec.Selector, newSvc.Spec.Selector) { + return false, ErrReplace + } + + return false, nil +} + +func reconcileSecret(oldObj, newObj runtime.Object) (bool, error) { + oldSvc, ok := oldObj.(*v1.Secret) + if !ok { + oldSvc = &v1.Secret{} + if err := convertObj(oldObj, oldSvc); err != nil { + 
return false, err + } + } + newSvc, ok := newObj.(*v1.Secret) + if !ok { + newSvc = &v1.Secret{} + if err := convertObj(newObj, newSvc); err != nil { + return false, err + } + } + + if newSvc.Type != "" && oldSvc.Type != newSvc.Type { + return false, ErrReplace + } + + return false, nil +} + +func reconcileService(oldObj, newObj runtime.Object) (bool, error) { + oldSvc, ok := oldObj.(*v1.Service) + if !ok { + oldSvc = &v1.Service{} + if err := convertObj(oldObj, oldSvc); err != nil { + return false, err + } + } + newSvc, ok := newObj.(*v1.Service) + if !ok { + newSvc = &v1.Service{} + if err := convertObj(newObj, newSvc); err != nil { + return false, err + } + } + + if newSvc.Spec.Type != "" && oldSvc.Spec.Type != newSvc.Spec.Type { + return false, ErrReplace + } + + return false, nil +} + +func reconcileJob(oldObj, newObj runtime.Object) (bool, error) { + oldSvc, ok := oldObj.(*batchv1.Job) + if !ok { + oldSvc = &batchv1.Job{} + if err := convertObj(oldObj, oldSvc); err != nil { + return false, err + } + } + + newSvc, ok := newObj.(*batchv1.Job) + if !ok { + newSvc = &batchv1.Job{} + if err := convertObj(newObj, newSvc); err != nil { + return false, err + } + } + + if !equality.Semantic.DeepEqual(oldSvc.Spec.Template, newSvc.Spec.Template) { + return false, ErrReplace + } + + return false, nil +} + +func convertObj(src interface{}, obj interface{}) error { + uObj, ok := src.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("expected unstructured but got %v", reflect.TypeOf(src)) + } + + bytes, err := uObj.MarshalJSON() + if err != nil { + return err + } + return json.Unmarshal(bytes, obj) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/data/convert/convert.go b/vendor/github.com/rancher/wrangler/pkg/data/convert/convert.go index 47af9bc74a..44a97028cb 100644 --- a/vendor/github.com/rancher/wrangler/pkg/data/convert/convert.go +++ b/vendor/github.com/rancher/wrangler/pkg/data/convert/convert.go @@ -259,3 +259,44 @@ func ToYAMLKey(str string) string 
{ return string(result) } + +func ToArgKey(str string) string { + var ( + result []rune + input = []rune(str) + ) + cap := false + + for i := 0; i < len(input); i++ { + r := input[i] + if i == 0 { + if unicode.IsUpper(r) { + cap = true + } + result = append(result, unicode.ToLower(r)) + continue + } + + if unicode.IsUpper(r) { + if cap { + result = append(result, unicode.ToLower(r)) + } else if len(input) > i+2 && + unicode.IsUpper(input[i]) && + unicode.IsUpper(input[i+1]) && + unicode.IsUpper(input[i+2]) { + result = append(result, '-', + unicode.ToLower(input[i]), + unicode.ToLower(input[i+1]), + unicode.ToLower(input[i+2])) + i += 2 + } else { + result = append(result, '-', unicode.ToLower(r)) + } + } else { + cap = false + result = append(result, r) + } + } + + return "--" + string(result) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/data/merge.go b/vendor/github.com/rancher/wrangler/pkg/data/merge.go new file mode 100644 index 0000000000..76970aa0a6 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/data/merge.go @@ -0,0 +1,24 @@ +package data + +func MergeMaps(base, overlay map[string]interface{}) map[string]interface{} { + result := map[string]interface{}{} + for k, v := range base { + result[k] = v + } + for k, v := range overlay { + if baseMap, overlayMap, bothMaps := bothMaps(result[k], v); bothMaps { + v = MergeMaps(baseMap, overlayMap) + } + result[k] = v + } + return result +} + +func bothMaps(left, right interface{}) (map[string]interface{}, map[string]interface{}, bool) { + leftMap, ok := left.(map[string]interface{}) + if !ok { + return nil, nil, false + } + rightMap, ok := right.(map[string]interface{}) + return leftMap, rightMap, ok +} diff --git a/vendor/github.com/rancher/wrangler/pkg/generic/controller.go b/vendor/github.com/rancher/wrangler/pkg/generic/controller.go index 2ca34c8094..d713e0f2be 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generic/controller.go +++ 
b/vendor/github.com/rancher/wrangler/pkg/generic/controller.go @@ -1,54 +1,19 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package generic import ( "context" - errors2 "errors" - "fmt" - "strings" - "time" - errors3 "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/rancher/lasso/pkg/controller" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" ) -var ErrSkip = errors2.New("skip processing") +var ErrSkip = controller.ErrIgnore type Handler func(key string, obj runtime.Object) (runtime.Object, error) -// Controller is the controller implementation for Foo resources -type Controller struct { - name string - workqueue workqueue.RateLimitingInterface - informer cache.SharedIndexInformer - handler Handler - gvk schema.GroupVersionKind - preStart []string -} - type ControllerMeta interface { Informer() cache.SharedIndexInformer GroupVersionKind() schema.GroupVersionKind @@ -57,182 +22,3 @@ type ControllerMeta interface { AddGenericRemoveHandler(ctx context.Context, name string, handler Handler) Updater() Updater } - -// NewController returns a new sample controller -func NewController(gvk schema.GroupVersionKind, informer cache.SharedIndexInformer, handler 
Handler) *Controller { - controller := &Controller{ - name: gvk.String(), - handler: handler, - informer: informer, - } - - informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.handleObject, - UpdateFunc: func(old, new interface{}) { - newMeta, err := meta.Accessor(new) - utilruntime.Must(err) - oldMeta, err := meta.Accessor(old) - utilruntime.Must(err) - if newMeta.GetResourceVersion() == oldMeta.GetResourceVersion() { - return - } - controller.handleObject(new) - }, - DeleteFunc: controller.handleObject, - }) - - return controller -} - -func (c *Controller) Informer() cache.SharedIndexInformer { - return c.informer -} - -func (c *Controller) GroupVersionKind() schema.GroupVersionKind { - return c.gvk -} - -func (c *Controller) run(threadiness int, stopCh <-chan struct{}) { - defer utilruntime.HandleCrash() - - if c.workqueue == nil { - c.workqueue = workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), c.name) - for _, key := range c.preStart { - c.workqueue.Add(key) - } - c.preStart = nil - } - - defer c.workqueue.ShutDown() - - // Start the informer factories to begin populating the informer caches - logrus.Infof("Starting %s controller", c.name) - - // Launch two workers to process Foo resources - for i := 0; i < threadiness; i++ { - go wait.Until(c.runWorker, time.Second, stopCh) - } - - <-stopCh - logrus.Infof("Shutting down %s workers", c.name) -} - -func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { - if ok := cache.WaitForCacheSync(stopCh, c.informer.HasSynced); !ok { - if c.workqueue != nil { - c.workqueue.ShutDown() - } - return fmt.Errorf("failed to wait for caches to sync") - } - - go c.run(threadiness, stopCh) - return nil -} - -func (c *Controller) runWorker() { - for c.processNextWorkItem() { - } -} - -func (c *Controller) processNextWorkItem() bool { - obj, shutdown := c.workqueue.Get() - - if shutdown { - return false - } - - if err := c.processSingleItem(obj); err != nil { 
- if !strings.Contains(err.Error(), "please apply your changes to the latest version and try again") { - utilruntime.HandleError(err) - } - return true - } - - return true -} - -func (c *Controller) processSingleItem(obj interface{}) error { - var ( - key string - ok bool - ) - - defer c.workqueue.Done(obj) - - if key, ok = obj.(string); !ok { - c.workqueue.Forget(obj) - utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) - return nil - } - if err := c.syncHandler(key); err != nil && errors3.Cause(err) != ErrSkip { - c.workqueue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error()) - } - - c.workqueue.Forget(obj) - return nil -} - -func (c *Controller) syncHandler(key string) error { - obj, exists, err := c.informer.GetStore().GetByKey(key) - if err != nil { - return err - } - if !exists { - _, err := c.handler(key, nil) - return err - } - - _, err = c.handler(key, obj.(runtime.Object)) - return err -} - -func (c *Controller) Enqueue(namespace, name string) { - key := name - if namespace != "" { - key = namespace + "/" + name - } - if c.workqueue == nil { - c.preStart = append(c.preStart, key) - } else { - c.workqueue.AddRateLimited(key) - } -} - -func (c *Controller) EnqueueAfter(namespace, name string, duration time.Duration) { - key := name - if namespace != "" { - key = namespace + "/" + name - } - if c.workqueue == nil { - c.preStart = append(c.preStart, key) - } else { - c.workqueue.AddAfter(key, duration) - } -} - -func (c *Controller) enqueue(obj interface{}) { - var key string - var err error - if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { - utilruntime.HandleError(err) - return - } - c.Enqueue("", key) -} - -func (c *Controller) handleObject(obj interface{}) { - if _, ok := obj.(metav1.Object); !ok { - tombstone, ok := obj.(cache.DeletedFinalStateUnknown) - if !ok { - utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) - return - } - _, ok = 
tombstone.Obj.(metav1.Object) - if !ok { - utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type")) - return - } - } - c.enqueue(obj) -} diff --git a/vendor/github.com/rancher/wrangler/pkg/generic/controllerfactory.go b/vendor/github.com/rancher/wrangler/pkg/generic/controllerfactory.go deleted file mode 100644 index f796b8785d..0000000000 --- a/vendor/github.com/rancher/wrangler/pkg/generic/controllerfactory.go +++ /dev/null @@ -1,193 +0,0 @@ -package generic - -import ( - "context" - "strings" - "sync" - "time" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/tools/cache" -) - -type ControllerManager struct { - lock sync.Mutex - generation int - started map[schema.GroupVersionKind]bool - controllers map[schema.GroupVersionKind]*Controller - handlers map[schema.GroupVersionKind]*Handlers -} - -func (g *ControllerManager) Controllers() map[schema.GroupVersionKind]*Controller { - return g.controllers -} - -func (g *ControllerManager) EnsureStart(ctx context.Context, gvk schema.GroupVersionKind, threadiness int) error { - g.lock.Lock() - defer g.lock.Unlock() - - return g.startController(ctx, gvk, threadiness) -} - -func (g *ControllerManager) startController(ctx context.Context, gvk schema.GroupVersionKind, threadiness int) error { - if g.started[gvk] { - return nil - } - - controller, ok := g.controllers[gvk] - if !ok { - return nil - } - - if err := controller.Run(threadiness, ctx.Done()); err != nil { - return err - } - - if g.started == nil { - g.started = map[schema.GroupVersionKind]bool{} - } - g.started[gvk] = true - - go func() { - <-ctx.Done() - g.lock.Lock() - defer g.lock.Unlock() - - delete(g.started, gvk) - delete(g.controllers, gvk) - }() - - return nil -} - -func (g *ControllerManager) Start(ctx context.Context, defaultThreadiness int, threadiness map[schema.GroupVersionKind]int) error { - g.lock.Lock() - defer g.lock.Unlock() - - for gvk := range g.controllers { - threadiness, ok := threadiness[gvk] - if 
!ok { - threadiness = defaultThreadiness - } - if err := g.startController(ctx, gvk, threadiness); err != nil { - return err - } - } - - return nil -} - -func (g *ControllerManager) Enqueue(gvk schema.GroupVersionKind, informer cache.SharedIndexInformer, namespace, name string) { - _, controller, _ := g.getController(gvk, informer, true) - - if namespace == "*" || name == "*" { - for _, key := range informer.GetStore().ListKeys() { - if namespace != "" && namespace != "*" && !strings.HasPrefix(key, namespace+"/") { - continue - } - if name != "*" && !nameMatches(key, name) { - continue - } - controller.Enqueue("", key) - } - } else { - controller.Enqueue(namespace, name) - } -} - -func nameMatches(key, name string) bool { - return key == name || strings.HasSuffix(key, "/"+name) -} - -func (g *ControllerManager) EnqueueAfter(gvk schema.GroupVersionKind, informer cache.SharedIndexInformer, namespace, name string, duration time.Duration) { - _, controller, _ := g.getController(gvk, informer, true) - controller.EnqueueAfter(namespace, name, duration) -} - -func (g *ControllerManager) removeHandler(gvk schema.GroupVersionKind, generation int) { - g.lock.Lock() - defer g.lock.Unlock() - - handlers, ok := g.handlers[gvk] - if !ok { - return - } - - var newHandlers []handlerEntry - for _, h := range handlers.handlers { - if h.generation == generation { - continue - } - newHandlers = append(newHandlers, h) - } - - handlers.handlers = newHandlers -} - -func (g *ControllerManager) getController(gvk schema.GroupVersionKind, informer cache.SharedIndexInformer, lock bool) (*Handlers, *Controller, bool) { - if lock { - g.lock.Lock() - defer g.lock.Unlock() - } - - if controller, ok := g.controllers[gvk]; ok { - return g.handlers[gvk], controller, true - } - - handlers := &Handlers{} - - controller := NewController(gvk, informer, handlers.Handle) - - if g.handlers == nil { - g.handlers = map[schema.GroupVersionKind]*Handlers{} - } - - if g.controllers == nil { - g.controllers = 
map[schema.GroupVersionKind]*Controller{} - } - - g.handlers[gvk] = handlers - g.controllers[gvk] = controller - - return handlers, controller, false -} - -func (g *ControllerManager) AddHandler(ctx context.Context, gvk schema.GroupVersionKind, informer cache.SharedIndexInformer, name string, handler Handler) { - t := getHandlerTransaction(ctx) - if t == nil { - g.addHandler(ctx, gvk, informer, name, handler) - return - } - - go func() { - if t.shouldContinue() { - g.addHandler(ctx, gvk, informer, name, handler) - } - }() -} - -func (g *ControllerManager) addHandler(ctx context.Context, gvk schema.GroupVersionKind, informer cache.SharedIndexInformer, name string, handler Handler) { - g.lock.Lock() - defer g.lock.Unlock() - - g.generation++ - entry := handlerEntry{ - generation: g.generation, - name: name, - handler: handler, - } - - go func() { - <-ctx.Done() - g.removeHandler(gvk, entry.generation) - }() - - handlers, controller, ok := g.getController(gvk, informer, false) - handlers.handlers = append(handlers.handlers, entry) - - if ok { - for _, key := range controller.informer.GetStore().ListKeys() { - controller.Enqueue("", key) - } - } -} diff --git a/vendor/github.com/rancher/wrangler/pkg/generic/factory.go b/vendor/github.com/rancher/wrangler/pkg/generic/factory.go new file mode 100644 index 0000000000..24256d0c33 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/generic/factory.go @@ -0,0 +1,120 @@ +package generic + +import ( + "context" + "sync" + "time" + + "github.com/rancher/lasso/pkg/log" + "github.com/sirupsen/logrus" + + "github.com/rancher/lasso/pkg/cache" + "github.com/rancher/lasso/pkg/client" + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/pkg/schemes" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/rest" +) + +func init() { + log.Infof = logrus.Infof + log.Errorf = logrus.Errorf +} + +type Factory struct { + lock sync.Mutex + cacheFactory 
cache.SharedCacheFactory + controllerFactory controller.SharedControllerFactory + threadiness map[schema.GroupVersionKind]int + config *rest.Config + opts FactoryOptions +} + +type FactoryOptions struct { + Namespace string + Resync time.Duration + SharedCacheFactory cache.SharedCacheFactory + SharedControllerFactory controller.SharedControllerFactory +} + +func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) { + if opts == nil { + opts = &FactoryOptions{} + } + + f := &Factory{ + config: config, + threadiness: map[schema.GroupVersionKind]int{}, + cacheFactory: opts.SharedCacheFactory, + controllerFactory: opts.SharedControllerFactory, + opts: *opts, + } + + if f.cacheFactory == nil && f.controllerFactory != nil { + f.cacheFactory = f.controllerFactory.SharedCacheFactory() + } + + return f, nil +} + +func (c *Factory) SetThreadiness(gvk schema.GroupVersionKind, threadiness int) { + c.threadiness[gvk] = threadiness +} + +func (c *Factory) ControllerFactory() controller.SharedControllerFactory { + err := c.setControllerFactoryWithLock() + utilruntime.Must(err) + return c.controllerFactory +} + +func (c *Factory) setControllerFactoryWithLock() error { + c.lock.Lock() + defer c.lock.Unlock() + + if c.controllerFactory != nil { + return nil + } + + cacheFactory := c.cacheFactory + if cacheFactory == nil { + client, err := client.NewSharedClientFactory(c.config, &client.SharedClientFactoryOptions{ + Scheme: schemes.All, + }) + if err != nil { + return err + } + + cacheFactory = cache.NewSharedCachedFactory(client, &cache.SharedCacheFactoryOptions{ + DefaultNamespace: c.opts.Namespace, + DefaultResync: c.opts.Resync, + }) + } + + c.cacheFactory = cacheFactory + c.controllerFactory = controller.NewSharedControllerFactory(cacheFactory, &controller.SharedControllerFactoryOptions{ + KindWorkers: c.threadiness, + }) + + return nil +} + +func (c *Factory) Sync(ctx context.Context) error { + if c.cacheFactory != nil { + 
c.cacheFactory.Start(ctx) + c.cacheFactory.WaitForCacheSync(ctx) + } + return nil +} + +func (c *Factory) Start(ctx context.Context, defaultThreadiness int) error { + if err := c.Sync(ctx); err != nil { + return err + } + + if c.controllerFactory != nil { + return c.controllerFactory.Start(ctx, defaultThreadiness) + } + + return nil +} diff --git a/vendor/github.com/rancher/wrangler/pkg/generic/generating.go b/vendor/github.com/rancher/wrangler/pkg/generic/generating.go index 4ae27bebb3..9ac9fae863 100644 --- a/vendor/github.com/rancher/wrangler/pkg/generic/generating.go +++ b/vendor/github.com/rancher/wrangler/pkg/generic/generating.go @@ -1,7 +1,39 @@ package generic +import ( + "github.com/rancher/wrangler/pkg/apply" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + type GeneratingHandlerOptions struct { AllowCrossNamespace bool AllowClusterScoped bool + NoOwnerReference bool DynamicLookup bool } + +func ConfigureApplyForObject(apply apply.Apply, obj metav1.Object, opts *GeneratingHandlerOptions) apply.Apply { + if opts == nil { + opts = &GeneratingHandlerOptions{} + } + + if opts.DynamicLookup { + apply = apply.WithDynamicLookup() + } + + if opts.NoOwnerReference { + apply = apply.WithSetOwnerReference(true, false) + } + + if opts.AllowCrossNamespace && !opts.AllowClusterScoped { + apply = apply. + WithDefaultNamespace(obj.GetNamespace()). 
+ WithListerNamespace(obj.GetNamespace()) + } + + if !opts.AllowClusterScoped { + apply = apply.WithRestrictClusterScoped() + } + + return apply +} diff --git a/vendor/github.com/rancher/wrangler/pkg/generic/handlers.go b/vendor/github.com/rancher/wrangler/pkg/generic/handlers.go deleted file mode 100644 index dad472fc34..0000000000 --- a/vendor/github.com/rancher/wrangler/pkg/generic/handlers.go +++ /dev/null @@ -1,95 +0,0 @@ -package generic - -import ( - "fmt" - "reflect" - "strings" - - "k8s.io/apimachinery/pkg/runtime" -) - -type handlerEntry struct { - generation int - name string - handler Handler -} - -type Handlers struct { - handlers []handlerEntry -} - -func (h *Handlers) Handle(key string, obj runtime.Object) (runtime.Object, error) { - var ( - errs errors - ) - - for _, handler := range h.handlers { - newObj, err := handler.handler(key, obj) - if err != nil { - errs = append(errs, &handlerError{ - HandlerName: handler.name, - Err: err, - }) - } - if newObj != nil { - obj = newObj - } - } - - return obj, errs.ToErr() -} - -type errors []error - -func (e errors) Error() string { - buf := strings.Builder{} - for _, err := range e { - if buf.Len() > 0 { - buf.WriteString(", ") - } - buf.WriteString(err.Error()) - } - return buf.String() -} - -func (e errors) ToErr() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] - default: - return e - } -} - -func (e errors) Cause() error { - if len(e) > 0 { - return e[0] - } - return nil -} - -type handlerError struct { - HandlerName string - Err error -} - -func (h handlerError) Error() string { - return fmt.Sprintf("handler %s: %v", h.HandlerName, h.Err) -} - -func (h handlerError) Cause() error { - return h.Err -} - -func ToName(h interface{}) string { - if str, ok := h.(fmt.Stringer); ok { - return str.String() - } - s := reflect.ValueOf(h).Type().String() - if len(s) > 1 && s[0] == '*' { - return s[1:] - } - return s -} diff --git a/vendor/github.com/rancher/wrangler/pkg/gvk/detect.go 
b/vendor/github.com/rancher/wrangler/pkg/gvk/detect.go new file mode 100644 index 0000000000..a8c16b86fd --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/gvk/detect.go @@ -0,0 +1,19 @@ +package gvk + +import ( + "encoding/json" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func Detect(obj []byte) (schema.GroupVersionKind, bool, error) { + partial := v1.PartialObjectMetadata{} + if err := json.Unmarshal(obj, &partial); err != nil { + return schema.GroupVersionKind{}, false, err + } + + result := partial.GetObjectKind().GroupVersionKind() + ok := result.Kind != "" && result.Version != "" + return result, ok, nil +} diff --git a/vendor/github.com/rancher/wrangler/pkg/gvk/get.go b/vendor/github.com/rancher/wrangler/pkg/gvk/get.go new file mode 100644 index 0000000000..743dc00b1c --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/gvk/get.go @@ -0,0 +1,57 @@ +package gvk + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/rancher/wrangler/pkg/schemes" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func Get(obj runtime.Object) (schema.GroupVersionKind, error) { + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Kind != "" { + return gvk, nil + } + + gvks, _, err := schemes.All.ObjectKinds(obj) + if err != nil { + return schema.GroupVersionKind{}, errors.Wrapf(err, "failed to find gvk for %T, you may need to import the wrangler generated controller package", obj) + } + + if len(gvks) == 0 { + return schema.GroupVersionKind{}, fmt.Errorf("failed to find gvk for %T", obj) + } + + return gvks[0], nil +} + +func Set(objs ...runtime.Object) error { + for _, obj := range objs { + if err := setObject(obj); err != nil { + return err + } + } + return nil +} + +func setObject(obj runtime.Object) error { + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Kind != "" { + return nil + } + + gvks, _, err := schemes.All.ObjectKinds(obj) + if err != nil { + return 
err + } + + if len(gvks) == 0 { + return nil + } + + kind := obj.GetObjectKind() + kind.SetGroupVersionKind(gvks[0]) + return nil +} diff --git a/vendor/github.com/rancher/wrangler/pkg/merr/error.go b/vendor/github.com/rancher/wrangler/pkg/merr/error.go new file mode 100644 index 0000000000..23b3c2e025 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/merr/error.go @@ -0,0 +1,37 @@ +package merr + +import "bytes" + +type Errors []error + +func (e Errors) Err() error { + return NewErrors(e...) +} + +func (e Errors) Error() string { + buf := bytes.NewBuffer(nil) + for _, err := range e { + if buf.Len() > 0 { + buf.WriteString(", ") + } + buf.WriteString(err.Error()) + } + + return buf.String() +} + +func NewErrors(inErrors ...error) error { + var errors []error + for _, err := range inErrors { + if err != nil { + errors = append(errors, err) + } + } + + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } + return Errors(errors) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/objectset/objectset.go b/vendor/github.com/rancher/wrangler/pkg/objectset/objectset.go new file mode 100644 index 0000000000..5c041dadf3 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/objectset/objectset.go @@ -0,0 +1,197 @@ +package objectset + +import ( + "fmt" + "reflect" + "sort" + + "github.com/pkg/errors" + "github.com/rancher/wrangler/pkg/gvk" + + "github.com/rancher/wrangler/pkg/merr" + "k8s.io/apimachinery/pkg/api/meta" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type ObjectKey struct { + Name string + Namespace string +} + +func NewObjectKey(obj v1.Object) ObjectKey { + return ObjectKey{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + } +} + +func (o ObjectKey) String() string { + if o.Namespace == "" { + return o.Name + } + return fmt.Sprintf("%s/%s", o.Namespace, o.Name) +} + +type ObjectKeyByGVK 
map[schema.GroupVersionKind][]ObjectKey + +type ObjectByGVK map[schema.GroupVersionKind]map[ObjectKey]runtime.Object + +func (o ObjectByGVK) Add(obj runtime.Object) (schema.GroupVersionKind, error) { + metadata, err := meta.Accessor(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + + gvk, err := gvk.Get(obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + + objs := o[gvk] + if objs == nil { + objs = map[ObjectKey]runtime.Object{} + o[gvk] = objs + } + + objs[ObjectKey{ + Namespace: metadata.GetNamespace(), + Name: metadata.GetName(), + }] = obj + + return gvk, nil +} + +type ObjectSet struct { + errs []error + objects ObjectByGVK + objectsByGK ObjectByGK + order []runtime.Object + gvkOrder []schema.GroupVersionKind + gvkSeen map[schema.GroupVersionKind]bool +} + +func NewObjectSet(objs ...runtime.Object) *ObjectSet { + os := &ObjectSet{ + objects: ObjectByGVK{}, + objectsByGK: ObjectByGK{}, + gvkSeen: map[schema.GroupVersionKind]bool{}, + } + os.Add(objs...) 
+ return os +} + +func (o *ObjectSet) ObjectsByGVK() ObjectByGVK { + if o == nil { + return nil + } + return o.objects +} + +func (o *ObjectSet) Contains(gk schema.GroupKind, key ObjectKey) bool { + _, ok := o.objectsByGK[gk][key] + return ok +} + +func (o *ObjectSet) All() []runtime.Object { + return o.order +} + +func (o *ObjectSet) Add(objs ...runtime.Object) *ObjectSet { + for _, obj := range objs { + o.add(obj) + } + return o +} + +func (o *ObjectSet) add(obj runtime.Object) { + if obj == nil || reflect.ValueOf(obj).IsNil() { + return + } + + gvk, err := o.objects.Add(obj) + if err != nil { + o.err(errors.Wrapf(err, "failed to add %T", obj)) + return + } + + _, err = o.objectsByGK.Add(obj) + if err != nil { + o.err(errors.Wrapf(err, "failed to add %T", obj)) + return + } + + o.order = append(o.order, obj) + if !o.gvkSeen[gvk] { + o.gvkSeen[gvk] = true + o.gvkOrder = append(o.gvkOrder, gvk) + } +} + +func (o *ObjectSet) err(err error) error { + o.errs = append(o.errs, err) + return o.Err() +} + +func (o *ObjectSet) AddErr(err error) { + o.errs = append(o.errs, err) +} + +func (o *ObjectSet) Err() error { + return merr.NewErrors(o.errs...) +} + +func (o *ObjectSet) Len() int { + return len(o.objects) +} + +func (o *ObjectSet) GVKs() []schema.GroupVersionKind { + return o.GVKOrder() +} + +func (o *ObjectSet) GVKOrder(known ...schema.GroupVersionKind) []schema.GroupVersionKind { + var rest []schema.GroupVersionKind + + for _, gvk := range known { + if o.gvkSeen[gvk] { + continue + } + rest = append(rest, gvk) + } + + sort.Slice(rest, func(i, j int) bool { + return rest[i].String() < rest[j].String() + }) + + return append(o.gvkOrder, rest...) 
+} + +type ObjectByGK map[schema.GroupKind]map[ObjectKey]runtime.Object + +func (o ObjectByGK) Add(obj runtime.Object) (schema.GroupKind, error) { + metadata, err := meta.Accessor(obj) + if err != nil { + return schema.GroupKind{}, err + } + + gvk, err := gvk.Get(obj) + if err != nil { + return schema.GroupKind{}, err + } + + gk := gvk.GroupKind() + + objs := o[gk] + if objs == nil { + objs = map[ObjectKey]runtime.Object{} + o[gk] = objs + } + + objs[ObjectKey{ + Namespace: metadata.GetNamespace(), + Name: metadata.GetName(), + }] = obj + + return gk, nil +} diff --git a/vendor/github.com/rancher/wrangler/pkg/patch/apply.go b/vendor/github.com/rancher/wrangler/pkg/patch/apply.go new file mode 100644 index 0000000000..da9a25d33a --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/patch/apply.go @@ -0,0 +1,57 @@ +package patch + +import ( + "encoding/json" + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +func Apply(original, patch []byte) ([]byte, error) { + style, metadata, err := GetPatchStyle(original, patch) + if err != nil { + return nil, err + } + + switch style { + case types.JSONPatchType: + return applyJSONPatch(original, patch) + case types.MergePatchType: + return applyMergePatch(original, patch) + case types.StrategicMergePatchType: + return applyStrategicMergePatch(original, patch, metadata) + default: + return nil, fmt.Errorf("invalid patch") + } +} + +func applyStrategicMergePatch(original, patch []byte, lookup strategicpatch.LookupPatchMeta) ([]byte, error) { + originalMap := map[string]interface{}{} + patchMap := map[string]interface{}{} + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, err + } + if err := json.Unmarshal(patch, &patchMap); err != nil { + return nil, err + } + patchedMap, err := strategicpatch.StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, lookup) + if err != nil { + return nil, err + } + 
return json.Marshal(patchedMap) +} + +func applyMergePatch(original, patch []byte) ([]byte, error) { + return jsonpatch.MergePatch(original, patch) +} + +func applyJSONPatch(original, patch []byte) ([]byte, error) { + jsonPatch, err := jsonpatch.DecodePatch(patch) + if err != nil { + return nil, err + } + + return jsonPatch.Apply(original) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/patch/style.go b/vendor/github.com/rancher/wrangler/pkg/patch/style.go new file mode 100644 index 0000000000..eaa8d2120a --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/patch/style.go @@ -0,0 +1,79 @@ +package patch + +import ( + "sync" + + "github.com/rancher/wrangler/pkg/gvk" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/kubernetes/scheme" +) + +var ( + patchCache = map[schema.GroupVersionKind]patchCacheEntry{} + patchCacheLock = sync.Mutex{} +) + +type patchCacheEntry struct { + patchType types.PatchType + lookup strategicpatch.LookupPatchMeta +} + +func isJSONPatch(patch []byte) bool { + // a JSON patch is a list + return len(patch) > 0 && patch[0] == '[' +} + +func GetPatchStyle(original, patch []byte) (types.PatchType, strategicpatch.LookupPatchMeta, error) { + if isJSONPatch(patch) { + return types.JSONPatchType, nil, nil + } + gvk, ok, err := gvk.Detect(original) + if err != nil { + return "", nil, err + } + if !ok { + return types.MergePatchType, nil, nil + } + return GetMergeStyle(gvk) +} + +func GetMergeStyle(gvk schema.GroupVersionKind) (types.PatchType, strategicpatch.LookupPatchMeta, error) { + var ( + patchType types.PatchType + lookupPatchMeta strategicpatch.LookupPatchMeta + ) + + patchCacheLock.Lock() + entry, ok := patchCache[gvk] + patchCacheLock.Unlock() + + if ok { + return entry.patchType, entry.lookup, nil + } + + versionedObject, err := scheme.Scheme.New(gvk) + + if runtime.IsNotRegisteredError(err) { + 
patchType = types.MergePatchType + } else if err != nil { + return patchType, nil, err + } else { + patchType = types.StrategicMergePatchType + lookupPatchMeta, err = strategicpatch.NewPatchMetaFromStruct(versionedObject) + if err != nil { + return patchType, nil, err + } + } + + patchCacheLock.Lock() + patchCache[gvk] = patchCacheEntry{ + patchType: patchType, + lookup: lookupPatchMeta, + } + patchCacheLock.Unlock() + + return patchType, lookupPatchMeta, nil +} diff --git a/vendor/github.com/rancher/wrangler/pkg/schemes/all.go b/vendor/github.com/rancher/wrangler/pkg/schemes/all.go index 8e64b5ca9f..0b87075c3f 100644 --- a/vendor/github.com/rancher/wrangler/pkg/schemes/all.go +++ b/vendor/github.com/rancher/wrangler/pkg/schemes/all.go @@ -1,7 +1,20 @@ package schemes import ( + "github.com/rancher/lasso/pkg/scheme" "k8s.io/apimachinery/pkg/runtime" ) -var All = runtime.NewScheme() +var ( + All = scheme.All + localSchemeBuilder = runtime.NewSchemeBuilder() +) + +func Register(addToScheme func(*runtime.Scheme) error) error { + localSchemeBuilder = append(localSchemeBuilder, addToScheme) + return addToScheme(All) +} + +func AddToScheme(scheme *runtime.Scheme) error { + return localSchemeBuilder.AddToScheme(scheme) +} diff --git a/vendor/github.com/rancher/wrangler/pkg/summary/cattletypes.go b/vendor/github.com/rancher/wrangler/pkg/summary/cattletypes.go new file mode 100644 index 0000000000..8dbc70fef9 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/summary/cattletypes.go @@ -0,0 +1,43 @@ +package summary + +import ( + "strings" + + "github.com/rancher/wrangler/pkg/data" +) + +func checkCattleReady(obj data.Object, condition []Condition, summary Summary) Summary { + if strings.Contains(obj.String("apiVersion"), "cattle.io/") { + for _, condition := range condition { + if condition.Type() == "Ready" && condition.Status() == "False" && condition.Message() != "" { + summary.Message = append(summary.Message, condition.Message()) + summary.Error = true + return 
summary + } + } + } + + return summary +} + +func checkCattleTypes(obj data.Object, condition []Condition, summary Summary) Summary { + return checkRelease(obj, condition, summary) +} + +func checkRelease(obj data.Object, _ []Condition, summary Summary) Summary { + if !isKind(obj, "App", "catalog.cattle.io") { + return summary + } + if obj.String("status", "summary", "state") != "deployed" { + return summary + } + for _, resources := range obj.Slice("spec", "resources") { + summary.Relationships = append(summary.Relationships, Relationship{ + Name: resources.String("name"), + Kind: resources.String("kind"), + APIVersion: resources.String("apiVersion"), + Type: "helmresource", + }) + } + return summary +} diff --git a/vendor/github.com/rancher/wrangler/pkg/summary/condition.go b/vendor/github.com/rancher/wrangler/pkg/summary/condition.go index bc1be5f3c8..c356cb5e2f 100644 --- a/vendor/github.com/rancher/wrangler/pkg/summary/condition.go +++ b/vendor/github.com/rancher/wrangler/pkg/summary/condition.go @@ -3,6 +3,9 @@ package summary import ( "encoding/json" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "github.com/rancher/wrangler/pkg/data" ) @@ -43,3 +46,35 @@ func (c Condition) Reason() string { func (c Condition) Message() string { return c.d.String("message") } + +func NormalizeConditions(runtimeObj runtime.Object) { + var ( + obj data.Object + newConditions []map[string]interface{} + ) + + unstr, ok := runtimeObj.(*unstructured.Unstructured) + if !ok { + return + } + + obj = unstr.Object + for _, condition := range obj.Slice("status", "conditions") { + var summary Summary + for _, summarizer := range ConditionSummarizers { + summary = summarizer(obj, []Condition{{d: condition}}, summary) + } + condition.Set("error", summary.Error) + condition.Set("transitioning", summary.Transitioning) + + if condition.String("lastUpdateTime") == "" { + condition.Set("lastUpdateTime", condition.String("lastTransitionTime")) + } + 
newConditions = append(newConditions, condition) + } + + if len(newConditions) > 0 { + obj.SetNested(newConditions, "status", "conditions") + } + +} diff --git a/vendor/github.com/rancher/wrangler/pkg/summary/coretypes.go b/vendor/github.com/rancher/wrangler/pkg/summary/coretypes.go new file mode 100644 index 0000000000..da5ec29c78 --- /dev/null +++ b/vendor/github.com/rancher/wrangler/pkg/summary/coretypes.go @@ -0,0 +1,210 @@ +package summary + +import ( + "github.com/rancher/wrangler/pkg/data" + "github.com/rancher/wrangler/pkg/data/convert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func checkHasPodTemplate(obj data.Object, condition []Condition, summary Summary) Summary { + template := obj.Map("spec", "template") + if template == nil { + return summary + } + + if !isKind(obj, "ReplicaSet", "apps/", "extension/") && + !isKind(obj, "DaemonSet", "apps/", "extension/") && + !isKind(obj, "StatefulSet", "apps/", "extension/") && + !isKind(obj, "Deployment", "apps/", "extension/") && + !isKind(obj, "Job", "batch/") && + !isKind(obj, "Service") { + return summary + } + + return checkPodTemplate(template, condition, summary) +} + +func checkHasPodSelector(obj data.Object, condition []Condition, summary Summary) Summary { + selector := obj.Map("spec", "selector") + if selector == nil { + return summary + } + + if !isKind(obj, "ReplicaSet", "apps/", "extension/") && + !isKind(obj, "DaemonSet", "apps/", "extension/") && + !isKind(obj, "StatefulSet", "apps/", "extension/") && + !isKind(obj, "Deployment", "apps/", "extension/") && + !isKind(obj, "Job", "batch/") && + !isKind(obj, "Service") { + return summary + } + + _, hasMatch := selector["matchLabels"] + if !hasMatch { + _, hasMatch = selector["matchExpressions"] + } + sel := metav1.LabelSelector{} + if hasMatch { + if err := convert.ToObj(selector, &sel); err != nil { + return summary + } + } else { + sel.MatchLabels = map[string]string{} + for k, v := range selector { + sel.MatchLabels[k] = 
convert.ToString(v) + } + } + + t := "creates" + if obj["kind"] == "Service" { + t = "selects" + } + + summary.Relationships = append(summary.Relationships, Relationship{ + Kind: "Pod", + APIVersion: "v1", + Type: t, + Selector: &sel, + }) + return summary +} + +func checkPod(obj data.Object, condition []Condition, summary Summary) Summary { + if !isKind(obj, "Pod") { + return summary + } + if obj.String("kind") != "Pod" || obj.String("apiVersion") != "v1" { + return summary + } + return checkPodTemplate(obj, condition, summary) +} + +func checkPodTemplate(obj data.Object, condition []Condition, summary Summary) Summary { + summary = checkPodConfigMaps(obj, condition, summary) + summary = checkPodSecrets(obj, condition, summary) + summary = checkPodServiceAccount(obj, condition, summary) + summary = checkPodProjectedVolume(obj, condition, summary) + summary = checkPodPullSecret(obj, condition, summary) + return summary +} + +func checkPodPullSecret(obj data.Object, _ []Condition, summary Summary) Summary { + for _, pullSecret := range obj.Slice("imagePullSecrets") { + if name := pullSecret.String("name"); name != "" { + summary.Relationships = append(summary.Relationships, Relationship{ + Name: name, + Kind: "Secret", + APIVersion: "v1", + Type: "uses", + }) + } + } + return summary +} + +func checkPodProjectedVolume(obj data.Object, _ []Condition, summary Summary) Summary { + for _, vol := range obj.Slice("spec", "volumes") { + for _, source := range vol.Slice("projected", "sources") { + if secretName := source.String("secret", "name"); secretName != "" { + summary.Relationships = append(summary.Relationships, Relationship{ + Name: secretName, + Kind: "Secret", + APIVersion: "v1", + Type: "uses", + }) + } + if configMap := source.String("configMap", "name"); configMap != "" { + summary.Relationships = append(summary.Relationships, Relationship{ + Name: configMap, + Kind: "Secret", + APIVersion: "v1", + Type: "uses", + }) + } + } + } + return summary +} + +func 
addEnvRef(summary Summary, names map[string]bool, obj data.Object, fieldPrefix, kind string) Summary { + for _, container := range obj.Slice("spec", "containers") { + for _, env := range container.Slice("envFrom") { + name := env.String(fieldPrefix+"Ref", "name") + if name == "" || names[name] { + continue + } + names[name] = true + summary.Relationships = append(summary.Relationships, Relationship{ + Name: name, + Kind: kind, + APIVersion: "v1", + Type: "uses", + }) + } + for _, env := range container.Slice("env") { + name := env.String("valueFrom", fieldPrefix+"KeyRef", "name") + if name == "" || names[name] { + continue + } + names[name] = true + summary.Relationships = append(summary.Relationships, Relationship{ + Name: name, + Kind: kind, + APIVersion: "v1", + Type: "uses", + }) + } + } + + return summary +} + +func checkPodConfigMaps(obj data.Object, _ []Condition, summary Summary) Summary { + names := map[string]bool{} + for _, vol := range obj.Slice("spec", "volumes") { + name := vol.String("configMap", "name") + if name == "" || names[name] { + continue + } + names[name] = true + summary.Relationships = append(summary.Relationships, Relationship{ + Name: name, + Kind: "ConfigMap", + APIVersion: "v1", + Type: "uses", + }) + } + summary = addEnvRef(summary, names, obj, "configMap", "ConfigMap") + return summary +} + +func checkPodSecrets(obj data.Object, _ []Condition, summary Summary) Summary { + names := map[string]bool{} + for _, vol := range obj.Slice("spec", "volumes") { + name := vol.String("secret", "secretName") + if name == "" || names[name] { + continue + } + names[name] = true + summary.Relationships = append(summary.Relationships, Relationship{ + Name: name, + Kind: "Secret", + APIVersion: "v1", + Type: "uses", + }) + } + summary = addEnvRef(summary, names, obj, "secret", "Secret") + return summary +} + +func checkPodServiceAccount(obj data.Object, _ []Condition, summary Summary) Summary { + saName := obj.String("spec", "serviceAccountName") + 
summary.Relationships = append(summary.Relationships, Relationship{ + Name: saName, + Kind: "ServiceAccount", + APIVersion: "v1", + Type: "uses", + }) + return summary + +} diff --git a/vendor/github.com/rancher/wrangler/pkg/summary/summarizers.go b/vendor/github.com/rancher/wrangler/pkg/summary/summarizers.go index 58cf23853f..0e0048e96f 100644 --- a/vendor/github.com/rancher/wrangler/pkg/summary/summarizers.go +++ b/vendor/github.com/rancher/wrangler/pkg/summary/summarizers.go @@ -7,6 +7,12 @@ import ( "github.com/rancher/wrangler/pkg/data" "github.com/rancher/wrangler/pkg/data/convert" "github.com/rancher/wrangler/pkg/kv" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" +) + +const ( + kindSep = ", Kind=" ) var ( @@ -64,6 +70,7 @@ var ( "KernelHasNoDeadlock": true, "Unschedulable": true, "ReplicaFailure": true, + "Stalled": true, } // True == @@ -84,21 +91,128 @@ var ( "Progressing": "inactive", } - Summarizers []Summarizer + // True == + // False == transitioning + // Unknown == error + TransitioningTrue = map[string]string{ + "Reconciling": "reconciling", + } + + Summarizers []Summarizer + ConditionSummarizers []Summarizer ) type Summarizer func(obj data.Object, conditions []Condition, summary Summary) Summary func init() { + ConditionSummarizers = []Summarizer{ + checkErrors, + checkTransitioning, + checkRemoving, + checkCattleReady, + } + Summarizers = []Summarizer{ + checkStatusSummary, checkErrors, checkTransitioning, checkActive, checkPhase, checkInitializing, checkRemoving, + checkStandard, checkLoadBalancer, + checkPod, + checkHasPodSelector, + checkHasPodTemplate, + checkOwner, + checkApplyOwned, + checkCattleTypes, + } +} + +func checkOwner(obj data.Object, conditions []Condition, summary Summary) Summary { + ustr := &unstructured.Unstructured{ + Object: obj, + } + for _, ownerref := range ustr.GetOwnerReferences() { + rel := Relationship{ + Name: ownerref.Name, + Kind: ownerref.Kind, + 
APIVersion: ownerref.APIVersion, + Type: "owner", + Inbound: true, + } + if ownerref.Controller != nil && *ownerref.Controller { + rel.ControlledBy = true + } + + summary.Relationships = append(summary.Relationships, rel) + } + + return summary +} + +func checkStatusSummary(obj data.Object, _ []Condition, summary Summary) Summary { + summaryObj := obj.Map("status", "display") + if len(summaryObj) == 0 { + summaryObj = obj.Map("status", "summary") + if len(summaryObj) == 0 { + return summary + } + } + obj = summaryObj + + if _, ok := obj["state"]; ok { + summary.State = obj.String("state") + } + if _, ok := obj["transitioning"]; ok { + summary.Transitioning = obj.Bool("transitioning") + } + if _, ok := obj["error"]; ok { + summary.Error = obj.Bool("error") + } + if _, ok := obj["message"]; ok { + summary.Message = append(summary.Message, obj.String("message")) + } + + return summary +} + +func checkStandard(obj data.Object, _ []Condition, summary Summary) Summary { + if summary.State != "" { + return summary + } + + // this is a hack to not call the standard summarizers on norman mapped objects + if strings.HasPrefix(obj.String("type"), "/") { + return summary + } + + result, err := kstatus.Compute(&unstructured.Unstructured{Object: obj}) + if err != nil { + return summary + } + + switch result.Status { + case kstatus.InProgressStatus: + summary.State = "in-progress" + summary.Message = append(summary.Message, result.Message) + summary.Transitioning = true + case kstatus.FailedStatus: + summary.State = "failed" + summary.Message = append(summary.Message, result.Message) + summary.Error = true + case kstatus.CurrentStatus: + summary.State = "active" + summary.Message = append(summary.Message, result.Message) + case kstatus.TerminatingStatus: + summary.State = "removing" + summary.Message = append(summary.Message, result.Message) + summary.Transitioning = true } + + return summary } func checkErrors(_ data.Object, conditions []Condition, summary Summary) Summary { @@ 
-106,6 +220,9 @@ func checkErrors(_ data.Object, conditions []Condition, summary Summary) Summary if (ErrorFalse[c.Type()] && c.Status() == "False") || c.Reason() == "Error" { summary.Error = true summary.Message = append(summary.Message, c.Message()) + if summary.State == "active" || summary.State == "" { + summary.State = "error" + } break } } @@ -160,6 +277,21 @@ func checkTransitioning(_ data.Object, conditions []Condition, summary Summary) } } + for _, c := range conditions { + if summary.State != "" { + break + } + newState, ok := TransitioningTrue[c.Type()] + if !ok { + continue + } + if c.Status() == "True" { + summary.Transitioning = true + summary.State = newState + summary.Message = append(summary.Message, c.Message()) + } + } + return summary } @@ -246,7 +378,8 @@ func checkRemoving(obj data.Object, conditions []Condition, summary Summary) Sum func checkLoadBalancer(obj data.Object, _ []Condition, summary Summary) Summary { if (summary.State == "active" || summary.State == "") && obj.String("kind") == "Service" && - obj.String("spec", "serviceKind") == "LoadBalancer" { + (obj.String("spec", "serviceKind") == "LoadBalancer" || + obj.String("spec", "type") == "LoadBalancer") { addresses := obj.Slice("status", "loadBalancer", "ingress") if len(addresses) == 0 { summary.State = "pending" @@ -257,3 +390,68 @@ func checkLoadBalancer(obj data.Object, _ []Condition, summary Summary) Summary return summary } + +func isKind(obj data.Object, kind string, apiGroups ...string) bool { + if obj.String("kind") != kind { + return false + } + + if len(apiGroups) == 0 { + return obj.String("apiVersion") == "v1" + } + + if len(apiGroups) == 0 { + apiGroups = []string{""} + } + + for _, group := range apiGroups { + switch { + case group == "": + if obj.String("apiVersion") == "v1" { + return true + } + case group[len(group)-1] == '/': + if strings.HasPrefix(obj.String("apiVersion"), group) { + return true + } + default: + if obj.String("apiVersion") != group { + return true 
+ } + } + } + + return false +} + +func checkApplyOwned(obj data.Object, conditions []Condition, summary Summary) Summary { + if len(obj.Slice("metadata", "ownerReferences")) > 0 { + return summary + } + + annotations := obj.Map("metadata", "annotations") + gvkString := convert.ToString(annotations["objectset.rio.cattle.io/owner-gvk"]) + i := strings.Index(gvkString, kindSep) + if i <= 0 { + return summary + } + + name := convert.ToString(annotations["objectset.rio.cattle.io/owner-name"]) + namespace := convert.ToString(annotations["objectset.rio.cattle.io/owner-namespace"]) + + apiVersion := gvkString[:i] + kind := gvkString[i+len(kindSep):] + + rel := Relationship{ + Name: name, + Namespace: namespace, + Kind: kind, + APIVersion: apiVersion, + Type: "applies", + Inbound: true, + } + + summary.Relationships = append(summary.Relationships, rel) + + return summary +} diff --git a/vendor/github.com/rancher/wrangler/pkg/summary/summary.go b/vendor/github.com/rancher/wrangler/pkg/summary/summary.go index 6f6ea148b0..72f1dff475 100644 --- a/vendor/github.com/rancher/wrangler/pkg/summary/summary.go +++ b/vendor/github.com/rancher/wrangler/pkg/summary/summary.go @@ -3,17 +3,30 @@ package summary import ( "strings" - "k8s.io/apimachinery/pkg/runtime" - "github.com/rancher/wrangler/pkg/data" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" ) type Summary struct { - State string - Error bool - Transitioning bool - Message []string + State string `json:"state,omitempty"` + Error bool `json:"error,omitempty"` + Transitioning bool `json:"transitioning,omitempty"` + Message []string `json:"message,omitempty"` + Attributes map[string]interface{} `json:"-"` + Relationships []Relationship `json:"-"` +} + +type Relationship struct { + Name string + Namespace string + ControlledBy bool + Kind string + APIVersion string + Inbound bool + Type string + Selector *metav1.LabelSelector } func (s Summary) 
String() string { @@ -34,7 +47,7 @@ func (s Summary) String() string { msg += "]" } if len(s.Message) > 0 { - msg = msg + " " + s.Message[0] + msg = msg + " " + strings.Join(s.Message, ", ") } return msg } @@ -61,6 +74,10 @@ func dedupMessage(messages []string) []string { var result []string for _, message := range messages { + message = strings.TrimSpace(message) + if message == "" { + continue + } if seen[message] { continue } diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 1f7e87e672..d2c2308f1f 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -86,6 +86,7 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, } } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index bf125b62a7..e7aaead8c3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -873,6 +873,9 @@ const ( // FieldManagerConflict is used to report when another client claims to manage this field, // It should only be returned for a request using server-side apply. CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict" + // CauseTypeResourceVersionTooLarge is used to report that the requested resource version + // is newer than the data observed by the API server, so the request cannot be served. 
+ CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 0e2e301754..204834883f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error { // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertSliceNumbers(*v, 0) + case *interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertInterfaceNumbers(v, 0) + default: return json.Unmarshal(data, v) } } +func convertInterfaceNumbers(v *interface{}, depth int) error { + var err error + switch v2 := (*v).(type) { + case json.Number: + *v, err = convertNumber(v2) + case map[string]interface{}: + err = convertMapNumbers(v2, depth+1) + case []interface{}: + err = convertSliceNumbers(v2, depth+1) + } + return err +} + // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited func convertMapNumbers(m map[string]interface{}, depth int) error { diff --git a/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go new file mode 100644 index 0000000000..e56e177341 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/jsonmergepatch/patch.go @@ -0,0 +1,160 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonmergepatch + +import ( + "fmt" + "reflect" + + "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// Create a 3-way merge patch based-on JSON merge patch. +// Calculate addition-and-change patch between current and modified. +// Calculate deletion patch between original and modified. +func CreateThreeWayJSONMergePatch(original, modified, current []byte, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + if len(original) == 0 { + original = []byte(`{}`) + } + if len(modified) == 0 { + modified = []byte(`{}`) + } + if len(current) == 0 { + current = []byte(`{}`) + } + + addAndChangePatch, err := jsonpatch.CreateMergePatch(current, modified) + if err != nil { + return nil, err + } + // Only keep addition and changes + addAndChangePatch, addAndChangePatchObj, err := keepOrDeleteNullInJsonPatch(addAndChangePatch, false) + if err != nil { + return nil, err + } + + deletePatch, err := jsonpatch.CreateMergePatch(original, modified) + if err != nil { + return nil, err + } + // Only keep deletion + deletePatch, deletePatchObj, err := keepOrDeleteNullInJsonPatch(deletePatch, true) + if err != nil { + return nil, err + } + + hasConflicts, err := mergepatch.HasConflicts(addAndChangePatchObj, deletePatchObj) + if err != nil { + return nil, err + } + if hasConflicts { + return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(addAndChangePatchObj), 
mergepatch.ToYAMLOrError(deletePatchObj)) + } + patch, err := jsonpatch.MergePatch(deletePatch, addAndChangePatch) + if err != nil { + return nil, err + } + + var patchMap map[string]interface{} + err = json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, fmt.Errorf("Failed to unmarshal patch for precondition check: %s", patch) + } + meetPreconditions, err := meetPreconditions(patchMap, fns...) + if err != nil { + return nil, err + } + if !meetPreconditions { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + + return patch, nil +} + +// keepOrDeleteNullInJsonPatch takes a json-encoded byte array and a boolean. +// It returns a filtered object and its corresponding json-encoded byte array. +// It is a wrapper of func keepOrDeleteNullInObj +func keepOrDeleteNullInJsonPatch(patch []byte, keepNull bool) ([]byte, map[string]interface{}, error) { + var patchMap map[string]interface{} + err := json.Unmarshal(patch, &patchMap) + if err != nil { + return nil, nil, err + } + filteredMap, err := keepOrDeleteNullInObj(patchMap, keepNull) + if err != nil { + return nil, nil, err + } + o, err := json.Marshal(filteredMap) + return o, filteredMap, err +} + +// keepOrDeleteNullInObj will keep only the null value and delete all the others, +// if keepNull is true. Otherwise, it will delete all the null value and keep the others. 
+func keepOrDeleteNullInObj(m map[string]interface{}, keepNull bool) (map[string]interface{}, error) { + filteredMap := make(map[string]interface{}) + var err error + for key, val := range m { + switch { + case keepNull && val == nil: + filteredMap[key] = nil + case val != nil: + switch typedVal := val.(type) { + case map[string]interface{}: + // Explicitly-set empty maps are treated as values instead of empty patches + if len(typedVal) == 0 { + if !keepNull { + filteredMap[key] = typedVal + } + continue + } + + var filteredSubMap map[string]interface{} + filteredSubMap, err = keepOrDeleteNullInObj(typedVal, keepNull) + if err != nil { + return nil, err + } + + // If the returned filtered submap was empty, this is an empty patch for the entire subdict, so the key + // should not be set + if len(filteredSubMap) != 0 { + filteredMap[key] = filteredSubMap + } + + case []interface{}, string, float64, bool, int64, nil: + // Lists are always replaced in Json, no need to check each entry in the list. + if !keepNull { + filteredMap[key] = val + } + default: + return nil, fmt.Errorf("unknown type: %v", reflect.TypeOf(typedVal)) + } + } + } + return filteredMap, nil +} + +func meetPreconditions(patchObj map[string]interface{}, fns ...mergepatch.PreconditionFunc) (bool, error) { + // Apply the preconditions to the patch, and return an error if any of them fail. 
+ for _, fn := range fns { + if !fn(patchObj) { + return false, fmt.Errorf("precondition failed for: %v", patchObj) + } + } + return true, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 0000000000..3f72c69ba3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go new file mode 100644 index 0000000000..16501d5afe --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + ErrBadJSONDoc = errors.New("invalid JSON document") + ErrNoListOfLists = errors.New("lists of lists are not supported") + ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") + ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") + ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") + ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") +) + +func ErrNoMergeKey(m map[string]interface{}, k string) error { + return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) +} + +func ErrBadArgType(expected, actual interface{}) error { + return fmt.Errorf("expected a %s, but received a %s", + reflect.TypeOf(expected), + reflect.TypeOf(actual)) +} + +func ErrBadArgKind(expected, actual interface{}) error { + var expectedKindString, actualKindString string + if expected == nil { + expectedKindString = "nil" + } else { + expectedKindString = reflect.TypeOf(expected).Kind().String() + } + if actual == nil { + actualKindString = "nil" + } else { + actualKindString = reflect.TypeOf(actual).Kind().String() + } + return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) +} + +func ErrBadPatchType(t interface{}, m map[string]interface{}) error { + return fmt.Errorf("unknown patch type: %s in map: %v", t, m) +} + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. 
+func IsPreconditionFailed(err error) bool { + _, ok := err.(ErrPreconditionFailed) + return ok +} + +type ErrPreconditionFailed struct { + message string +} + +func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return ErrPreconditionFailed{s} +} + +func (err ErrPreconditionFailed) Error() string { + return err.message +} + +type ErrConflict struct { + message string +} + +func NewErrConflict(patch, current string) ErrConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return ErrConflict{s} +} + +func (err ErrConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(ErrConflict) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go new file mode 100644 index 0000000000..990fa0d43a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mergepatch + +import ( + "fmt" + "reflect" + + "github.com/davecgh/go-spew/spew" + "sigs.k8s.io/yaml" +) + +// PreconditionFunc asserts that an incompatible change is not present within a patch. 
+type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// RequireMetadataKeyUnchanged creates a precondition function that fails +// if the metadata.key is present in the patch (indicating its value +// has changed). +func RequireMetadataKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + patchMap1, ok := patchMap["metadata"] + if !ok { + return true + } + patchMap2, ok := patchMap1.(map[string]interface{}) + if !ok { + return true + } + _, ok = patchMap2[key] + return !ok + } +} + +func ToYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +// +// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). 
+func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + continue + } + if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 0ba586bfe5..7b64e68157 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string { return result } +// IsTimeout returns true if the given error is a network timeout error +func IsTimeout(err error) bool { + neterr, ok := err.(net.Error) + return ok && neterr != nil && neterr.Timeout() +} + // IsProbableEOF returns true if the given error resembles a connection termination // scenario that would justify assuming that the watch is empty. // These errors are what the Go http stack returns back to us which are general @@ -440,7 +446,7 @@ redirectLoop: // Only follow redirects to the same host. Otherwise, propagate the redirect response back. 
if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { - break redirectLoop + return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) } // Reset the connection. diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 0000000000..cfee199fa0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +- mengqiy +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 0000000000..ab66d04523 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 0000000000..c31de15e7a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. + LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. 
+ LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is an pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type. 
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: 
sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go new file mode 100644 index 0000000000..c55894e502 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -0,0 +1,2174 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. +// +// For more information, see the PATCH section of docs/devel/api-conventions.md. +// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. 
+ +const ( + directiveMarker = "$patch" + deleteDirective = "delete" + replaceDirective = "replace" + mergeDirective = "merge" + + retainKeysStrategy = "retainKeys" + + deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" + retainKeysDirective = "$" + retainKeysStrategy + setElementOrderDirectivePrefix = "$setElementOrder" +) + +// JSONMap is a representations of JSON object encoded as map[string]interface{} +// where the children can be either map[string]interface{}, []interface{} or +// primitive type). +// Operating on JSONMap representation is much faster as it doesn't require any +// json marshaling and/or unmarshaling operations. +type JSONMap map[string]interface{} + +type DiffOptions struct { + // SetElementOrder determines whether we generate the $setElementOrder parallel list. + SetElementOrder bool + // IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch. + IgnoreChangesAndAdditions bool + // IgnoreDeletions indicates if we keep the deletions in the patch. + IgnoreDeletions bool + // We introduce a new value retainKeys for patchStrategy. + // It indicates that all fields needing to be preserved must be + // present in the `retainKeys` list. + // And the fields that are present will be merged with live object. + // All the missing fields will be cleared when patching. + BuildRetainKeysDirective bool +} + +type MergeOptions struct { + // MergeParallelList indicates if we are merging the parallel list. + // We don't merge parallel list when calling mergeMap() in CreateThreeWayMergePatch() + // which is called client-side. + // We merge parallel list iff when calling mergeMap() in StrategicMergeMapPatch() + // which is called server-side + MergeParallelList bool + // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls. + IgnoreUnmatchedNulls bool +} + +// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. 
+// Instead of defining a Delta that holds an original, a patch and a set of preconditions, +// the reconcile method accepts a set of preconditions as an argument. + +// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original +// document and a modified document, which are passed to the method as json encoded content. It will +// return a patch that yields the modified document when applied to the original document, or an error +// if either of the two documents is invalid. +func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) + if err != nil { + return nil, err + } + + return json.Marshal(patchMap) +} + +// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects, +// encoded JSONMap. +// The serialized version of the map can then be passed to StrategicMergeMapPatch. 
+func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + diffOptions := DiffOptions{ + SetElementOrder: true, + } + patchMap, err := diffMaps(original, modified, schema, diffOptions) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + return patchMap, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original. +// Including: +// - Adding fields to the patch present in modified, missing from original +// - Setting fields to the patch present in modified and original with different values +// - Delete fields present in original, missing from modified through +// - IFF map field - set to nil in patch +// - IFF list of maps && merge strategy - use deleteDirective for the elements +// - IFF list of primitives && merge strategy - use parallel deletion list +// - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified +// - Build $retainKeys directive for fields with retainKeys patch strategy +func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { + patch := map[string]interface{}{} + + // This will be used to build the $retainKeys directive sent in the patch + retainKeysList := make([]interface{}, 0, len(modified)) + + // Compare each value in the modified map against the value in 
the original map + for key, modifiedValue := range modified { + // Get the underlying type for pointers + if diffOptions.BuildRetainKeysDirective && modifiedValue != nil { + retainKeysList = append(retainKeysList, key) + } + + originalValue, ok := original[key] + if !ok { + // Key was added, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // The patch may have a patch directive + // TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it? + foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch) + if err != nil { + return nil, err + } + if foundDirectiveMarker { + continue + } + + if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { + // Types have changed, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // Types are the same, so compare values + switch originalValueTyped := originalValue.(type) { + case map[string]interface{}: + modifiedValueTyped := modifiedValue.(map[string]interface{}) + err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + case []interface{}: + modifiedValueTyped := modifiedValue.([]interface{}) + err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + default: + replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) + } + if err != nil { + return nil, err + } + } + + updatePatchIfMissing(original, modified, patch, diffOptions) + // Insert the retainKeysList iff there are values present in the retainKeysList and + // either of the following is true: + // - the patch is not empty + // - there are additional field in original that need to be cleared + if len(retainKeysList) > 0 && + (len(patch) > 0 || hasAdditionalNewField(original, modified)) { + patch[retainKeysDirective] = 
sortScalars(retainKeysList) + } + return patch, nil +} + +// handleDirectiveMarker handles how to diff directive marker between 2 objects +func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) { + if key == directiveMarker { + originalString, ok := originalValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + modifiedString, ok := modifiedValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + if modifiedString != originalString { + patch[directiveMarker] = modifiedValue + } + return true, nil + } + return false, nil +} + +// handleMapDiff diff between 2 maps `originalValueTyped` and `modifiedValue`, +// puts the diff in the `patch` associated with `key` +// key is the key associated with originalValue and modifiedValue. +// originalValue, modifiedValue are the old and new value respectively.They are both maps +// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue +// diffOptions contains multiple options to control how we do the diff. 
+func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) + + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + return nil + } + // Otherwise, return the error + return err + } + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + diffOptions.BuildRetainKeysDirective = retainKeys + switch patchStrategy { + // The patch strategic from metadata tells us to replace the entire object instead of diffing it + case replaceDirective: + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + default: + patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) + if err != nil { + return err + } + // Maps were not identical, use provided patch value + if len(patchValue) > 0 { + patch[key] = patchValue + } + } + return nil +} + +// handleSliceDiff diff between 2 slices `originalValueTyped` and `modifiedValue`, +// puts the diff in the `patch` associated with `key` +// key is the key associated with originalValue and modifiedValue. +// originalValue, modifiedValue are the old and new value respectively.They are both slices +// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue +// diffOptions contains multiple options to control how we do the diff. 
+func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) + if err != nil { + // We couldn't look up metadata for the field + // If the values are identical, this doesn't matter, no patch is needed + if reflect.DeepEqual(originalValue, modifiedValue) { + return nil + } + // Otherwise, return the error + return err + } + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + switch patchStrategy { + // Merge the 2 slices using mergePatchKey + case mergeDirective: + diffOptions.BuildRetainKeysDirective = retainKeys + addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) + if err != nil { + return err + } + if len(addList) > 0 { + patch[key] = addList + } + // generate a parallel list for deletion + if len(deletionList) > 0 { + parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key) + patch[parallelDeletionListKey] = deletionList + } + if len(setOrderList) > 0 { + parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key) + patch[parallelSetOrderListKey] = setOrderList + } + default: + replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) + } + return nil +} + +// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal +// if diffOptions.IgnoreChangesAndAdditions is false. 
+// original is the old value, maybe either the live cluster object or the last applied configuration +// modified is the new value, is always the users new config +func replacePatchFieldIfNotEqual(key string, original, modified interface{}, + patch map[string]interface{}, diffOptions DiffOptions) { + if diffOptions.IgnoreChangesAndAdditions { + // Ignoring changes - do nothing + return + } + if reflect.DeepEqual(original, modified) { + // Contents are identical - do nothing + return + } + // Create a patch to replace the old value with the new one + patch[key] = modified +} + +// updatePatchIfMissing iterates over `original` when ignoreDeletions is false. +// Clear the field whose key is not present in `modified`. +// original is the old value, maybe either the live cluster object or the last applied configuration +// modified is the new value, is always the users new config +func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) { + if diffOptions.IgnoreDeletions { + // Ignoring deletion - do nothing + return + } + // Add nils for deleted values + for key := range original { + if _, found := modified[key]; !found { + patch[key] = nil + } + } +} + +// validateMergeKeyInLists checks if each map in the list has the mentryerge key. +func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error { + for _, list := range lists { + for _, item := range list { + m, ok := item.(map[string]interface{}) + if !ok { + return mergepatch.ErrBadArgType(m, item) + } + if _, ok = m[mergeKey]; !ok { + return mergepatch.ErrNoMergeKey(m, mergeKey) + } + } + } + return nil +} + +// normalizeElementOrder sort `patch` list by `patchOrder` and sort `serverOnly` list by `serverOrder`. +// Then it merges the 2 sorted lists. +// It guarantee the relative order in the patch list and in the serverOnly list is kept. +// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object. 
+// `patchOrder` is the order we want `patch` list to have and +// `serverOrder` is the order we want `serverOnly` list to have. +// kind is the kind of each item in the lists `patch` and `serverOnly`. +func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind) + if err != nil { + return nil, err + } + serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind) + if err != nil { + return nil, err + } + all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind) + + return all, nil +} + +// mergeSortedSlice merges the 2 sorted lists by serverOrder with best effort. +// It will insert each item in `left` list to `right` list. In most cases, the 2 lists will be interleaved. +// The relative order of left and right are guaranteed to be kept. +// They have higher precedence than the order in the live list. +// The place for a item in `left` is found by: +// scan from the place of last insertion in `right` to the end of `right`, +// the place is before the first item that is greater than the item we want to insert. +// example usage: using server-only items as left and patch items as right. We insert server-only items +// to patch list. We use the order of live object as record for comparison. +func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} { + // Returns if l is less than r, and if both have been found. + // If l and r both present and l is in front of r, l is less than r. + less := func(l, r interface{}) (bool, bool) { + li := index(serverOrder, l, mergeKey, kind) + ri := index(serverOrder, r, mergeKey, kind) + if li >= 0 && ri >= 0 { + return li < ri, true + } else { + return false, false + } + } + + // left and right should be non-overlapping. 
+ size := len(left) + len(right) + i, j := 0, 0 + s := make([]interface{}, size, size) + + for k := 0; k < size; k++ { + if i >= len(left) && j < len(right) { + // have items left in `right` list + s[k] = right[j] + j++ + } else if j >= len(right) && i < len(left) { + // have items left in `left` list + s[k] = left[i] + i++ + } else { + // compare them if i and j are both in bound + less, foundBoth := less(left[i], right[j]) + if foundBoth && less { + s[k] = left[i] + i++ + } else { + s[k] = right[j] + j++ + } + } + } + return s +} + +// index returns the index of the item in the given items, or -1 if it doesn't exist +// l must NOT be a slice of slices, this should be checked before calling. +func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int { + var getValFn func(interface{}) interface{} + // Get the correct `getValFn` based on item `kind`. + // It should return the value of merge key for maps and + // return the item for other kinds. + switch kind { + case reflect.Map: + getValFn = func(item interface{}) interface{} { + typedItem, ok := item.(map[string]interface{}) + if !ok { + return nil + } + val := typedItem[mergeKey] + return val + } + default: + getValFn = func(item interface{}) interface{} { + return item + } + } + + for i, v := range l { + if getValFn(valToLookUp) == getValFn(v) { + return i + } + } + return -1 +} + +// extractToDeleteItems takes a list and +// returns 2 lists: one contains items that should be kept and the other contains items to be deleted. 
+func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) { + var nonDelete, toDelete []interface{} + for _, v := range l { + m, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, v) + } + + directive, foundDirective := m[directiveMarker] + if foundDirective && directive == deleteDirective { + toDelete = append(toDelete, v) + } else { + nonDelete = append(nonDelete, v) + } + } + return nonDelete, toDelete, nil +} + +// normalizeSliceOrder sort `toSort` list by `order` +func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + var toDelete []interface{} + if kind == reflect.Map { + // make sure each item in toSort, order has merge key + err := validateMergeKeyInLists(mergeKey, toSort, order) + if err != nil { + return nil, err + } + toSort, toDelete, err = extractToDeleteItems(toSort) + if err != nil { + return nil, err + } + } + + sort.SliceStable(toSort, func(i, j int) bool { + if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 { + if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 { + return ii < ij + } + } + return true + }) + toSort = append(toSort, toDelete...) + return toSort, nil +} + +// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary and +// another list to set the order of the list +// Only list of primitives with merge strategy will generate a parallel deletion list. +// These two lists should yield modified when applied to original, for lists with merge semantics. 
+func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { + if len(original) == 0 { + // Both slices are empty - do nothing + if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { + return nil, nil, nil, nil + } + + // Old slice was empty - add all elements from the new slice + return modified, nil, nil, nil + } + + elementType, err := sliceElementType(original, modified) + if err != nil { + return nil, nil, nil, err + } + + var patchList, deleteList, setOrderList []interface{} + kind := elementType.Kind() + switch kind { + case reflect.Map: + patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) + if err != nil { + return nil, nil, nil, err + } + patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) + if err != nil { + return nil, nil, nil, err + } + orderSame, err := isOrderSame(original, modified, mergeKey) + if err != nil { + return nil, nil, nil, err + } + // append the deletions to the end of the patch list. + patchList = append(patchList, deleteList...) + deleteList = nil + // generate the setElementOrder list when there are content changes or order changes + if diffOptions.SetElementOrder && + ((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) || + (!diffOptions.IgnoreDeletions && len(patchList) > 0)) { + // Generate a list of maps that each item contains only the merge key. 
+ setOrderList = make([]interface{}, len(modified)) + for i, v := range modified { + typedV := v.(map[string]interface{}) + setOrderList[i] = map[string]interface{}{ + mergeKey: typedV[mergeKey], + } + } + } + case reflect.Slice: + // Lists of Lists are not permitted by the api + return nil, nil, nil, mergepatch.ErrNoListOfLists + default: + patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions) + if err != nil { + return nil, nil, nil, err + } + patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) + // generate the setElementOrder list when there are content changes or order changes + if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) || + (!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) { + setOrderList = modified + } + } + return patchList, deleteList, setOrderList, err +} + +// isOrderSame checks if the order in a list has changed +func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) { + if len(original) != len(modified) { + return false, nil + } + for i, modifiedItem := range modified { + equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey) + if err != nil || !equal { + return equal, err + } + } + return true, nil +} + +// diffListsOfScalars returns 2 lists, the first one is addList and the second one is deletionList. +// Argument diffOptions.IgnoreChangesAndAdditions controls if calculate addList. true means not calculate. +// Argument diffOptions.IgnoreDeletions controls if calculate deletionList. true means not calculate. 
+// original may be changed, but modified is guaranteed to not be changed +func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + modifiedCopy := make([]interface{}, len(modified)) + copy(modifiedCopy, modified) + // Sort the scalars for easier calculating the diff + originalScalars := sortScalars(original) + modifiedScalars := sortScalars(modifiedCopy) + + originalIndex, modifiedIndex := 0, 0 + addList := []interface{}{} + deletionList := []interface{}{} + + for { + originalInBounds := originalIndex < len(originalScalars) + modifiedInBounds := modifiedIndex < len(modifiedScalars) + if !originalInBounds && !modifiedInBounds { + break + } + // we need to compare the string representation of the scalar, + // because the scalar is an interface which doesn't support either < or > + // And that's how func sortScalars compare scalars. + var originalString, modifiedString string + var originalValue, modifiedValue interface{} + if originalInBounds { + originalValue = originalScalars[originalIndex] + originalString = fmt.Sprintf("%v", originalValue) + } + if modifiedInBounds { + modifiedValue = modifiedScalars[modifiedIndex] + modifiedString = fmt.Sprintf("%v", modifiedValue) + } + + originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString) + switch { + case originalV == nil && modifiedV == nil: + originalIndex++ + modifiedIndex++ + case originalV != nil && modifiedV == nil: + if !diffOptions.IgnoreDeletions { + deletionList = append(deletionList, originalValue) + } + originalIndex++ + case originalV == nil && modifiedV != nil: + if !diffOptions.IgnoreChangesAndAdditions { + addList = append(addList, modifiedValue) + } + modifiedIndex++ + default: + return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV) + } + } + + return addList, deduplicateScalars(deletionList), nil +} + +// If 
first return value is non-nil, list1 contains an element not present in list2 +// If second return value is non-nil, list2 contains an element not present in list1 +func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) { + bothInBounds := list1Inbounds && list2Inbounds + switch { + // scalars are identical + case bothInBounds && list1Value == list2Value: + return nil, nil + // only list2 is in bound + case !list1Inbounds: + fallthrough + // list2 has additional scalar + case bothInBounds && list1Value > list2Value: + return nil, list2Value + // only original is in bound + case !list2Inbounds: + fallthrough + // original has additional scalar + case bothInBounds && list1Value < list2Value: + return list1Value, nil + default: + return nil, nil + } +} + +// diffListsOfMaps takes a pair of lists and +// returns a (recursive) strategic merge patch list contains additions and changes and +// a deletion list contains deletions +func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(modified)) + deletionList := make([]interface{}, 0, len(original)) + + originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + + originalIndex, modifiedIndex := 0, 0 + for { + originalInBounds := originalIndex < len(originalSorted) + modifiedInBounds := modifiedIndex < len(modifiedSorted) + bothInBounds := originalInBounds && modifiedInBounds + if !originalInBounds && !modifiedInBounds { + break + } + + var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string + var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{} + var originalElement, 
modifiedElement map[string]interface{} + if originalInBounds { + originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted) + if err != nil { + return nil, nil, err + } + originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue) + } + if modifiedInBounds { + modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted) + if err != nil { + return nil, nil, err + } + modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue) + } + + switch { + case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + // Merge key values are equal, so recurse + patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) + if err != nil { + return nil, nil, err + } + if len(patchValue) > 0 { + patchValue[mergeKey] = modifiedElementMergeKeyValue + patch = append(patch, patchValue) + } + originalIndex++ + modifiedIndex++ + // only modified is in bound + case !originalInBounds: + fallthrough + // modified has additional map + case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreChangesAndAdditions { + patch = append(patch, modifiedElement) + } + modifiedIndex++ + // only original is in bound + case !modifiedInBounds: + fallthrough + // original has additional map + case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreDeletions { + // Item was deleted, so add delete directive + deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue)) + } + originalIndex++ + } + } + + return patch, deletionList, nil +} + +// getMapAndMergeKeyValueByIndex return a map in the list and its merge key value given the 
index of the map. +func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) { + m, ok := listOfMaps[index].(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index]) + } + + val, ok := m[mergeKey] + if !ok { + return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey) + } + return m, val, nil +} + +// StrategicMergePatch applies a strategic merge patch. The patch and the original document +// must be json encoded content. A patch can be created from an original and a modified document +// by calling CreateStrategicMergePatch. +func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { + originalMap, err := handleUnmarshal(original) + if err != nil { + return nil, err + } + patchMap, err := handleUnmarshal(patch) + if err != nil { + return nil, err + } + + result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) + if err != nil { + return nil, err + } + + return json.Marshal(result) +} + +func handleUnmarshal(j []byte) (map[string]interface{}, error) { + if j == nil { + j = []byte("{}") + } + + m := map[string]interface{}{} + err := json.Unmarshal(j, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + return m, nil +} + +// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents +// must be JSONMap. A patch can be created from an original and modified document by +// calling CreateTwoWayMergeMapPatch. +// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. 
+func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. + // For native resources, we can easily figure out these tags since we know the fields. + + // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle + // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge + // for custom resources. So we should fail fast and return an error. + if _, ok := dataStruct.(*unstructured.Unstructured); ok { + return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat + } + + return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { + mergeOptions := MergeOptions{ + MergeParallelList: true, + IgnoreUnmatchedNulls: true, + } + return mergeMap(original, patch, schema, mergeOptions) +} + +// MergeStrategicMergeMapPatchUsingLookupPatchMeta merges strategic merge +// patches retaining `null` fields and parallel lists. If 2 patches change the +// same fields and the latter one will override the former one. If you don't +// want that happen, you need to run func MergingMapsHaveConflicts before +// merging these patches. Applying the resulting merged merge patch to a JSONMap +// yields the same as merging each strategic merge patch to the JSONMap in +// succession. 
+func MergeStrategicMergeMapPatchUsingLookupPatchMeta(schema LookupPatchMeta, patches ...JSONMap) (JSONMap, error) { + mergeOptions := MergeOptions{ + MergeParallelList: false, + IgnoreUnmatchedNulls: false, + } + merged := JSONMap{} + var err error + for _, patch := range patches { + merged, err = mergeMap(merged, patch, schema, mergeOptions) + if err != nil { + return nil, err + } + } + return merged, nil +} + +// handleDirectiveInMergeMap handles the patch directive when merging 2 maps. +func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) { + if directive == replaceDirective { + // If the patch contains "$patch: replace", don't merge it, just use the + // patch directly. Later on, we can add a single level replace that only + // affects the map that the $patch is in. + delete(patch, directiveMarker) + return patch, nil + } + + if directive == deleteDirective { + // If the patch contains "$patch: delete", don't merge it, just return + // an empty map. 
+ return map[string]interface{}{}, nil + } + + return nil, mergepatch.ErrBadPatchType(directive, patch) +} + +func containsDirectiveMarker(item interface{}) bool { + m, ok := item.(map[string]interface{}) + if ok { + if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker { + return true + } + } + return false +} + +func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) { + if len(mergeKey) == 0 { + return left == right, nil + } + typedLeft, ok := left.(map[string]interface{}) + if !ok { + return false, mergepatch.ErrBadArgType(typedLeft, left) + } + typedRight, ok := right.(map[string]interface{}) + if !ok { + return false, mergepatch.ErrBadArgType(typedRight, right) + } + mergeKeyLeft, ok := typedLeft[mergeKey] + if !ok { + return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey) + } + mergeKeyRight, ok := typedRight[mergeKey] + if !ok { + return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey) + } + return mergeKeyLeft == mergeKeyRight, nil +} + +// extractKey trims the prefix and return the original key +func extractKey(s, prefix string) (string, error) { + substrings := strings.SplitN(s, "/", 2) + if len(substrings) <= 1 || substrings[0] != prefix { + switch prefix { + case deleteFromPrimitiveListDirectivePrefix: + return "", mergepatch.ErrBadPatchFormatForPrimitiveList + case setElementOrderDirectivePrefix: + return "", mergepatch.ErrBadPatchFormatForSetElementOrderList + default: + return "", fmt.Errorf("fail to find unknown prefix %q in %s\n", prefix, s) + } + } + return substrings[1], nil +} + +// validatePatchUsingSetOrderList verifies: +// the relative order of any two items in the setOrderList list matches that in the patch list. +// the items in the patch list must be a subset or the same as the $setElementOrder list (deletions are ignored). 
+func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error { + typedSetOrderList, ok := setOrderList.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + typedPatchList, ok := patchList.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 { + return nil + } + + var nonDeleteList, toDeleteList []interface{} + var err error + if len(mergeKey) > 0 { + nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList) + if err != nil { + return err + } + } else { + nonDeleteList = typedPatchList + } + + patchIndex, setOrderIndex := 0, 0 + for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) { + if containsDirectiveMarker(nonDeleteList[patchIndex]) { + patchIndex++ + continue + } + mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey) + if err != nil { + return err + } + if mergeKeyEqual { + patchIndex++ + } + setOrderIndex++ + } + // If patchIndex is inbound but setOrderIndex if out of bound mean there are items mismatching between the patch list and setElementOrder list. + // the second check is a sanity check, and should always be true if the first is true. + if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) { + return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList) + } + typedPatchList = append(nonDeleteList, toDeleteList...) + return nil +} + +// preprocessDeletionListForMerging preprocesses the deletion list. 
+// it returns shouldContinue, isDeletionList, noPrefixKey +func preprocessDeletionListForMerging(key string, original map[string]interface{}, + patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) { + // If found a parallel list for deletion and we are going to merge the list, + // overwrite the key to the original key and set flag isDeleteList + foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix) + if foundParallelListPrefix { + if !mergeDeletionList { + original[key] = patchVal + return true, false, "", nil + } + originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix) + return false, true, originalKey, err + } + return false, false, "", nil +} + +// applyRetainKeysDirective looks for a retainKeys directive and applies to original +// - if no directive exists do nothing +// - if directive is found, clear keys in original missing from the directive list +// - validate that all keys present in the patch are present in the retainKeys directive +// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives +func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error { + retainKeysInPatch, foundInPatch := patch[retainKeysDirective] + if !foundInPatch { + return nil + } + // cleanup the directive + delete(patch, retainKeysDirective) + + if !options.MergeParallelList { + // If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both. + // If not present in the original patch, copy from the modified patch. + retainKeysInOriginal, foundInOriginal := original[retainKeysDirective] + if foundInOriginal { + if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) { + // This error actually should never happen. 
+ return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch) + } + } else { + original[retainKeysDirective] = retainKeysInPatch + } + return nil + } + + retainKeysList, ok := retainKeysInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + + // validate patch to make sure all fields in the patch are present in the retainKeysList. + // The map is used only as a set, the value is never referenced + m := map[interface{}]struct{}{} + for _, v := range retainKeysList { + m[v] = struct{}{} + } + for k, v := range patch { + if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) || + strings.HasPrefix(k, setElementOrderDirectivePrefix) { + continue + } + // If there is an item present in the patch but not in the retainKeys list, + // the patch is invalid. + if _, found := m[k]; !found { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + } + + // clear not present fields + for k := range original { + if _, found := m[k]; !found { + delete(original, k) + } + } + return nil +} + +// mergePatchIntoOriginal processes $setElementOrder list. +// When not merging the directive, it will make sure $setElementOrder list exist only in original. +// When merging the directive, it will try to find the $setElementOrder list and +// its corresponding patch list, validate it and merge it. +// Then, sort them by the relative order in setElementOrder, patch list and live list. +// The precedence is $setElementOrder > order in patch list > order in live list. +// This function will delete the item after merging it to prevent process it again in the future. 
+// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { + for key, patchV := range patch { + // Do nothing if there is no ordering directive + if !strings.HasPrefix(key, setElementOrderDirectivePrefix) { + continue + } + + setElementOrderInPatch := patchV + // Copies directive from the second patch (`patch`) to the first patch (`original`) + // and checks they are equal and delete the directive in the second patch + if !mergeOptions.MergeParallelList { + setElementOrderListInOriginal, ok := original[key] + if ok { + // check if the setElementOrder list in original and the one in patch matches + if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else { + // move the setElementOrder list from patch to original + original[key] = setElementOrderInPatch + } + } + delete(patch, key) + + var ( + ok bool + originalFieldValue, patchFieldValue, merged []interface{} + patchStrategy string + patchMeta PatchMeta + subschema LookupPatchMeta + ) + typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch) + } + // Trim the setElementOrderDirectivePrefix to get the key of the list field in original. + originalKey, err := extractKey(key, setElementOrderDirectivePrefix) + if err != nil { + return err + } + // try to find the list with `originalKey` in `original` and `modified` and merge them. 
+ originalList, foundOriginal := original[originalKey] + patchList, foundPatch := patch[originalKey] + if foundOriginal { + originalFieldValue, ok = originalList.([]interface{}) + if !ok { + return mergepatch.ErrBadArgType(originalFieldValue, originalList) + } + } + if foundPatch { + patchFieldValue, ok = patchList.([]interface{}) + if !ok { + return mergepatch.ErrBadArgType(patchFieldValue, patchList) + } + } + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey) + if err != nil { + return err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return err + } + // Check for consistency between the element order list and the field it applies to + err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) + if err != nil { + return err + } + + switch { + case foundOriginal && !foundPatch: + // no change to list contents + merged = originalFieldValue + case !foundOriginal && foundPatch: + // list was added + merged = patchFieldValue + case foundOriginal && foundPatch: + merged, err = mergeSliceHandler(originalList, patchList, subschema, + patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) + if err != nil { + return err + } + case !foundOriginal && !foundPatch: + continue + } + + // Split all items into patch items and server-only items and then enforce the order. + var patchItems, serverOnlyItems []interface{} + if len(patchMeta.GetPatchMergeKey()) == 0 { + // Primitives doesn't need merge key to do partitioning. + patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList) + + } else { + // Maps need merge key to do partitioning. 
+ patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) + if err != nil { + return err + } + } + + elementType, err := sliceElementType(originalFieldValue, patchFieldValue) + if err != nil { + return err + } + kind := elementType.Kind() + // normalize merged list + // typedSetElementOrderList contains all the relative order in typedPatchList, + // so don't need to use typedPatchList + both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) + if err != nil { + return err + } + original[originalKey] = both + // delete patch list from patch to prevent process again in the future + delete(patch, originalKey) + } + return nil +} + +// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. +func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) { + patch := make([]interface{}, 0, len(original)) + serverOnly := make([]interface{}, 0, len(original)) + inPatch := map[interface{}]bool{} + for _, v := range partitionBy { + inPatch[v] = true + } + for _, v := range original { + if !inPatch[v] { + serverOnly = append(serverOnly, v) + } else { + patch = append(patch, v) + } + } + return patch, serverOnly +} + +// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. 
+func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(original)) + serverOnly := make([]interface{}, 0, len(original)) + for _, v := range original { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedV, v) + } + mergeKeyValue, foundMergeKey := typedV[mergeKey] + if !foundMergeKey { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + _, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue) + if err != nil { + return nil, nil, err + } + if !found { + serverOnly = append(serverOnly, v) + } else { + patch = append(patch, v) + } + } + return patch, serverOnly, nil +} + +// Merge fields from a patch map into the original map. Note: This may modify +// both the original map and the patch because getting a deep copy of a map in +// golang is highly non-trivial. +// flag mergeOptions.MergeParallelList controls if using the parallel list to delete or keeping the list. +// If patch contains any null field (e.g. field_1: null) that is not +// present in original, then to propagate it to the end result use +// mergeOptions.IgnoreUnmatchedNulls == false. +func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { + if v, ok := patch[directiveMarker]; ok { + return handleDirectiveInMergeMap(v, patch) + } + + // nil is an accepted value for original to simplify logic in other places. + // If original is nil, replace it with an empty map and then apply the patch. + if original == nil { + original = map[string]interface{}{} + } + + err := applyRetainKeysDirective(original, patch, mergeOptions) + if err != nil { + return nil, err + } + + // Process $setElementOrder list and other lists sharing the same key. 
+ // When not merging the directive, it will make sure $setElementOrder list exist only in original. + // When merging the directive, it will process $setElementOrder and its patch list together. + // This function will delete the merged elements from patch so they will not be reprocessed + err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) + if err != nil { + return nil, err + } + + // Start merging the patch into the original. + for k, patchV := range patch { + skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList) + if err != nil { + return nil, err + } + if skipProcessing { + continue + } + if len(noPrefixKey) > 0 { + k = noPrefixKey + } + + // If the value of this key is null, delete the key if it exists in the + // original. Otherwise, check if we want to preserve it or skip it. + // Preserving the null value is useful when we want to send an explicit + // delete to the API server. + if patchV == nil { + if _, ok := original[k]; ok { + delete(original, k) + } + if mergeOptions.IgnoreUnmatchedNulls { + continue + } + } + + _, ok := original[k] + if !ok { + // If it's not in the original document, just take the patch value. + original[k] = patchV + continue + } + + originalType := reflect.TypeOf(original[k]) + patchType := reflect.TypeOf(patchV) + if originalType != patchType { + original[k] = patchV + continue + } + // If they're both maps or lists, recurse into the value. 
+ switch originalType.Kind() { + case reflect.Map: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) + case reflect.Slice: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) + default: + original[k] = patchV + } + if err != nil { + return nil, err + } + } + return original, nil +} + +// mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy and mergeOptions. +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { + typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy != replaceDirective { + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) + } else { + return typedPatch, nil + } +} + +// mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. 
+func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { + typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy == mergeDirective { + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) + } else { + return typedPatch, nil + } +} + +// Merge two slices together. Note: This may modify both the original slice and +// the patch because getting a deep copy of a slice in golang is highly +// non-trivial. +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { + if len(original) == 0 && len(patch) == 0 { + return original, nil + } + + // All the values must be of the same type, but not a list. + t, err := sliceElementType(original, patch) + if err != nil { + return nil, err + } + + var merged []interface{} + kind := t.Kind() + // If the elements are not maps, merge the slices of scalars. + if kind != reflect.Map { + if mergeOptions.MergeParallelList && isDeleteList { + return deleteFromSlice(original, patch), nil + } + // Maybe in the future add a "concat" mode that doesn't + // deduplicate. + both := append(original, patch...) 
+ merged = deduplicateScalars(both) + + } else { + if mergeKey == "" { + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) + } + + original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) + if err != nil { + return nil, err + } + + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) + if err != nil { + return nil, err + } + } + + // enforce the order + var patchItems, serverOnlyItems []interface{} + if len(mergeKey) == 0 { + patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch) + } else { + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey) + if err != nil { + return nil, err + } + } + return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind) +} + +// mergeSliceWithSpecialElements handles special elements with directiveMarker +// before merging the slices. It returns a updated `original` and a patch without special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patchWithoutSpecialElements := []interface{}{} + replace := false + for _, v := range patch { + typedV := v.(map[string]interface{}) + patchType, ok := typedV[directiveMarker] + if !ok { + patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) + } else { + switch patchType { + case deleteDirective: + mergeValue, ok := typedV[mergeKey] + if ok { + var err error + original, err = deleteMatchingEntries(original, mergeKey, mergeValue) + if err != nil { + return nil, nil, err + } + } else { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + case replaceDirective: + replace = true + // Continue iterating through the array to prune any other $patch elements. 
+ case mergeDirective: + return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch") + default: + return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV) + } + } + } + if replace { + return patchWithoutSpecialElements, nil, nil + } + return original, patchWithoutSpecialElements, nil +} + +// delete all matching entries (based on merge key) from a merging list +func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) { + for { + _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if !found { + break + } + // Delete the element at originalKey. + original = append(original[:originalKey], original[originalKey+1:]...) + } + return original, nil +} + +// mergeSliceWithoutSpecialElements merges slices with non-special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { + for _, v := range patch { + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. + originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. 
+ mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + return original, nil +} + +// deleteFromSlice uses the parallel list to delete the items in a list of scalars +func deleteFromSlice(current, toDelete []interface{}) []interface{} { + toDeleteMap := map[interface{}]interface{}{} + processed := make([]interface{}, 0, len(current)) + for _, v := range toDelete { + toDeleteMap[v] = true + } + for _, v := range current { + if _, found := toDeleteMap[v]; !found { + processed = append(processed, v) + } + } + return processed +} + +// This method no longer panics if any element of the slice is not a map. +func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + newM, err := sortMergeListsByNameMap(m, schema) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. 
+func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if k == retainKeysDirective { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForRetainKeys + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { + _, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else if k != directiveMarker { + // recurse for map and slice. + switch typedV := v.(type) { + case map[string]interface{}: + subschema, _, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + v, err = sortMergeListsByNameMap(typedV, subschema) + if err != nil { + return nil, err + } + case []interface{}: + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + if patchStrategy == mergeDirective { + var err error + v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) + if err != nil { + return nil, err + } + } + } + } + + newS[k] = v + } + + return newS, nil +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. +func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { + if len(s) == 0 { + return s, nil + } + + // We don't support lists of lists yet. + t, err := sliceElementType(s) + if err != nil { + return nil, err + } + + // If the elements are not maps... 
+ if t.Kind() != reflect.Map { + // Sort the elements, because they may have been merged out of order. + return deduplicateAndSortScalars(s), nil + } + + // Elements are maps - if one of the keys of the map is a map or a + // list, we may need to recurse into it. + newS := []interface{}{} + for _, elem := range s { + if recurse { + typedElem := elem.(map[string]interface{}) + newElem, err := sortMergeListsByNameMap(typedElem, schema) + if err != nil { + return nil, err + } + + newS = append(newS, newElem) + } else { + newS = append(newS, elem) + } + } + + // Sort the maps. + newS = sortMapsBasedOnField(newS, mergeKey) + return newS, nil +} + +func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { + mapM := mapSliceFromSlice(m) + ss := SortableSliceOfMaps{mapM, fieldName} + sort.Sort(ss) + newS := sliceFromMapSlice(ss.s) + return newS +} + +func mapSliceFromSlice(m []interface{}) []map[string]interface{} { + newM := []map[string]interface{}{} + for _, v := range m { + vt := v.(map[string]interface{}) + newM = append(newM, vt) + } + + return newM +} + +func sliceFromMapSlice(s []map[string]interface{}) []interface{} { + newS := []interface{}{} + for _, v := range s { + newS = append(newS, v) + } + + return newS +} + +type SortableSliceOfMaps struct { + s []map[string]interface{} + k string // key to sort on +} + +func (ss SortableSliceOfMaps) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfMaps) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) + jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfMaps) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +func deduplicateAndSortScalars(s []interface{}) []interface{} { + s = deduplicateScalars(s) + return sortScalars(s) +} + +func sortScalars(s []interface{}) []interface{} { + ss := SortableSliceOfScalars{s} + sort.Sort(ss) + return ss.s +} + +func deduplicateScalars(s 
[]interface{}) []interface{} { + // Clever algorithm to deduplicate. + length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. +func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, mergepatch.ErrNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. 
+func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + rightType, ok := right.(map[string]interface{}) + if !ok { + return true, nil + } + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. + if leftMarker != rightMarker { + return true, nil + } + } + if fieldPatchStrategy == replaceDirective { + return false, nil + } + // Check the individual keys. 
+ return mapsHaveConflicts(leftType, rightType, schema) + + case []interface{}: + rightType, ok := right.([]interface{}) + if !ok { + return true, nil + } + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) + case string, float64, bool, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker && key != retainKeysDirective { + if rightValue, ok := typedRight[key]; ok { + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are maps + 
if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != nil { + return true, err + } + + rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) + if err != nil { + return true, err + } + + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) + } + + // Either we don't have type information, or these are non-merging lists + if len(typedLeft) != len(typedRight) { + return true, nil + } + + // Sort scalar slices to prevent ordering issues + // We have no way to sort non-merging lists of maps + if elementType.Kind() != reflect.Map { + typedLeft = deduplicateAndSortScalars(typedLeft) + typedRight = deduplicateAndSortScalars(typedRight) + } + + // Compare the slices element by element in order + // This test will fail if the slices are not sorted + for i := range typedLeft { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { + return true, err + } + } + + return false, nil +} + +func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { + result := make(map[string]interface{}, len(slice)) + for _, value := range slice { + typedValue, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid element type in merging list:%v", slice) + } + + mergeValue, ok := typedValue[mergeKey] + if !ok { + return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) + } + + result[fmt.Sprintf("%s", mergeValue)] = typedValue + } + + return result, nil +} + +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if rightValue, ok := typedRight[key]; ok { + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", 
""); hasConflicts { + return true, err + } + } + } + + return false, nil +} + +// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, +// while preserving any changes or deletions made to the original configuration in the interim, +// and not overridden by the current configuration. All three documents must be passed to the +// method as json encoded content. It will return a strategic merge patch, or an error if any +// of the documents is invalid, or if there are any preconditions that fail against the modified +// configuration, or, if overwrite is false and there are conflicts between the modified and current +// configurations. Conflicts are defined as keys changed differently from original to modified +// than from original to current. In other words, a conflict occurs if modified changes any key +// in a way that is different from how it is changed in current (e.g., deleting it, changing its +// value). We also propagate values fields that do not exist in original but are explicitly +// defined in modified. +func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + currentMap := map[string]interface{}{} + if len(current) > 0 { + if err := json.Unmarshal(current, &currentMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + // The patch is the difference from current to modified without deletions, plus deletions + // from original to modified. 
To find it, we compute deletions, which are the deletions from + // original to modified, and delta, which is the difference from current to modified without + // deletions, and then apply delta to deletions as a patch, which should be strictly additive. + deltaMapDiffOptions := DiffOptions{ + IgnoreDeletions: true, + SetElementOrder: true, + } + deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions) + if err != nil { + return nil, err + } + deletionsMapDiffOptions := DiffOptions{ + SetElementOrder: true, + IgnoreChangesAndAdditions: true, + } + deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions) + if err != nil { + return nil, err + } + + mergeOptions := MergeOptions{} + patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + // If overwrite is false, and the patch contains any keys that were changed differently, + // then return a conflict error. 
+ if !overwrite { + changeMapDiffOptions := DiffOptions{} + changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) + if err != nil { + return nil, err + } + + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) + if err != nil { + return nil, err + } + + if hasConflicts { + return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap)) + } + } + + return json.Marshal(patchMap) +} + +func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified } + +func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified } + +func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified } + +func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} { + return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective} +} + +func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) { + typedOriginal, ok := original.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) { + typedOriginal, ok := original.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +// extractRetainKeysPatchStrategy process patch strategy, which is a string may contains multiple +// patch strategies separated by ",". 
It returns a boolean var indicating if it has +// retainKeys strategies and a string for the other strategy. +func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) { + switch len(strategies) { + case 0: + return false, "", nil + case 1: + singleStrategy := strategies[0] + switch singleStrategy { + case retainKeysStrategy: + return true, "", nil + default: + return false, singleStrategy, nil + } + case 2: + switch { + case strategies[0] == retainKeysStrategy: + return true, strategies[1], nil + case strategies[1] == retainKeysStrategy: + return true, strategies[0], nil + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } +} + +// hasAdditionalNewField returns if original map has additional key with non-nil value than modified. +func hasAdditionalNewField(original, modified map[string]interface{}) bool { + for k, v := range original { + if v == nil { + continue + } + if _, found := modified[k]; !found { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 0000000000..f84d65aacb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path 
*openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, 
mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 4cb0c122c0..d759d912be 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -286,8 +286,9 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance } // BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained. +// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() +// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in +// undetermined behavior. // The BackoffManager is supposed to be called in a single-threaded environment. type BackoffManager interface { Backoff() clock.Timer @@ -317,7 +318,7 @@ func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Du Steps: math.MaxInt32, Cap: maxBackoff, }, - backoffTimer: c.NewTimer(0), + backoffTimer: nil, initialBackoff: initBackoff, lastBackoffStart: c.Now(), backoffResetDuration: resetDuration, @@ -334,9 +335,14 @@ func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { return b.backoff.Step() } -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff. +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. 
+// The returned timer must be drained before calling Backoff() the second time func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - b.backoffTimer.Reset(b.getNextBackoff()) + if b.backoffTimer == nil { + b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) + } else { + b.backoffTimer.Reset(b.getNextBackoff()) + } return b.backoffTimer } @@ -354,7 +360,7 @@ func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.C clock: c, duration: duration, jitter: jitter, - backoffTimer: c.NewTimer(0), + backoffTimer: nil, } } @@ -366,8 +372,15 @@ func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { return jitteredPeriod } +// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. +// The returned timer must be drained before calling Backoff() the second time func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - j.backoffTimer.Reset(j.getNextBackoff()) + backoff := j.getNextBackoff() + if j.backoffTimer == nil { + j.backoffTimer = j.clock.NewTimer(backoff) + } else { + j.backoffTimer.Reset(backoff) + } return j.backoffTimer } diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 8af256eb12..4269a836a8 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -113,7 +113,7 @@ func (sw *StreamWatcher) receive() { case io.ErrUnexpectedEOF: klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: - if net.IsProbableEOF(err) { + if net.IsProbableEOF(err) || net.IsTimeout(err) { klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) } else { sw.result <- Event{ diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 0000000000..3f72c69ba3 --- /dev/null 
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go new file mode 100644 index 0000000000..8205a4dd13 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -0,0 +1,513 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json is forked from the Go standard library to enable us to find the +// field of a struct that a given JSON key maps to. +package json + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +const ( + patchStrategyTagKey = "patchStrategy" + patchMergeKeyTagKey = "patchMergeKey" +) + +// Finds the patchStrategy and patchMergeKey struct tag fields on a given +// struct field given the struct type and the JSON name of the field. +// It returns field type, a slice of patch strategies, merge key and error. +// TODO: fix the returned errors to be introspectable. +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( + elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", + t.Kind().String()) + return + } + jf := []byte(jsonField) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, jf) { + f = ff + break + } + // Do case-insensitive comparison. 
+ if f == nil && ff.equalFold(ff.nameBytes, jf) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential struct field. + tjf := t.Field(f.index[0]) + // we must navigate down all the anonymously included structs in the chain + for i := 1; i < len(f.index); i++ { + tjf = tjf.Type.Field(f.index[i]) + } + patchStrategy := tjf.Tag.Get(patchStrategyTagKey) + patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey) + patchStrategies = strings.Split(patchStrategy, ",") + elemType = tjf.Type + return + } + e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) + return +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + // index is the sequence of indexes from the containing type fields to this field. + // it is a slice because anonymous structs will need multiple navigation steps to correctly + // resolve the proper fields + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func (f field) String() string { + return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. 
+type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'Å¿' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. 
+func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. 
+func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go new file mode 100644 index 0000000000..82a7cc470f --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached/memory/memcache.go @@ -0,0 +1,243 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package memory + +import ( + "errors" + "fmt" + "net" + "net/url" + "sync" + "syscall" + + "github.com/googleapis/gnostic/OpenAPIv2" + + errorsutil "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + restclient "k8s.io/client-go/rest" +) + +type cacheEntry struct { + resourceList *metav1.APIResourceList + err error +} + +// memCacheClient can Invalidate() to stay up-to-date with discovery +// information. +// +// TODO: Switch to a watch interface. Right now it will poll after each +// Invalidate() call. +type memCacheClient struct { + delegate discovery.DiscoveryInterface + + lock sync.RWMutex + groupToServerResources map[string]*cacheEntry + groupList *metav1.APIGroupList + cacheValid bool +} + +// Error Constants +var ( + ErrCacheNotFound = errors.New("not found") +) + +var _ discovery.CachedDiscoveryInterface = &memCacheClient{} + +// isTransientConnectionError checks whether given error is "Connection refused" or +// "Connection reset" error which usually means that apiserver is temporarily +// unavailable. 
+func isTransientConnectionError(err error) bool { + urlError, ok := err.(*url.Error) + if !ok { + return false + } + opError, ok := urlError.Err.(*net.OpError) + if !ok { + return false + } + errno, ok := opError.Err.(syscall.Errno) + if !ok { + return false + } + return errno == syscall.ECONNREFUSED || errno == syscall.ECONNRESET +} + +func isTransientError(err error) bool { + if isTransientConnectionError(err) { + return true + } + + if t, ok := err.(errorsutil.APIStatus); ok && t.Status().Code >= 500 { + return true + } + + return errorsutil.IsTooManyRequests(err) +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *memCacheClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + cachedVal, ok := d.groupToServerResources[groupVersion] + if !ok { + return nil, ErrCacheNotFound + } + + if cachedVal.err != nil && isTransientError(cachedVal.err) { + r, err := d.serverResourcesForGroupVersion(groupVersion) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", groupVersion, err)) + } + cachedVal = &cacheEntry{r, err} + d.groupToServerResources[groupVersion] = cachedVal + } + + return cachedVal.resourceList, cachedVal.err +} + +// ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. +func (d *memCacheClient) ServerResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerResources(d) +} + +// ServerGroupsAndResources returns the groups and supported resources for all groups and versions. 
+func (d *memCacheClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return discovery.ServerGroupsAndResources(d) +} + +func (d *memCacheClient) ServerGroups() (*metav1.APIGroupList, error) { + d.lock.Lock() + defer d.lock.Unlock() + if !d.cacheValid { + if err := d.refreshLocked(); err != nil { + return nil, err + } + } + return d.groupList, nil +} + +func (d *memCacheClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *memCacheClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredResources(d) +} + +func (d *memCacheClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return discovery.ServerPreferredNamespacedResources(d) +} + +func (d *memCacheClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *memCacheClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *memCacheClient) Fresh() bool { + d.lock.RLock() + defer d.lock.RUnlock() + // Return whether the cache is populated at all. It is still possible that + // a single entry is missing due to transient errors and the attempt to read + // that entry will trigger retry. + return d.cacheValid +} + +// Invalidate enforces that no cached data that is older than the current time +// is used. +func (d *memCacheClient) Invalidate() { + d.lock.Lock() + defer d.lock.Unlock() + d.cacheValid = false + d.groupToServerResources = nil + d.groupList = nil +} + +// refreshLocked refreshes the state of cache. The caller must hold d.lock for +// writing. +func (d *memCacheClient) refreshLocked() error { + // TODO: Could this multiplicative set of calls be replaced by a single call + // to ServerResources? If it's possible for more than one resulting + // APIResourceList to have the same GroupVersion, the lists would need merged. 
+ gl, err := d.delegate.ServerGroups() + if err != nil || len(gl.Groups) == 0 { + utilruntime.HandleError(fmt.Errorf("couldn't get current server API group list: %v", err)) + return err + } + + wg := &sync.WaitGroup{} + resultLock := &sync.Mutex{} + rl := map[string]*cacheEntry{} + for _, g := range gl.Groups { + for _, v := range g.Versions { + gv := v.GroupVersion + wg.Add(1) + go func() { + defer wg.Done() + defer utilruntime.HandleCrash() + + r, err := d.serverResourcesForGroupVersion(gv) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", gv, err)) + } + + resultLock.Lock() + defer resultLock.Unlock() + rl[gv] = &cacheEntry{r, err} + }() + } + } + wg.Wait() + + d.groupToServerResources, d.groupList = rl, gl + d.cacheValid = true + return nil +} + +func (d *memCacheClient) serverResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + r, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return r, err + } + if len(r.APIResources) == 0 { + return r, fmt.Errorf("Got empty response for: %v", groupVersion) + } + return r, nil +} + +// NewMemCacheClient creates a new CachedDiscoveryInterface which caches +// discovery information in memory and will stay up-to-date if Invalidate is +// called with regularity. +// +// NOTE: The client will NOT resort to live lookups on cache misses. +func NewMemCacheClient(delegate discovery.DiscoveryInterface) discovery.CachedDiscoveryInterface { + return &memCacheClient{ + delegate: delegate, + groupToServerResources: map[string]*cacheEntry{}, + } +} diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go new file mode 100644 index 0000000000..2537a2b4e2 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// CategoryExpander maps category strings to GroupResources. +// Categories are classification or 'tag' of a group of resources. +type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. +type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. In case of any error or no category found (which likely +// means we're at a cluster prior to categories support, fallback to the expander provided. 
+func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { + if client == nil { + panic("Please provide discovery client to shortcut expander") + } + return discoveryCategoryExpander{discoveryClient: client} +} + +// Expand fulfills CategoryExpander +func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. + apiResourceLists, _ := e.discoveryClient.ServerResources() + if len(apiResourceLists) == 0 { + return nil, false + } + + discoveredExpansions := map[string][]schema.GroupResource{} + for _, apiResourceList := range apiResourceLists { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + continue + } + // Collect GroupVersions by categories + for _, apiResource := range apiResourceList.APIResources { + if categories := apiResource.Categories; len(categories) > 0 { + for _, category := range categories { + groupResource := schema.GroupResource{ + Group: gv.Group, + Resource: apiResource.Name, + } + discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) + } + } + } + } + + ret, ok := discoveredExpansions[category] + return ret, ok +} + +// UnionCategoryExpander implements CategoryExpander interface. +// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. +type UnionCategoryExpander []CategoryExpander + +// Expand fulfills CategoryExpander +func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret := []schema.GroupResource{} + ok := false + + // Expand the category for each CategoryExpander in the list and merge/combine the results. 
+ for _, expansion := range u { + curr, currOk := expansion.Expand(category) + + for _, currGR := range curr { + found := false + for _, existing := range ret { + if existing == currGR { + found = true + break + } + } + if !found { + ret = append(ret, currGR) + } + } + ok = ok || currOk + } + + return ret, ok +} diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go new file mode 100644 index 0000000000..f8d7080d49 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -0,0 +1,338 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + + "k8s.io/klog" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. +type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. 
It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + // Make sure the preferred version comes first + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + + // if we have a slash, then this is a subresource and we shouldn't create mappings for those. 
+ if strings.Contains(resource.Name, "/") { + continue + } + + plural := gv.WithResource(resource.Name) + singular := gv.WithResource(resource.SingularName) + // this is for legacy resources and servers which don't list singular forms. For those we must still guess. + if len(resource.SingularName) == 0 { + _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) + } + + versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) + versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { + gs, rs, err := cl.ServerGroupsAndResources() + if rs == nil || gs == nil { + return nil, err + // TODO track the errors and update callers to handle partial errors. 
+ } + rsm := map[string]*metav1.APIResourceList{} + for _, r := range rs { + rsm[r.GroupVersion] = r + } + + var result []*APIGroupResources + for _, group := range gs { + groupResources := &APIGroupResources{ + Group: *group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, ok := rsm[version.GroupVersion] + if !ok { + continue + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl discovery.CachedDiscoveryInterface +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewDiscoveryRESTMapper(groupResources) + return d.delegate, err +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. +func (d *DeferredDiscoveryRESTMapper) Reset() { + klog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. 
+func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. +func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. +func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. 
+func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. +func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). +func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go new file mode 100644 index 0000000000..6f3c9d9306 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -0,0 +1,172 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "strings" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the resource first, then invokes the wrapped +type shortcutExpander struct { + RESTMapper meta.RESTMapper + + discoveryClient discovery.DiscoveryInterface +} + +var _ meta.RESTMapper = &shortcutExpander{} + +// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client} +} + +// KindFor fulfills meta.RESTMapper +func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) +} + +// KindsFor fulfills meta.RESTMapper +func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) +} + +// ResourcesFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return 
e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) +} + +// ResourceFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) +} + +// ResourceSingularizer fulfills meta.RESTMapper +func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) +} + +// RESTMapping fulfills meta.RESTMapper +func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) +} + +// RESTMappings fulfills meta.RESTMapper +func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk, versions...) +} + +// getShortcutMappings returns a set of tuples which holds short names for resources. +// First the list of potential resources will be taken from the API server. +// Next we will append the hardcoded list of resources - to be backward compatible with old servers. +// NOTE that the list is ordered by group priority. +func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) { + res := []resourceShortcuts{} + // get server resources + // This can return an error *and* the results it was able to find. We don't need to fail on the error. 
+ apiResList, err := e.discoveryClient.ServerResources() + if err != nil { + klog.V(1).Infof("Error loading discovery information: %v", err) + } + for _, apiResources := range apiResList { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + continue + } + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + rs := resourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, + } + res = append(res, rs) + } + } + } + + return apiResList, res, nil +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. If no match has been found, we will match on group prefixing. +// Lastly we will return resource unmodified. +func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { + // get the shortcut mappings and return on first match. 
+ if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil { + // avoid expanding if there's an exact match to a full resource name + for _, apiResources := range allResources { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + continue + } + if len(resource.Group) != 0 && resource.Group != gv.Group { + continue + } + for _, apiRes := range apiResources.APIResources { + if resource.Resource == apiRes.Name { + return resource + } + if resource.Resource == apiRes.SingularName { + return resource + } + } + } + + for _, item := range shortcutResources { + if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + + // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling + if len(resource.Group) == 0 { + return resource + } + for _, item := range shortcutResources { + if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + } + + return resource +} + +// ResourceShortcuts represents a structure that holds the information how to +// transition from resource's shortcut to its full name. +type resourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} diff --git a/vendor/k8s.io/kube-openapi/LICENSE b/vendor/k8s.io/kube-openapi/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS new file mode 100644 index 0000000000..9621a6a3a4 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS @@ -0,0 +1,2 @@ +approvers: +- apelisse diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 0000000000..11ed8a6b7c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 0000000000..5eb957affb --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,318 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + "github.com/googleapis/gnostic/OpenAPIv2" + "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. 
+func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document. +func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exists. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + definitions.models[namedSchema.GetName()] = schema + } + + return &definitions, nil +} + +// We believe the schema is a reference, verify that and returns a new +// Schema +func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + // TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error. + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return nil, newSchemaError(path, "unallowed embedded type definition") + } + // TODO(wrong): a schema with a $ref can have a type. 
We can ignore it (would be incomplete), but we cannot return an error. + if len(s.GetType().GetValue()) > 0 { + return nil, newSchemaError(path, "definition reference can't have a type") + } + + // TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error. + if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { + return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) + } + reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + return &Ref{ + BaseSchema: d.parseBaseSchema(s, path), + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { + return BaseSchema{ + Description: s.GetDescription(), + Extensions: VendorExtensionToMap(s.GetVendorExtension()), + Path: *path, + } +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + var sub Schema + // TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type. 
+ if s.GetAdditionalProperties().GetSchema() == nil { + sub = &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + } + } else { + var err error + sub, err = d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) + if err != nil { + return nil, err + } + } + return &Map{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { + var t string + if len(s.GetType().GetValue()) > 1 { + return nil, newSchemaError(path, "primitive can't have more than 1 type") + } + if len(s.GetType().GetValue()) == 1 { + t = s.GetType().GetValue()[0] + } + switch t { + case String: // do nothing + case Number: // do nothing + case Integer: // do nothing + case Boolean: // do nothing + // TODO(wrong): this misses "null". Would skip the null case (would be incomplete), but we cannot return an error. + default: + return nil, newSchemaError(path, "Unknown primitive type: %q", t) + } + return &Primitive{ + BaseSchema: d.parseBaseSchema(s, path), + Type: t, + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 1 { + return nil, newSchemaError(path, "array should have exactly one type") + } + if s.GetType().GetValue()[0] != array { + return nil, newSchemaError(path, `array should have type "array"`) + } + if len(s.GetItems().GetSchema()) != 1 { + // TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error. + // TODO(wrong): "type: array" witohut any items at all is completely valid. 
+ return nil, newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + fieldOrder := []string{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + name := namedSchema.GetName() + path := path.FieldPath(name) + fields[name], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + fieldOrder = append(fieldOrder, name) + } + + return &Kind{ + BaseSchema: d.parseBaseSchema(s, path), + RequiredFields: s.GetRequired(), + Fields: fields, + FieldOrder: fieldOrder, + }, nil +} + +func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) { + return &Arbitrary{ + BaseSchema: d.parseBaseSchema(s, path), + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + if s.GetXRef() != "" { + // TODO(incomplete): ignoring the rest of s is wrong. 
As long as there are no conflict, everything from s must be considered + // Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object + return d.parseReference(s, path) + } + objectTypes := s.GetType().GetValue() + switch len(objectTypes) { + case 0: + // in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include + // the type:object property (they only included the "properties" property), so we need to handle this case + // TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them. + if s.GetProperties() != nil { + // TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type. + // TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given + return d.parseKind(s, path) + } else { + // Definition has no type and no properties. Treat it as an arbitrary value + // TODO(incomplete): what if it has additionalProperties=false or patternProperties? + // ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete). + return d.parseArbitrary(s, path) + } + case 1: + t := objectTypes[0] + switch t { + case object: + if s.GetProperties() != nil { + return d.parseKind(s, path) + } else { + return d.parseMap(s, path) + } + case array: + return d.parseArray(s, path) + } + return d.parsePrimitive(s, path) + default: + // the OpenAPI generator never generates (nor it ever did in the past) OpenAPI type definitions with multiple types + // TODO(wrong): this is rejecting a completely valid OpenAPI spec + // TODO(CRD validation schema publishing): filter these out + return nil, newSchemaError(path, "definitions with multiple types aren't supported") + } +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. 
+func (d *Definitions) LookupModel(model string) Schema { + return d.models[model] +} + +func (d *Definitions) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +type Ref struct { + BaseSchema + + reference string + definitions *Definitions +} + +var _ Reference = &Ref{} + +func (r *Ref) Reference() string { + return r.reference +} + +func (r *Ref) SubSchema() Schema { + return r.definitions.models[r.reference] +} + +func (r *Ref) Accept(v SchemaVisitor) { + v.VisitReference(r) +} + +func (r *Ref) GetName() string { + return fmt.Sprintf("Reference to %q", r.reference) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go new file mode 100644 index 0000000000..46643aa508 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -0,0 +1,278 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" +) + +// Defines openapi types. +const ( + Integer = "integer" + Number = "number" + String = "string" + Boolean = "boolean" + + // These types are private as they should never leak, and are + // represented by actual structs. + array = "array" + object = "object" +) + +// Models interface describe a model provider. They can give you the +// schema for a specific model. 
+type Models interface { + LookupModel(string) Schema + ListModels() []string +} + +// SchemaVisitor is an interface that you need to implement if you want +// to "visit" an openapi schema. A dispatch on the Schema type will call +// the appropriate function based on its actual type: +// - Array is a list of one and only one given subtype +// - Map is a map of string to one and only one given subtype +// - Primitive can be string, integer, number and boolean. +// - Kind is an object with specific fields mapping to specific types. +// - Reference is a link to another definition. +type SchemaVisitor interface { + VisitArray(*Array) + VisitMap(*Map) + VisitPrimitive(*Primitive) + VisitKind(*Kind) + VisitReference(Reference) +} + +// SchemaVisitorArbitrary is an additional visitor interface which handles +// arbitrary types. For backwards compatibility, it's a separate interface +// which is checked for at runtime. +type SchemaVisitorArbitrary interface { + SchemaVisitor + VisitArbitrary(*Arbitrary) +} + +// Schema is the base definition of an openapi type. +type Schema interface { + // Giving a visitor here will let you visit the actual type. + Accept(SchemaVisitor) + + // Pretty print the name of the type. + GetName() string + // Describes how to access this field. + GetPath() *Path + // Describes the field. + GetDescription() string + // Returns type extensions. 
+ GetExtensions() map[string]interface{} +} + +// Path helps us keep track of type paths +type Path struct { + parent *Path + key string +} + +func NewPath(key string) Path { + return Path{key: key} +} + +func (p *Path) Get() []string { + if p == nil { + return []string{} + } + if p.key == "" { + return p.parent.Get() + } + return append(p.parent.Get(), p.key) +} + +func (p *Path) Len() int { + return len(p.Get()) +} + +func (p *Path) String() string { + return strings.Join(p.Get(), "") +} + +// ArrayPath appends an array index and creates a new path +func (p *Path) ArrayPath(i int) Path { + return Path{ + parent: p, + key: fmt.Sprintf("[%d]", i), + } +} + +// FieldPath appends a field name and creates a new path +func (p *Path) FieldPath(field string) Path { + return Path{ + parent: p, + key: fmt.Sprintf(".%s", field), + } +} + +// BaseSchema holds data used by each types of schema. +type BaseSchema struct { + Description string + Extensions map[string]interface{} + + Path Path +} + +func (b *BaseSchema) GetDescription() string { + return b.Description +} + +func (b *BaseSchema) GetExtensions() map[string]interface{} { + return b.Extensions +} + +func (b *BaseSchema) GetPath() *Path { + return &b.Path +} + +// Array must have all its element of the same `SubType`. +type Array struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Array{} + +func (a *Array) Accept(v SchemaVisitor) { + v.VisitArray(a) +} + +func (a *Array) GetName() string { + return fmt.Sprintf("Array of %s", a.SubType.GetName()) +} + +// Kind is a complex object. It can have multiple different +// subtypes for each field, as defined in the `Fields` field. Mandatory +// fields are listed in `RequiredFields`. The key of the object is +// always of type `string`. +type Kind struct { + BaseSchema + + // Lists names of required fields. + RequiredFields []string + // Maps field names to types. + Fields map[string]Schema + // FieldOrder reports the canonical order for the fields. 
+ FieldOrder []string +} + +var _ Schema = &Kind{} + +func (k *Kind) Accept(v SchemaVisitor) { + v.VisitKind(k) +} + +func (k *Kind) GetName() string { + properties := []string{} + for key := range k.Fields { + properties = append(properties, key) + } + return fmt.Sprintf("Kind(%v)", properties) +} + +// IsRequired returns true if `field` is a required field for this type. +func (k *Kind) IsRequired(field string) bool { + for _, f := range k.RequiredFields { + if f == field { + return true + } + } + return false +} + +// Keys returns a alphabetically sorted list of keys. +func (k *Kind) Keys() []string { + keys := make([]string, 0) + for key := range k.Fields { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// Map is an object who values must all be of the same `SubType`. +// The key of the object is always of type `string`. +type Map struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Map{} + +func (m *Map) Accept(v SchemaVisitor) { + v.VisitMap(m) +} + +func (m *Map) GetName() string { + return fmt.Sprintf("Map of %s", m.SubType.GetName()) +} + +// Primitive is a literal. There can be multiple types of primitives, +// and this subtype can be visited through the `subType` field. +type Primitive struct { + BaseSchema + + // Type of a primitive must be one of: integer, number, string, boolean. 
+ Type string + Format string +} + +var _ Schema = &Primitive{} + +func (p *Primitive) Accept(v SchemaVisitor) { + v.VisitPrimitive(p) +} + +func (p *Primitive) GetName() string { + if p.Format == "" { + return p.Type + } + return fmt.Sprintf("%s (%s)", p.Type, p.Format) +} + +// Arbitrary is a value of any type (primitive, object or array) +type Arbitrary struct { + BaseSchema +} + +var _ Schema = &Arbitrary{} + +func (a *Arbitrary) Accept(v SchemaVisitor) { + if visitor, ok := v.(SchemaVisitorArbitrary); ok { + visitor.VisitArbitrary(a) + } +} + +func (a *Arbitrary) GetName() string { + return "Arbitrary value (primitive, object or array)" +} + +// Reference implementation depends on the type of document. +type Reference interface { + Schema + + Reference() string + SubSchema() Schema +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 392a5a45c7..ad041d2564 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -7,6 +7,8 @@ github.com/coreos/prometheus-operator/pkg/apis/monitoring github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1 # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew +# github.com/evanphx/json-patch v4.5.0+incompatible +github.com/evanphx/json-patch # github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml # github.com/gogo/protobuf v1.3.1 @@ -51,7 +53,7 @@ github.com/matttproud/golang_protobuf_extensions/pbutil github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.1 github.com/modern-go/reflect2 -# github.com/pkg/errors v0.8.1 +# github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/prometheus/client_golang v1.4.0 github.com/prometheus/client_golang/prometheus @@ -66,6 +68,13 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/rancher/lasso v0.0.0-20200905045615-7fcb07d6a20b +github.com/rancher/lasso/pkg/cache +github.com/rancher/lasso/pkg/client 
+github.com/rancher/lasso/pkg/controller +github.com/rancher/lasso/pkg/log +github.com/rancher/lasso/pkg/mapper +github.com/rancher/lasso/pkg/scheme # github.com/rancher/norman v0.0.0-20200520181341-ab75acb55410 github.com/rancher/norman/clientbase github.com/rancher/norman/condition @@ -88,12 +97,18 @@ github.com/rancher/norman/types/factory github.com/rancher/norman/types/mapper github.com/rancher/norman/types/slice github.com/rancher/norman/types/values -# github.com/rancher/wrangler v0.5.4-0.20200520040055-b8d49179cfc8 +# github.com/rancher/wrangler v0.7.4-security1 +github.com/rancher/wrangler/pkg/apply +github.com/rancher/wrangler/pkg/apply/injectors github.com/rancher/wrangler/pkg/data github.com/rancher/wrangler/pkg/data/convert github.com/rancher/wrangler/pkg/generic +github.com/rancher/wrangler/pkg/gvk github.com/rancher/wrangler/pkg/kv +github.com/rancher/wrangler/pkg/merr github.com/rancher/wrangler/pkg/name +github.com/rancher/wrangler/pkg/objectset +github.com/rancher/wrangler/pkg/patch github.com/rancher/wrangler/pkg/ratelimit github.com/rancher/wrangler/pkg/schemes github.com/rancher/wrangler/pkg/summary @@ -161,9 +176,9 @@ google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 -# gopkg.in/yaml.v2 v2.2.8 +# gopkg.in/yaml.v2 v2.3.0 gopkg.in/yaml.v2 -# k8s.io/api v0.18.0 +# k8s.io/api v0.18.8 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 @@ -212,7 +227,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.18.0 +# k8s.io/apimachinery v0.18.8 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -242,17 
+257,21 @@ k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/util/jsonmergepatch +k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/apiserver v0.18.0 k8s.io/apiserver/pkg/apis/apiserver @@ -262,6 +281,7 @@ k8s.io/apiserver/pkg/apis/audit/v1 k8s.io/apiserver/pkg/apis/config # k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.18.0 k8s.io/client-go/discovery +k8s.io/client-go/discovery/cached/memory k8s.io/client-go/dynamic k8s.io/client-go/informers k8s.io/client-go/informers/admissionregistration @@ -405,6 +425,7 @@ k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/watch +k8s.io/client-go/restmapper k8s.io/client-go/tools/cache k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/metrics @@ -429,11 +450,15 @@ k8s.io/klog # k8s.io/kube-aggregator v0.18.0 k8s.io/kube-aggregator/pkg/apis/apiregistration k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 +# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 +k8s.io/kube-openapi/pkg/util/proto # k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 k8s.io/utils/buffer k8s.io/utils/integer k8s.io/utils/pointer k8s.io/utils/trace +# sigs.k8s.io/cli-utils v0.16.0 +sigs.k8s.io/cli-utils/pkg/kstatus/status # sigs.k8s.io/structured-merge-diff/v3 v3.0.0 sigs.k8s.io/structured-merge-diff/v3/value # 
sigs.k8s.io/yaml v1.2.0 diff --git a/vendor/sigs.k8s.io/cli-utils/LICENSE b/vendor/sigs.k8s.io/cli-utils/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/sigs.k8s.io/cli-utils/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/cli-utils/LICENSE_TEMPLATE b/vendor/sigs.k8s.io/cli-utils/LICENSE_TEMPLATE new file mode 100644 index 0000000000..0c2b3b6556 --- /dev/null +++ b/vendor/sigs.k8s.io/cli-utils/LICENSE_TEMPLATE @@ -0,0 +1,2 @@ +Copyright {{.Year}} {{.Holder}} +SPDX-License-Identifier: Apache-2.0 diff --git a/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/core.go b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/core.go new file mode 100644 index 0000000000..f3b0088fb2 --- /dev/null +++ b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/core.go @@ -0,0 +1,585 @@ +// Copyright 2019 The Kubernetes Authors. 
+// SPDX-License-Identifier: Apache-2.0 + +package status + +import ( + "fmt" + "math" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// GetConditionsFn defines the signature for functions to compute the +// status of a built-in resource. +type GetConditionsFn func(*unstructured.Unstructured) (*Result, error) + +// legacyTypes defines the mapping from GroupKind to a function that can +// compute the status for the given resource. +var legacyTypes = map[string]GetConditionsFn{ + "Service": serviceConditions, + "Pod": podConditions, + "Secret": alwaysReady, + "PersistentVolumeClaim": pvcConditions, + "apps/StatefulSet": stsConditions, + "apps/DaemonSet": daemonsetConditions, + "extensions/DaemonSet": daemonsetConditions, + "apps/Deployment": deploymentConditions, + "extensions/Deployment": deploymentConditions, + "apps/ReplicaSet": replicasetConditions, + "extensions/ReplicaSet": replicasetConditions, + "policy/PodDisruptionBudget": pdbConditions, + "batch/CronJob": alwaysReady, + "ConfigMap": alwaysReady, + "batch/Job": jobConditions, + "apiextensions.k8s.io/CustomResourceDefinition": crdConditions, +} + +const ( + tooFewReady = "LessReady" + tooFewAvailable = "LessAvailable" + tooFewUpdated = "LessUpdated" + tooFewReplicas = "LessReplicas" + extraPods = "ExtraPods" + + onDeleteUpdateStrategy = "OnDelete" + + // How long a pod can be unscheduled before it is reported as + // unschedulable. + scheduleWindow = 15 * time.Second +) + +// GetLegacyConditionsFn returns a function that can compute the status for the +// given resource, or nil if the resource type is not known. 
+func GetLegacyConditionsFn(u *unstructured.Unstructured) GetConditionsFn { + gvk := u.GroupVersionKind() + g := gvk.Group + k := gvk.Kind + key := g + "/" + k + if g == "" { + key = k + } + return legacyTypes[key] +} + +// alwaysReady Used for resources that are always ready +func alwaysReady(u *unstructured.Unstructured) (*Result, error) { + return &Result{ + Status: CurrentStatus, + Message: "Resource is always ready", + Conditions: []Condition{}, + }, nil +} + +// stsConditions return standardized Conditions for Statefulset +// +// StatefulSet does define the .status.conditions property, but the controller never +// actually sets any Conditions. Thus, status must be computed only based on the other +// properties under .status. We don't have any way to find out if a reconcile for a +// StatefulSet has failed. +func stsConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + // updateStrategy==ondelete is a user managed statefulset. + updateStrategy := GetStringField(obj, ".spec.updateStrategy.type", "") + if updateStrategy == onDeleteUpdateStrategy { + return &Result{ + Status: CurrentStatus, + Message: "StatefulSet is using the ondelete update strategy", + Conditions: []Condition{}, + }, nil + } + + // Replicas + specReplicas := GetIntField(obj, ".spec.replicas", 1) + readyReplicas := GetIntField(obj, ".status.readyReplicas", 0) + currentReplicas := GetIntField(obj, ".status.currentReplicas", 0) + updatedReplicas := GetIntField(obj, ".status.updatedReplicas", 0) + statusReplicas := GetIntField(obj, ".status.replicas", 0) + partition := GetIntField(obj, ".spec.updateStrategy.rollingUpdate.partition", -1) + + if specReplicas > statusReplicas { + message := fmt.Sprintf("Replicas: %d/%d", statusReplicas, specReplicas) + return newInProgressStatus(tooFewReplicas, message), nil + } + + if specReplicas > readyReplicas { + message := fmt.Sprintf("Ready: %d/%d", readyReplicas, specReplicas) + return 
newInProgressStatus(tooFewReady, message), nil + } + + if statusReplicas > specReplicas { + message := fmt.Sprintf("Pending termination: %d", statusReplicas-specReplicas) + return newInProgressStatus(extraPods, message), nil + } + + // https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + if partition != -1 { + if updatedReplicas < (specReplicas - partition) { + message := fmt.Sprintf("updated: %d/%d", updatedReplicas, specReplicas-partition) + return newInProgressStatus("PartitionRollout", message), nil + } + // Partition case All ok + return &Result{ + Status: CurrentStatus, + Message: fmt.Sprintf("Partition rollout complete. updated: %d", updatedReplicas), + Conditions: []Condition{}, + }, nil + } + + if specReplicas > currentReplicas { + message := fmt.Sprintf("current: %d/%d", currentReplicas, specReplicas) + return newInProgressStatus("LessCurrent", message), nil + } + + // Revision + currentRevision := GetStringField(obj, ".status.currentRevision", "") + updatedRevision := GetStringField(obj, ".status.updateRevision", "") + if currentRevision != updatedRevision { + message := "Waiting for updated revision to match current" + return newInProgressStatus("RevisionMismatch", message), nil + } + + // All ok + return &Result{ + Status: CurrentStatus, + Message: fmt.Sprintf("All replicas scheduled as expected. Replicas: %d", statusReplicas), + Conditions: []Condition{}, + }, nil +} + +// deploymentConditions return standardized Conditions for Deployment. +// +// For Deployments, we look at .status.conditions as well as the other properties +// under .status. Status will be Failed if the progress deadline has been exceeded. +func deploymentConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + progressing := false + + // Check if progressDeadlineSeconds is set. If not, the controller will not set + // the `Progressing` condition, so it will always consider a deployment to be + // progressing. 
The use of math.MaxInt32 is due to special handling in the + // controller: + // https://github.com/kubernetes/kubernetes/blob/a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10/pkg/controller/deployment/util/deployment_util.go#L886 + progressDeadline := GetIntField(obj, ".spec.progressDeadlineSeconds", math.MaxInt32) + if progressDeadline == math.MaxInt32 { + progressing = true + } + + available := false + + objc, err := GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + + for _, c := range objc.Status.Conditions { + switch c.Type { + case "Progressing": //appsv1.DeploymentProgressing: + // https://github.com/kubernetes/kubernetes/blob/a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10/pkg/controller/deployment/progress.go#L52 + if c.Reason == "ProgressDeadlineExceeded" { + return &Result{ + Status: FailedStatus, + Message: "Progress deadline exceeded", + Conditions: []Condition{{ConditionStalled, corev1.ConditionTrue, c.Reason, c.Message}}, + }, nil + } + if c.Status == corev1.ConditionTrue && c.Reason == "NewReplicaSetAvailable" { + progressing = true + } + case "Available": //appsv1.DeploymentAvailable: + if c.Status == corev1.ConditionTrue { + available = true + } + } + } + + // replicas + specReplicas := GetIntField(obj, ".spec.replicas", 1) // Controller uses 1 as default if not specified. + statusReplicas := GetIntField(obj, ".status.replicas", 0) + updatedReplicas := GetIntField(obj, ".status.updatedReplicas", 0) + readyReplicas := GetIntField(obj, ".status.readyReplicas", 0) + availableReplicas := GetIntField(obj, ".status.availableReplicas", 0) + + // TODO spec.replicas zero case ?? 
+ + if specReplicas > statusReplicas { + message := fmt.Sprintf("replicas: %d/%d", statusReplicas, specReplicas) + return newInProgressStatus(tooFewReplicas, message), nil + } + + if specReplicas > updatedReplicas { + message := fmt.Sprintf("Updated: %d/%d", updatedReplicas, specReplicas) + return newInProgressStatus(tooFewUpdated, message), nil + } + + if statusReplicas > specReplicas { + message := fmt.Sprintf("Pending termination: %d", statusReplicas-specReplicas) + return newInProgressStatus(extraPods, message), nil + } + + if updatedReplicas > availableReplicas { + message := fmt.Sprintf("Available: %d/%d", availableReplicas, updatedReplicas) + return newInProgressStatus(tooFewAvailable, message), nil + } + + if specReplicas > readyReplicas { + message := fmt.Sprintf("Ready: %d/%d", readyReplicas, specReplicas) + return newInProgressStatus(tooFewReady, message), nil + } + + // check conditions + if !progressing { + message := "ReplicaSet not Available" + return newInProgressStatus("ReplicaSetNotAvailable", message), nil + } + if !available { + message := "Deployment not Available" + return newInProgressStatus("DeploymentNotAvailable", message), nil + } + // All ok + return &Result{ + Status: CurrentStatus, + Message: fmt.Sprintf("Deployment is available. Replicas: %d", statusReplicas), + Conditions: []Condition{}, + }, nil +} + +// replicasetConditions return standardized Conditions for Replicaset +func replicasetConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + // Conditions + objc, err := GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + + for _, c := range objc.Status.Conditions { + // https://github.com/kubernetes/kubernetes/blob/a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10/pkg/controller/replicaset/replica_set_utils.go + if c.Type == "ReplicaFailure" && c.Status == corev1.ConditionTrue { + message := "Replica Failure condition. 
Check Pods" + return newInProgressStatus("ReplicaFailure", message), nil + } + } + + // Replicas + specReplicas := GetIntField(obj, ".spec.replicas", 1) // Controller uses 1 as default if not specified. + statusReplicas := GetIntField(obj, ".status.replicas", 0) + readyReplicas := GetIntField(obj, ".status.readyReplicas", 0) + availableReplicas := GetIntField(obj, ".status.availableReplicas", 0) + fullyLabelledReplicas := GetIntField(obj, ".status.fullyLabeledReplicas", 0) + + if specReplicas > fullyLabelledReplicas { + message := fmt.Sprintf("Labelled: %d/%d", fullyLabelledReplicas, specReplicas) + return newInProgressStatus("LessLabelled", message), nil + } + + if specReplicas > availableReplicas { + message := fmt.Sprintf("Available: %d/%d", availableReplicas, specReplicas) + return newInProgressStatus(tooFewAvailable, message), nil + } + + if specReplicas > readyReplicas { + message := fmt.Sprintf("Ready: %d/%d", readyReplicas, specReplicas) + return newInProgressStatus(tooFewReady, message), nil + } + + if statusReplicas > specReplicas { + message := fmt.Sprintf("Pending termination: %d", statusReplicas-specReplicas) + return newInProgressStatus(extraPods, message), nil + } + // All ok + return &Result{ + Status: CurrentStatus, + Message: fmt.Sprintf("ReplicaSet is available. 
Replicas: %d", statusReplicas), + Conditions: []Condition{}, + }, nil +} + +// daemonsetConditions return standardized Conditions for DaemonSet +func daemonsetConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + // replicas + desiredNumberScheduled := GetIntField(obj, ".status.desiredNumberScheduled", -1) + currentNumberScheduled := GetIntField(obj, ".status.currentNumberScheduled", 0) + updatedNumberScheduled := GetIntField(obj, ".status.updatedNumberScheduled", 0) + numberAvailable := GetIntField(obj, ".status.numberAvailable", 0) + numberReady := GetIntField(obj, ".status.numberReady", 0) + + if desiredNumberScheduled == -1 { + message := "Missing .status.desiredNumberScheduled" + return newInProgressStatus("NoDesiredNumber", message), nil + } + + if desiredNumberScheduled > currentNumberScheduled { + message := fmt.Sprintf("Current: %d/%d", currentNumberScheduled, desiredNumberScheduled) + return newInProgressStatus("LessCurrent", message), nil + } + + if desiredNumberScheduled > updatedNumberScheduled { + message := fmt.Sprintf("Updated: %d/%d", updatedNumberScheduled, desiredNumberScheduled) + return newInProgressStatus(tooFewUpdated, message), nil + } + + if desiredNumberScheduled > numberAvailable { + message := fmt.Sprintf("Available: %d/%d", numberAvailable, desiredNumberScheduled) + return newInProgressStatus(tooFewAvailable, message), nil + } + + if desiredNumberScheduled > numberReady { + message := fmt.Sprintf("Ready: %d/%d", numberReady, desiredNumberScheduled) + return newInProgressStatus(tooFewReady, message), nil + } + + // All ok + return &Result{ + Status: CurrentStatus, + Message: fmt.Sprintf("All replicas scheduled as expected. 
Replicas: %d", desiredNumberScheduled), + Conditions: []Condition{}, + }, nil +} + +// pvcConditions return standardized Conditions for PVC +func pvcConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + phase := GetStringField(obj, ".status.phase", "unknown") + if phase != "Bound" { // corev1.ClaimBound + message := fmt.Sprintf("PVC is not Bound. phase: %s", phase) + return newInProgressStatus("NotBound", message), nil + } + // All ok + return &Result{ + Status: CurrentStatus, + Message: "PVC is Bound", + Conditions: []Condition{}, + }, nil +} + +// podConditions return standardized Conditions for Pod +func podConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + objc, err := GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + phase := GetStringField(obj, ".status.phase", "") + + switch phase { + case "Succeeded": + return &Result{ + Status: CurrentStatus, + Message: "Pod has completed successfully", + Conditions: []Condition{}, + }, nil + case "Failed": + return &Result{ + Status: CurrentStatus, + Message: "Pod has completed, but not successfully", + Conditions: []Condition{}, + }, nil + case "Running": + if hasConditionWithStatus(objc.Status.Conditions, "Ready", corev1.ConditionTrue) { + return &Result{ + Status: CurrentStatus, + Message: "Pod is Ready", + Conditions: []Condition{}, + }, nil + } + + containerNames, isCrashLooping, err := getCrashLoopingContainers(obj) + if err != nil { + return nil, err + } + if isCrashLooping { + return newFailedStatus("ContainerCrashLooping", + fmt.Sprintf("Containers in CrashLoop state: %s", strings.Join(containerNames, ","))), nil + } + + return newInProgressStatus("PodRunningNotReady", "Pod is running but is not Ready"), nil + case "Pending": + c, found := getConditionWithStatus(objc.Status.Conditions, "PodScheduled", corev1.ConditionFalse) + if found && c.Reason == "Unschedulable" { + if 
time.Now().Add(-scheduleWindow).Before(u.GetCreationTimestamp().Time) { + // We give the pod 15 seconds to be scheduled before we report it + // as unschedulable. + return newInProgressStatus("PodNotScheduled", "Pod has not been scheduled"), nil + } + return newFailedStatus("PodUnschedulable", "Pod could not be scheduled"), nil + } + return newInProgressStatus("PodPending", "Pod is in the Pending phase"), nil + default: + // If the controller hasn't observed the pod yet, there is no phase. We consider this as it + // still being in progress. + if phase == "" { + return newInProgressStatus("PodNotObserved", "Pod phase not available"), nil + } + return nil, fmt.Errorf("unknown phase %s", phase) + } +} + +func getCrashLoopingContainers(obj map[string]interface{}) ([]string, bool, error) { + var containerNames []string + css, found, err := unstructured.NestedSlice(obj, "status", "containerStatuses") + if !found || err != nil { + return containerNames, found, err + } + for _, item := range css { + cs := item.(map[string]interface{}) + n, found := cs["name"] + if !found { + continue + } + name := n.(string) + s, found := cs["state"] + if !found { + continue + } + state := s.(map[string]interface{}) + + ws, found := state["waiting"] + if !found { + continue + } + waitingState := ws.(map[string]interface{}) + + r, found := waitingState["reason"] + if !found { + continue + } + reason := r.(string) + if reason == "CrashLoopBackOff" { + containerNames = append(containerNames, name) + } + } + if len(containerNames) > 0 { + return containerNames, true, nil + } + return containerNames, false, nil +} + +// pdbConditions computes the status for PodDisruptionBudgets. A PDB +// is currently considered Current if the disruption controller has +// observed the latest version of the PDB resource and has computed +// the AllowedDisruptions. 
PDBs do have ObservedGeneration in the +// Status object, so if this function gets called we know that +// the controller has observed the latest changes. +// The disruption controller does not set any conditions if +// computing the AllowedDisruptions fails (and there are many ways +// it can fail), but there is PR against OSS Kubernetes to address +// this: https://github.com/kubernetes/kubernetes/pull/86929 +func pdbConditions(_ *unstructured.Unstructured) (*Result, error) { + // All ok + return &Result{ + Status: CurrentStatus, + Message: "AllowedDisruptions has been computed.", + Conditions: []Condition{}, + }, nil +} + +// jobConditions return standardized Conditions for Job +// +// A job will have the InProgress status until it starts running. Then it will have the Current +// status while the job is running and after it has been completed successfully. It +// will have the Failed status if it the job has failed. +func jobConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + parallelism := GetIntField(obj, ".spec.parallelism", 1) + completions := GetIntField(obj, ".spec.completions", parallelism) + succeeded := GetIntField(obj, ".status.succeeded", 0) + active := GetIntField(obj, ".status.active", 0) + failed := GetIntField(obj, ".status.failed", 0) + starttime := GetStringField(obj, ".status.startTime", "") + + // Conditions + // https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/job/utils.go#L24 + objc, err := GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + for _, c := range objc.Status.Conditions { + switch c.Type { + case "Complete": + if c.Status == corev1.ConditionTrue { + message := fmt.Sprintf("Job Completed. 
succeeded: %d/%d", succeeded, completions) + return &Result{ + Status: CurrentStatus, + Message: message, + Conditions: []Condition{}, + }, nil + } + case "Failed": + if c.Status == corev1.ConditionTrue { + return newFailedStatus("JobFailed", + fmt.Sprintf("Job Failed. failed: %d/%d", failed, completions)), nil + } + } + } + + // replicas + if starttime == "" { + message := "Job not started" + return newInProgressStatus("JobNotStarted", message), nil + } + return &Result{ + Status: CurrentStatus, + Message: fmt.Sprintf("Job in progress. success:%d, active: %d, failed: %d", succeeded, active, failed), + Conditions: []Condition{}, + }, nil +} + +// serviceConditions return standardized Conditions for Service +func serviceConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + specType := GetStringField(obj, ".spec.type", "ClusterIP") + specClusterIP := GetStringField(obj, ".spec.clusterIP", "") + + if specType == "LoadBalancer" { + if specClusterIP == "" { + message := "ClusterIP not set. 
Service type: LoadBalancer" + return newInProgressStatus("NoIPAssigned", message), nil + } + } + + return &Result{ + Status: CurrentStatus, + Message: "Service is ready", + Conditions: []Condition{}, + }, nil +} + +func crdConditions(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + objc, err := GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + + for _, c := range objc.Status.Conditions { + if c.Type == "NamesAccepted" && c.Status == corev1.ConditionFalse { + return newFailedStatus(c.Reason, c.Message), nil + } + if c.Type == "Established" { + if c.Status == corev1.ConditionFalse && c.Reason != "Installing" { + return newFailedStatus(c.Reason, c.Message), nil + } + if c.Status == corev1.ConditionTrue { + return &Result{ + Status: CurrentStatus, + Message: "CRD is established", + Conditions: []Condition{}, + }, nil + } + } + } + return newInProgressStatus("Installing", "Install in progress"), nil +} diff --git a/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/doc.go b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/doc.go new file mode 100644 index 0000000000..f69b8714be --- /dev/null +++ b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/doc.go @@ -0,0 +1,41 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// Package kstatus contains functionality for computing the status +// of Kubernetes resources. +// +// The statuses defined in this package are: +// * InProgress +// * Current +// * Failed +// * Terminating +// * Unknown +// +// Computing the status of a resources can be done by calling the +// Compute function in the status package. 
+// +// import ( +// "sigs.k8s.io/cli-utils/pkg/kstatus/status" +// ) +// +// res, err := status.Compute(resource) +// +// The package also defines a set of new conditions: +// * InProgress +// * Failed +// These conditions have been chosen to follow the +// "abnormal-true" pattern where conditions should be set to true +// for error/abnormal conditions and the absence of a condition means +// things are normal. +// +// The Augment function augments any unstructured resource with +// the standard conditions described above. The values of +// these conditions are decided based on other status information +// available in the resources. +// +// import ( +// "sigs.k8s.io/cli-utils/pkg/kstatus/status +// ) +// +// err := status.Augment(resource) +package status diff --git a/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/generic.go b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/generic.go new file mode 100644 index 0000000000..d8da90fb75 --- /dev/null +++ b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/generic.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package status + +import ( + "fmt" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// checkGenericProperties looks at the properties that are available on +// all or most of the Kubernetes resources. If a decision can be made based +// on this information, there is no need to look at the resource-specidic +// rules. +// This also checks for the presence of the conditions defined in this package. +// If any of these are set on the resource, a decision is made solely based +// on this and none of the resource specific rules will be used. The goal here +// is that if controllers, built-in or custom, use these conditions, we can easily +// find status of resources. 
+func checkGenericProperties(u *unstructured.Unstructured) (*Result, error) { + obj := u.UnstructuredContent() + + // Check if the resource is scheduled for deletion + deletionTimestamp, found, err := unstructured.NestedString(obj, "metadata", "deletionTimestamp") + if err != nil { + return nil, errors.Wrap(err, "looking up metadata.deletionTimestamp from resource") + } + if found && deletionTimestamp != "" { + return &Result{ + Status: TerminatingStatus, + Message: "Resource scheduled for deletion", + Conditions: []Condition{}, + }, nil + } + + res, err := checkGeneration(u) + if res != nil || err != nil { + return res, err + } + + // Check if the resource has any of the standard conditions. If so, we just use them + // and no need to look at anything else. + objWithConditions, err := GetObjectWithConditions(obj) + if err != nil { + return nil, err + } + + for _, cond := range objWithConditions.Status.Conditions { + if cond.Type == string(ConditionReconciling) && cond.Status == corev1.ConditionTrue { + return newInProgressStatus(cond.Reason, cond.Message), nil + } + if cond.Type == string(ConditionStalled) && cond.Status == corev1.ConditionTrue { + return &Result{ + Status: FailedStatus, + Message: cond.Message, + Conditions: []Condition{ + { + Type: ConditionStalled, + Status: corev1.ConditionTrue, + Reason: cond.Reason, + Message: cond.Message, + }, + }, + }, nil + } + } + + return nil, nil +} + +func checkGeneration(u *unstructured.Unstructured) (*Result, error) { + // ensure that the meta generation is observed + generation, found, err := unstructured.NestedInt64(u.Object, "metadata", "generation") + if err != nil { + return nil, errors.Wrap(err, "looking up metadata.generation from resource") + } + if !found { + return nil, nil + } + observedGeneration, found, err := unstructured.NestedInt64(u.Object, "status", "observedGeneration") + if err != nil { + return nil, errors.Wrap(err, "looking up status.observedGeneration from resource") + } + if found { + // 
Resource does not have this field, so we can't do this check. + // TODO(mortent): Verify behavior of not set vs does not exist. + if observedGeneration != generation { + message := fmt.Sprintf("%s generation is %d, but latest observed generation is %d", u.GetKind(), generation, observedGeneration) + return &Result{ + Status: InProgressStatus, + Message: message, + Conditions: []Condition{newReconcilingCondition("LatestGenerationNotObserved", message)}, + }, nil + } + } + return nil, nil +} diff --git a/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/status.go b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/status.go new file mode 100644 index 0000000000..f4c01089d9 --- /dev/null +++ b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/status.go @@ -0,0 +1,241 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package status + +import ( + "fmt" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +const ( + // The set of standard conditions defined in this package. These follow the "abnormality-true" + // convention where conditions should have a true value for abnormal/error situations and the absence + // of a condition should be interpreted as a false value, i.e. everything is normal. + ConditionStalled ConditionType = "Stalled" + ConditionReconciling ConditionType = "Reconciling" + + // The set of status conditions which can be assigned to resources. + InProgressStatus Status = "InProgress" + FailedStatus Status = "Failed" + CurrentStatus Status = "Current" + TerminatingStatus Status = "Terminating" + NotFoundStatus Status = "NotFound" + UnknownStatus Status = "Unknown" +) + +var ( + Statuses = []Status{InProgressStatus, FailedStatus, CurrentStatus, TerminatingStatus, UnknownStatus} +) + +// ConditionType defines the set of condition types allowed inside a Condition struct. +type ConditionType string + +// String returns the ConditionType as a string. 
+func (c ConditionType) String() string { + return string(c) +} + +// Status defines the set of statuses a resource can have. +type Status string + +// String returns the status as a string. +func (s Status) String() string { + return string(s) +} + +// StatusFromString turns a string into a Status. Will panic if the provided string is +// not a valid status. +func FromStringOrDie(text string) Status { + s := Status(text) + for _, r := range Statuses { + if s == r { + return s + } + } + panic(fmt.Errorf("string has invalid status: %s", s)) +} + +// Result contains the results of a call to compute the status of +// a resource. +type Result struct { + //Status + Status Status + // Message + Message string + // Conditions list of extracted conditions from Resource + Conditions []Condition +} + +// Condition defines the general format for conditions on Kubernetes resources. +// In practice, each kubernetes resource defines their own format for conditions, but +// most (maybe all) follows this structure. +type Condition struct { + // Type condition type + Type ConditionType `json:"type,omitempty"` + // Status String that describes the condition status + Status corev1.ConditionStatus `json:"status,omitempty"` + // Reason one work CamelCase reason + Reason string `json:"reason,omitempty"` + // Message Human readable reason string + Message string `json:"message,omitempty"` +} + +// Compute finds the status of a given unstructured resource. It does not +// fetch the state of the resource from a cluster, so the provided unstructured +// must have the complete state, including status. +// +// The returned result contains the status of the resource, which will be +// one of +// * InProgress +// * Current +// * Failed +// * Terminating +// It also contains a message that provides more information on why +// the resource has the given status. Finally, the result also contains +// a list of standard resources that would belong on the given resource. 
+func Compute(u *unstructured.Unstructured) (*Result, error) { + res, err := checkGenericProperties(u) + if err != nil { + return nil, err + } + + // If res is not nil, it means the generic checks was able to determine + // the status of the resource. We don't need to check the type-specific + // rules. + if res != nil { + return res, nil + } + + fn := GetLegacyConditionsFn(u) + if fn != nil { + return fn(u) + } + + // If neither the generic properties of the resource-specific rules + // can determine status, we do one last check to see if the resource + // does expose a Ready condition. Ready conditions do not adhere + // to the Kubernetes design recommendations, but they are pretty widely + // used. + res, err = checkReadyCondition(u) + if res != nil || err != nil { + return res, err + } + + // The resource is not one of the built-in types with specific + // rules and we were unable to make a decision based on the + // generic rules. In this case we assume that the absence of any known + // conditions means the resource is current. + return &Result{ + Status: CurrentStatus, + Message: "Resource is current", + Conditions: []Condition{}, + }, err +} + +// checkReadyCondition checks if a resource has a Ready condition, and +// if so, it will use the value of this condition to determine the +// status. +// There are a few challenges with this: +// - If a resource doesn't set the Ready condition until it is True, +// the library have no way of telling whether the resource is using the +// Ready condition, so it will fall back to the strategy for unknown +// resources, which is to assume they are always reconciled. +// - If the library sees the resource before the controller has had +// a chance to update the conditions, it also will not realize the +// resource use the Ready condition. +// - There is no way to determine if a resource with the Ready condition +// set to False is making progress or is doomed. 
+func checkReadyCondition(u *unstructured.Unstructured) (*Result, error) { + objWithConditions, err := GetObjectWithConditions(u.Object) + if err != nil { + return nil, err + } + + for _, cond := range objWithConditions.Status.Conditions { + if cond.Type != "Ready" { + continue + } + switch cond.Status { + case corev1.ConditionTrue: + return &Result{ + Status: CurrentStatus, + Message: "Resource is Ready", + Conditions: []Condition{}, + }, nil + case corev1.ConditionFalse: + return newInProgressStatus(cond.Reason, cond.Message), nil + case corev1.ConditionUnknown: + // For now we just treat an unknown condition value as + // InProgress. We should consider if there are better ways + // to handle it. + return newInProgressStatus(cond.Reason, cond.Message), nil + default: + // Do nothing in this case. + } + } + return nil, nil +} + +// Augment takes a resource and augments the resource with the +// standard status conditions. +func Augment(u *unstructured.Unstructured) error { + res, err := Compute(u) + if err != nil { + return err + } + + conditions, found, err := unstructured.NestedSlice(u.Object, "status", "conditions") + if err != nil { + return err + } + + if !found { + conditions = make([]interface{}, 0) + } + + currentTime := time.Now().UTC().Format(time.RFC3339) + + for _, resCondition := range res.Conditions { + present := false + for _, c := range conditions { + condition, ok := c.(map[string]interface{}) + if !ok { + return errors.New("condition does not have the expected structure") + } + conditionType, ok := condition["type"].(string) + if !ok { + return errors.New("condition type does not have the expected type") + } + if conditionType == string(resCondition.Type) { + conditionStatus, ok := condition["status"].(string) + if !ok { + return errors.New("condition status does not have the expected type") + } + if conditionStatus != string(resCondition.Status) { + condition["lastTransitionTime"] = currentTime + } + condition["status"] = 
string(resCondition.Status) + condition["lastUpdateTime"] = currentTime + condition["reason"] = resCondition.Reason + condition["message"] = resCondition.Message + present = true + } + } + if !present { + conditions = append(conditions, map[string]interface{}{ + "lastTransitionTime": currentTime, + "lastUpdateTime": currentTime, + "message": resCondition.Message, + "reason": resCondition.Reason, + "status": string(resCondition.Status), + "type": string(resCondition.Type), + }) + } + } + return unstructured.SetNestedSlice(u.Object, conditions, "status", "conditions") +} diff --git a/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/util.go b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/util.go new file mode 100644 index 0000000000..b22152cc71 --- /dev/null +++ b/vendor/sigs.k8s.io/cli-utils/pkg/kstatus/status/util.go @@ -0,0 +1,143 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +package status + +import ( + "strings" + + corev1 "k8s.io/api/core/v1" + apiunstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// newReconcilingCondition creates an reconciling condition with the given +// reason and message. +func newReconcilingCondition(reason, message string) Condition { + return Condition{ + Type: ConditionReconciling, + Status: corev1.ConditionTrue, + Reason: reason, + Message: message, + } +} + +func newStalledCondition(reason, message string) Condition { + return Condition{ + Type: ConditionStalled, + Status: corev1.ConditionTrue, + Reason: reason, + Message: message, + } +} + +// newInProgressStatus creates a status Result with the InProgress status +// and an InProgress condition. 
+func newInProgressStatus(reason, message string) *Result { + return &Result{ + Status: InProgressStatus, + Message: message, + Conditions: []Condition{newReconcilingCondition(reason, message)}, + } +} + +func newFailedStatus(reason, message string) *Result { + return &Result{ + Status: FailedStatus, + Message: message, + Conditions: []Condition{newStalledCondition(reason, message)}, + } +} + +// ObjWithConditions Represent meta object with status.condition array +type ObjWithConditions struct { + // Status as expected to be present in most compliant kubernetes resources + Status ConditionStatus `json:"status" yaml:"status"` +} + +// ConditionStatus represent status with condition array +type ConditionStatus struct { + // Array of Conditions as expected to be present in kubernetes resources + Conditions []BasicCondition `json:"conditions" yaml:"conditions"` +} + +// BasicCondition fields that are expected in a condition +type BasicCondition struct { + // Type Condition type + Type string `json:"type" yaml:"type"` + // Status is one of True,False,Unknown + Status corev1.ConditionStatus `json:"status" yaml:"status"` + // Reason simple single word reason in CamleCase + // +optional + Reason string `json:"reason,omitempty" yaml:"reason"` + // Message human readable reason + // +optional + Message string `json:"message,omitempty" yaml:"message"` +} + +// GetObjectWithConditions return typed object +func GetObjectWithConditions(in map[string]interface{}) (*ObjWithConditions, error) { + var out = new(ObjWithConditions) + err := runtime.DefaultUnstructuredConverter.FromUnstructured(in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func hasConditionWithStatus(conditions []BasicCondition, conditionType string, status corev1.ConditionStatus) bool { + _, found := getConditionWithStatus(conditions, conditionType, status) + return found +} + +func getConditionWithStatus(conditions []BasicCondition, conditionType string, status corev1.ConditionStatus) 
(BasicCondition, bool) { + for _, c := range conditions { + if c.Type == conditionType && c.Status == status { + return c, true + } + } + return BasicCondition{}, false +} + +// GetStringField return field as string defaulting to value if not found +func GetStringField(obj map[string]interface{}, fieldPath string, defaultValue string) string { + var rv = defaultValue + + fields := strings.Split(fieldPath, ".") + if fields[0] == "" { + fields = fields[1:] + } + + val, found, err := apiunstructured.NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return rv + } + + if v, ok := val.(string); ok { + return v + } + return rv +} + +// GetIntField return field as string defaulting to value if not found +func GetIntField(obj map[string]interface{}, fieldPath string, defaultValue int) int { + fields := strings.Split(fieldPath, ".") + if fields[0] == "" { + fields = fields[1:] + } + + val, found, err := apiunstructured.NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return defaultValue + } + + switch v := val.(type) { + case int: + return v + case int32: + return int(v) + case int64: + return int(v) + } + return defaultValue +}