From ac375fa2a8505dcce3fa7dfe7283172529f4d6e5 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Wed, 27 Mar 2024 13:20:56 +0530 Subject: [PATCH 01/39] temp work --- go.mod | 25 ++++---- go.sum | 56 +++++++++-------- main.go | 61 ++++++++++++++++--- provider/provider_plugin_framework.go | 87 +++++++++++++++++++++++++++ 4 files changed, 183 insertions(+), 46 deletions(-) create mode 100644 provider/provider_plugin_framework.go diff --git a/go.mod b/go.mod index a15e5c3470..0f4e32ae58 100644 --- a/go.mod +++ b/go.mod @@ -7,19 +7,21 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/hcl/v2 v2.20.0 + github.com/hashicorp/hcl/v2 v2.20.1 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 github.com/stretchr/testify v1.9.0 github.com/zclconf/go-cty v1.14.4 - golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 golang.org/x/mod v0.16.0 ) +require github.com/hashicorp/terraform-plugin-mux v0.15.0 + require ( - cloud.google.com/go/compute v1.24.0 // indirect + cloud.google.com/go/compute v1.25.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect @@ -29,7 +31,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // 
indirect @@ -38,7 +40,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -47,7 +49,8 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.20.0 // indirect github.com/hashicorp/terraform-json v0.21.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.22.0 // indirect + github.com/hashicorp/terraform-plugin-framework v1.7.0 + github.com/hashicorp/terraform-plugin-go v0.22.1 github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -75,11 +78,11 @@ require ( golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.18.0 // indirect - google.golang.org/api v0.169.0 // indirect + golang.org/x/tools v0.19.0 // indirect + google.golang.org/api v0.171.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect - google.golang.org/grpc v1.62.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa // indirect + google.golang.org/grpc v1.62.1 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index eb3ab8ec4d..27484638d2 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.24.0 
h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= +cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= +cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -8,8 +8,8 @@ dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= -github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton h1:HKz85FwoXx86kVtTvFke7rgHvq/HoloSUvW5semjFWs= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -72,8 +72,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -93,8 +93,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -105,8 +105,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty 
v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= -github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= +github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= @@ -120,18 +120,22 @@ github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkm github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.20.0 h1:l++cRs/5jQOiKVvqXZm/P1ZEfVXJmvLS9WSVxkaeTb4= -github.com/hashicorp/hcl/v2 v2.20.0/go.mod h1:WmcD/Ym72MDOOx5F62Ly+leloeu6H7m0pG7VBiU6pQk= +github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= +github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= 
-github.com/hashicorp/terraform-plugin-go v0.22.0 h1:1OS1Jk5mO0f5hrziWJGXXIxBrMe2j/B8E+DVGw43Xmc= -github.com/hashicorp/terraform-plugin-go v0.22.0/go.mod h1:mPULV91VKss7sik6KFEcEu7HuTogMLLO/EvWCuFkRVE= +github.com/hashicorp/terraform-plugin-framework v1.7.0 h1:wOULbVmfONnJo9iq7/q+iBOBJul5vRovaYJIu2cY/Pw= +github.com/hashicorp/terraform-plugin-framework v1.7.0/go.mod h1:jY9Id+3KbZ17OMpulgnWLSfwxNVYSoYBQFTgsx044CI= +github.com/hashicorp/terraform-plugin-go v0.22.1 h1:iTS7WHNVrn7uhe3cojtvWWn83cm2Z6ryIUDTRO0EV7w= +github.com/hashicorp/terraform-plugin-go v0.22.1/go.mod h1:qrjnqRghvQ6KnDbB12XeZ4FluclYwptntoWCr9QaXTI= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-plugin-mux v0.15.0 h1:+/+lDx0WUsIOpkAmdwBIoFU8UP9o2eZASoOnLsWbKME= +github.com/hashicorp/terraform-plugin-mux v0.15.0/go.mod h1:9ezplb1Dyq394zQ+ldB0nvy/qbNAz3mMoHHseMTMaKo= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8= github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= @@ -151,8 +155,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod 
h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= @@ -206,6 +208,8 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= @@ -224,8 +228,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod 
h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -289,12 +293,12 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= -google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.171.0 h1:w174hnBPqut76FzW5Qaupt7zY8Kql6fiVjgys4f58sU= +google.golang.org/api v0.171.0/go.mod h1:Hnq5AHm4OTMt2BUVjael2CWZFD6vksJdWCWiUAmjC9o= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -302,15 +306,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa h1:RBgMaUMP+6soRkik4VoN8ojR2nex2TqZwjSSogic+eo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/main.go b/main.go index 277d8fefd2..f40f8397ba 100644 --- a/main.go +++ b/main.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "log" "os" @@ -8,7 +9,10 @@ import ( 
"github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/exporter" "github.com/databricks/terraform-provider-databricks/provider" - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-mux/tf5to6server" + "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" ) func main() { @@ -29,15 +33,54 @@ func main() { debug = true } log.Printf(`Databricks Terraform Provider + + Version %s + + https://registry.terraform.io/providers/databricks/databricks/latest/docs + + `, common.Version()) -Version %s + sdkPluginProvider := provider.DatabricksProvider() -https://registry.terraform.io/providers/databricks/databricks/latest/docs + // Translate terraform sdk plugin to protocol 6 + upgradedSdkPluginProvider, err := tf5to6server.UpgradeServer( + context.Background(), + sdkPluginProvider.GRPCProvider, + ) -`, common.Version()) - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: provider.DatabricksProvider, - ProviderAddr: "registry.terraform.io/databricks/databricks", - Debug: debug, - }) + pluginFrameworkProvider := provider.GetDatabricksProviderPluginFramework() + + providers := []func() tfprotov6.ProviderServer{ + upgradedSdkPluginProvider, + pluginFrameworkProvider, + } + + // Translate plugin framework to protocol 5, we would use tf5muxserver.NewMuxServer(ctx, providers...) below + // providers := []func() tfprotov5.ProviderServer{ + // sdkPluginProvider.GRPCProvider, + // providerserver.NewProtocol5( + // pluginFrameworkProvider, + // ), + // } + + ctx := context.Background() + muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) 
+ if err != nil { + log.Fatal(err) + } + + var serveOpts []tf5server.ServeOpt + if debug { + serveOpts = append(serveOpts, tf5server.WithManagedDebug()) + } + + err = tf5server.Serve( + "registry.terraform.io/databricks/databricks", + muxServer.ProviderServer, + serveOpts..., + ) + + if err != nil { + log.Fatal(err) + } } diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go new file mode 100644 index 0000000000..fd53cc181a --- /dev/null +++ b/provider/provider_plugin_framework.go @@ -0,0 +1,87 @@ +package provider + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) + +func GetDatabricksProviderPluginFramework() provider.Provider { + p := &DatabricksProviderPluginFramework{} + providerserver.NewProtocol6(p) + return p +} + +type DatabricksProviderPluginFramework struct { +} + +func (p *DatabricksProviderPluginFramework) Resources(_ context.Context) []func() resource.Resource { + return []func() resource.Resource{ + func() resource.Resource { + return &DatabricksResource{} + }, + } +} + +func (p *DatabricksProviderPluginFramework) DataSources(_ context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + func() datasource.DataSource { + return &DatabricksDataSource{} + }, + } +} + +func (p *DatabricksProviderPluginFramework) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{}, + } +} + +func (p *DatabricksProviderPluginFramework) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { + 
resp.TypeName = "databricks-terraform-provider-plugin-framework" + resp.Version = "0.0.1" +} + +func (p *DatabricksProviderPluginFramework) Configure(_ context.Context, _ provider.ConfigureRequest, resp *provider.ConfigureResponse) { +} + +// Data Source +type DatabricksDataSource struct { +} + +func (d *DatabricksDataSource) Metadata(_ context.Context, _ datasource.MetadataRequest, resp *datasource.MetadataResponse) { +} + +func (d *DatabricksDataSource) Read(_ context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { +} + +func (d *DatabricksDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { +} + +// Resource +type DatabricksResource struct { +} + +func (r *DatabricksResource) Metadata(_ context.Context, _ resource.MetadataRequest, resp *resource.MetadataResponse) { +} + +func (r *DatabricksResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { +} + +func (r *DatabricksResource) Create(_ context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { +} + +func (r *DatabricksResource) Read(_ context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +} + +func (r *DatabricksResource) Update(_ context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *DatabricksResource) Delete(_ context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +} From b1dacae19747c754892768dc4c6737d6dc7b11c0 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Wed, 27 Mar 2024 13:26:32 +0530 Subject: [PATCH 02/39] - --- main.go | 3 +- plugin-framework/data_source.go | 19 +++++++++++ plugin-framework/resource.go | 28 ++++++++++++++++ provider/provider_plugin_framework.go | 46 +++------------------------ 4 files changed, 54 insertions(+), 42 deletions(-) create mode 100644 plugin-framework/data_source.go create mode 100644 plugin-framework/resource.go diff --git a/main.go b/main.go index 
f40f8397ba..cdaac7e15b 100644 --- a/main.go +++ b/main.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/exporter" "github.com/databricks/terraform-provider-databricks/provider" + "github.com/hashicorp/terraform-plugin-framework/providerserver" "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-mux/tf5to6server" @@ -52,7 +53,7 @@ func main() { providers := []func() tfprotov6.ProviderServer{ upgradedSdkPluginProvider, - pluginFrameworkProvider, + providerserver.NewProtocol6(pluginFrameworkProvider), } // Translate plugin framework to protocol 5, we would use tf5muxserver.NewMuxServer(ctx, providers...) below diff --git a/plugin-framework/data_source.go b/plugin-framework/data_source.go new file mode 100644 index 0000000000..da7084f768 --- /dev/null +++ b/plugin-framework/data_source.go @@ -0,0 +1,19 @@ +package pluginframework + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" +) + +type DatabricksDataSource struct { +} + +func (d *DatabricksDataSource) Metadata(_ context.Context, _ datasource.MetadataRequest, resp *datasource.MetadataResponse) { +} + +func (d *DatabricksDataSource) Read(_ context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { +} + +func (d *DatabricksDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { +} diff --git a/plugin-framework/resource.go b/plugin-framework/resource.go new file mode 100644 index 0000000000..16f7d2ae39 --- /dev/null +++ b/plugin-framework/resource.go @@ -0,0 +1,28 @@ +package pluginframework + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +type DatabricksResource struct { +} + +func (r *DatabricksResource) Metadata(_ context.Context, _ resource.MetadataRequest, resp 
*resource.MetadataResponse) { +} + +func (r *DatabricksResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { +} + +func (r *DatabricksResource) Create(_ context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { +} + +func (r *DatabricksResource) Read(_ context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +} + +func (r *DatabricksResource) Update(_ context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *DatabricksResource) Delete(_ context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +} diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index fd53cc181a..a2ac98e2cb 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -3,28 +3,27 @@ package provider import ( "context" + pluginframework "github.com/databricks/terraform-provider-databricks/plugin-framework" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/provider/schema" - "github.com/hashicorp/terraform-plugin-framework/providerserver" "github.com/hashicorp/terraform-plugin-framework/resource" ) -var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) - func GetDatabricksProviderPluginFramework() provider.Provider { p := &DatabricksProviderPluginFramework{} - providerserver.NewProtocol6(p) return p } type DatabricksProviderPluginFramework struct { } +var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) + func (p *DatabricksProviderPluginFramework) Resources(_ context.Context) []func() resource.Resource { return []func() resource.Resource{ func() resource.Resource { - return &DatabricksResource{} + return &pluginframework.DatabricksResource{} }, } } @@ -32,7 +31,7 @@ func (p *DatabricksProviderPluginFramework) Resources(_ context.Context) []func( 
func (p *DatabricksProviderPluginFramework) DataSources(_ context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ func() datasource.DataSource { - return &DatabricksDataSource{} + return &pluginframework.DatabricksDataSource{} }, } } @@ -50,38 +49,3 @@ func (p *DatabricksProviderPluginFramework) Metadata(_ context.Context, _ provid func (p *DatabricksProviderPluginFramework) Configure(_ context.Context, _ provider.ConfigureRequest, resp *provider.ConfigureResponse) { } - -// Data Source -type DatabricksDataSource struct { -} - -func (d *DatabricksDataSource) Metadata(_ context.Context, _ datasource.MetadataRequest, resp *datasource.MetadataResponse) { -} - -func (d *DatabricksDataSource) Read(_ context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { -} - -func (d *DatabricksDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { -} - -// Resource -type DatabricksResource struct { -} - -func (r *DatabricksResource) Metadata(_ context.Context, _ resource.MetadataRequest, resp *resource.MetadataResponse) { -} - -func (r *DatabricksResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { -} - -func (r *DatabricksResource) Create(_ context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { -} - -func (r *DatabricksResource) Read(_ context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { -} - -func (r *DatabricksResource) Update(_ context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { -} - -func (r *DatabricksResource) Delete(_ context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { -} From f7b6a7eaaa01e878c2661621e14318a46d9ad7c9 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Wed, 27 Mar 2024 13:34:21 +0530 Subject: [PATCH 03/39] mux support added, compiling works --- main.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff 
--git a/main.go b/main.go index cdaac7e15b..52c9ecb454 100644 --- a/main.go +++ b/main.go @@ -10,8 +10,8 @@ import ( "github.com/databricks/terraform-provider-databricks/exporter" "github.com/databricks/terraform-provider-databricks/provider" "github.com/hashicorp/terraform-plugin-framework/providerserver" - "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" "github.com/hashicorp/terraform-plugin-mux/tf5to6server" "github.com/hashicorp/terraform-plugin-mux/tf6muxserver" ) @@ -52,11 +52,14 @@ func main() { pluginFrameworkProvider := provider.GetDatabricksProviderPluginFramework() providers := []func() tfprotov6.ProviderServer{ - upgradedSdkPluginProvider, + func() tfprotov6.ProviderServer { + return upgradedSdkPluginProvider + }, providerserver.NewProtocol6(pluginFrameworkProvider), } - // Translate plugin framework to protocol 5, we would use tf5muxserver.NewMuxServer(ctx, providers...) below + // Translate plugin framework to protocol 5, + // we would use tf5muxserver.NewMuxServer(ctx, providers...) 
and tf5server.Serve below // providers := []func() tfprotov5.ProviderServer{ // sdkPluginProvider.GRPCProvider, // providerserver.NewProtocol5( @@ -70,12 +73,12 @@ func main() { log.Fatal(err) } - var serveOpts []tf5server.ServeOpt + var serveOpts []tf6server.ServeOpt if debug { - serveOpts = append(serveOpts, tf5server.WithManagedDebug()) + serveOpts = append(serveOpts, tf6server.WithManagedDebug()) } - err = tf5server.Serve( + err = tf6server.Serve( "registry.terraform.io/databricks/databricks", muxServer.ProviderServer, serveOpts..., From 232cb55e19f5407725bf0107a22fd88ca31fbcc4 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 2 Apr 2024 20:05:12 +0530 Subject: [PATCH 04/39] temp work --- .../catalog/resource_lakehouse_monitor.go | 56 +++++++++++++++++++ plugin-framework/data_source.go | 2 + plugin-framework/resource.go | 2 + 3 files changed, 60 insertions(+) create mode 100644 plugin-framework/catalog/resource_lakehouse_monitor.go diff --git a/plugin-framework/catalog/resource_lakehouse_monitor.go b/plugin-framework/catalog/resource_lakehouse_monitor.go new file mode 100644 index 0000000000..33be62ccbf --- /dev/null +++ b/plugin-framework/catalog/resource_lakehouse_monitor.go @@ -0,0 +1,56 @@ +package pluginframework + +import ( + "context" + "time" + + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + pluginframework "github.com/databricks/terraform-provider-databricks/plugin-framework" + "github.com/hashicorp/terraform-plugin-framework/resource" +) + +const lakehouseMonitorDefaultProvisionTimeout = 15 * time.Minute + +type LakehouseMonitorResource struct { + pluginframework.DatabricksResource +} + +func (r *LakehouseMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { +} + +func (r *LakehouseMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + req = 
resource.SchemaRequest{} +} + +func (r *LakehouseMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + c := common.DatabricksClient{} + w, err := c.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + var create catalog.CreateMonitor + common.DataToStructPointer(d, monitorSchema, &create) + create.FullName = d.Get("table_name").(string) + + endpoint, err := w.LakehouseMonitors.Create(ctx, create) + if err != nil { + return err + } + err = WaitForMonitor(w, ctx, create.FullName) + if err != nil { + return err + } + d.SetId(endpoint.TableName) + return nil +} + +func (r *LakehouseMonitorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +} + +func (r *LakehouseMonitorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +} + +func (r *LakehouseMonitorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +} diff --git a/plugin-framework/data_source.go b/plugin-framework/data_source.go index da7084f768..25a3e4eb2b 100644 --- a/plugin-framework/data_source.go +++ b/plugin-framework/data_source.go @@ -6,6 +6,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/datasource" ) +var _ datasource.DataSource = (*DatabricksDataSource)(nil) + type DatabricksDataSource struct { } diff --git a/plugin-framework/resource.go b/plugin-framework/resource.go index 16f7d2ae39..012cc1b830 100644 --- a/plugin-framework/resource.go +++ b/plugin-framework/resource.go @@ -6,6 +6,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" ) +var _ resource.Resource = (*DatabricksResource)(nil) + type DatabricksResource struct { } From fef1b407e7037ef70fd849a75d9805330d73a0c7 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Thu, 25 Apr 2024 17:40:56 +0200 Subject: [PATCH 05/39] cleanup --- {plugin-framework => 
pluginframework}/data_source.go | 0 {plugin-framework => pluginframework}/resource.go | 2 ++ .../resource_lakehouse_monitor.go | 4 ++++ provider/provider_plugin_framework.go | 7 +++---- 4 files changed, 9 insertions(+), 4 deletions(-) rename {plugin-framework => pluginframework}/data_source.go (100%) rename {plugin-framework => pluginframework}/resource.go (97%) rename {plugin-framework/catalog => pluginframework}/resource_lakehouse_monitor.go (98%) diff --git a/plugin-framework/data_source.go b/pluginframework/data_source.go similarity index 100% rename from plugin-framework/data_source.go rename to pluginframework/data_source.go diff --git a/plugin-framework/resource.go b/pluginframework/resource.go similarity index 97% rename from plugin-framework/resource.go rename to pluginframework/resource.go index 012cc1b830..9de5d36e8a 100644 --- a/plugin-framework/resource.go +++ b/pluginframework/resource.go @@ -28,3 +28,5 @@ func (r *DatabricksResource) Update(_ context.Context, req resource.UpdateReques func (r *DatabricksResource) Delete(_ context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { } + +type ABC struct{} diff --git a/plugin-framework/catalog/resource_lakehouse_monitor.go b/pluginframework/resource_lakehouse_monitor.go similarity index 98% rename from plugin-framework/catalog/resource_lakehouse_monitor.go rename to pluginframework/resource_lakehouse_monitor.go index f4bab728be..3bfb85c807 100644 --- a/plugin-framework/catalog/resource_lakehouse_monitor.go +++ b/pluginframework/resource_lakehouse_monitor.go @@ -32,6 +32,10 @@ func WaitForMonitor(w *databricks.WorkspaceClient, ctx context.Context, monitorN }) } +func ResourceLakehouseMonitorPluginFramework() resource.Resource { + return &LakehouseMonitorResource{} +} + type LakehouseMonitorResource struct{} func (r *LakehouseMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { diff --git a/provider/provider_plugin_framework.go 
b/provider/provider_plugin_framework.go index a2ac98e2cb..469b0dbcfb 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -3,7 +3,8 @@ package provider import ( "context" - pluginframework "github.com/databricks/terraform-provider-databricks/plugin-framework" + "github.com/databricks/terraform-provider-databricks/pluginframework" + "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/provider/schema" @@ -22,9 +23,7 @@ var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) func (p *DatabricksProviderPluginFramework) Resources(_ context.Context) []func() resource.Resource { return []func() resource.Resource{ - func() resource.Resource { - return &pluginframework.DatabricksResource{} - }, + pluginframework.ResourceLakehouseMonitorPluginFramework, } } From c7be1a66022352bbd1bfd7d0c4234462f76e5164 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Fri, 26 Apr 2024 16:56:12 +0200 Subject: [PATCH 06/39] - --- pluginframework/data_source.go | 6 +- pluginframework/data_volumes.go | 72 +++++++++++++++ pluginframework/resource.go | 14 ++- pluginframework/resource_lakehouse_monitor.go | 2 +- .../test/plugin_framework_poc_test.tf | 87 +++++++++++++++++++ provider/provider_plugin_framework.go | 4 +- 6 files changed, 170 insertions(+), 15 deletions(-) create mode 100644 pluginframework/data_volumes.go create mode 100644 pluginframework/test/plugin_framework_poc_test.tf diff --git a/pluginframework/data_source.go b/pluginframework/data_source.go index 25a3e4eb2b..532fa1665d 100644 --- a/pluginframework/data_source.go +++ b/pluginframework/data_source.go @@ -11,11 +11,11 @@ var _ datasource.DataSource = (*DatabricksDataSource)(nil) type DatabricksDataSource struct { } -func (d *DatabricksDataSource) Metadata(_ context.Context, _ datasource.MetadataRequest, resp *datasource.MetadataResponse) { +func (d 
*DatabricksDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { } -func (d *DatabricksDataSource) Read(_ context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { +func (d *DatabricksDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { } -func (d *DatabricksDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { +func (d *DatabricksDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { } diff --git a/pluginframework/data_volumes.go b/pluginframework/data_volumes.go new file mode 100644 index 0000000000..6819725a36 --- /dev/null +++ b/pluginframework/data_volumes.go @@ -0,0 +1,72 @@ +package pluginframework + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func DataSourceVolumesPluginFramework() datasource.DataSource { + return &VolumesDataSource{} +} + +type VolumesDataSource struct{} + +type VolumesList struct { + CatalogName string `json:"catalog_name"` + SchemaName string `json:"schema_name"` + Ids []string `json:"ids,omitempty"` +} + +func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_volumes_plugin_framework" +} + +func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "catalog_name": schema.StringAttribute{ + Required: true, + }, + "schema_name": schema.StringAttribute{ + Required: true, + }, + 
"ids": schema.ListAttribute{ + ElementType: types.StringType, + Optional: true, + }, + }, + } +} + +func (d *VolumesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + c := common.DatabricksClient{} + w, err := c.WorkspaceClient() + if err != nil { + resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) + return + } + var volumeInfo catalog.VolumeInfo + diags := req.Config.Get(ctx, &volumeInfo) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + volumes, err := w.Volumes.ListAll(ctx, catalog.ListVolumesRequest{ + CatalogName: volumeInfo.CatalogName, + SchemaName: volumeInfo.SchemaName, + }) + if err != nil { + resp.Diagnostics.AddError("Failed to get volumes for the catalog and schema", err.Error()) + return + } + volumeList := VolumesList{} + for _, v := range volumes { + volumeList.Ids = append(volumeList.Ids, v.FullName) + } + resp.State.Set(ctx, volumeList) +} diff --git a/pluginframework/resource.go b/pluginframework/resource.go index 9de5d36e8a..4f9d6ed981 100644 --- a/pluginframework/resource.go +++ b/pluginframework/resource.go @@ -11,22 +11,20 @@ var _ resource.Resource = (*DatabricksResource)(nil) type DatabricksResource struct { } -func (r *DatabricksResource) Metadata(_ context.Context, _ resource.MetadataRequest, resp *resource.MetadataResponse) { +func (r *DatabricksResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { } -func (r *DatabricksResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { +func (r *DatabricksResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { } -func (r *DatabricksResource) Create(_ context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { +func (r *DatabricksResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { } -func (r 
*DatabricksResource) Read(_ context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +func (r *DatabricksResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { } -func (r *DatabricksResource) Update(_ context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +func (r *DatabricksResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { } -func (r *DatabricksResource) Delete(_ context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +func (r *DatabricksResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { } - -type ABC struct{} diff --git a/pluginframework/resource_lakehouse_monitor.go b/pluginframework/resource_lakehouse_monitor.go index 3bfb85c807..83320a37f9 100644 --- a/pluginframework/resource_lakehouse_monitor.go +++ b/pluginframework/resource_lakehouse_monitor.go @@ -39,7 +39,7 @@ func ResourceLakehouseMonitorPluginFramework() resource.Resource { type LakehouseMonitorResource struct{} func (r *LakehouseMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_lakehouse_monitor" + resp.TypeName = req.ProviderTypeName + "_lakehouse_monitor_plugin_framework" } func (r *LakehouseMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { diff --git a/pluginframework/test/plugin_framework_poc_test.tf b/pluginframework/test/plugin_framework_poc_test.tf new file mode 100644 index 0000000000..af12a8d8cf --- /dev/null +++ b/pluginframework/test/plugin_framework_poc_test.tf @@ -0,0 +1,87 @@ +# TESTING SOP +# ----------- +# Please make sure development overrides are in effect before running this manually +# https://github.com/databricks/terraform-provider-databricks/blob/main/CONTRIBUTING.md#developing-provider +# 1. touch ~/.terraformrc +# 2. 
Add the following to the file (update to your user.name in the path): +# provider_installation { +# dev_overrides { +# "databricks/databricks" = "/Users//terraform-provider-databricks" +# } +# direct {} +# } +# 3. run $ make in terraform-provider-databricks root directory to build the binary +# 4. cd terraform-provider-databricks/pluginframework/test +# 5. terraform init -upgrade +# 6. terraform apply + +terraform { + required_providers { + databricks = { + source = "databricks/databricks" + } + } +} + +provider "databricks" { + profile = "aws-prod-ucws" +} + +resource "databricks_catalog" "testCatalog" { + name = "testCatalog" + comment = "Plugin Framework PoC" + properties = { + purpose = "testing" + } +} + +resource "databricks_schema" "testSchema" { + catalog_name = databricks_catalog.testCatalog.name + name = "testSchema" + comment = "Plugin Framework PoC" + properties = { + purpose = "testing" + } +} + +resource "databricks_sql_table" "testTable" { + catalog_name = databricks_catalog.testCatalog.name + schema_name = databricks_schema.testSchema.name + name = "testTable" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + name = "timestamp" + type = "int" + } +} + +resource "databricks_lakehouse_monitor_plugin_framework" "testMonitor" { + table_name = "${databricks_catalog.testCatalog.name}.${databricks_schema.testSchema.name}.${databricks_sql_table.testTable.name}" +} + +resource "databricks_volume" "testVolume1" { + name = "testVolume1" + catalog_name = databricks_catalog.testCatalog.name + schema_name = databricks_schema.testSchema.name + volume_type = "MANAGED" + comment = "Plugin Framework PoC" +} + +resource "databricks_volume" "testVolume2" { + name = "testVolume2" + catalog_name = databricks_catalog.testCatalog.name + schema_name = databricks_schema.testSchema.name + volume_type = "MANAGED" + comment = "Plugin Framework PoC" +} + +data "databricks_volumes_plugin_framework" "testVolumes" { + catalog_name = 
databricks_catalog.testCatalog.name + schema_name = databricks_schema.testSchema.name +} + +output "all_volumes" { + value = data.databricks_volumes_plugin_framework.testVolumes +} \ No newline at end of file diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 469b0dbcfb..d0cfc5ad38 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -29,9 +29,7 @@ func (p *DatabricksProviderPluginFramework) Resources(_ context.Context) []func( func (p *DatabricksProviderPluginFramework) DataSources(_ context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ - func() datasource.DataSource { - return &pluginframework.DatabricksDataSource{} - }, + pluginframework.DataSourceVolumesPluginFramework, } } From 7a6dc2dd5c3f9b24efc417351c9937f083c251b3 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 28 May 2024 14:56:42 +0200 Subject: [PATCH 07/39] more work --- pluginframework/data_volumes.go | 2 +- pluginframework/resource_lakehouse_monitor.go | 2 +- provider/provider_plugin_framework.go | 116 +++++++++++- provider/provider_test.go | 118 ++---------- provider/test_utils.go | 172 ++++++++++++++++++ 5 files changed, 293 insertions(+), 117 deletions(-) create mode 100644 provider/test_utils.go diff --git a/pluginframework/data_volumes.go b/pluginframework/data_volumes.go index 6819725a36..460c2e8d9d 100644 --- a/pluginframework/data_volumes.go +++ b/pluginframework/data_volumes.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -func DataSourceVolumesPluginFramework() datasource.DataSource { +func DataSourceVolumes() datasource.DataSource { return &VolumesDataSource{} } diff --git a/pluginframework/resource_lakehouse_monitor.go b/pluginframework/resource_lakehouse_monitor.go index 83320a37f9..278a81b8de 100644 --- a/pluginframework/resource_lakehouse_monitor.go +++ b/pluginframework/resource_lakehouse_monitor.go @@ -32,7 +32,7 
@@ func WaitForMonitor(w *databricks.WorkspaceClient, ctx context.Context, monitorN }) } -func ResourceLakehouseMonitorPluginFramework() resource.Resource { +func ResourceLakehouseMonitor() resource.Resource { return &LakehouseMonitorResource{} } diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index d0cfc5ad38..84f62ee20d 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -2,15 +2,37 @@ package provider import ( "context" + "fmt" + "log" + "reflect" + "sort" + "strings" + "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/useragent" + "github.com/databricks/terraform-provider-databricks/commands" + "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/pluginframework" "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/provider" "github.com/hashicorp/terraform-plugin-framework/provider/schema" "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" ) +var pluginFrameworkProviderName = "databricks-tf-provider-plugin-framework" + +func init() { + // IMPORTANT: this line cannot be changed, because it's used for + // internal purposes at Databricks. 
+ useragent.WithProduct(pluginFrameworkProviderName, common.Version()) +} + func GetDatabricksProviderPluginFramework() provider.Provider { p := &DatabricksProviderPluginFramework{} return p @@ -23,26 +45,104 @@ var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) func (p *DatabricksProviderPluginFramework) Resources(_ context.Context) []func() resource.Resource { return []func() resource.Resource{ - pluginframework.ResourceLakehouseMonitorPluginFramework, + pluginframework.ResourceLakehouseMonitor, } } func (p *DatabricksProviderPluginFramework) DataSources(_ context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ - pluginframework.DataSourceVolumesPluginFramework, + pluginframework.DataSourceVolumes, } } func (p *DatabricksProviderPluginFramework) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { - resp.Schema = schema.Schema{ - Attributes: map[string]schema.Attribute{}, - } + resp.Schema = providerSchemaPluginFramework() } func (p *DatabricksProviderPluginFramework) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { - resp.TypeName = "databricks-terraform-provider-plugin-framework" - resp.Version = "0.0.1" + resp.TypeName = pluginFrameworkProviderName + resp.Version = common.Version() +} + +func (p *DatabricksProviderPluginFramework) Configure(_ context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + configureDatabricksClient_PluginFramework(context.Background(), req, resp) +} + +func providerSchemaPluginFramework() schema.Schema { + ps := map[string]schema.Attribute{} + for _, attr := range config.ConfigAttributes { + switch attr.Kind { + case reflect.Bool: + ps[attr.Name] = schema.BoolAttribute{ + Optional: true, + Sensitive: attr.Sensitive, + } + case reflect.String: + ps[attr.Name] = schema.StringAttribute{ + Optional: true, + Sensitive: attr.Sensitive, + } + case reflect.Int: + ps[attr.Name] = 
schema.Int64Attribute{ + Optional: true, + Sensitive: attr.Sensitive, + } + } + } + return schema.Schema{ + Attributes: ps, + } } -func (p *DatabricksProviderPluginFramework) Configure(_ context.Context, _ provider.ConfigureRequest, resp *provider.ConfigureResponse) { +func configureDatabricksClient_PluginFramework(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) any { + cfg := &config.Config{} + attrsUsed := []string{} + authsUsed := map[string]bool{} + for _, attr := range config.ConfigAttributes { + var attrName types.String + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrName) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return nil + } + err := attr.Set(cfg, attrName) + if err != nil { + resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) + return nil + } + if attr.Kind == reflect.String { + attrsUsed = append(attrsUsed, attr.Name) + } + if attr.Auth != "" { + authsUsed[attr.Auth] = true + } + } + sort.Strings(attrsUsed) + tflog.Info(ctx, fmt.Sprintf("Explicit and implicit attributes: %s", strings.Join(attrsUsed, ", "))) + if cfg.AuthType != "" { + // mapping from previous Google authentication types + // and current authentication types from Databricks Go SDK + oldToNewerAuthType := map[string]string{ + "google-creds": "google-credentials", + "google-accounts": "google-id", + "google-workspace": "google-id", + } + newer, ok := oldToNewerAuthType[cfg.AuthType] + if ok { + log.Printf("[INFO] Changing required auth_type from %s to %s", cfg.AuthType, newer) + cfg.AuthType = newer + } + } + client, err := client.New(cfg) + if err != nil { + resp.Diagnostics.Append(diag.NewErrorDiagnostic("Error while generating client", err.Error())) + return nil + } + pc := &common.DatabricksClient{ + DatabricksClient: client, + } + pc.WithCommandExecutor(func(ctx context.Context, client *common.DatabricksClient) common.CommandExecutor { + return 
commands.NewCommandsAPI(ctx, client) + }) + return pc } diff --git a/provider/provider_test.go b/provider/provider_test.go index 7f68a20593..d27c86bab8 100644 --- a/provider/provider_test.go +++ b/provider/provider_test.go @@ -8,91 +8,13 @@ import ( "net/http/httptest" "os" "path/filepath" - "strings" "testing" "time" "github.com/databricks/terraform-provider-databricks/common" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -type providerFixture struct { - host string - token string - username string - password string - configFile string - profile string - azureClientID string - azureClientSecret string - azureTenantID string - azureResourceID string - authType string - env map[string]string - assertError string - assertAuth string - assertHost string - assertAzure bool -} - -func (tt providerFixture) rawConfig() map[string]any { - rawConfig := map[string]any{} - if tt.host != "" { - rawConfig["host"] = tt.host - } - if tt.token != "" { - rawConfig["token"] = tt.token - } - if tt.username != "" { - rawConfig["username"] = tt.username - } - if tt.password != "" { - rawConfig["password"] = tt.password - } - if tt.configFile != "" { - rawConfig["config_file"] = tt.configFile - } - if tt.profile != "" { - rawConfig["profile"] = tt.profile - } - if tt.azureClientID != "" { - rawConfig["azure_client_id"] = tt.azureClientID - } - if tt.azureClientSecret != "" { - rawConfig["azure_client_secret"] = tt.azureClientSecret - } - if tt.azureTenantID != "" { - rawConfig["azure_tenant_id"] = tt.azureTenantID - } - if tt.azureResourceID != "" { - rawConfig["azure_workspace_resource_id"] = tt.azureResourceID - } - if tt.authType != "" { - rawConfig["auth_type"] = tt.authType - } - return rawConfig -} - -func (tc providerFixture) apply(t *testing.T) *common.DatabricksClient { - c, err := configureProviderAndReturnClient(t, tc) - if tc.assertError != "" { - require.NotNilf(t, err, "Expected to 
have %s error", tc.assertError) - require.True(t, strings.HasPrefix(err.Error(), tc.assertError), - "Expected to have '%s' error, but got '%s'", tc.assertError, err) - return nil - } - if err != nil { - require.NoError(t, err) - return nil - } - assert.Equal(t, tc.assertAzure, c.IsAzure()) - assert.Equal(t, tc.assertAuth, c.Config.AuthType) - assert.Equal(t, tc.assertHost, c.Config.Host) - return c -} - func TestConfig_NoParams(t *testing.T) { if f, err := os.Stat("~/.databrickscfg"); err == nil && !f.IsDir() { // the provider should fail to configure if no configuration options are available, @@ -425,7 +347,7 @@ func TestConfig_OAuthFetchesToken(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(shortLivedOAuthHandler)) defer ts.Close() - client := providerFixture{ + testFixture := providerFixture{ env: map[string]string{ "DATABRICKS_HOST": ts.URL, "DATABRICKS_CLIENT_ID": "x", @@ -433,9 +355,17 @@ func TestConfig_OAuthFetchesToken(t *testing.T) { }, assertAuth: "oauth-m2m", assertHost: ts.URL, - }.apply(t) + } - ws, err := client.WorkspaceClient() + client := testFixture.applyWithSDKv2(t) + testOAuthFetchesToken(t, client) + + client = testFixture.applyWithPluginFramework(t) + testOAuthFetchesToken(t, client) +} + +func testOAuthFetchesToken(t *testing.T, c *common.DatabricksClient) { + ws, err := c.WorkspaceClient() require.NoError(t, err) bgCtx := context.Background() { @@ -451,29 +381,3 @@ func TestConfig_OAuthFetchesToken(t *testing.T) { require.NoError(t, err) } } - -func configureProviderAndReturnClient(t *testing.T, tt providerFixture) (*common.DatabricksClient, error) { - for k, v := range tt.env { - t.Setenv(k, v) - } - p := DatabricksProvider() - ctx := context.Background() - diags := p.Configure(ctx, terraform.NewResourceConfigRaw(tt.rawConfig())) - if len(diags) > 0 { - issues := []string{} - for _, d := range diags { - issues = append(issues, d.Summary) - } - return nil, fmt.Errorf(strings.Join(issues, ", ")) - } - client := 
p.Meta().(*common.DatabricksClient) - r, err := http.NewRequest("GET", "", nil) - if err != nil { - return nil, err - } - err = client.Config.Authenticate(r) - if err != nil { - return nil, err - } - return client, nil -} diff --git a/provider/test_utils.go b/provider/test_utils.go new file mode 100644 index 0000000000..fca2a5332b --- /dev/null +++ b/provider/test_utils.go @@ -0,0 +1,172 @@ +package provider + +import ( + "context" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type providerFixture struct { + host string + token string + username string + password string + configFile string + profile string + azureClientID string + azureClientSecret string + azureTenantID string + azureResourceID string + authType string + env map[string]string + assertError string + assertAuth string + assertHost string + assertAzure bool +} + +func (tt providerFixture) rawConfig() map[string]any { + rawConfig := map[string]any{} + if tt.host != "" { + rawConfig["host"] = tt.host + } + if tt.token != "" { + rawConfig["token"] = tt.token + } + if tt.username != "" { + rawConfig["username"] = tt.username + } + if tt.password != "" { + rawConfig["password"] = tt.password + } + if tt.configFile != "" { + rawConfig["config_file"] = tt.configFile + } + if tt.profile != "" { + rawConfig["profile"] = tt.profile + } + if tt.azureClientID != "" { + rawConfig["azure_client_id"] = tt.azureClientID + } + if tt.azureClientSecret != "" { + rawConfig["azure_client_secret"] = tt.azureClientSecret + } + if tt.azureTenantID != "" { + rawConfig["azure_tenant_id"] = tt.azureTenantID + } + if tt.azureResourceID != "" { + 
rawConfig["azure_workspace_resource_id"] = tt.azureResourceID + } + if tt.authType != "" { + rawConfig["auth_type"] = tt.authType + } + return rawConfig +} + +func (tc providerFixture) applyWithSDKv2(t *testing.T) *common.DatabricksClient { + c, err := configureProviderAndReturnClient_SDKv2(t, tc) + return tc.applyAssertions(c, t, err) +} + +func (tc providerFixture) applyWithPluginFramework(t *testing.T) *common.DatabricksClient { + c, err := configureProviderAndReturnClient_PluginFramework(t, tc) + return tc.applyAssertions(c, t, err) +} + +func (tc providerFixture) applyAssertions(c *common.DatabricksClient, t *testing.T, err error) *common.DatabricksClient { + if tc.assertError != "" { + require.NotNilf(t, err, "Expected to have %s error", tc.assertError) + require.True(t, strings.HasPrefix(err.Error(), tc.assertError), + "Expected to have '%s' error, but got '%s'", tc.assertError, err) + return nil + } + if err != nil { + require.NoError(t, err) + return nil + } + assert.Equal(t, tc.assertAzure, c.IsAzure()) + assert.Equal(t, tc.assertAuth, c.Config.AuthType) + assert.Equal(t, tc.assertHost, c.Config.Host) + return c +} + +func (tc providerFixture) apply(t *testing.T) { + _ = tc.applyWithSDKv2(t) + _ = tc.applyWithPluginFramework(t) +} + +func configureProviderAndReturnClient_SDKv2(t *testing.T, tt providerFixture) (*common.DatabricksClient, error) { + for k, v := range tt.env { + t.Setenv(k, v) + } + p := DatabricksProvider() + ctx := context.Background() + testRawConfig := tt.rawConfig() + diags := p.Configure(ctx, terraform.NewResourceConfigRaw(testRawConfig)) + if len(diags) > 0 { + issues := []string{} + for _, d := range diags { + issues = append(issues, d.Summary) + } + return nil, fmt.Errorf(strings.Join(issues, ", ")) + } + client := p.Meta().(*common.DatabricksClient) + r, err := http.NewRequest("GET", "", nil) + if err != nil { + return nil, err + } + err = client.Config.Authenticate(r) + if err != nil { + return nil, err + } + return client, nil +} 
+ +func configureProviderAndReturnClient_PluginFramework(t *testing.T, tt providerFixture) (*common.DatabricksClient, error) { + for k, v := range tt.env { + t.Setenv(k, v) + } + p := GetDatabricksProviderPluginFramework() + ctx := context.Background() + // tanmaytodo fill the request / response + testConfig := tt.rawConfig() + configRequest := provider.ConfigureRequest{ + Config: tfsdk.Config{ + Raw: tftypes.NewValue(tftypes.Map{}, testConfig), + }, + } + configResponse := &provider.ConfigureResponse{} + p.Configure(ctx, configRequest, configResponse) + diags := configResponse.Diagnostics + if len(diags) > 0 { + issues := []string{} + for _, d := range diags { + issues = append(issues, d.Summary()) + } + return nil, fmt.Errorf(strings.Join(issues, ", ")) + } + metadataRequest := provider.MetadataRequest{} + metadataResponse := &provider.MetadataResponse{} + p.Metadata(ctx, metadataRequest, metadataResponse) + // Get the configured client from metadata? + client := &common.DatabricksClient{} + r, err := http.NewRequest("GET", "", nil) + if err != nil { + return nil, err + } + err = client.Config.Authenticate(r) + if err != nil { + return nil, err + } + return client, nil +} From 4b5966cacdbb77fa18f6badb1526a9bd8dfeda80 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 28 May 2024 17:24:35 +0200 Subject: [PATCH 08/39] - --- provider/provider_plugin_framework.go | 14 +++++----- provider/test_utils.go | 37 ++++++++++++++++++--------- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 84f62ee20d..276f317369 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -43,29 +43,31 @@ type DatabricksProviderPluginFramework struct { var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) -func (p *DatabricksProviderPluginFramework) Resources(_ context.Context) []func() resource.Resource { +func (p 
*DatabricksProviderPluginFramework) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ pluginframework.ResourceLakehouseMonitor, } } -func (p *DatabricksProviderPluginFramework) DataSources(_ context.Context) []func() datasource.DataSource { +func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ pluginframework.DataSourceVolumes, } } -func (p *DatabricksProviderPluginFramework) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { +func (p *DatabricksProviderPluginFramework) Schema(ctx context.Context, req provider.SchemaRequest, resp *provider.SchemaResponse) { resp.Schema = providerSchemaPluginFramework() } -func (p *DatabricksProviderPluginFramework) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { +func (p *DatabricksProviderPluginFramework) Metadata(ctx context.Context, req provider.MetadataRequest, resp *provider.MetadataResponse) { resp.TypeName = pluginFrameworkProviderName resp.Version = common.Version() } -func (p *DatabricksProviderPluginFramework) Configure(_ context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { - configureDatabricksClient_PluginFramework(context.Background(), req, resp) +func (p *DatabricksProviderPluginFramework) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + client := configureDatabricksClient_PluginFramework(ctx, req, resp) + resp.DataSourceData = client + resp.ResourceData = client } func providerSchemaPluginFramework() schema.Schema { diff --git a/provider/test_utils.go b/provider/test_utils.go index fca2a5332b..9cb7977321 100644 --- a/provider/test_utils.go +++ b/provider/test_utils.go @@ -35,8 +35,8 @@ type providerFixture struct { assertAzure bool } -func (tt providerFixture) rawConfig() map[string]any { - rawConfig := map[string]any{} 
+func (tt providerFixture) rawConfig() map[string]string { + rawConfig := map[string]string{} if tt.host != "" { rawConfig["host"] = tt.host } @@ -73,6 +73,24 @@ func (tt providerFixture) rawConfig() map[string]any { return rawConfig } +func (tt providerFixture) rawConfigSDKv2() map[string]any { + rawConfig := tt.rawConfig() + rawConfigSDKv2 := map[string]any{} + for k, v := range rawConfig { + rawConfigSDKv2[k] = v + } + return rawConfigSDKv2 +} + +func (tt providerFixture) rawConfigPluginFramework() tftypes.Value { + rawConfig := tt.rawConfig() + pluginFrameworkMap := map[string]tftypes.Value{} + for k, v := range rawConfig { + pluginFrameworkMap[k] = tftypes.NewValue(tftypes.String, v) + } + return tftypes.NewValue(tftypes.Map{ElementType: tftypes.String}, pluginFrameworkMap) +} + func (tc providerFixture) applyWithSDKv2(t *testing.T) *common.DatabricksClient { c, err := configureProviderAndReturnClient_SDKv2(t, tc) return tc.applyAssertions(c, t, err) @@ -111,8 +129,8 @@ func configureProviderAndReturnClient_SDKv2(t *testing.T, tt providerFixture) (* } p := DatabricksProvider() ctx := context.Background() - testRawConfig := tt.rawConfig() - diags := p.Configure(ctx, terraform.NewResourceConfigRaw(testRawConfig)) + testConfig := terraform.NewResourceConfigRaw(tt.rawConfigSDKv2()) + diags := p.Configure(ctx, testConfig) if len(diags) > 0 { issues := []string{} for _, d := range diags { @@ -138,11 +156,10 @@ func configureProviderAndReturnClient_PluginFramework(t *testing.T, tt providerF } p := GetDatabricksProviderPluginFramework() ctx := context.Background() - // tanmaytodo fill the request / response - testConfig := tt.rawConfig() + rawConfig := tt.rawConfigPluginFramework() configRequest := provider.ConfigureRequest{ Config: tfsdk.Config{ - Raw: tftypes.NewValue(tftypes.Map{}, testConfig), + Raw: rawConfig, }, } configResponse := &provider.ConfigureResponse{} @@ -155,11 +172,7 @@ func configureProviderAndReturnClient_PluginFramework(t *testing.T, tt providerF 
} return nil, fmt.Errorf(strings.Join(issues, ", ")) } - metadataRequest := provider.MetadataRequest{} - metadataResponse := &provider.MetadataResponse{} - p.Metadata(ctx, metadataRequest, metadataResponse) - // Get the configured client from metadata? - client := &common.DatabricksClient{} + client := configResponse.ResourceData.(*common.DatabricksClient) r, err := http.NewRequest("GET", "", nil) if err != nil { return nil, err From 356fbbea7acb7d3e7c878cd2b6afe7bfdae4af13 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 28 May 2024 18:54:49 +0200 Subject: [PATCH 09/39] - --- provider/provider_plugin_framework.go | 7 ++++--- provider/test_utils.go | 5 ++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 276f317369..7023ed769a 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -101,13 +101,14 @@ func configureDatabricksClient_PluginFramework(ctx context.Context, req provider attrsUsed := []string{} authsUsed := map[string]bool{} for _, attr := range config.ConfigAttributes { - var attrName types.String - diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrName) + var attrValue types.String + // tanmaytodo, failing here TerraformValueAtTerraformPath + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return nil } - err := attr.Set(cfg, attrName) + err := attr.Set(cfg, attrValue) if err != nil { resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) return nil diff --git a/provider/test_utils.go b/provider/test_utils.go index 9cb7977321..547f6d4f52 100644 --- a/provider/test_utils.go +++ b/provider/test_utils.go @@ -157,9 +157,12 @@ func configureProviderAndReturnClient_PluginFramework(t *testing.T, tt providerF p := GetDatabricksProviderPluginFramework() ctx := context.Background() rawConfig := tt.rawConfigPluginFramework() + var providerSchemaResponse provider.SchemaResponse + p.Schema(ctx, provider.SchemaRequest{}, &providerSchemaResponse) configRequest := provider.ConfigureRequest{ Config: tfsdk.Config{ - Raw: rawConfig, + Raw: rawConfig, + Schema: providerSchemaResponse.Schema, }, } configResponse := &provider.ConfigureResponse{} From 483eb992b6984a1c7a99595b5b23089b46ed6c92 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:22:12 +0100 Subject: [PATCH 10/39] added isolation mode support for `databricks_external_location` & `databricks_storage_credential` (#3704) * add isolation mode support for external location & storage credential * add doc & test --- catalog/resource_catalog.go | 5 +- catalog/resource_external_location.go | 13 ++- catalog/resource_external_location_test.go | 77 ++++++++++++++++ catalog/resource_storage_credential.go | 17 ++-- catalog/resource_storage_credential_test.go | 92 +++++++++++++++++++ docs/resources/external_location.md | 1 + docs/resources/storage_credential.md | 1 + internal/acceptance/external_location_test.go | 8 +- 8 files changed, 198 insertions(+), 16 deletions(-) diff --git a/catalog/resource_catalog.go b/catalog/resource_catalog.go index 56a2afd929..6c0f339fdd 100644 --- a/catalog/resource_catalog.go +++ b/catalog/resource_catalog.go @@ -90,6 +90,7 @@ func ResourceCatalog() common.Resource { 
if !updateRequired(d, []string{"owner", "isolation_mode", "enable_predictive_optimization"}) { return nil } + var updateCatalogRequest catalog.UpdateCatalog common.DataToStructPointer(d, catalogSchema, &updateCatalogRequest) updateCatalogRequest.Name = d.Id() @@ -98,6 +99,7 @@ func ResourceCatalog() common.Resource { return err } + // Bind the current workspace if the catalog is isolated, otherwise the read will fail return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, "catalog") }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { @@ -163,9 +165,6 @@ func ResourceCatalog() common.Resource { // So if we don't update the field then the requests would be made to old Name which doesn't exists. d.SetId(ci.Name) - if d.Get("isolation_mode") != "ISOLATED" { - return nil - } // Bind the current workspace if the catalog is isolated, otherwise the read will fail return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, "catalog") }, diff --git a/catalog/resource_external_location.go b/catalog/resource_external_location.go index 76801799f0..f4f6524de0 100644 --- a/catalog/resource_external_location.go +++ b/catalog/resource_external_location.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/catalog/bindings" "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -20,6 +21,7 @@ type ExternalLocationInfo struct { ReadOnly bool `json:"read_only,omitempty"` AccessPoint string `json:"access_point,omitempty"` EncDetails *catalog.EncryptionDetails `json:"encryption_details,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty" tf:"computed"` } func ResourceExternalLocation() common.Resource { @@ -59,8 +61,8 @@ func ResourceExternalLocation() common.Resource { } d.SetId(el.Name) - // Don't update owner if it is not provided - if 
d.Get("owner") == "" { + // Update owner or isolation mode if it is provided + if !updateRequired(d, []string{"owner", "isolation_mode"}) { return nil } @@ -71,7 +73,9 @@ func ResourceExternalLocation() common.Resource { if err != nil { return err } - return nil + + // Bind the current workspace if the external location is isolated, otherwise the read will fail + return bindings.AddCurrentWorkspaceBindings(ctx, d, w, el.Name, "external-location") }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { w, err := c.WorkspaceClient() @@ -129,7 +133,8 @@ func ResourceExternalLocation() common.Resource { } return err } - return nil + // Bind the current workspace if the external location is isolated, otherwise the read will fail + return bindings.AddCurrentWorkspaceBindings(ctx, d, w, updateExternalLocationRequest.Name, "external-location") }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { force := d.Get("force_destroy").(bool) diff --git a/catalog/resource_external_location_test.go b/catalog/resource_external_location_test.go index 77bbb854f7..a460425101 100644 --- a/catalog/resource_external_location_test.go +++ b/catalog/resource_external_location_test.go @@ -5,8 +5,10 @@ import ( "testing" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" ) func TestExternalLocationCornerCases(t *testing.T) { @@ -52,6 +54,81 @@ func TestCreateExternalLocation(t *testing.T) { }.ApplyNoError(t) } +func TestCreateIsolatedExternalLocation(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockExternalLocationsAPI().EXPECT() + e.Create(mock.Anything, catalog.CreateExternalLocation{ + Name: "abc", + Url: "s3://foo/bar", + 
CredentialName: "bcd", + Comment: "def", + }).Return(&catalog.ExternalLocationInfo{ + Name: "abc", + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + MetastoreId: "e", + Owner: "f", + }, nil) + e.Update(mock.Anything, catalog.UpdateExternalLocation{ + Name: "abc", + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + IsolationMode: "ISOLATED", + }).Return(&catalog.ExternalLocationInfo{ + Name: "abc", + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + IsolationMode: "ISOLATED", + MetastoreId: "e", + Owner: "f", + }, nil) + w.GetMockMetastoresAPI().EXPECT().Current(mock.Anything).Return(&catalog.MetastoreAssignment{ + MetastoreId: "e", + WorkspaceId: 123456789101112, + }, nil) + w.GetMockWorkspaceBindingsAPI().EXPECT().UpdateBindings(mock.Anything, catalog.UpdateWorkspaceBindingsParameters{ + SecurableName: "abc", + SecurableType: "external-location", + Add: []catalog.WorkspaceBinding{ + { + WorkspaceId: int64(123456789101112), + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + }, + }, + }).Return(&catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + WorkspaceId: int64(123456789101112), + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + }, + }, + }, nil) + e.GetByName(mock.Anything, "abc").Return(&catalog.ExternalLocationInfo{ + Name: "abc", + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + IsolationMode: "ISOLATED", + MetastoreId: "e", + Owner: "f", + }, nil) + }, + Resource: ResourceExternalLocation(), + Create: true, + HCL: ` + name = "abc" + url = "s3://foo/bar" + credential_name = "bcd" + comment = "def" + isolation_mode = "ISOLATED" + `, + }.ApplyNoError(t) +} + func TestCreateExternalLocationWithOwner(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/catalog/resource_storage_credential.go b/catalog/resource_storage_credential.go index a01ade1894..8526acf1f3 100644 --- 
a/catalog/resource_storage_credential.go +++ b/catalog/resource_storage_credential.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/catalog/bindings" "github.com/databricks/terraform-provider-databricks/common" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -21,6 +22,7 @@ type StorageCredentialInfo struct { MetastoreID string `json:"metastore_id,omitempty" tf:"computed"` ReadOnly bool `json:"read_only,omitempty"` SkipValidation bool `json:"skip_validation,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty" tf:"computed"` } func removeGcpSaField(originalSchema map[string]*schema.Schema) map[string]*schema.Schema { @@ -71,10 +73,11 @@ func ResourceStorageCredential() common.Resource { } d.SetId(storageCredential.CredentialInfo.Name) - // Don't update owner if it is not provided - if d.Get("owner") == "" { + // Update owner or isolation mode if it is provided + if !updateRequired(d, []string{"owner", "isolation_mode"}) { return nil } + update.Name = d.Id() _, err = acc.StorageCredentials.Update(ctx, catalog.AccountsUpdateStorageCredential{ CredentialInfo: &update, @@ -96,8 +99,8 @@ func ResourceStorageCredential() common.Resource { } d.SetId(storageCredential.Name) - // Don't update owner if it is not provided - if d.Get("owner") == "" { + // Update owner or isolation mode if it is provided + if !updateRequired(d, []string{"owner", "isolation_mode"}) { return nil } @@ -106,7 +109,8 @@ func ResourceStorageCredential() common.Resource { if err != nil { return err } - return nil + // Bind the current workspace if the storage credential is isolated, otherwise the read will fail + return bindings.AddCurrentWorkspaceBindings(ctx, d, w, storageCredential.Name, "storage-credential") }) }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { @@ -241,7 +245,8 @@ func 
ResourceStorageCredential() common.Resource { } return err } - return nil + // Bind the current workspace if the storage credential is isolated, otherwise the read will fail + return bindings.AddCurrentWorkspaceBindings(ctx, d, w, update.Name, "storage-credential") }) }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/catalog/resource_storage_credential_test.go b/catalog/resource_storage_credential_test.go index 33ccd29c8a..c9d2e07af6 100644 --- a/catalog/resource_storage_credential_test.go +++ b/catalog/resource_storage_credential_test.go @@ -4,8 +4,10 @@ import ( "testing" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" ) func TestStorageCredentialsCornerCases(t *testing.T) { @@ -60,6 +62,96 @@ func TestCreateStorageCredentials(t *testing.T) { }) } +func TestCreateIsolatedStorageCredential(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + e := w.GetMockStorageCredentialsAPI().EXPECT() + e.Create(mock.Anything, catalog.CreateStorageCredential{ + Name: "a", + AwsIamRole: &catalog.AwsIamRoleRequest{ + RoleArn: "def", + }, + Comment: "c", + }).Return(&catalog.StorageCredentialInfo{ + Name: "a", + AwsIamRole: &catalog.AwsIamRoleResponse{ + RoleArn: "def", + ExternalId: "123", + }, + MetastoreId: "d", + Id: "1234-5678", + Owner: "f", + }, nil) + e.Update(mock.Anything, catalog.UpdateStorageCredential{ + Name: "a", + AwsIamRole: &catalog.AwsIamRoleRequest{ + RoleArn: "def", + }, + Comment: "c", + IsolationMode: "ISOLATED", + }).Return(&catalog.StorageCredentialInfo{ + Name: "a", + AwsIamRole: &catalog.AwsIamRoleResponse{ + RoleArn: "def", + ExternalId: "123", + }, + MetastoreId: "d", + Id: "1234-5678", + Owner: "f", + IsolationMode: "ISOLATED", + 
}, nil) + w.GetMockMetastoresAPI().EXPECT().Current(mock.Anything).Return(&catalog.MetastoreAssignment{ + MetastoreId: "e", + WorkspaceId: 123456789101112, + }, nil) + w.GetMockWorkspaceBindingsAPI().EXPECT().UpdateBindings(mock.Anything, catalog.UpdateWorkspaceBindingsParameters{ + SecurableName: "a", + SecurableType: "storage-credential", + Add: []catalog.WorkspaceBinding{ + { + WorkspaceId: int64(123456789101112), + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + }, + }, + }).Return(&catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + WorkspaceId: int64(123456789101112), + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + }, + }, + }, nil) + e.GetByName(mock.Anything, "a").Return(&catalog.StorageCredentialInfo{ + Name: "a", + AwsIamRole: &catalog.AwsIamRoleResponse{ + RoleArn: "def", + ExternalId: "123", + }, + MetastoreId: "d", + Id: "1234-5678", + Owner: "f", + IsolationMode: "ISOLATED", + }, nil) + }, + Resource: ResourceStorageCredential(), + Create: true, + HCL: ` + name = "a" + aws_iam_role { + role_arn = "def" + } + comment = "c" + isolation_mode = "ISOLATED" + `, + }.ApplyAndExpectData(t, map[string]any{ + "aws_iam_role.0.external_id": "123", + "aws_iam_role.0.role_arn": "def", + "name": "a", + "storage_credential_id": "1234-5678", + "isolation_mode": "ISOLATED", + }) +} + func TestCreateStorageCredentialWithOwner(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/docs/resources/external_location.md b/docs/resources/external_location.md index 085292043d..6713c4b1eb 100644 --- a/docs/resources/external_location.md +++ b/docs/resources/external_location.md @@ -129,6 +129,7 @@ The following arguments are required: - `force_update` - (Optional) Update external location regardless of its dependents. - `access_point` - (Optional) The ARN of the s3 access point to use with the external location (AWS). 
- `encryption_details` - (Optional) The options for Server-Side Encryption to be used by each Databricks s3 client when connecting to S3 cloud storage (AWS). +- `isolation_mode` - (Optional) Whether the external location is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the external location to `ISOLATED` will automatically allow access from the current workspace. ## Attribute Reference diff --git a/docs/resources/storage_credential.md b/docs/resources/storage_credential.md index 19a57f92c8..79b6222cc2 100644 --- a/docs/resources/storage_credential.md +++ b/docs/resources/storage_credential.md @@ -80,6 +80,7 @@ The following arguments are required: - `skip_validation` - (Optional) Suppress validation errors if any & force save the storage credential. - `force_destroy` - (Optional) Delete storage credential regardless of its dependencies. - `force_update` - (Optional) Update storage credential regardless of its dependents. +- `isolation_mode` - (Optional) Whether the storage credential is accessible from all workspaces or a specific set of workspaces. Can be `ISOLATED` or `OPEN`. Setting the credential to `ISOLATED` will automatically allow access from the current workspace. 
`aws_iam_role` optional configuration block for credential details for AWS: diff --git a/internal/acceptance/external_location_test.go b/internal/acceptance/external_location_test.go index 7e219bed2a..d0454746d3 100644 --- a/internal/acceptance/external_location_test.go +++ b/internal/acceptance/external_location_test.go @@ -21,6 +21,7 @@ func externalLocationTemplateWithOwner(comment string, owner string) string { name = "external-{var.STICKY_RANDOM}" url = "s3://{env.TEST_BUCKET}/some{var.STICKY_RANDOM}" credential_name = databricks_storage_credential.external.id + isolation_mode = "ISOLATED" comment = "%s" owner = "%s" } @@ -34,9 +35,10 @@ func storageCredentialTemplateWithOwner(comment, owner string) string { aws_iam_role { role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" } - comment = "%s" - owner = "%s" - force_update = true + comment = "%s" + owner = "%s" + isolation_mode = "ISOLATED" + force_update = true } `, comment, owner) } From 03c71d0e33a95e7330408683a551a9251e85fa53 Mon Sep 17 00:00:00 2001 From: Alex Moschos <166370939+alexmos-db@users.noreply.github.com> Date: Fri, 28 Jun 2024 17:26:04 +0200 Subject: [PATCH 11/39] Add terraform support for periodic triggers (#3700) * Add periodic triggers * Add acceptance test for periodic triggers * Fix typo --- docs/resources/job.md | 4 +++ internal/acceptance/job_test.go | 33 +++++++++++++++++++ jobs/resource_job.go | 9 +++++- jobs/resource_job_test.go | 56 +++++++++++++++++++++++++++++++++ 4 files changed, 101 insertions(+), 1 deletion(-) diff --git a/docs/resources/job.md b/docs/resources/job.md index 02fbd9d75f..e61d6d4ed8 100644 --- a/docs/resources/job.md +++ b/docs/resources/job.md @@ -345,6 +345,10 @@ This block describes the queue settings of the job: ### trigger Configuration Block * `pause_status` - (Optional) Indicate whether this trigger is paused or not. Either `PAUSED` or `UNPAUSED`. 
When the `pause_status` field is omitted in the block, the server will default to using `UNPAUSED` as a value for `pause_status`. +* `periodic` - (Optional) configuration block to define a trigger for Periodic Triggers consisting of the following attributes: + * `interval` - (Required) Specifies the interval at which the job should run. This value is required. + * `unit` - (Required) Options are {"DAYS", "HOURS", "WEEKS"}. + * `file_arrival` - (Optional) configuration block to define a trigger for [File Arrival events](https://learn.microsoft.com/en-us/azure/databricks/workflows/jobs/file-arrival-triggers) consisting of following attributes: * `url` - (Required) URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location. Please note that the URL must have a trailing slash character (`/`). * `min_time_between_triggers_seconds` - (Optional) If set, the trigger starts a run only after the specified amount of time passed since the last time the trigger fired. The minimum allowed value is 60 seconds. 
diff --git a/internal/acceptance/job_test.go b/internal/acceptance/job_test.go index 9fa55e4648..ddf0d74325 100644 --- a/internal/acceptance/job_test.go +++ b/internal/acceptance/job_test.go @@ -413,3 +413,36 @@ func TestAccRemoveWebhooks(t *testing.T) { `, }) } + +func TestAccPeriodicTrigger(t *testing.T) { + workspaceLevel(t, step{ + Template: ` + resource "databricks_job" "this" { + name = "{var.RANDOM}" + + trigger { + pause_status = "UNPAUSED" + periodic { + interval = 17 + unit = "HOURS" + } + } + }`, + Check: resourceCheck("databricks_job.this", func(ctx context.Context, client *common.DatabricksClient, id string) error { + w, err := client.WorkspaceClient() + assert.NoError(t, err) + + jobID, err := strconv.ParseInt(id, 10, 64) + assert.NoError(t, err) + + res, err := w.Jobs.GetByJobId(ctx, jobID) + assert.NoError(t, err) + + assert.Equal(t, jobs.PauseStatus("UNPAUSED"), res.Settings.Trigger.PauseStatus) + assert.Equal(t, 17, res.Settings.Trigger.Periodic.Interval) + assert.Equal(t, jobs.PeriodicTriggerConfigurationTimeUnit("HOURS"), res.Settings.Trigger.Periodic.Unit) + + return nil + }), + }) +} diff --git a/jobs/resource_job.go b/jobs/resource_job.go index b38323996c..770a42afa5 100644 --- a/jobs/resource_job.go +++ b/jobs/resource_job.go @@ -272,9 +272,15 @@ type TableUpdate struct { WaitAfterLastChangeSeconds int32 `json:"wait_after_last_change_seconds,omitempty"` } +type Periodic struct { + Interval int32 `json:"interval"` + Unit string `json:"unit"` +} + type Trigger struct { FileArrival *FileArrival `json:"file_arrival,omitempty"` TableUpdate *TableUpdate `json:"table_update,omitempty"` + Periodic *Periodic `json:"periodic,omitempty"` PauseStatus string `json:"pause_status,omitempty" tf:"default:UNPAUSED"` } @@ -566,9 +572,10 @@ func (JobSettingsResource) CustomizeSchema(s *common.CustomizableSchema) *common s.SchemaPath("continuous").SetConflictsWith([]string{"schedule", "trigger"}) s.SchemaPath("trigger").SetConflictsWith([]string{"continuous", 
"schedule"}) - trigger_eoo := []string{"trigger.0.file_arrival", "trigger.0.table_update"} + trigger_eoo := []string{"trigger.0.file_arrival", "trigger.0.table_update", "trigger.0.periodic"} s.SchemaPath("trigger", "file_arrival").SetExactlyOneOf(trigger_eoo) s.SchemaPath("trigger", "table_update").SetExactlyOneOf(trigger_eoo) + s.SchemaPath("trigger", "periodic").SetExactlyOneOf(trigger_eoo) // Deprecated Job API 2.0 attributes var topLevelDeprecatedAttr = []string{ diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go index 9d07e7698d..cbfbb411ae 100644 --- a/jobs/resource_job_test.go +++ b/jobs/resource_job_test.go @@ -1381,6 +1381,62 @@ func TestResourceJobCreate_Trigger_TableUpdateCreate(t *testing.T) { }.ApplyNoError(t) } +func TestResourceJobCreate_Trigger_PeriodicCreate(t *testing.T) { + qa.ResourceFixture{ + Create: true, + Resource: ResourceJob(), + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/jobs/create", + ExpectedRequest: JobSettings{ + MaxConcurrentRuns: 1, + Name: "Test", + Trigger: &Trigger{ + PauseStatus: "UNPAUSED", + Periodic: &Periodic{ + Interval: 4, + Unit: "HOURS", + }, + }, + }, + Response: Job{ + JobID: 1042, + }, + }, + { + Method: "GET", + Resource: "/api/2.0/jobs/get?job_id=1042", + Response: Job{ + JobID: 1042, + Settings: &JobSettings{ + MaxConcurrentRuns: 1, + Name: "Test", + Trigger: &Trigger{ + PauseStatus: "UNPAUSED", + Periodic: &Periodic{ + Interval: 4, + Unit: "HOURS", + }, + }, + }, + }, + }, + }, + HCL: ` + trigger { + pause_status = "UNPAUSED" + periodic { + interval = 4 + unit = "HOURS" + } + } + max_concurrent_runs = 1 + name = "Test" + `, + }.ApplyNoError(t) +} + func TestResourceJobUpdate_ControlRunState_ContinuousUpdateRunNow(t *testing.T) { qa.ResourceFixture{ Update: true, From f42e1fb2cfffd1a5af5425749bf4193692628643 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Fri, 28 Jun 2024 21:48:59 +0200 Subject: [PATCH 12/39] Release v1.48.2 (#3722) * Added isolation mode support 
for `databricks_external_location` & `databricks_storage_credential` ([#3704](https://github.com/databricks/terraform-provider-databricks/pull/3704)). * Add terraform support for periodic triggers ([#3700](https://github.com/databricks/terraform-provider-databricks/pull/3700)). --- CHANGELOG.md | 7 +++++++ common/version.go | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2925d58d0..1c21baaf4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Version changelog +## 1.48.2 + +### New Features and Improvements +* Added isolation mode support for `databricks_external_location` & `databricks_storage_credential` ([#3704](https://github.com/databricks/terraform-provider-databricks/pull/3704)). +* Add terraform support for periodic triggers ([#3700](https://github.com/databricks/terraform-provider-databricks/pull/3700)). + + ## 1.48.1 ### New Features and Improvements diff --git a/common/version.go b/common/version.go index 5aa00967e6..927e473e3a 100644 --- a/common/version.go +++ b/common/version.go @@ -3,7 +3,7 @@ package common import "context" var ( - version = "1.48.1" + version = "1.48.2" // ResourceName is resource name without databricks_ prefix ResourceName contextKey = 1 // Provider is the current instance of provider From d669d7a06bb2c0140ca878ef9adaaf360a0684aa Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Sun, 30 Jun 2024 11:21:59 +0100 Subject: [PATCH 13/39] remove references to basic auth (#3720) --- docs/guides/aws-private-link-workspace.md | 6 ++-- docs/guides/unity-catalog-azure.md | 2 +- docs/guides/unity-catalog-gcp.md | 2 +- docs/index.md | 37 +++++------------------ docs/resources/user.md | 4 +-- 5 files changed, 15 insertions(+), 36 deletions(-) diff --git a/docs/guides/aws-private-link-workspace.md b/docs/guides/aws-private-link-workspace.md index d680a62281..6879329d88 100644 --- a/docs/guides/aws-private-link-workspace.md +++ 
b/docs/guides/aws-private-link-workspace.md @@ -2,10 +2,10 @@ page_title: "Provisioning Databricks on AWS with Private Link" --- --> **Note** Refer to the [Databricks Terraform Registry modules](https://registry.terraform.io/modules/databricks/examples/databricks/latest) for Terraform modules and examples to deploy Azure Databricks resources. - # Provisioning Databricks on AWS with Private Link +-> **Note** Refer to the [Databricks Terraform Registry modules](https://registry.terraform.io/modules/databricks/examples/databricks/latest) for Terraform modules and examples to deploy Azure Databricks resources. + Databricks PrivateLink support enables private connectivity between users and their Databricks workspaces and between clusters on the data plane and core services on the control plane within the Databricks workspace infrastructure. You can use Terraform to deploy the underlying cloud resources and the private access settings resources automatically using a programmatic approach. This guide assumes you are deploying into an existing VPC and have set up credentials and storage configurations as per prior examples, notably here. ![Private link backend](https://raw.githubusercontent.com/databricks/terraform-provider-databricks/main/docs/images/aws-e2-private-link-backend.png) @@ -39,7 +39,7 @@ This guide takes you through the following high-level steps to set up a workspac ## Provider initialization -To set up account-level resources, initialize [provider with `mws` alias](https://www.terraform.io/language/providers/configuration#alias-multiple-provider-configurations). See [provider authentication](../index.md#authenticating-with-hostname,-username,-and-password) for more details. +To set up account-level resources, initialize [provider with `mws` alias](https://www.terraform.io/language/providers/configuration#alias-multiple-provider-configurations). 
See [provider authentication](../index.md#authenticating-with-databricks-managed-service-principal) for more details. ```hcl terraform { diff --git a/docs/guides/unity-catalog-azure.md b/docs/guides/unity-catalog-azure.md index d7fac487f1..fc1d29b0fa 100644 --- a/docs/guides/unity-catalog-azure.md +++ b/docs/guides/unity-catalog-azure.md @@ -31,7 +31,7 @@ To get started with Unity Catalog, this guide takes you through the following hi ## Provider initialization -Initialize the 3 providers to set up the required resources. See [Databricks provider authentication](../index.md#authenticating-with-hostname,-username,-and-password), [Azure AD provider authentication](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs#authenticating-to-azure-active-directory) and [Azure provider authentication](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure) for more details. +Initialize the 3 providers to set up the required resources. See [Databricks provider authentication](../index.md#authenticating-with-databricks-managed-service-principal), [Azure AD provider authentication](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs#authenticating-to-azure-active-directory) and [Azure provider authentication](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure) for more details. Define the required variables, and calculate the local values diff --git a/docs/guides/unity-catalog-gcp.md b/docs/guides/unity-catalog-gcp.md index d8efb5a71c..bc8e33d61d 100644 --- a/docs/guides/unity-catalog-gcp.md +++ b/docs/guides/unity-catalog-gcp.md @@ -31,7 +31,7 @@ To get started with Unity Catalog, this guide takes you through the following hi ## Provider initialization -Initialize the 3 providers to set up the required resources. 
See [Databricks provider authentication](../index.md#authenticating-with-hostname,-username,-and-password), [Azure AD provider authentication](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs#authenticating-to-azure-active-directory) and [Azure provider authentication](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure) for more details. +Initialize the 3 providers to set up the required resources. See [Databricks provider authentication](../index.md#authentication), [Azure AD provider authentication](https://registry.terraform.io/providers/hashicorp/azuread/latest/docs#authenticating-to-azure-active-directory) and [Azure provider authentication](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs#authenticating-to-azure) for more details. Define the required variables, and calculate the local values diff --git a/docs/index.md b/docs/index.md index 575c196ae4..18efc26fe7 100644 --- a/docs/index.md +++ b/docs/index.md @@ -138,8 +138,7 @@ There are currently a number of supported methods to [authenticate](https://docs * [PAT Tokens](#authenticating-with-hostname-and-token) * AWS, Azure and GCP via [Databricks-managed Service Principals](#authenticating-with-databricks-managed-service-principal) * GCP via [Google Cloud CLI](#special-configurations-for-gcp) -* Azure Active Directory Tokens via [Azure CLI](#authenticating-with-azure-cli), [Azure-managed Service Principals](#authenticating-with-azure-service-principal), or [Managed Service Identities](#authenticating-with-azure-msi) -* Username and password pair (legacy) +* Azure Active Directory Tokens via [Azure CLI](#authenticating-with-azure-cli), [Azure-managed Service Principals](#authenticating-with-azure-managed-service-principal), or [Managed Service Identities](#authenticating-with-azure-msi) ### Authenticating with Databricks CLI credentials @@ -181,20 +180,6 @@ provider "databricks" { } ``` -### Authenticating with hostname, 
username, and password - -!> **Warning** This approach is not recommended for regular use. Instead, authenticate with [service principal](#authenticating-with-service-principal) - -You can use the `username` + `password` attributes to authenticate the provider for a workspace setup. Respective `DATABRICKS_USERNAME` and `DATABRICKS_PASSWORD` environment variables are applicable as well. - -``` hcl -provider "databricks" { - host = "https://accounts.cloud.databricks.com" - username = var.user - password = var.password -} -``` - ### Authenticating with Databricks-managed Service Principal You can use the `client_id` + `client_secret` attributes to authenticate with a Databricks-managed service principal at both the account and workspace levels in all supported clouds. The `client_id` is the `application_id` of the [Service Principal](resources/service_principal.md) and `client_secret` is its secret. You can generate the secret from Databricks Accounts Console (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)) or by using the Terraform resource [databricks_service_principal_secret](resources/service_principal_secret.md). @@ -249,12 +234,10 @@ The provider block supports the following arguments: * `host` - (optional) This is the host of the Databricks workspace. It is a URL that you use to login to your workspace. Alternatively, you can provide this value as an environment variable `DATABRICKS_HOST`. * `token` - (optional) This is the API token to authenticate into the workspace. Alternatively, you can provide this value as an environment variable `DATABRICKS_TOKEN`. -* `username` - (optional) This is the username of the user that can log into the workspace. Alternatively, you can provide this value as an environment variable `DATABRICKS_USERNAME`. Recommended only for [creating workspaces in AWS](resources/mws_workspaces.md). 
-* `password` - (optional) This is the user's password that can log into the workspace. Alternatively, you can provide this value as an environment variable `DATABRICKS_PASSWORD`. Recommended only for [creating workspaces in AWS](resources/mws_workspaces.md). -* `config_file` - (optional) Location of the Databricks CLI credentials file created by `databricks configure --token` command (~/.databrickscfg by default). Check [Databricks CLI documentation](https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication) for more details. The provider uses configuration file credentials when you don't specify host/token/username/password/azure attributes. Alternatively, you can provide this value as an environment variable `DATABRICKS_CONFIG_FILE`. This field defaults to `~/.databrickscfg`. +* `config_file` - (optional) Location of the Databricks CLI credentials file created by `databricks configure --token` command (~/.databrickscfg by default). Check [Databricks CLI documentation](https://docs.databricks.com/dev-tools/cli/index.html#set-up-authentication) for more details. The provider uses configuration file credentials when you don't specify host/token/azure attributes. Alternatively, you can provide this value as an environment variable `DATABRICKS_CONFIG_FILE`. This field defaults to `~/.databrickscfg`. * `profile` - (optional) Connection profile specified within ~/.databrickscfg. Please check [connection profiles section](https://docs.databricks.com/dev-tools/cli/index.html#connection-profiles) for more details. This field defaults to `DEFAULT`. -* `account_id` - (optional for workspace-level operations, but required for account-level) Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). Alternatively, you can provide this value as an environment variable `DATABRICKS_ACCOUNT_ID`. 
Only has effect when `host = "https://accounts.cloud.databricks.com/"`, and is currently used to provision account admins via [databricks_user](resources/user.md). In the future releases of the provider this property will also be used specify account for `databricks_mws_*` resources as well. +* `account_id` - (optional for workspace-level operations, but required for account-level) Account Id that could be found in the top right corner of [Accounts Console](https://accounts.cloud.databricks.com/). Alternatively, you can provide this value as an environment variable `DATABRICKS_ACCOUNT_ID`. Only has effect when `host = "https://accounts.cloud.databricks.com/"`, and is currently used to provision account admins via [databricks_user](resources/user.md). In the future releases of the provider this property will also be used specify account for `databricks_mws_*` resources as well. * `auth_type` - (optional) enforce specific auth type to be used in very rare cases, where a single Terraform state manages Databricks workspaces on more than one cloud and `more than one authorization method configured` error is a false positive. Valid values are `pat`, `basic`, `oauth-m2m`, `azure-client-secret`, `azure-msi`, `azure-cli`, `google-credentials`, and `google-id`. ## Special configurations for Azure @@ -378,8 +361,6 @@ The following configuration attributes can be passed via environment variables: | `auth_type` | `DATABRICKS_AUTH_TYPE` | | `host` | `DATABRICKS_HOST` | | `token` | `DATABRICKS_TOKEN` | -| `username` | `DATABRICKS_USERNAME` | -| `password` | `DATABRICKS_PASSWORD` | | `account_id` | `DATABRICKS_ACCOUNT_ID` | | `config_file` | `DATABRICKS_CONFIG_FILE` | | `profile` | `DATABRICKS_CONFIG_PROFILE` | @@ -408,12 +389,10 @@ provider "databricks" {} 1. Provider will check all the supported environment variables and set values of relevant arguments. 2. In case any conflicting arguments are present, the plan will end with an error. 3. 
Will check for the presence of `host` + `token` pair, continue trying otherwise. -4. Will check for `host` + `username` + `password` presence, continue trying otherwise. -5. Will check for Azure workspace ID, `azure_client_secret` + `azure_client_id` + `azure_tenant_id` presence, continue trying otherwise. -6. Will check for availability of Azure MSI, if enabled via `azure_use_msi`, continue trying otherwise. -7. Will check for Azure workspace ID presence, and if `AZ CLI` returns an access token, continue trying otherwise. -8. Will check for the `~/.databrickscfg` file in the home directory, will fail otherwise. -9. Will check for `profile` presence and try picking from that file will fail otherwise. -10. Will check for `host` and `token` or `username`+`password` combination, and will fail if none of these exist. +4. Will check for Azure workspace ID, `azure_client_secret` + `azure_client_id` + `azure_tenant_id` presence, continue trying otherwise. +5. Will check for availability of Azure MSI, if enabled via `azure_use_msi`, continue trying otherwise. +6. Will check for Azure workspace ID presence, and if `AZ CLI` returns an access token, continue trying otherwise. +7. Will check for the `~/.databrickscfg` file in the home directory, will fail otherwise. +8. Will check for `profile` presence and try picking from that file will fail otherwise. Please check [Default Authentication Flow](https://github.com/databricks/databricks-sdk-go#default-authentication-flow) from [Databricks SDK for Go](https://docs.databricks.com/dev-tools/sdk-go.html) in case you need more details. 
diff --git a/docs/resources/user.md b/docs/resources/user.md index 75ee1698ac..03e16365c3 100644 --- a/docs/resources/user.md +++ b/docs/resources/user.md @@ -3,7 +3,7 @@ subcategory: "Security" --- # databricks_user Resource -This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also [associate](group_member.md) Databricks users to [databricks_group](group.md). Upon user creation the user will receive a password reset email. You can also get information about caller identity using [databricks_current_user](../data-sources/current_user.md) data source. +This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also [associate](group_member.md) Databricks users to [databricks_group](group.md). Upon user creation the user will receive a welcome email. You can also get information about caller identity using [databricks_current_user](../data-sources/current_user.md) data source. -> **Note** To assign account level users to workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md). @@ -101,7 +101,7 @@ The following arguments are available: * `force` - (Optional) Ignore `cannot create user: User with username X already exists` errors and implicitly import the specific user into Terraform state, enforcing entitlements defined in the instance of resource. _This functionality is experimental_ and is designed to simplify corner cases, like Azure Active Directory synchronisation. 
* `force_delete_repos` - (Optional) This flag determines whether the user's repo directory is deleted when the user is deleted. It will have no impact when in the accounts SCIM API. False by default. * `force_delete_home_dir` - (Optional) This flag determines whether the user's home directory is deleted when the user is deleted. It will have not impact when in the accounts SCIM API. False by default. -* `disable_as_user_deletion` - (Optional) Deactivate the user when deleting the resource, rather than deleting the user entirely. Defaults to `true` when the provider is configured at the account-level and `false` when configured at the workspace-level. This flag is exclusive to force_delete_repos and force_delete_home_dir flags. +* `disable_as_user_deletion` - (Optional) Deactivate the user when deleting the resource, rather than deleting the user entirely. Defaults to `true` when the provider is configured at the account-level and `false` when configured at the workspace-level. This flag is exclusive to force_delete_repos and force_delete_home_dir flags. 
## Attribute Reference From 0c252d47dbbcd17385d12e9b06a95725f68ff854 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aleksandar=20Dragojevi=C4=87?= Date: Sun, 30 Jun 2024 12:22:55 +0200 Subject: [PATCH 14/39] Fix invalid priviledges in grants.md (#3716) --- docs/resources/grants.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/resources/grants.md b/docs/resources/grants.md index 48679b5965..25f22c91af 100644 --- a/docs/resources/grants.md +++ b/docs/resources/grants.md @@ -274,15 +274,15 @@ resource "databricks_grants" "some" { } grant { principal = databricks_service_principal.my_sp.application_id - privileges = ["USE_SCHEMA", "MODIFY"] + privileges = ["CREATE_EXTERNAL_TABLE", "READ_FILES"] } grant { principal = databricks_group.my_group.display_name - privileges = ["USE_SCHEMA", "MODIFY"] + privileges = ["CREATE_EXTERNAL_TABLE", "READ_FILES"] } grant { principal = databricks_group.my_user.user_name - privileges = ["USE_SCHEMA", "MODIFY"] + privileges = ["CREATE_EXTERNAL_TABLE", "READ_FILES"] } } ``` From 1ba1772a28244c53db6fa6a71fcbd36d8032f878 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jul 2024 09:36:15 +0200 Subject: [PATCH 15/39] Bump github.com/hashicorp/hcl/v2 from 2.20.1 to 2.21.0 (#3684) Bumps [github.com/hashicorp/hcl/v2](https://github.com/hashicorp/hcl) from 2.20.1 to 2.21.0. - [Release notes](https://github.com/hashicorp/hcl/releases) - [Changelog](https://github.com/hashicorp/hcl/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/hcl/compare/v2.20.1...v2.21.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/hcl/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 5a56fb699e..a421cdb136 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 - github.com/hashicorp/hcl/v2 v2.20.1 + github.com/hashicorp/hcl/v2 v2.21.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 github.com/stretchr/testify v1.9.0 diff --git a/go.sum b/go.sum index d2e63b3423..aa4bdff4a6 100644 --- a/go.sum +++ b/go.sum @@ -122,8 +122,8 @@ github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9 github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= -github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= @@ -206,8 +206,8 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.4 
h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= From df210b2aba89e3a00ce83abfee8bd4f446ae7f80 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Mon, 1 Jul 2024 14:45:55 +0100 Subject: [PATCH 16/39] Refactored `databricks_cluster(s)` data sources to Go SDK (#3685) * relax cluster check * fix * fix * refactor `databricks_cluster` data source to Go SDK * refactor `databricks_clusters` data source to Go SDK --- clusters/data_cluster.go | 29 +++--- clusters/data_cluster_test.go | 171 ++++++++++++++------------------- clusters/data_clusters.go | 56 +++++------ clusters/data_clusters_test.go | 70 ++++++-------- 4 files changed, 141 insertions(+), 185 deletions(-) diff --git a/clusters/data_cluster.go b/clusters/data_cluster.go index 8a45b7afdf..73ae4a1e19 100644 --- a/clusters/data_cluster.go +++ b/clusters/data_cluster.go @@ -4,25 +4,24 @@ import ( "context" "fmt" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/common" ) func DataSourceCluster() common.Resource { - type clusterData 
struct { - Id string `json:"id,omitempty" tf:"computed"` - ClusterId string `json:"cluster_id,omitempty" tf:"computed"` - Name string `json:"cluster_name,omitempty" tf:"computed"` - ClusterInfo *ClusterInfo `json:"cluster_info,omitempty" tf:"computed"` - } - return common.DataResource(clusterData{}, func(ctx context.Context, e interface{}, c *common.DatabricksClient) error { - data := e.(*clusterData) - clusterAPI := NewClustersAPI(ctx, c) + return common.WorkspaceData(func(ctx context.Context, data *struct { + Id string `json:"id,omitempty" tf:"computed"` + ClusterId string `json:"cluster_id,omitempty" tf:"computed"` + Name string `json:"cluster_name,omitempty" tf:"computed"` + ClusterInfo *compute.ClusterDetails `json:"cluster_info,omitempty" tf:"computed"` + }, w *databricks.WorkspaceClient) error { if data.Name != "" { - clusters, err := clusterAPI.List() + clusters, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{}) if err != nil { return err } - namedClusters := []ClusterInfo{} + namedClusters := []compute.ClusterDetails{} for _, clst := range clusters { cluster := clst if cluster.ClusterName == data.Name { @@ -37,16 +36,16 @@ func DataSourceCluster() common.Resource { } data.ClusterInfo = &namedClusters[0] } else if data.ClusterId != "" { - cls, err := clusterAPI.Get(data.ClusterId) + cls, err := w.Clusters.GetByClusterId(ctx, data.ClusterId) if err != nil { return err } - data.ClusterInfo = &cls + data.ClusterInfo = cls } else { return fmt.Errorf("you need to specify either `cluster_name` or `cluster_id`") } - data.Id = data.ClusterInfo.ClusterID - data.ClusterId = data.ClusterInfo.ClusterID + data.Id = data.ClusterInfo.ClusterId + data.ClusterId = data.ClusterInfo.ClusterId return nil }) diff --git a/clusters/data_cluster_test.go b/clusters/data_cluster_test.go index 9945634fcc..cd20edec0d 100644 --- a/clusters/data_cluster_test.go +++ b/clusters/data_cluster_test.go @@ -1,104 +1,81 @@ package clusters import ( - "fmt" "testing" + 
"github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/qa" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/stretchr/testify/mock" ) func TestClusterDataByID(t *testing.T) { - d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/clusters/get?cluster_id=abc", - Response: ClusterInfo{ - ClusterID: "abc", - NumWorkers: 100, - ClusterName: "Shared Autoscaling", - SparkVersion: "7.1-scala12", - NodeTypeID: "i3.xlarge", - AutoterminationMinutes: 15, - State: ClusterStateRunning, - AutoScale: &AutoScale{ - MaxWorkers: 4, - }, + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockClustersAPI().EXPECT() + e.GetByClusterId(mock.Anything, "abc").Return(&compute.ClusterDetails{ + ClusterId: "abc", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: ClusterStateRunning, + Autoscale: &compute.AutoScale{ + MaxWorkers: 4, }, - }, + }, nil) }, Resource: DataSourceCluster(), HCL: `cluster_id = "abc"`, Read: true, NonWritable: true, ID: "abc", - }.Apply(t) - require.NoError(t, err) - assert.Equal(t, 15, d.Get("cluster_info.0.autotermination_minutes")) - assert.Equal(t, "Shared Autoscaling", d.Get("cluster_info.0.cluster_name")) - assert.Equal(t, "i3.xlarge", d.Get("cluster_info.0.node_type_id")) - assert.Equal(t, 4, d.Get("cluster_info.0.autoscale.0.max_workers")) - assert.Equal(t, "RUNNING", d.Get("cluster_info.0.state")) - - for k, v := range d.State().Attributes { - fmt.Printf("assert.Equal(t, %#v, d.Get(%#v))\n", v, k) - } + }.ApplyAndExpectData(t, map[string]any{ + "cluster_info.0.autotermination_minutes": 15, + "cluster_info.0.cluster_name": "Shared Autoscaling", + "cluster_info.0.node_type_id": "i3.xlarge", + 
"cluster_info.0.autoscale.0.max_workers": 4, + "cluster_info.0.state": "RUNNING", + }) } func TestClusterDataByName(t *testing.T) { - d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/clusters/list", - - Response: ClusterList{ - Clusters: []ClusterInfo{{ - ClusterID: "abc", - NumWorkers: 100, - ClusterName: "Shared Autoscaling", - SparkVersion: "7.1-scala12", - NodeTypeID: "i3.xlarge", - AutoterminationMinutes: 15, - State: ClusterStateRunning, - AutoScale: &AutoScale{ - MaxWorkers: 4, - }, - }}, + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockClustersAPI().EXPECT() + e.ListAll(mock.Anything, compute.ListClustersRequest{}).Return([]compute.ClusterDetails{{ + ClusterId: "abc", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: ClusterStateRunning, + Autoscale: &compute.AutoScale{ + MaxWorkers: 4, }, - }, + }}, nil) }, Resource: DataSourceCluster(), HCL: `cluster_name = "Shared Autoscaling"`, Read: true, NonWritable: true, ID: "_", - }.Apply(t) - require.NoError(t, err) - assert.Equal(t, 15, d.Get("cluster_info.0.autotermination_minutes")) - assert.Equal(t, "Shared Autoscaling", d.Get("cluster_info.0.cluster_name")) - assert.Equal(t, "i3.xlarge", d.Get("cluster_info.0.node_type_id")) - assert.Equal(t, 4, d.Get("cluster_info.0.autoscale.0.max_workers")) - assert.Equal(t, "RUNNING", d.Get("cluster_info.0.state")) - - for k, v := range d.State().Attributes { - fmt.Printf("assert.Equal(t, %#v, d.Get(%#v))\n", v, k) - } + }.ApplyAndExpectData(t, map[string]any{ + "cluster_info.0.autotermination_minutes": 15, + "cluster_info.0.cluster_name": "Shared Autoscaling", + "cluster_info.0.node_type_id": "i3.xlarge", + "cluster_info.0.autoscale.0.max_workers": 4, + "cluster_info.0.state": "RUNNING", + }) } func TestClusterDataByName_NotFound(t *testing.T) { qa.ResourceFixture{ 
- Fixtures: []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/clusters/list", - - Response: ClusterList{ - Clusters: []ClusterInfo{}, - }, - }, + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockClustersAPI().EXPECT() + e.ListAll(mock.Anything, compute.ListClustersRequest{}).Return([]compute.ClusterDetails{}, nil) }, Resource: DataSourceCluster(), HCL: `cluster_name = "Unknown"`, @@ -110,34 +87,34 @@ func TestClusterDataByName_NotFound(t *testing.T) { func TestClusterDataByName_DuplicateNames(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/clusters/list", - - Response: ClusterList{ - Clusters: []ClusterInfo{ - { - ClusterID: "abc", - NumWorkers: 100, - ClusterName: "Shared Autoscaling", - SparkVersion: "7.1-scala12", - NodeTypeID: "i3.xlarge", - AutoterminationMinutes: 15, - State: ClusterStateRunning, - }, - { - ClusterID: "def", - NumWorkers: 100, - ClusterName: "Shared Autoscaling", - SparkVersion: "7.1-scala12", - NodeTypeID: "i3.xlarge", - AutoterminationMinutes: 15, - State: ClusterStateRunning, - }, + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockClustersAPI().EXPECT() + e.ListAll(mock.Anything, compute.ListClustersRequest{}).Return([]compute.ClusterDetails{ + { + ClusterId: "abc", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: ClusterStateRunning, + Autoscale: &compute.AutoScale{ + MaxWorkers: 4, + }, + }, + { + ClusterId: "def", + NumWorkers: 100, + ClusterName: "Shared Autoscaling", + SparkVersion: "7.1-scala12", + NodeTypeId: "i3.xlarge", + AutoterminationMinutes: 15, + State: ClusterStateRunning, + Autoscale: &compute.AutoScale{ + MaxWorkers: 4, }, }, - }, + }, nil) }, Resource: DataSourceCluster(), HCL: `cluster_name = "Shared Autoscaling"`, diff --git a/clusters/data_clusters.go b/clusters/data_clusters.go index 
2628c4968d..da637762b5 100644 --- a/clusters/data_clusters.go +++ b/clusters/data_clusters.go @@ -4,42 +4,32 @@ import ( "context" "strings" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/common" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func DataSourceClusters() common.Resource { - return common.Resource{ - Read: func(ctx context.Context, d *schema.ResourceData, i *common.DatabricksClient) error { - clusters, err := NewClustersAPI(ctx, i).List() - if err != nil { - return err + return common.WorkspaceData(func(ctx context.Context, data *struct { + Id string `json:"id,omitempty" tf:"computed"` + Ids []string `json:"ids,omitempty" tf:"computed,slice_set"` + ClusterNameContains string `json:"cluster_name_contains"` + }, w *databricks.WorkspaceClient) error { + clusters, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{}) + if err != nil { + return err + } + ids := make([]string, 0, len(clusters)) + name_contains := strings.ToLower(data.ClusterNameContains) + for _, v := range clusters { + match_name := strings.Contains(strings.ToLower(v.ClusterName), name_contains) + if name_contains != "" && !match_name { + continue } - ids := schema.NewSet(schema.HashString, []any{}) - name_contains := strings.ToLower(d.Get("cluster_name_contains").(string)) - for _, v := range clusters { - match_name := strings.Contains(strings.ToLower(v.ClusterName), name_contains) - if name_contains != "" && !match_name { - continue - } - ids.Add(v.ClusterID) - } - d.Set("ids", ids) - d.SetId("_") - return nil - }, - Schema: map[string]*schema.Schema{ - "ids": { - Computed: true, - Type: schema.TypeSet, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "cluster_name_contains": { - Optional: true, - Type: schema.TypeString, - }, - }, - } + ids = append(ids, v.ClusterId) + } + data.Ids = ids + data.Id = "_" + return nil + }) } diff --git 
a/clusters/data_clusters_test.go b/clusters/data_clusters_test.go index ddabc295fe..48d80afdfe 100644 --- a/clusters/data_clusters_test.go +++ b/clusters/data_clusters_test.go @@ -6,69 +6,59 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/qa" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "github.com/stretchr/testify/mock" ) func TestClustersDataSource(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/clusters/list", - - Response: ClusterList{ - Clusters: []ClusterInfo{ - { - ClusterID: "b", - }, - { - ClusterID: "a", - }, - }, + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockClustersAPI().EXPECT() + e.ListAll(mock.Anything, compute.ListClustersRequest{}).Return([]compute.ClusterDetails{ + { + ClusterId: "b", + }, + { + ClusterId: "a", }, - }, + }, nil) }, Resource: DataSourceClusters(), NonWritable: true, Read: true, ID: "_", - }.ApplyNoError(t) + }.ApplyAndExpectData(t, map[string]any{ + "ids": []string{"a", "b"}, + }) } func TestClustersDataSourceContainsName(t *testing.T) { - d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/clusters/list", - Response: ClusterList{ - Clusters: []ClusterInfo{ - { - ClusterID: "b", - ClusterName: "THIS NAME", - }, - { - ClusterID: "a", - ClusterName: "that name", - }, - }, + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockClustersAPI().EXPECT() + e.ListAll(mock.Anything, compute.ListClustersRequest{}).Return([]compute.ClusterDetails{ + { + ClusterId: 
"b", + ClusterName: "THIS NAME", + }, + { + ClusterId: "a", + ClusterName: "that name", }, - }, + }, nil) }, Resource: DataSourceClusters(), NonWritable: true, Read: true, ID: "_", HCL: `cluster_name_contains = "this"`, - }.Apply(t) - require.NoError(t, err) - ids := d.Get("ids").(*schema.Set) - assert.True(t, ids.Contains("b")) - assert.Equal(t, 1, ids.Len()) + }.ApplyAndExpectData(t, map[string]any{ + "ids": []string{"b"}, + }) } func TestClustersDataSourceErrorsOut(t *testing.T) { From fc889ccca82a0acd11752f903c3741d079ca8885 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:53:22 +0100 Subject: [PATCH 17/39] Renamed `databricks_catalog_workspace_binding` to `databricks_workspace_binding` (#3703) * rename resource * fix test --- catalog/resource_catalog_workspace_binding.go | 154 +--------- ...resource_catalog_workspace_binding_test.go | 97 ------ catalog/resource_workspace_binding.go | 151 +++++++++ catalog/resource_workspace_binding_test.go | 287 ++++++++++++++++++ docs/resources/catalog_workspace_binding.md | 2 +- docs/resources/workspace_binding.md | 47 +++ internal/acceptance/workspace_binding_test.go | 50 +++ provider/provider.go | 1 + 8 files changed, 540 insertions(+), 249 deletions(-) create mode 100644 catalog/resource_workspace_binding.go create mode 100644 catalog/resource_workspace_binding_test.go create mode 100644 docs/resources/workspace_binding.md create mode 100644 internal/acceptance/workspace_binding_test.go diff --git a/catalog/resource_catalog_workspace_binding.go b/catalog/resource_catalog_workspace_binding.go index 9cbdeba066..b091bf216c 100644 --- a/catalog/resource_catalog_workspace_binding.go +++ b/catalog/resource_catalog_workspace_binding.go @@ -1,159 +1,11 @@ package catalog import ( - "context" - "fmt" - "log" - "strconv" - "strings" - - "github.com/databricks/databricks-sdk-go/apierr" - "github.com/databricks/databricks-sdk-go/service/catalog" 
"github.com/databricks/terraform-provider-databricks/common" - "github.com/hashicorp/go-cty/cty" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) -var getSecurableName = func(d *schema.ResourceData) string { - securableName, ok := d.GetOk("securable_name") - if !ok { - securableName = d.Get("catalog_name") - } - return securableName.(string) -} - func ResourceCatalogWorkspaceBinding() common.Resource { - workspaceBindingSchema := common.StructToSchema(catalog.WorkspaceBinding{}, - func(m map[string]*schema.Schema) map[string]*schema.Schema { - m["catalog_name"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ExactlyOneOf: []string{"catalog_name", "securable_name"}, - Deprecated: "Please use 'securable_name' and 'securable_type instead.", - } - m["securable_name"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ExactlyOneOf: []string{"catalog_name", "securable_name"}, - } - m["securable_type"] = &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Default: "catalog", - } - m["binding_type"].Default = catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite - m["binding_type"].ValidateFunc = validation.StringInSlice([]string{ - string(catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite), - string(catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly), - }, false) - return m - }, - ) - return common.Resource{ - Schema: workspaceBindingSchema, - SchemaVersion: 1, - StateUpgraders: []schema.StateUpgrader{ - { - Version: 0, - Type: bindingSchemaV0(), - Upgrade: bindingMigrateV0, - }, - }, - Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - w, err := c.WorkspaceClient() - if err != nil { - return err - } - var update catalog.WorkspaceBinding - common.DataToStructPointer(d, workspaceBindingSchema, &update) - - securableName := getSecurableName(d) - _, err = w.WorkspaceBindings.UpdateBindings(ctx, 
catalog.UpdateWorkspaceBindingsParameters{ - Add: []catalog.WorkspaceBinding{update}, - SecurableName: securableName, - SecurableType: d.Get("securable_type").(string), - }) - d.SetId(fmt.Sprintf("%d|%s|%s", update.WorkspaceId, d.Get("securable_type").(string), securableName)) - return err - }, - Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - w, err := c.WorkspaceClient() - if err != nil { - return err - } - // TODO: fix Read operation by splitting `id` into parts... Test with actual import. Remove not necessary code in exporter? - workspaceId := int64(d.Get("workspace_id").(int)) - securable_name := getSecurableName(d) - securable_type := d.Get("securable_type").(string) - if workspaceId == 0 || securable_name == "" || securable_type == "" { - parts := strings.Split(d.Id(), "|") - if len(parts) != 3 { - return fmt.Errorf("incorrect binding id: %s. Correct format: ||", d.Id()) - } - securable_name = parts[2] - securable_type = parts[1] - workspaceId, err = strconv.ParseInt(parts[0], 10, 0) - if err != nil { - return fmt.Errorf("can't parse workspace_id: %w", err) - } - d.Set("securable_name", securable_name) - d.Set("securable_type", securable_type) - d.Set("workspace_id", workspaceId) - } - bindings, err := w.WorkspaceBindings.GetBindings(ctx, catalog.GetBindingsRequest{ - SecurableName: securable_name, - SecurableType: securable_type, - }) - if err != nil { - return err - } - for _, binding := range bindings.Bindings { - if binding.WorkspaceId == workspaceId { - return common.StructToData(binding, workspaceBindingSchema, d) - } - } - return apierr.NotFound("Catalog has no binding to this workspace") - }, - Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - w, err := c.WorkspaceClient() - if err != nil { - return err - } - var update catalog.WorkspaceBinding - common.DataToStructPointer(d, workspaceBindingSchema, &update) - _, err = w.WorkspaceBindings.UpdateBindings(ctx, 
catalog.UpdateWorkspaceBindingsParameters{ - Remove: []catalog.WorkspaceBinding{update}, - SecurableName: getSecurableName(d), - SecurableType: d.Get("securable_type").(string), - }) - return err - }, - } -} - -// migrate to v1 state, as catalog_name is moved to securable_name -func bindingMigrateV0(ctx context.Context, rawState map[string]any, meta any) (map[string]any, error) { - newState := map[string]any{} - log.Printf("[INFO] Upgrade workspace binding schema") - newState["securable_name"] = rawState["catalog_name"] - newState["securable_type"] = "catalog" - newState["catalog_name"] = rawState["catalog_name"] - newState["workspace_id"] = rawState["workspace_id"] - newState["binding_type"] = string(catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite) - return newState, nil -} - -func bindingSchemaV0() cty.Type { - return (&schema.Resource{ - Schema: map[string]*schema.Schema{ - "catalog_name": { - Type: schema.TypeString, - Optional: true, - }, - "workspace_id": { - Type: schema.TypeString, - Optional: true, - }, - }}).CoreConfigSchema().ImpliedType() + r := ResourceWorkspaceBinding() + r.DeprecationMessage = "Use `databricks_workspace_binding` instead." 
+ return r } diff --git a/catalog/resource_catalog_workspace_binding_test.go b/catalog/resource_catalog_workspace_binding_test.go index bfaa713e77..dadf0fdfc9 100644 --- a/catalog/resource_catalog_workspace_binding_test.go +++ b/catalog/resource_catalog_workspace_binding_test.go @@ -106,103 +106,6 @@ func TestCatalogWorkspaceBindingsReadOnly_Create(t *testing.T) { }.ApplyNoError(t) } -func TestSecurableWorkspaceBindings_Create(t *testing.T) { - qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "PATCH", - Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog", - ExpectedRequest: catalog.UpdateWorkspaceBindingsParameters{ - Add: []catalog.WorkspaceBinding{ - { - BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, - WorkspaceId: int64(1234567890101112), - }, - }, - SecurableName: "my_catalog", - SecurableType: "catalog", - }, - Response: catalog.WorkspaceBindingsResponse{ - Bindings: []catalog.WorkspaceBinding{ - { - BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, - WorkspaceId: int64(1234567890101112), - }, - }, - }, - }, { - Method: "GET", - Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog?", - Response: catalog.WorkspaceBindingsResponse{ - Bindings: []catalog.WorkspaceBinding{ - { - BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, - WorkspaceId: int64(1234567890101112), - }, - }, - }, - }, - }, - Resource: ResourceCatalogWorkspaceBinding(), - Create: true, - HCL: ` - securable_name = "my_catalog" - securable_type = "catalog" - workspace_id = "1234567890101112" - binding_type = "BINDING_TYPE_READ_ONLY" - `, - }.ApplyNoError(t) -} - -func TestSecurableWorkspaceBindings_Delete(t *testing.T) { - qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - { - Method: "PATCH", - Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog", - ExpectedRequest: catalog.UpdateWorkspaceBindingsParameters{ - Remove: []catalog.WorkspaceBinding{ - { - BindingType: 
catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, - WorkspaceId: int64(1234567890101112), - }, - }, - SecurableName: "my_catalog", - SecurableType: "catalog", - }, - Response: catalog.WorkspaceBindingsResponse{ - Bindings: []catalog.WorkspaceBinding{ - { - BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, - WorkspaceId: int64(1234567890101112), - }, - }, - }, - }, { - Method: "GET", - Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog?", - Response: catalog.WorkspaceBindingsResponse{ - Bindings: []catalog.WorkspaceBinding{ - { - BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, - WorkspaceId: int64(1234567890101112), - }, - }, - }, - }, - }, - Resource: ResourceCatalogWorkspaceBinding(), - Delete: true, - ID: "1234567890101112|catalog|my_catalog", - HCL: ` - securable_name = "my_catalog" - securable_type = "catalog" - workspace_id = "1234567890101112" - binding_type = "BINDING_TYPE_READ_ONLY" - `, - }.ApplyNoError(t) -} - func TestCatalogWorkspaceBindingsReadImport(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/catalog/resource_workspace_binding.go b/catalog/resource_workspace_binding.go new file mode 100644 index 0000000000..0f558753b9 --- /dev/null +++ b/catalog/resource_workspace_binding.go @@ -0,0 +1,151 @@ +package catalog + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +var getSecurableName = func(d *schema.ResourceData) string { + securableName, ok := d.GetOk("securable_name") + if !ok { + securableName = d.Get("catalog_name") + } + return securableName.(string) +} + +func ResourceWorkspaceBinding() common.Resource { + 
workspaceBindingSchema := common.StructToSchema(catalog.WorkspaceBinding{}, + func(m map[string]*schema.Schema) map[string]*schema.Schema { + m["catalog_name"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"catalog_name", "securable_name"}, + Deprecated: "Please use 'securable_name' and 'securable_type instead.", + } + m["securable_name"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ExactlyOneOf: []string{"catalog_name", "securable_name"}, + } + m["securable_type"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "catalog", + } + common.CustomizeSchemaPath(m, "securable_type").SetValidateFunc(validation.StringInSlice([]string{"catalog", "external-location", "storage-credential"}, false)) + common.CustomizeSchemaPath(m, "binding_type").SetDefault(catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite).SetValidateFunc(validation.StringInSlice([]string{ + string(catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite), + string(catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly), + }, false)) + return m + }, + ) + return common.Resource{ + Schema: workspaceBindingSchema, + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Version: 0, + Type: bindingSchemaV0(), + Upgrade: bindingMigrateV0, + }, + }, + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var update catalog.WorkspaceBinding + common.DataToStructPointer(d, workspaceBindingSchema, &update) + securableName := getSecurableName(d) + securableType := d.Get("securable_type").(string) + _, err = w.WorkspaceBindings.UpdateBindings(ctx, catalog.UpdateWorkspaceBindingsParameters{ + Add: []catalog.WorkspaceBinding{update}, + SecurableName: securableName, + SecurableType: securableType, + }) + d.SetId(fmt.Sprintf("%d|%s|%s", update.WorkspaceId, securableType, securableName)) + return 
err + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + parts := strings.Split(d.Id(), "|") + if len(parts) != 3 { + return fmt.Errorf("incorrect binding id: %s. Correct format: ||", d.Id()) + } + securableName := parts[2] + securableType := parts[1] + workspaceId, err := strconv.ParseInt(parts[0], 10, 0) + if err != nil { + return fmt.Errorf("can't parse workspace_id: %w", err) + } + d.Set("securable_name", securableName) + d.Set("securable_type", securableType) + d.Set("workspace_id", workspaceId) + bindings, err := w.WorkspaceBindings.GetBindingsBySecurableTypeAndSecurableName(ctx, securableType, securableName) + if err != nil { + return err + } + for _, binding := range bindings.Bindings { + if binding.WorkspaceId == workspaceId { + return common.StructToData(binding, workspaceBindingSchema, d) + } + } + return apierr.NotFound(fmt.Sprintf("%s has no binding to this workspace", securableName)) + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + w, err := c.WorkspaceClient() + if err != nil { + return err + } + var update catalog.WorkspaceBinding + common.DataToStructPointer(d, workspaceBindingSchema, &update) + _, err = w.WorkspaceBindings.UpdateBindings(ctx, catalog.UpdateWorkspaceBindingsParameters{ + Remove: []catalog.WorkspaceBinding{update}, + SecurableName: getSecurableName(d), + SecurableType: d.Get("securable_type").(string), + }) + return err + }, + } +} + +// migrate to v1 state, as catalog_name is moved to securableName +func bindingMigrateV0(ctx context.Context, rawState map[string]any, meta any) (map[string]any, error) { + newState := map[string]any{} + log.Printf("[INFO] Upgrade workspace binding schema") + newState["securable_name"] = rawState["catalog_name"] + newState["securable_type"] = "catalog" + newState["catalog_name"] = rawState["catalog_name"] + newState["workspace_id"] = 
rawState["workspace_id"] + newState["binding_type"] = string(catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite) + return newState, nil +} + +func bindingSchemaV0() cty.Type { + return (&schema.Resource{ + Schema: map[string]*schema.Schema{ + "catalog_name": { + Type: schema.TypeString, + Optional: true, + }, + "workspace_id": { + Type: schema.TypeString, + Optional: true, + }, + }}).CoreConfigSchema().ImpliedType() +} diff --git a/catalog/resource_workspace_binding_test.go b/catalog/resource_workspace_binding_test.go new file mode 100644 index 0000000000..4059e11b44 --- /dev/null +++ b/catalog/resource_workspace_binding_test.go @@ -0,0 +1,287 @@ +package catalog + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" +) + +func TestWorkspaceBindingsCornerCases(t *testing.T) { + qa.ResourceCornerCases(t, ResourceWorkspaceBinding(), + qa.CornerCaseID("1234567890101112|catalog|my_catalog"), + qa.CornerCaseSkipCRUD("create")) +} + +func TestWorkspaceBindings_Create(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog", + ExpectedRequest: catalog.UpdateWorkspaceBindingsParameters{ + Add: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + WorkspaceId: int64(1234567890101112), + }, + }, + SecurableName: "my_catalog", + SecurableType: "catalog", + }, + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, { + Method: "GET", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog?", + Response: catalog.WorkspaceBindingsResponse{ + Bindings: 
[]catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, + }, + Resource: ResourceWorkspaceBinding(), + Create: true, + HCL: ` + catalog_name = "my_catalog" + workspace_id = "1234567890101112" + `, + }.ApplyNoError(t) +} + +func TestWorkspaceBindingsReadOnly_Create(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog", + ExpectedRequest: catalog.UpdateWorkspaceBindingsParameters{ + Add: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + SecurableName: "my_catalog", + SecurableType: "catalog", + }, + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, { + Method: "GET", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog?", + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, + }, + Resource: ResourceWorkspaceBinding(), + Create: true, + HCL: ` + catalog_name = "my_catalog" + workspace_id = "1234567890101112" + binding_type = "BINDING_TYPE_READ_ONLY" + `, + }.ApplyNoError(t) +} + +func TestSecurableWorkspaceBindings_Create(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog", + ExpectedRequest: catalog.UpdateWorkspaceBindingsParameters{ + Add: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + SecurableName: 
"my_catalog", + SecurableType: "catalog", + }, + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, { + Method: "GET", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog?", + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, + }, + Resource: ResourceWorkspaceBinding(), + Create: true, + HCL: ` + securable_name = "my_catalog" + securable_type = "catalog" + workspace_id = "1234567890101112" + binding_type = "BINDING_TYPE_READ_ONLY" + `, + }.ApplyNoError(t) +} + +func TestSecurableWorkspaceBindings_CreateExtLocation(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockWorkspaceBindingsAPI().EXPECT() + e.UpdateBindings(mock.Anything, catalog.UpdateWorkspaceBindingsParameters{ + Add: []catalog.WorkspaceBinding{{ + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + WorkspaceId: int64(1234567890101112), + }, + }, + SecurableName: "external_location", + SecurableType: "external-location", + }).Return(&catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite, + WorkspaceId: int64(1234567890101112), + }, + }, + }, nil) + e.GetBindingsBySecurableTypeAndSecurableName(mock.Anything, "external-location", "external_location").Return(&catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + WorkspaceId: int64(1234567890101112), + }, + }, + }, nil) + }, + Resource: ResourceWorkspaceBinding(), + Create: true, + HCL: ` + securable_name = "external_location" + securable_type = "external-location" + workspace_id = "1234567890101112" + `, + 
}.ApplyNoError(t) +} + +func TestSecurableWorkspaceBindings_Delete(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog", + ExpectedRequest: catalog.UpdateWorkspaceBindingsParameters{ + Remove: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + SecurableName: "my_catalog", + SecurableType: "catalog", + }, + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, { + Method: "GET", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog?", + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, + }, + Resource: ResourceWorkspaceBinding(), + Delete: true, + ID: "1234567890101112|catalog|my_catalog", + HCL: ` + securable_name = "my_catalog" + securable_type = "catalog" + workspace_id = "1234567890101112" + binding_type = "BINDING_TYPE_READ_ONLY" + `, + }.ApplyNoError(t) +} + +func TestWorkspaceBindingsReadImport(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/bindings/catalog/my_catalog?", + Response: catalog.WorkspaceBindingsResponse{ + Bindings: []catalog.WorkspaceBinding{ + { + BindingType: catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly, + WorkspaceId: int64(1234567890101112), + }, + }, + }, + }, + }, + Resource: ResourceWorkspaceBinding(), + ID: "1234567890101112|catalog|my_catalog", + New: true, + Read: true, + }.ApplyAndExpectData(t, map[string]any{ + "workspace_id": 1234567890101112, + "securable_type": "catalog", + "securable_name": 
"my_catalog", + }) +} + +func TestWorkspaceBindingsReadErrors(t *testing.T) { + qa.ResourceFixture{ + Resource: ResourceWorkspaceBinding(), + ID: "1234567890101112|catalog", + New: true, + Read: true, + }.ExpectError(t, "incorrect binding id: 1234567890101112|catalog. Correct format: ||") + + qa.ResourceFixture{ + Resource: ResourceWorkspaceBinding(), + ID: "A234567890101112|catalog|my_catalog", + New: true, + Read: true, + }.ExpectError(t, "can't parse workspace_id: strconv.ParseInt: parsing \"A234567890101112\": invalid syntax") +} diff --git a/docs/resources/catalog_workspace_binding.md b/docs/resources/catalog_workspace_binding.md index 32312f9966..5520192fb5 100644 --- a/docs/resources/catalog_workspace_binding.md +++ b/docs/resources/catalog_workspace_binding.md @@ -3,7 +3,7 @@ subcategory: "Unity Catalog" --- # databricks_catalog_workspace_binding Resource --> **Note** This resource could be only used with workspace-level provider! +-> **NOTE**This resource has been deprecated and will be removed soon. Please use the [databricks_workspace_binding resource](./workspace_binding.md) instead. If you use workspaces to isolate user data access, you may want to limit catalog access to specific workspaces in your account, also known as workspace-catalog binding diff --git a/docs/resources/workspace_binding.md b/docs/resources/workspace_binding.md new file mode 100644 index 0000000000..8eaabe9422 --- /dev/null +++ b/docs/resources/workspace_binding.md @@ -0,0 +1,47 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_workspace_binding Resource + +-> **Note** This resource could be only used with workspace-level provider! + +If you use workspaces to isolate user data access, you may want to limit access to catalog, external locations or storage credentials from specific workspaces in your account, also known as workspace binding + +By default, Databricks assigns the securable to all workspaces attached to the current metastore. 
By using `databricks_workspace_binding`, the securable will be unassigned from all workspaces and only assigned explicitly using this resource. + +-> **Note** + To use this resource the securable must have its isolation mode set to `ISOLATED` in the corresponding resource (e.g. `databricks_catalog`, `databricks_external_location` or `databricks_storage_credential`). Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration), [this guide](https://docs.databricks.com/en/connect/unity-catalog/external-locations.html#workspace-binding) or [this guide](https://docs.databricks.com/en/connect/unity-catalog/storage-credentials.html#optional-assign-a-storage-credential-to-specific-workspaces). + +-> **Note** + If the securable's isolation mode was set to `ISOLATED` using Terraform then the securable will have been automatically bound to the workspace it was created from. + +## Example Usage + +```hcl +resource "databricks_catalog" "sandbox" { + name = "sandbox" + isolation_mode = "ISOLATED" +} + +resource "databricks_workspace_binding" "sandbox" { + securable_name = databricks_catalog.sandbox.name + workspace_id = databricks_mws_workspaces.other.workspace_id +} +``` + +## Argument Reference + +The following arguments are required: + +* `workspace_id` - ID of the workspace. Change forces creation of a new resource. +* `securable_name` - Name of securable. Change forces creation of a new resource. +* `securable_type` - Type of securable. Defaults to `catalog`. Change forces creation of a new resource. +* `binding_type` - Binding mode. Defaults to `BINDING_TYPE_READ_WRITE`.
Possible values are `BINDING_TYPE_READ_ONLY`, `BINDING_TYPE_READ_WRITE` + +## Import + +This resource can be imported by using the combination of workspace ID, securable type and name: + +```sh +terraform import databricks_workspace_binding.this "<workspace_id>|<securable_type>|<securable_name>" +``` diff --git a/internal/acceptance/workspace_binding_test.go b/internal/acceptance/workspace_binding_test.go new file mode 100644 index 0000000000..8759b8a479 --- /dev/null +++ b/internal/acceptance/workspace_binding_test.go @@ -0,0 +1,50 @@ +package acceptance + +import ( + "fmt" + "testing" +) + +func workspaceBindingTemplateWithWorkspaceId(workspaceId string) string { + return fmt.Sprintf(` + # The dummy workspace needs to be assigned to the metastore for this test to pass + resource "databricks_metastore_assignment" "this" { + metastore_id = "{env.TEST_METASTORE_ID}" + workspace_id = {env.DUMMY_WORKSPACE_ID} + } + + resource "databricks_catalog" "dev" { + name = "dev{var.RANDOM}" + isolation_mode = "ISOLATED" + } + + resource "databricks_catalog" "prod" { + name = "prod{var.RANDOM}" + isolation_mode = "ISOLATED" + } + + resource "databricks_workspace_binding" "dev" { + catalog_name = databricks_catalog.dev.name + workspace_id = %s + } + + resource "databricks_workspace_binding" "prod" { + securable_name = databricks_catalog.prod.name + securable_type = "catalog" + workspace_id = %s + binding_type = "BINDING_TYPE_READ_ONLY" + } + `, workspaceId, workspaceId) +} + +func TestUcAccWorkspaceBindingToOtherWorkspace(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: workspaceBindingTemplateWithWorkspaceId("{env.DUMMY_WORKSPACE_ID}"), + }) +} + +func TestUcAccWorkspaceBindingToSameWorkspace(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: workspaceBindingTemplateWithWorkspaceId("{env.THIS_WORKSPACE_ID}"), + }) +} diff --git a/provider/provider.go b/provider/provider.go index f3f0bcc921..65bbf8e90c 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -206,6 +206,7 @@ func
DatabricksProvider() *schema.Provider { "databricks_vector_search_endpoint": vectorsearch.ResourceVectorSearchEndpoint().ToResource(), "databricks_vector_search_index": vectorsearch.ResourceVectorSearchIndex().ToResource(), "databricks_volume": catalog.ResourceVolume().ToResource(), + "databricks_workspace_binding": catalog.ResourceWorkspaceBinding().ToResource(), "databricks_workspace_conf": workspace.ResourceWorkspaceConf().ToResource(), "databricks_workspace_file": workspace.ResourceWorkspaceFile().ToResource(), }, From c6f949c8a29c829e4990bc013cf8e320f519b1d0 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Tue, 2 Jul 2024 18:42:55 +0200 Subject: [PATCH 18/39] Exporter: fix generation of `run_as` blocks in `databricks_job` (#3724) * Exporter: fix generation of `run_as` blocks in `databricks_job` Because the `run_as` was marked as `computed` it was ignored when generating the code. * Ignore `run_as` for the current user --- exporter/context.go | 2 +- exporter/exporter_test.go | 20 +++++++++++++++++++- exporter/importables.go | 11 +++++++++++ exporter/test-data/run-job-child.json | 2 ++ exporter/test-data/run-job-main.json | 2 ++ 5 files changed, 35 insertions(+), 2 deletions(-) diff --git a/exporter/context.go b/exporter/context.go index 28fddf80f2..ee3f2a753b 100644 --- a/exporter/context.go +++ b/exporter/context.go @@ -360,10 +360,10 @@ func (ic *importContext) Run() error { if err != nil { return err } + ic.meUserName = me.UserName for _, g := range me.Groups { if g.Display == "admins" { ic.meAdmin = true - ic.meUserName = me.UserName break } } diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go index 174a766409..30c92591d6 100644 --- a/exporter/exporter_test.go +++ b/exporter/exporter_test.go @@ -2553,7 +2553,19 @@ resource "databricks_pipeline" "def" { func TestImportingRunJobTask(t *testing.T) { qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - meAdminFixture, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.0/preview/scim/v2/Me", + 
Response: scim.User{ + Groups: []scim.ComplexValue{ + { + Display: "admins", + }, + }, + UserName: "user@domain.com", + }, + }, noCurrentMetastoreAttached, emptyRepos, emptyIpAccessLIst, @@ -2596,5 +2608,11 @@ func TestImportingRunJobTask(t *testing.T) { assert.True(t, strings.Contains(contentStr, `job_id = databricks_job.jartask_932035899730845.id`)) assert.True(t, strings.Contains(contentStr, `resource "databricks_job" "runjobtask_1047501313827425"`)) assert.True(t, strings.Contains(contentStr, `resource "databricks_job" "jartask_932035899730845"`)) + assert.True(t, strings.Contains(contentStr, `run_as { + service_principal_name = "c1b2a35b-87c4-481a-a0fb-0508be621957" + }`)) + assert.False(t, strings.Contains(contentStr, `run_as { + user_name = "user@domain.com" + }`)) }) } diff --git a/exporter/importables.go b/exporter/importables.go index 165081f12c..032ab855fc 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -635,6 +635,17 @@ var resourcesMap map[string]importable = map[string]importable{ if js.NotificationSettings != nil { return reflect.DeepEqual(*js.NotificationSettings, sdk_jobs.JobNotificationSettings{}) } + case "run_as": + if js.RunAs != nil && (js.RunAs.UserName != "" || js.RunAs.ServicePrincipalName != "") { + var user string + if js.RunAs.UserName != "" { + user = js.RunAs.UserName + } else { + user = js.RunAs.ServicePrincipalName + } + return user == ic.meUserName + } + return true } if strings.HasPrefix(pathString, "task.") { parts := strings.Split(pathString, ".") diff --git a/exporter/test-data/run-job-child.json b/exporter/test-data/run-job-child.json index 4cc2c7a6f2..6131aed35a 100644 --- a/exporter/test-data/run-job-child.json +++ b/exporter/test-data/run-job-child.json @@ -1,6 +1,8 @@ { "created_time":1678702840675, "job_id":932035899730845, + "run_as_user_name": "c1b2a35b-87c4-481a-a0fb-0508be621957", + "run_as_owner": false, "settings": { "format":"MULTI_TASK", "max_concurrent_runs":1, diff --git 
a/exporter/test-data/run-job-main.json b/exporter/test-data/run-job-main.json index 0430f6fd8a..15390aa00d 100644 --- a/exporter/test-data/run-job-main.json +++ b/exporter/test-data/run-job-main.json @@ -1,6 +1,8 @@ { "created_time":1700654567867, "job_id":1047501313827425, + "run_as_user_name": "user@domain.com", + "run_as_owner": false, "settings": { "format":"MULTI_TASK", "max_concurrent_runs":1, From ff837ab7f8b45e1dba1e52dd3a020ba4059ae60c Mon Sep 17 00:00:00 2001 From: Karol Date: Wed, 3 Jul 2024 09:05:27 +0200 Subject: [PATCH 19/39] Adds `databricks_volume` as data source (#3211) * data_volume * data_volume unit and acceptance tests * docs * WorkspaceDataWithCustomParams test * fixed formatting * Removing unnecessary changes to resource.go * refactored data_volume * making change for consitency with GO SDK v0.35.0 * Update catalog/data_volume.go * Update catalog/data_volume.go * data source as nested strucutre * review comments addressed * acceptance test --------- Co-authored-by: Alex Ott Co-authored-by: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> --- catalog/data_volume.go | 25 +++++++++ catalog/data_volume_test.go | 50 ++++++++++++++++++ common/resource.go | 2 +- docs/data-sources/volume.md | 69 +++++++++++++++++++++++++ internal/acceptance/data_volume_test.go | 51 ++++++++++++++++++ provider/provider.go | 1 + 6 files changed, 197 insertions(+), 1 deletion(-) create mode 100644 catalog/data_volume.go create mode 100644 catalog/data_volume_test.go create mode 100644 docs/data-sources/volume.md create mode 100644 internal/acceptance/data_volume_test.go diff --git a/catalog/data_volume.go b/catalog/data_volume.go new file mode 100644 index 0000000000..598160206a --- /dev/null +++ b/catalog/data_volume.go @@ -0,0 +1,25 @@ +package catalog + +import ( + "context" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/common" +) + +func 
DataSourceVolume() common.Resource { + return common.WorkspaceData(func(ctx context.Context, data *struct { + Id string `json:"id,omitempty" tf:"computed"` + Name string `json:"name"` + Volume *catalog.VolumeInfo `json:"volume_info,omitempty" tf:"computed"` + }, w *databricks.WorkspaceClient) error { + volume, err := w.Volumes.ReadByName(ctx, data.Name) + if err != nil { + return err + } + data.Volume = volume + data.Id = volume.FullName + return nil + }) +} diff --git a/catalog/data_volume_test.go b/catalog/data_volume_test.go new file mode 100644 index 0000000000..a7af490f3e --- /dev/null +++ b/catalog/data_volume_test.go @@ -0,0 +1,50 @@ +package catalog + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/terraform-provider-databricks/qa" + "github.com/stretchr/testify/mock" +) + +func TestDataSourceVolume(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(m *mocks.MockWorkspaceClient) { + e := m.GetMockVolumesAPI().EXPECT() + e.ReadByName(mock.Anything, "a.b.c").Return(&catalog.VolumeInfo{ + FullName: "a.b.c", + CatalogName: "a", + SchemaName: "b", + Name: "c", + Owner: "account users", + VolumeType: catalog.VolumeTypeManaged, + }, nil) + }, + Resource: DataSourceVolume(), + HCL: ` + name="a.b.c"`, + Read: true, + NonWritable: true, + ID: "_", + }.ApplyAndExpectData(t, map[string]any{ + "name": "a.b.c", + "volume_info.0.full_name": "a.b.c", + "volume_info.0.catalog_name": "a", + "volume_info.0.schema_name": "b", + "volume_info.0.name": "c", + "volume_info.0.owner": "account users", + "volume_info.0.volume_type": "MANAGED", + }) +} + +func TestDataSourceVolume_Error(t *testing.T) { + qa.ResourceFixture{ + Fixtures: qa.HTTPFailures, + Resource: DataSourceVolume(), + Read: true, + NonWritable: true, + ID: "_", + }.ExpectError(t, "i'm a teapot") +} diff --git a/common/resource.go b/common/resource.go index 
4ae0bef2e0..5f42a4ac48 100644 --- a/common/resource.go +++ b/common/resource.go @@ -365,7 +365,7 @@ func genericDatabricksData[T, P, C any]( hasOther bool) Resource { var dummy T var other P - otherFields := StructToSchema(other, NoCustomize) + otherFields := StructToSchema(other, nil) s := StructToSchema(dummy, func(m map[string]*schema.Schema) map[string]*schema.Schema { // For WorkspaceData and AccountData, a single data type is used to represent all of the fields of // the resource, so its configuration is correct. For the *WithParams methods, the SdkType parameter diff --git a/docs/data-sources/volume.md b/docs/data-sources/volume.md new file mode 100644 index 0000000000..9a32875a1f --- /dev/null +++ b/docs/data-sources/volume.md @@ -0,0 +1,69 @@ +--- +subcategory: "Unity Catalog" +--- +# databricks_volume Data Source + +Retrieves details about [databricks_volume](../resources/volume.md) that was created by Terraform or manually. +A volume can be identified by its three-level (fully qualified) name (in the form of: `catalog_name`.`schema_name`.`volume_name`) as input. This can be retrieved programmatically using [databricks_volumes](../data-sources/volumes.md) data source. 
+
+## Example Usage
+
+* Retrieve details of all volumes in a _things_ [databricks_schema](../resources/schema.md) of a _sandbox_ [databricks_catalog](../resources/catalog.md):
+
+```hcl
+data "databricks_volumes" "all" {
+  catalog_name = "sandbox"
+  schema_name  = "things"
+}
+
+data "databricks_volume" "this" {
+  for_each = data.databricks_volumes.all.ids
+  name     = each.value
+}
+```
+
+* Search for a specific volume by its fully qualified name
+
+```hcl
+data "databricks_volume" "this" {
+  name = "catalog.schema.volume"
+}
+```
+
+## Argument Reference
+
+* `name` - (Required) a fully qualified name of [databricks_volume](../resources/volume.md): *`catalog`.`schema`.`volume`*
+
+
+## Attribute Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of this Unity Catalog Volume in form of `<catalog>.<schema>.<volume>`.
+* `volume_info` - VolumeInfo object for a Unity Catalog volume. This contains the following attributes:
+  * `name` - Name of the volume, relative to parent schema.
+  * `access_point` - the AWS access point to use when accessing s3 bucket for this volume's external location
+  * `browse_only` - indicates whether the principal is limited to retrieving metadata for the volume through the BROWSE privilege when include_browse is enabled in the request.
+ * `catalog_name` - the name of the catalog where the schema and the volume are + * `comment` - the comment attached to the volume + * `created_at` - the Unix timestamp at the volume's creation + * `created_by` - the identifier of the user who created the volume + * `encryption_details` - encryption options that apply to clients connecting to cloud storage + * `full_name` - the three-level (fully qualified) name of the volume + * `metastore_id` - the unique identifier of the metastore + * `name` - the name of the volume + * `owner` - the identifier of the user who owns the volume + * `schema_name` - the name of the schema where the volume is + * `storage_location` - the storage location on the cloud + * `updated_at` - the timestamp of the last time changes were made to the volume + * `updated_by` - the identifier of the user who updated the volume last time + * `volume_id` - the unique identifier of the volume + * `volume_type` - whether the volume is `MANAGED` or `EXTERNAL` + +## Related Resources + +The following resources are used in the same context: + +* [databricks_volume](../resources/volume.md) to manage volumes within Unity Catalog. +* [databricks_schema](../resources/schema.md) to manage schemas within Unity Catalog. +* [databricks_catalog](../resources/catalog.md) to manage catalogs within Unity Catalog. 
diff --git a/internal/acceptance/data_volume_test.go b/internal/acceptance/data_volume_test.go new file mode 100644 index 0000000000..f9d1ae7033 --- /dev/null +++ b/internal/acceptance/data_volume_test.go @@ -0,0 +1,51 @@ +package acceptance + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/require" +) + +func checkDataSourceVolume(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + _, ok := s.Modules[0].Resources["data.databricks_volume.this"] + require.True(t, ok, "data.databricks_volume.this has to be there") + return nil + } +} +func TestUcAccDataSourceVolume(t *testing.T) { + unityWorkspaceLevel(t, step{ + Template: ` + resource "databricks_catalog" "sandbox" { + name = "sandbox{var.RANDOM}" + comment = "this catalog is managed by terraform" + properties = { + purpose = "testing" + } + } + + resource "databricks_schema" "things" { + catalog_name = databricks_catalog.sandbox.id + name = "things{var.RANDOM}" + comment = "this database is managed by terraform" + properties = { + kind = "various" + } + } + + resource "databricks_volume" "this" { + name = "volume_data_source_test" + catalog_name = databricks_catalog.sandbox.name + schema_name = databricks_schema.things.name + volume_type = "MANAGED" + } + + data "databricks_volume" "this" { + name = databricks_volume.this.id + depends_on = [ databricks_volume.this ] + } + `, + Check: checkDataSourceVolume(t), + }) +} diff --git a/provider/provider.go b/provider/provider.go index 65bbf8e90c..a73b4e28a8 100644 --- a/provider/provider.go +++ b/provider/provider.go @@ -113,6 +113,7 @@ func DatabricksProvider() *schema.Provider { "databricks_table": catalog.DataSourceTable().ToResource(), "databricks_tables": catalog.DataSourceTables().ToResource(), "databricks_views": catalog.DataSourceViews().ToResource(), + "databricks_volume": catalog.DataSourceVolume().ToResource(), "databricks_volumes": 
catalog.DataSourceVolumes().ToResource(), "databricks_user": scim.DataSourceUser().ToResource(), "databricks_zones": clusters.DataSourceClusterZones().ToResource(), From 0d943ead9da02f88879f4c18aeafc374eb6e76e9 Mon Sep 17 00:00:00 2001 From: touchida <56789230+touchida@users.noreply.github.com> Date: Wed, 3 Jul 2024 16:06:27 +0900 Subject: [PATCH 20/39] Make the schedule.pause_status field read-only (#3692) --- catalog/resource_quality_monitor.go | 1 + docs/resources/quality_monitor.md | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/catalog/resource_quality_monitor.go b/catalog/resource_quality_monitor.go index 9e9169fd4e..1d2beffc4e 100644 --- a/catalog/resource_quality_monitor.go +++ b/catalog/resource_quality_monitor.go @@ -53,6 +53,7 @@ func ResourceQualityMonitor() common.Resource { common.CustomizeSchemaPath(m, "profile_metrics_table_name").SetReadOnly() common.CustomizeSchemaPath(m, "status").SetReadOnly() common.CustomizeSchemaPath(m, "dashboard_id").SetReadOnly() + common.CustomizeSchemaPath(m, "schedule", "pause_status").SetReadOnly() return m }, ) diff --git a/docs/resources/quality_monitor.md b/docs/resources/quality_monitor.md index a3292f65d2..b01208c80e 100644 --- a/docs/resources/quality_monitor.md +++ b/docs/resources/quality_monitor.md @@ -112,7 +112,6 @@ table. * `schedule` - The schedule for automatically updating and refreshing metric tables. This block consists of following fields: * `quartz_cron_expression` - string expression that determines when to run the monitor. See [Quartz documentation](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for examples. * `timezone_id` - string with timezone id (e.g., `PST`) in which to evaluate the Quartz expression. - * `pause_status` - optional string field that indicates whether a schedule is paused (`PAUSED`) or not (`UNPAUSED`). * `skip_builtin_dashboard` - Whether to skip creating a default dashboard summarizing data quality metrics. 
* `slicing_exprs` - List of column expressions to slice data with for targeted analysis. The data is grouped by each expression independently, resulting in a separate slice for each predicate and its complements. For high-cardinality columns, only the top 100 unique values by frequency will generate slices. * `warehouse_id` - Optional argument to specify the warehouse for dashboard creation. If not specified, the first running warehouse will be used. From 75236a645b7d86819e43281e6c2623bbc5527528 Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Wed, 3 Jul 2024 08:51:05 +0100 Subject: [PATCH 21/39] Added support for binding storage credentials and external locations to specific workspaces (#3678) * add isolation mode * rename * doc * fix doc * add tests * add acceptance tests * add computed * typo * add tests * use correct isolation_mode * fix test --- catalog/bindings/bindings.go | 2 +- catalog/resource_external_location_test.go | 8 +-- catalog/resource_storage_credential_test.go | 10 +-- docs/resources/workspace_binding.md | 4 +- .../catalog_workspace_binding_test.go | 61 ------------------- internal/acceptance/external_location_test.go | 4 +- internal/acceptance/workspace_binding_test.go | 39 +++++++++--- 7 files changed, 44 insertions(+), 84 deletions(-) delete mode 100644 internal/acceptance/catalog_workspace_binding_test.go diff --git a/catalog/bindings/bindings.go b/catalog/bindings/bindings.go index 8c7743aaf1..6a2633ad8a 100644 --- a/catalog/bindings/bindings.go +++ b/catalog/bindings/bindings.go @@ -9,7 +9,7 @@ import ( ) func AddCurrentWorkspaceBindings(ctx context.Context, d *schema.ResourceData, w *databricks.WorkspaceClient, securableName string, securableType string) error { - if d.Get("isolation_mode") != "ISOLATED" { + if d.Get("isolation_mode") != "ISOLATED" && d.Get("isolation_mode") != "ISOLATION_MODE_ISOLATED" { return nil } // Bind the current workspace if the catalog is isolated, otherwise the read 
will fail diff --git a/catalog/resource_external_location_test.go b/catalog/resource_external_location_test.go index a460425101..314d2731d0 100644 --- a/catalog/resource_external_location_test.go +++ b/catalog/resource_external_location_test.go @@ -76,13 +76,13 @@ func TestCreateIsolatedExternalLocation(t *testing.T) { Url: "s3://foo/bar", CredentialName: "bcd", Comment: "def", - IsolationMode: "ISOLATED", + IsolationMode: "ISOLATION_MODE_ISOLATED", }).Return(&catalog.ExternalLocationInfo{ Name: "abc", Url: "s3://foo/bar", CredentialName: "bcd", Comment: "def", - IsolationMode: "ISOLATED", + IsolationMode: "ISOLATION_MODE_ISOLATED", MetastoreId: "e", Owner: "f", }, nil) @@ -112,7 +112,7 @@ func TestCreateIsolatedExternalLocation(t *testing.T) { Url: "s3://foo/bar", CredentialName: "bcd", Comment: "def", - IsolationMode: "ISOLATED", + IsolationMode: "ISOLATION_MODE_ISOLATED", MetastoreId: "e", Owner: "f", }, nil) @@ -124,7 +124,7 @@ func TestCreateIsolatedExternalLocation(t *testing.T) { url = "s3://foo/bar" credential_name = "bcd" comment = "def" - isolation_mode = "ISOLATED" + isolation_mode = "ISOLATION_MODE_ISOLATED" `, }.ApplyNoError(t) } diff --git a/catalog/resource_storage_credential_test.go b/catalog/resource_storage_credential_test.go index c9d2e07af6..cf9bf0118d 100644 --- a/catalog/resource_storage_credential_test.go +++ b/catalog/resource_storage_credential_test.go @@ -88,7 +88,7 @@ func TestCreateIsolatedStorageCredential(t *testing.T) { RoleArn: "def", }, Comment: "c", - IsolationMode: "ISOLATED", + IsolationMode: "ISOLATION_MODE_ISOLATED", }).Return(&catalog.StorageCredentialInfo{ Name: "a", AwsIamRole: &catalog.AwsIamRoleResponse{ @@ -98,7 +98,7 @@ func TestCreateIsolatedStorageCredential(t *testing.T) { MetastoreId: "d", Id: "1234-5678", Owner: "f", - IsolationMode: "ISOLATED", + IsolationMode: "ISOLATION_MODE_ISOLATED", }, nil) w.GetMockMetastoresAPI().EXPECT().Current(mock.Anything).Return(&catalog.MetastoreAssignment{ MetastoreId: "e", @@ -130,7 
+130,7 @@ func TestCreateIsolatedStorageCredential(t *testing.T) { MetastoreId: "d", Id: "1234-5678", Owner: "f", - IsolationMode: "ISOLATED", + IsolationMode: "ISOLATION_MODE_ISOLATED", }, nil) }, Resource: ResourceStorageCredential(), @@ -141,14 +141,14 @@ func TestCreateIsolatedStorageCredential(t *testing.T) { role_arn = "def" } comment = "c" - isolation_mode = "ISOLATED" + isolation_mode = "ISOLATION_MODE_ISOLATED" `, }.ApplyAndExpectData(t, map[string]any{ "aws_iam_role.0.external_id": "123", "aws_iam_role.0.role_arn": "def", "name": "a", "storage_credential_id": "1234-5678", - "isolation_mode": "ISOLATED", + "isolation_mode": "ISOLATION_MODE_ISOLATED", }) } diff --git a/docs/resources/workspace_binding.md b/docs/resources/workspace_binding.md index 8eaabe9422..198ce8fe21 100644 --- a/docs/resources/workspace_binding.md +++ b/docs/resources/workspace_binding.md @@ -35,8 +35,8 @@ The following arguments are required: * `workspace_id` - ID of the workspace. Change forces creation of a new resource. * `securable_name` - Name of securable. Change forces creation of a new resource. -* `securable_type` - Type of securable. Default to `catalog`. Change forces creation of a new resource. -* `binding_type` - Binding mode. Default to `BINDING_TYPE_READ_WRITE`. Possible values are `BINDING_TYPE_READ_ONLY`, `BINDING_TYPE_READ_WRITE` +* `securable_type` - Type of securable. Can be `catalog`, `external-locations` or `storage-credentials`. Default to `catalog`. Change forces creation of a new resource. +* `binding_type` - (Optional) Binding mode. Default to `BINDING_TYPE_READ_WRITE`. For `catalog`, possible values are `BINDING_TYPE_READ_ONLY`, `BINDING_TYPE_READ_WRITE`. 
For `external-location` or `storage-credential`, no binding mode needs to be specified ## Import diff --git a/internal/acceptance/catalog_workspace_binding_test.go b/internal/acceptance/catalog_workspace_binding_test.go deleted file mode 100644 index 5822195d6d..0000000000 --- a/internal/acceptance/catalog_workspace_binding_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package acceptance - -import ( - "testing" -) - -func TestUcAccCatalogWorkspaceBindingToOtherWorkspace(t *testing.T) { - unityWorkspaceLevel(t, step{ - Template: ` - # The dummy workspace needs to be assigned to the metastore for this test to pass - resource "databricks_metastore_assignment" "this" { - metastore_id = "{env.TEST_METASTORE_ID}" - workspace_id = {env.DUMMY_WORKSPACE_ID} - } - - resource "databricks_catalog" "dev" { - name = "dev{var.RANDOM}" - isolation_mode = "ISOLATED" - } - - resource "databricks_catalog_workspace_binding" "test" { - catalog_name = databricks_catalog.dev.name - workspace_id = {env.DUMMY_WORKSPACE_ID} # dummy workspace, not the authenticated workspace in this test - } - `, - }) -} - -func TestUcAccCatalogWorkspaceBindingToSameWorkspace(t *testing.T) { - unityWorkspaceLevel(t, step{ - Template: ` - resource "databricks_catalog" "dev" { - name = "dev{var.RANDOM}" - isolation_mode = "ISOLATED" - } - - resource "databricks_catalog_workspace_binding" "test" { - catalog_name = databricks_catalog.dev.name - workspace_id = {env.THIS_WORKSPACE_ID} - } - `, - }) -} - -func TestUcAccSecurableWorkspaceBindingToSameWorkspaceReadOnly(t *testing.T) { - unityWorkspaceLevel(t, step{ - Template: ` - resource "databricks_catalog" "dev" { - name = "dev{var.RANDOM}" - isolation_mode = "ISOLATED" - } - - resource "databricks_catalog_workspace_binding" "test" { - securable_name = databricks_catalog.dev.name - securable_type = "catalog" - workspace_id = {env.THIS_WORKSPACE_ID} - binding_type = "BINDING_TYPE_READ_ONLY" - } - `, - }) -} diff --git a/internal/acceptance/external_location_test.go 
b/internal/acceptance/external_location_test.go index d0454746d3..fd8f497750 100644 --- a/internal/acceptance/external_location_test.go +++ b/internal/acceptance/external_location_test.go @@ -21,7 +21,7 @@ func externalLocationTemplateWithOwner(comment string, owner string) string { name = "external-{var.STICKY_RANDOM}" url = "s3://{env.TEST_BUCKET}/some{var.STICKY_RANDOM}" credential_name = databricks_storage_credential.external.id - isolation_mode = "ISOLATED" + isolation_mode = "ISOLATION_MODE_ISOLATED" comment = "%s" owner = "%s" } @@ -37,7 +37,7 @@ func storageCredentialTemplateWithOwner(comment, owner string) string { } comment = "%s" owner = "%s" - isolation_mode = "ISOLATED" + isolation_mode = "ISOLATION_MODE_ISOLATED" force_update = true } `, comment, owner) diff --git a/internal/acceptance/workspace_binding_test.go b/internal/acceptance/workspace_binding_test.go index 8759b8a479..24636da693 100644 --- a/internal/acceptance/workspace_binding_test.go +++ b/internal/acceptance/workspace_binding_test.go @@ -21,7 +21,22 @@ func workspaceBindingTemplateWithWorkspaceId(workspaceId string) string { resource "databricks_catalog" "prod" { name = "prod{var.RANDOM}" isolation_mode = "ISOLATED" - } + } + + resource "databricks_storage_credential" "external" { + name = "cred-{var.RANDOM}" + aws_iam_role { + role_arn = "{env.TEST_METASTORE_DATA_ACCESS_ARN}" + } + isolation_mode = "ISOLATION_MODE_ISOLATED" + } + + resource "databricks_external_location" "some" { + name = "external-{var.RANDOM}" + url = "s3://{env.TEST_BUCKET}/some{var.RANDOM}" + credential_name = databricks_storage_credential.external.id + isolation_mode = "ISOLATION_MODE_ISOLATED" + } resource "databricks_workspace_binding" "dev" { catalog_name = databricks_catalog.dev.name @@ -33,8 +48,20 @@ func workspaceBindingTemplateWithWorkspaceId(workspaceId string) string { securable_type = "catalog" workspace_id = %s binding_type = "BINDING_TYPE_READ_ONLY" - } - `, workspaceId, workspaceId) + } + + resource 
"databricks_workspace_binding" "ext" { + securable_name = databricks_external_location.some.id + securable_type = "external-location" + workspace_id = %s + } + + resource "databricks_workspace_binding" "cred" { + securable_name = databricks_storage_credential.external.id + securable_type = "storage-credential" + workspace_id = %s + } + `, workspaceId, workspaceId, workspaceId, workspaceId) } func TestUcAccWorkspaceBindingToOtherWorkspace(t *testing.T) { @@ -42,9 +69,3 @@ func TestUcAccWorkspaceBindingToOtherWorkspace(t *testing.T) { Template: workspaceBindingTemplateWithWorkspaceId("{env.DUMMY_WORKSPACE_ID}"), }) } - -func TestUcAccWorkspaceBindingToSameWorkspace(t *testing.T) { - unityWorkspaceLevel(t, step{ - Template: workspaceBindingTemplateWithWorkspaceId("{env.THIS_WORKSPACE_ID}"), - }) -} From 411f85cfb59059f9bacbda75edd71a6006970268 Mon Sep 17 00:00:00 2001 From: Alex Ott Date: Thu, 4 Jul 2024 10:33:46 +0200 Subject: [PATCH 22/39] Exporter: use Go SDK structs for `databricks_job` resource (#3727) --- exporter/importables.go | 34 ++++++++++++++-------------------- exporter/util.go | 2 +- 2 files changed, 15 insertions(+), 21 deletions(-) diff --git a/exporter/importables.go b/exporter/importables.go index 032ab855fc..5eed0d0871 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -349,7 +349,7 @@ var resourcesMap map[string]importable = map[string]importable{ return nil }, Import: func(ic *importContext, r *resource) error { - var c compute.ClusterDetails + var c compute.ClusterSpec s := ic.Resources["databricks_cluster"].Schema common.DataToStructPointer(r.Data, s, &c) ic.importCluster(&c) @@ -457,17 +457,11 @@ var resourcesMap map[string]importable = map[string]importable{ MatchType: MatchPrefix, SearchValueTransformFunc: appendEndingSlashToDirName}, }, Import: func(ic *importContext, r *resource) error { - var job jobs.JobSettings + var job jobs.JobSettingsResource s := ic.Resources["databricks_job"].Schema 
common.DataToStructPointer(r.Data, s, &job) - ic.importClusterLegacy(job.NewCluster) - ic.Emit(&resource{ - Resource: "databricks_cluster", - ID: job.ExistingClusterID, - }) ic.emitPermissionsIfNotIgnored(r, fmt.Sprintf("/jobs/%s", r.ID), "job_"+ic.Importables["databricks_job"].Name(ic, r.Data)) - // Support for multitask jobs for _, task := range job.Tasks { if task.NotebookTask != nil { if task.NotebookTask.Source != "GIT" { @@ -484,7 +478,7 @@ var resourcesMap map[string]importable = map[string]importable{ if task.PipelineTask != nil { ic.Emit(&resource{ Resource: "databricks_pipeline", - ID: task.PipelineTask.PipelineID, + ID: task.PipelineTask.PipelineId, }) } if task.SparkPythonTask != nil { @@ -514,25 +508,25 @@ var resourcesMap map[string]importable = map[string]importable{ if task.SqlTask.Query != nil { ic.Emit(&resource{ Resource: "databricks_sql_query", - ID: task.SqlTask.Query.QueryID, + ID: task.SqlTask.Query.QueryId, }) } if task.SqlTask.Dashboard != nil { ic.Emit(&resource{ Resource: "databricks_sql_dashboard", - ID: task.SqlTask.Dashboard.DashboardID, + ID: task.SqlTask.Dashboard.DashboardId, }) } if task.SqlTask.Alert != nil { ic.Emit(&resource{ Resource: "databricks_sql_alert", - ID: task.SqlTask.Alert.AlertID, + ID: task.SqlTask.Alert.AlertId, }) } - if task.SqlTask.WarehouseID != "" { + if task.SqlTask.WarehouseId != "" { ic.Emit(&resource{ Resource: "databricks_sql_endpoint", - ID: task.SqlTask.WarehouseID, + ID: task.SqlTask.WarehouseId, }) } if task.SqlTask.File != nil && task.SqlTask.File.Source == "WORKSPACE" { @@ -567,22 +561,22 @@ var resourcesMap map[string]importable = map[string]importable{ } } } - if task.RunJobTask != nil && task.RunJobTask.JobID != 0 { + if task.RunJobTask != nil && task.RunJobTask.JobId != 0 { ic.Emit(&resource{ Resource: "databricks_job", - ID: strconv.FormatInt(task.RunJobTask.JobID, 10), + ID: strconv.FormatInt(task.RunJobTask.JobId, 10), }) ic.emitFilesFromMap(task.RunJobTask.JobParameters) } - 
ic.importClusterLegacy(task.NewCluster) + ic.importCluster(task.NewCluster) ic.Emit(&resource{ Resource: "databricks_cluster", - ID: task.ExistingClusterID, + ID: task.ExistingClusterId, }) ic.emitLibraries(task.Libraries) } for _, jc := range job.JobClusters { - ic.importClusterLegacy(jc.NewCluster) + ic.importCluster(&jc.NewCluster) } if job.RunAs != nil { if job.RunAs.UserName != "" { @@ -620,7 +614,7 @@ var resourcesMap map[string]importable = map[string]importable{ case "url", "format": return true } - var js jobs.JobSettings + var js jobs.JobSettingsResource common.DataToStructPointer(d, ic.Resources["databricks_job"].Schema, &js) switch pathString { case "email_notifications": diff --git a/exporter/util.go b/exporter/util.go index 86ce146e74..99f60021ee 100644 --- a/exporter/util.go +++ b/exporter/util.go @@ -120,7 +120,7 @@ func (ic *importContext) importClusterLegacy(c *clusters.Cluster) { ic.emitUserOrServicePrincipal(c.SingleUserName) } -func (ic *importContext) importCluster(c *compute.ClusterDetails) { +func (ic *importContext) importCluster(c *compute.ClusterSpec) { if c == nil { return } From e8640654e963d205416c3d9d109f823a8800afee Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 4 Jul 2024 10:52:40 +0200 Subject: [PATCH 23/39] Change TF registry ownership (#3736) --- .terraform-registry | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .terraform-registry diff --git a/.terraform-registry b/.terraform-registry new file mode 100644 index 0000000000..4032bcc614 --- /dev/null +++ b/.terraform-registry @@ -0,0 +1,3 @@ +Request: Change owner to @mgyucht +Registry link: https://registry.terraform.io/namespaces/databricks, https://registry.terraform.io/providers/databricks/databricks/latest/docs +Request by: miles@databricks.com \ No newline at end of file From 4fdaaceaa842d0d25994f64b440f7443c20fd21f Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Thu, 4 Jul 2024 16:56:28 +0200 Subject: [PATCH 24/39] update --- go.mod | 56 +++++---- 
go.sum | 116 +++++++++--------- pluginframework/resource_lakehouse_monitor.go | 14 +-- provider/provider_test.go | 5 +- 4 files changed, 98 insertions(+), 93 deletions(-) diff --git a/go.mod b/go.mod index f30ede2935..814447959e 100644 --- a/go.mod +++ b/go.mod @@ -12,24 +12,26 @@ require ( github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 github.com/stretchr/testify v1.9.0 github.com/zclconf/go-cty v1.14.4 - golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 golang.org/x/mod v0.18.0 ) -require github.com/hashicorp/terraform-plugin-mux v0.15.0 +require github.com/hashicorp/terraform-plugin-mux v0.16.0 + +require golang.org/x/sync v0.7.0 // indirect require ( - cloud.google.com/go/auth v0.4.2 // indirect + cloud.google.com/go/auth v0.6.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton // indirect + cloud.google.com/go/compute/metadata v0.4.0 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/cloudflare/circl v1.3.7 // indirect + github.com/cloudflare/circl v1.3.9 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect @@ -41,16 +43,16 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.6.2 // 
indirect + github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-plugin v1.6.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.4 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/hc-install v0.7.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect - github.com/hashicorp/terraform-plugin-framework v1.7.0 + github.com/hashicorp/terraform-plugin-framework v1.9.0 github.com/hashicorp/terraform-plugin-go v0.23.0 github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect @@ -69,22 +71,22 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.24.0 // indirect - go.opentelemetry.io/otel/metric v1.24.0 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.23.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time 
v0.5.0 // indirect - golang.org/x/tools v0.19.0 // indirect - google.golang.org/api v0.182.0 // indirect + golang.org/x/tools v0.22.0 // indirect + google.golang.org/api v0.187.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect - google.golang.org/grpc v1.64.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 2c3f82ce18..64de8d2103 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,17 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= -cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= +cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38= +cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c= +cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton h1:HKz85FwoXx86kVtTvFke7rgHvq/HoloSUvW5semjFWs= -github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton h1:0RXAi0EJFs81j+MMsqvHNuAUGWzeVfCO9LnHAfoQ8NA= +github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -21,8 +21,8 @@ github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZ github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= -github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cloudflare/circl v1.3.9 h1:QFrlgFYf2Qpi8bSpVPK1HBvWpx16v/1TZivyo7pGuBE= +github.com/cloudflare/circl v1.3.9/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -38,8 +38,8 
@@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -49,8 +49,8 @@ github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgF github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= @@ -95,8 +95,8 @@ github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= -github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -107,19 +107,19 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= -github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin 
v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= +github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= @@ -130,14 +130,14 @@ github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVW github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= 
-github.com/hashicorp/terraform-plugin-framework v1.7.0 h1:wOULbVmfONnJo9iq7/q+iBOBJul5vRovaYJIu2cY/Pw= -github.com/hashicorp/terraform-plugin-framework v1.7.0/go.mod h1:jY9Id+3KbZ17OMpulgnWLSfwxNVYSoYBQFTgsx044CI= +github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= +github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-mux v0.15.0 h1:+/+lDx0WUsIOpkAmdwBIoFU8UP9o2eZASoOnLsWbKME= -github.com/hashicorp/terraform-plugin-mux v0.15.0/go.mod h1:9ezplb1Dyq394zQ+ldB0nvy/qbNAz3mMoHHseMTMaKo= +github.com/hashicorp/terraform-plugin-mux v0.16.0 h1:RCzXHGDYwUwwqfYYWJKBFaS3fQsWn/ZECEiW7p2023I= +github.com/hashicorp/terraform-plugin-mux v0.16.0/go.mod h1:PF79mAsPc8CpusXPfEVa4X8PtkB+ngWoiUClMrNZlYo= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= @@ -216,22 +216,22 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto 
v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= -golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= +golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -247,11 +247,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= 
-golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -273,19 +273,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -295,12 +295,12 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= -golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= -google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= +google.golang.org/api v0.187.0 h1:Mxs7VATVC2v7CY+7Xwm4ndkX71hpElcvx0D1Ji/p1eo= +google.golang.org/api 
v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -308,15 +308,15 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc 
v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -328,8 +328,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pluginframework/resource_lakehouse_monitor.go b/pluginframework/resource_lakehouse_monitor.go index 278a81b8de..a3faa45763 100644 --- a/pluginframework/resource_lakehouse_monitor.go +++ b/pluginframework/resource_lakehouse_monitor.go @@ -17,7 +17,7 @@ const lakehouseMonitorDefaultProvisionTimeout = 15 * time.Minute func WaitForMonitor(w *databricks.WorkspaceClient, ctx context.Context, monitorName string) error { return retry.RetryContext(ctx, lakehouseMonitorDefaultProvisionTimeout, func() *retry.RetryError { - endpoint, 
err := w.LakehouseMonitors.GetByTableName(ctx, monitorName) + endpoint, err := w.QualityMonitors.GetByTableName(ctx, monitorName) if err != nil { return retry.NonRetryableError(err) } @@ -81,7 +81,7 @@ func (r *LakehouseMonitorResource) Create(ctx context.Context, req resource.Crea if resp.Diagnostics.HasError() { return } - endpoint, err := w.LakehouseMonitors.Create(ctx, create) + endpoint, err := w.QualityMonitors.Create(ctx, create) if err != nil { resp.Diagnostics.AddError("Failed to get create monitor", err.Error()) return @@ -100,13 +100,13 @@ func (r *LakehouseMonitorResource) Read(ctx context.Context, req resource.ReadRe resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return } - var getMonitor catalog.GetLakehouseMonitorRequest + var getMonitor catalog.GetQualityMonitorRequest diags := req.State.Get(ctx, &getMonitor) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - endpoint, err := w.LakehouseMonitors.GetByTableName(ctx, getMonitor.TableName) + endpoint, err := w.QualityMonitors.GetByTableName(ctx, getMonitor.TableName) if err != nil { resp.Diagnostics.AddError("Failed to get monitor", err.Error()) return @@ -131,7 +131,7 @@ func (r *LakehouseMonitorResource) Update(ctx context.Context, req resource.Upda if resp.Diagnostics.HasError() { return } - _, err = w.LakehouseMonitors.Update(ctx, updateRequest) + _, err = w.QualityMonitors.Update(ctx, updateRequest) if err != nil { resp.Diagnostics.AddError("Failed to update monitor", err.Error()) return @@ -150,13 +150,13 @@ func (r *LakehouseMonitorResource) Delete(ctx context.Context, req resource.Dele resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return } - var deleteRequest catalog.DeleteLakehouseMonitorRequest + var deleteRequest catalog.DeleteQualityMonitorRequest diags := req.State.Get(ctx, &deleteRequest) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return } - err = w.LakehouseMonitors.DeleteByTableName(ctx, deleteRequest.TableName) + err = w.QualityMonitors.DeleteByTableName(ctx, deleteRequest.TableName) if err != nil { resp.Diagnostics.AddError("Failed to delete monitor", err.Error()) return diff --git a/provider/provider_test.go b/provider/provider_test.go index b67a644b33..31839b1785 100644 --- a/provider/provider_test.go +++ b/provider/provider_test.go @@ -8,10 +8,13 @@ import ( "net/http/httptest" "os" "path/filepath" + "strings" "testing" "time" "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -388,7 +391,7 @@ func configureProviderAndReturnClient(t *testing.T, tt providerFixture) (*common } p := DatabricksProvider() ctx := context.Background() - diags := p.Configure(ctx, terraform.NewResourceConfigRaw(tt.rawConfig())) + diags := p.Configure(ctx, terraform.NewResourceConfigRaw(tt.rawConfigSDKv2())) if len(diags) > 0 { issues := []string{} for _, d := range diags { From 5980fb2a2785221c82f7b22cb6e62cc9d3a237fa Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 9 Jul 2024 18:12:53 +0200 Subject: [PATCH 25/39] wip --- go.mod | 8 ++++---- go.sum | 20 ++++++++++---------- provider/provider_test.go | 28 ---------------------------- provider/test_utils.go | 8 ++++++-- 4 files changed, 20 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index 814447959e..73e0a3e18e 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/zclconf/go-cty v1.14.4 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 - golang.org/x/mod v0.18.0 + golang.org/x/mod v0.19.0 ) require github.com/hashicorp/terraform-plugin-mux v0.16.0 @@ -75,10 +75,10 @@ require ( go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/trace 
v1.28.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/net v0.26.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.22.0 // indirect diff --git a/go.sum b/go.sum index 64de8d2103..190a2b7356 100644 --- a/go.sum +++ b/go.sum @@ -227,8 +227,8 @@ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= @@ -236,8 +236,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -247,8 +247,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -273,12 +273,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/provider/provider_test.go b/provider/provider_test.go index 31839b1785..47638a7e6f 100644 --- a/provider/provider_test.go +++ b/provider/provider_test.go @@ -8,12 +8,10 @@ import ( "net/http/httptest" "os" "path/filepath" - "strings" "testing" "time" "github.com/databricks/terraform-provider-databricks/common" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -385,32 +383,6 @@ func testOAuthFetchesToken(t *testing.T, c *common.DatabricksClient) { } } -func configureProviderAndReturnClient(t *testing.T, tt providerFixture) (*common.DatabricksClient, error) { - for k, v := range tt.env { - t.Setenv(k, v) - } - p := DatabricksProvider() - ctx := context.Background() - diags := p.Configure(ctx, 
terraform.NewResourceConfigRaw(tt.rawConfigSDKv2())) - if len(diags) > 0 { - issues := []string{} - for _, d := range diags { - issues = append(issues, d.Summary) - } - return nil, fmt.Errorf(strings.Join(issues, ", ")) - } - client := p.Meta().(*common.DatabricksClient) - r, err := http.NewRequest("GET", "", nil) - if err != nil { - return nil, err - } - err = client.Config.Authenticate(r) - if err != nil { - return nil, err - } - return client, nil -} - type parseUserAgentTestCase struct { name string env string diff --git a/provider/test_utils.go b/provider/test_utils.go index 547f6d4f52..d6e97ef3db 100644 --- a/provider/test_utils.go +++ b/provider/test_utils.go @@ -129,8 +129,7 @@ func configureProviderAndReturnClient_SDKv2(t *testing.T, tt providerFixture) (* } p := DatabricksProvider() ctx := context.Background() - testConfig := terraform.NewResourceConfigRaw(tt.rawConfigSDKv2()) - diags := p.Configure(ctx, testConfig) + diags := p.Configure(ctx, terraform.NewResourceConfigRaw(tt.rawConfigSDKv2())) if len(diags) > 0 { issues := []string{} for _, d := range diags { @@ -157,6 +156,11 @@ func configureProviderAndReturnClient_PluginFramework(t *testing.T, tt providerF p := GetDatabricksProviderPluginFramework() ctx := context.Background() rawConfig := tt.rawConfigPluginFramework() + // rawConfig := ` + // provider "databricks" { + // host = y + // } + // ` var providerSchemaResponse provider.SchemaResponse p.Schema(ctx, provider.SchemaRequest{}, &providerSchemaResponse) configRequest := provider.ConfigureRequest{ From 250da8bef000f16cdf8750112f89bb1cc58931f9 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Thu, 18 Jul 2024 16:02:51 +0200 Subject: [PATCH 26/39] - --- go.mod | 2 -- go.sum | 4 ++-- main.go | 14 +++++++++++--- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 64929f04e9..07feed51c5 100644 --- a/go.mod +++ b/go.mod @@ -19,8 +19,6 @@ require ( golang.org/x/mod v0.19.0 ) -require 
github.com/hashicorp/terraform-plugin-mux v0.16.0 - require golang.org/x/sync v0.7.0 // indirect require ( diff --git a/go.sum b/go.sum index bab8a685f3..91d8fd82ee 100644 --- a/go.sum +++ b/go.sum @@ -230,8 +230,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= diff --git a/main.go b/main.go index b64453db94..57fb72d6ad 100644 --- a/main.go +++ b/main.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/exporter" "github.com/databricks/terraform-provider-databricks/provider" + "github.com/hashicorp/terraform-plugin-framework/providerserver" "github.com/hashicorp/terraform-plugin-go/tfprotov6" "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server" "github.com/hashicorp/terraform-plugin-mux/tf5to6server" @@ -49,10 +50,17 @@ func main() { log.Fatal(err) } + pluginFrameworkProvider := provider.GetDatabricksProviderPluginFramework() + + providers := 
[]func() tfprotov6.ProviderServer{ + func() tfprotov6.ProviderServer { + return upgradedSdkPluginProvider + }, + providerserver.NewProtocol6(pluginFrameworkProvider), + } + ctx := context.Background() - muxServer, err := tf6muxserver.NewMuxServer(ctx, func() tfprotov6.ProviderServer { - return upgradedSdkPluginProvider - }) + muxServer, err := tf6muxserver.NewMuxServer(ctx, providers...) if err != nil { log.Fatal(err) From a83c4718f4e5e34c5e0c42adb997fda4e71407bc Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Fri, 19 Jul 2024 13:55:27 +0200 Subject: [PATCH 27/39] resolve bug --- provider/provider_plugin_framework.go | 65 ++++++++++++++++++++++----- 1 file changed, 54 insertions(+), 11 deletions(-) diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 7023ed769a..939acb991c 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -96,22 +96,65 @@ func providerSchemaPluginFramework() schema.Schema { } } +func setConfigAttributePluginFramework(a *config.ConfigAttribute, cfg *config.Config, i interface{}) error { + rv := reflect.ValueOf(cfg) + field := rv.Elem().Field(0) + switch a.Kind { + case reflect.String: + field.SetString(i.(string)) + case reflect.Bool: + field.SetBool(i.(bool)) + case reflect.Int: + field.SetInt(int64(i.(int))) + default: + // must extensively test with providerFixture to avoid this one + return fmt.Errorf("cannot set %s of unknown type %s", a.Name, reflectKind(a.Kind)) + } + return nil +} + func configureDatabricksClient_PluginFramework(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) any { cfg := &config.Config{} attrsUsed := []string{} authsUsed := map[string]bool{} for _, attr := range config.ConfigAttributes { - var attrValue types.String - // tanmaytodo, failing here TerraformValueAtTerraformPath - diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) - resp.Diagnostics.Append(diags...) 
- if resp.Diagnostics.HasError() { - return nil - } - err := attr.Set(cfg, attrValue) - if err != nil { - resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) - return nil + switch attr.Kind { + case reflect.Bool: + var attrValue types.Bool + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo, failing here TerraformValueAtTerraformPath + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return nil + } + err := attr.Set(cfg, attrValue) // tanmaytodo, failing here + if err != nil { + resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) + return nil + } + case reflect.Int: + var attrValue types.Int64 + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo, failing here TerraformValueAtTerraformPath + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return nil + } + // err := attr.Set(cfg, attrValue) // tanmaytodo, failing here + // if err != nil { + // resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) + // return nil + // } + case reflect.String: + var attrValue types.String + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo, failing here TerraformValueAtTerraformPath + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return nil + } + // err := attr.Set(cfg, attrValue) // tanmaytodo, failing here + // if err != nil { + // resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) + // return nil + // } } if attr.Kind == reflect.String { attrsUsed = append(attrsUsed, attr.Name) From 0aa0f7baca22e5b719c9481cf24565f41a072bf6 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Mon, 22 Jul 2024 02:43:21 +0200 Subject: [PATCH 28/39] fix bug --- provider/provider_plugin_framework.go | 45 +++++++++------------------ provider/provider_test.go | 1 + provider/test_utils.go | 5 --- 3 files changed, 15 insertions(+), 36 deletions(-) diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 939acb991c..77c0373bdb 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -96,23 +96,6 @@ func providerSchemaPluginFramework() schema.Schema { } } -func setConfigAttributePluginFramework(a *config.ConfigAttribute, cfg *config.Config, i interface{}) error { - rv := reflect.ValueOf(cfg) - field := rv.Elem().Field(0) - switch a.Kind { - case reflect.String: - field.SetString(i.(string)) - case reflect.Bool: - field.SetBool(i.(bool)) - case reflect.Int: - field.SetInt(int64(i.(int))) - default: - // must extensively test with providerFixture to avoid this one - return fmt.Errorf("cannot set %s of unknown type %s", a.Name, reflectKind(a.Kind)) - } - return nil -} - func configureDatabricksClient_PluginFramework(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) any { cfg := &config.Config{} attrsUsed := []string{} @@ -121,40 +104,40 @@ func configureDatabricksClient_PluginFramework(ctx context.Context, req provider switch attr.Kind { case reflect.Bool: var attrValue types.Bool - diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo, failing here TerraformValueAtTerraformPath + diags := 
req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return nil } - err := attr.Set(cfg, attrValue) // tanmaytodo, failing here + err := attr.Set(cfg, attrValue.ValueBool()) if err != nil { resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) return nil } case reflect.Int: var attrValue types.Int64 - diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo, failing here TerraformValueAtTerraformPath + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return nil } - // err := attr.Set(cfg, attrValue) // tanmaytodo, failing here - // if err != nil { - // resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) - // return nil - // } + err := attr.Set(cfg, int(attrValue.ValueInt64())) + if err != nil { + resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) + return nil + } case reflect.String: var attrValue types.String - diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo, failing here TerraformValueAtTerraformPath + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo: not getting the value from config resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return nil } - // err := attr.Set(cfg, attrValue) // tanmaytodo, failing here - // if err != nil { - // resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) - // return nil - // } + err := attr.Set(cfg, attrValue.ValueString()) + if err != nil { + resp.Diagnostics.Append(diag.NewErrorDiagnostic("Failed to set attribute", err.Error())) + return nil + } } if attr.Kind == reflect.String { attrsUsed = append(attrsUsed, attr.Name) diff --git a/provider/provider_test.go b/provider/provider_test.go index 47638a7e6f..d59174636f 100644 --- a/provider/provider_test.go +++ b/provider/provider_test.go @@ -92,6 +92,7 @@ func TestConfig_BasicAuth(t *testing.T) { }.apply(t) } +// tanmaytodo: fix this func TestConfig_AttributePrecedence(t *testing.T) { providerFixture{ host: "y", diff --git a/provider/test_utils.go b/provider/test_utils.go index d6e97ef3db..de7aaadd9e 100644 --- a/provider/test_utils.go +++ b/provider/test_utils.go @@ -156,11 +156,6 @@ func configureProviderAndReturnClient_PluginFramework(t *testing.T, tt providerF p := GetDatabricksProviderPluginFramework() ctx := context.Background() rawConfig := tt.rawConfigPluginFramework() - // rawConfig := ` - // provider "databricks" { - // host = y - // } - // ` var providerSchemaResponse provider.SchemaResponse p.Schema(ctx, provider.SchemaRequest{}, &providerSchemaResponse) configRequest := provider.ConfigureRequest{ From 38c264b3d5c4fe419ff6934152b6bc1b85548ccc Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Mon, 22 Jul 2024 14:26:26 +0200 Subject: [PATCH 29/39] - --- common/reflect_resource_plugin_framework.go | 10 +++++--- .../reflect_resource_plugin_framework_test.go | 2 +- pluginframework/resource_lakehouse_monitor.go | 23 ++----------------- 3 files changed, 10 insertions(+), 25 deletions(-) diff --git a/common/reflect_resource_plugin_framework.go b/common/reflect_resource_plugin_framework.go index a2f92569fb..acaadb547f 100644 --- 
a/common/reflect_resource_plugin_framework.go +++ b/common/reflect_resource_plugin_framework.go @@ -5,7 +5,7 @@ import ( "fmt" "reflect" - "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/types" ) @@ -384,8 +384,12 @@ func pluginFrameworkTypeToSchema(v reflect.Value) map[string]schema.Attribute { return scm } -func pluginFrameworkStructToSchema(v any) schema.Schema { +func PluginFrameworkStructToSchema(v any) schema.Schema { return schema.Schema{ - Attributes: pluginFrameworkTypeToSchema(reflect.ValueOf(v)), + Attributes: PluginFrameworkStructToSchemaMap(v), } } + +func PluginFrameworkStructToSchemaMap(v any) map[string]schema.Attribute { + return pluginFrameworkTypeToSchema(reflect.ValueOf(v)) +} diff --git a/common/reflect_resource_plugin_framework_test.go b/common/reflect_resource_plugin_framework_test.go index 7b04c3f518..31d1ffa0c6 100644 --- a/common/reflect_resource_plugin_framework_test.go +++ b/common/reflect_resource_plugin_framework_test.go @@ -129,7 +129,7 @@ var tfSdkStruct = DummyTfSdk{ func TestGetAndSetPluginFramework(t *testing.T) { // Also test StructToSchema. 
- scm := pluginFrameworkStructToSchema(DummyTfSdk{}) + scm := PluginFrameworkStructToSchema(DummyTfSdk{}) state := tfsdk.State{ Schema: scm, } diff --git a/pluginframework/resource_lakehouse_monitor.go b/pluginframework/resource_lakehouse_monitor.go index a3faa45763..f96e391634 100644 --- a/pluginframework/resource_lakehouse_monitor.go +++ b/pluginframework/resource_lakehouse_monitor.go @@ -44,27 +44,8 @@ func (r *LakehouseMonitorResource) Metadata(ctx context.Context, req resource.Me func (r *LakehouseMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ - Description: "Schema for lakehouse monitor", - // We would need similar method to common.StructToSchema but since this is PoC, we are not implementing it here - // Also this isn't complete, using incomplete schema for PoCs just to check if it's working - Attributes: map[string]schema.Attribute{ - "assets_dir": schema.StringAttribute{ - Description: "The directory to store monitoring assets (e.g. dashboard, metric tables)", - Optional: true, - }, - "baseline_table_name": schema.StringAttribute{ - Description: "Name of the baseline table from which drift metrics are computed from Columns in the monitored table should also be present in the baseline table.", - Optional: true, - }, - "dashboard_id": schema.StringAttribute{ - Description: "Id of dashboard that visualizes the computed metrics. This can be empty if the monitor is in PENDING state.", - Optional: true, - }, - "table_name": schema.StringAttribute{ - Description: "The full name of the table to monitor. Format:__catalog_name__.__schema_name__.__table_name__.", - Required: true, - }, - }, + Description: "Terraform schema for Databricks Lakehouse Monitor. 
MonitorInfo struct is used to create the schema", + Attributes: common.PluginFrameworkStructToSchemaMap(catalog.MonitorInfo{}), } } From 49349bf2981b183b4dfc6aab4aceb46267f01b24 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Mon, 22 Jul 2024 17:13:45 +0200 Subject: [PATCH 30/39] fix --- provider/provider_plugin_framework.go | 5 +++-- provider/provider_test.go | 2 +- provider/test_utils.go | 16 +++++++++++++--- 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 77c0373bdb..ff0275674d 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -92,7 +92,8 @@ func providerSchemaPluginFramework() schema.Schema { } } return schema.Schema{ - Attributes: ps, + Description: "Databricks provider schema for plugin framework", + Attributes: ps, } } @@ -128,7 +129,7 @@ func configureDatabricksClient_PluginFramework(ctx context.Context, req provider } case reflect.String: var attrValue types.String - diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) // tanmaytodo: not getting the value from config + diags := req.Config.GetAttribute(ctx, path.Root(attr.Name), &attrValue) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return nil diff --git a/provider/provider_test.go b/provider/provider_test.go index d59174636f..c5f85325a4 100644 --- a/provider/provider_test.go +++ b/provider/provider_test.go @@ -92,7 +92,6 @@ func TestConfig_BasicAuth(t *testing.T) { }.apply(t) } -// tanmaytodo: fix this func TestConfig_AttributePrecedence(t *testing.T) { providerFixture{ host: "y", @@ -139,6 +138,7 @@ func TestConfig_AzurePAT(t *testing.T) { }.apply(t) } +// tanmaytodo: fix this func TestConfig_ConflictingEnvs(t *testing.T) { providerFixture{ env: map[string]string{ diff --git a/provider/test_utils.go b/provider/test_utils.go index de7aaadd9e..942e6179df 100644 --- a/provider/test_utils.go +++ b/provider/test_utils.go @@ -84,11 +84,21 @@ func (tt providerFixture) rawConfigSDKv2() map[string]any { func (tt providerFixture) rawConfigPluginFramework() tftypes.Value { rawConfig := tt.rawConfig() - pluginFrameworkMap := map[string]tftypes.Value{} + + rawConfigTypeMap := map[string]tftypes.Type{} + for k, _ := range rawConfig { + rawConfigTypeMap[k] = tftypes.String + } + rawConfigType := tftypes.Object{ + AttributeTypes: rawConfigTypeMap, + } + + rawConfigValueMap := map[string]tftypes.Value{} for k, v := range rawConfig { - pluginFrameworkMap[k] = tftypes.NewValue(tftypes.String, v) + rawConfigValueMap[k] = tftypes.NewValue(tftypes.String, v) } - return tftypes.NewValue(tftypes.Map{ElementType: tftypes.String}, pluginFrameworkMap) + rawConfigValue := tftypes.NewValue(rawConfigType, rawConfigValueMap) + return rawConfigValue } func (tc providerFixture) applyWithSDKv2(t *testing.T) *common.DatabricksClient { From b443076993b622c225ab90e296590c908661eac0 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Mon, 22 Jul 2024 17:21:34 +0200 Subject: [PATCH 31/39] All passing Unit Tests --- provider/provider_plugin_framework.go | 2 +- provider/provider_test.go | 1 - provider/test_utils.go | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git 
a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index ff0275674d..621fc7f9e6 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -165,7 +165,7 @@ func configureDatabricksClient_PluginFramework(ctx context.Context, req provider } client, err := client.New(cfg) if err != nil { - resp.Diagnostics.Append(diag.NewErrorDiagnostic("Error while generating client", err.Error())) + resp.Diagnostics.Append(diag.NewErrorDiagnostic(err.Error(), "")) return nil } pc := &common.DatabricksClient{ diff --git a/provider/provider_test.go b/provider/provider_test.go index c5f85325a4..47638a7e6f 100644 --- a/provider/provider_test.go +++ b/provider/provider_test.go @@ -138,7 +138,6 @@ func TestConfig_AzurePAT(t *testing.T) { }.apply(t) } -// tanmaytodo: fix this func TestConfig_ConflictingEnvs(t *testing.T) { providerFixture{ env: map[string]string{ diff --git a/provider/test_utils.go b/provider/test_utils.go index 942e6179df..7f7b6b22a4 100644 --- a/provider/test_utils.go +++ b/provider/test_utils.go @@ -86,7 +86,7 @@ func (tt providerFixture) rawConfigPluginFramework() tftypes.Value { rawConfig := tt.rawConfig() rawConfigTypeMap := map[string]tftypes.Type{} - for k, _ := range rawConfig { + for k := range rawConfig { rawConfigTypeMap[k] = tftypes.String } rawConfigType := tftypes.Object{ From 618124582e8f1b29ee1520a981c931afa79618be Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 23 Jul 2024 10:54:09 +0200 Subject: [PATCH 32/39] - --- common/reflect_resource_plugin_framework.go | 18 ++++---- .../reflect_resource_plugin_framework_test.go | 2 +- ...monitor.go => resource_quality_monitor.go} | 25 +++++----- .../volumes/volumes.tf} | 38 ++------------- .../quality-monitor/quality_monitor.tf | 46 +++++++++++++++++++ provider/provider_plugin_framework.go | 2 +- 6 files changed, 73 insertions(+), 58 deletions(-) rename pluginframework/{resource_lakehouse_monitor.go => resource_quality_monitor.go} (76%) 
rename pluginframework/test/{plugin_framework_poc_test.tf => data/volumes/volumes.tf} (52%) create mode 100644 pluginframework/test/resource/quality-monitor/quality_monitor.tf diff --git a/common/reflect_resource_plugin_framework.go b/common/reflect_resource_plugin_framework.go index acaadb547f..d0a5839e06 100644 --- a/common/reflect_resource_plugin_framework.go +++ b/common/reflect_resource_plugin_framework.go @@ -288,7 +288,7 @@ func checkTheStringInForceSendFields(fieldName string, forceSendFields []string) return false } -func pluginFrameworkTypeToSchema(v reflect.Value) map[string]schema.Attribute { +func pluginFrameworkResourceTypeToSchema(v reflect.Value) map[string]schema.Attribute { scm := map[string]schema.Attribute{} rk := v.Kind() if rk == reflect.Ptr { @@ -310,7 +310,7 @@ func pluginFrameworkTypeToSchema(v reflect.Value) map[string]schema.Attribute { if kind == reflect.Ptr { elem := typeField.Type.Elem() sv := reflect.New(elem).Elem() - nestedScm := pluginFrameworkTypeToSchema(sv) + nestedScm := pluginFrameworkResourceTypeToSchema(sv) scm[fieldName] = schema.SingleNestedAttribute{Attributes: nestedScm, Optional: true} } else if kind == reflect.Slice { elem := typeField.Type.Elem() @@ -331,7 +331,7 @@ func pluginFrameworkTypeToSchema(v reflect.Value) map[string]schema.Attribute { scm[fieldName] = schema.ListAttribute{ElementType: types.StringType, Optional: true} default: // Nested struct - nestedScm := pluginFrameworkTypeToSchema(reflect.New(elem).Elem()) + nestedScm := pluginFrameworkResourceTypeToSchema(reflect.New(elem).Elem()) scm[fieldName] = schema.ListNestedAttribute{NestedObject: schema.NestedAttributeObject{Attributes: nestedScm}, Optional: true} } } else if kind == reflect.Map { @@ -353,7 +353,7 @@ func pluginFrameworkTypeToSchema(v reflect.Value) map[string]schema.Attribute { scm[fieldName] = schema.MapAttribute{ElementType: types.StringType, Optional: true} default: // Nested struct - nestedScm := 
pluginFrameworkTypeToSchema(reflect.New(elem).Elem()) + nestedScm := pluginFrameworkResourceTypeToSchema(reflect.New(elem).Elem()) scm[fieldName] = schema.MapNestedAttribute{NestedObject: schema.NestedAttributeObject{Attributes: nestedScm}, Optional: true} } } else if kind == reflect.Struct { @@ -374,7 +374,7 @@ func pluginFrameworkTypeToSchema(v reflect.Value) map[string]schema.Attribute { // If it is a real stuct instead of a tfsdk type, recursively resolve it. elem := typeField.Type sv := reflect.New(elem) - nestedScm := pluginFrameworkTypeToSchema(sv) + nestedScm := pluginFrameworkResourceTypeToSchema(sv) scm[fieldName] = schema.SingleNestedAttribute{Attributes: nestedScm, Optional: true} } } else { @@ -384,12 +384,12 @@ func pluginFrameworkTypeToSchema(v reflect.Value) map[string]schema.Attribute { return scm } -func PluginFrameworkStructToSchema(v any) schema.Schema { +func PluginFrameworkResourceStructToSchema(v any) schema.Schema { return schema.Schema{ - Attributes: PluginFrameworkStructToSchemaMap(v), + Attributes: PluginFrameworkResourceStructToSchemaMap(v), } } -func PluginFrameworkStructToSchemaMap(v any) map[string]schema.Attribute { - return pluginFrameworkTypeToSchema(reflect.ValueOf(v)) +func PluginFrameworkResourceStructToSchemaMap(v any) map[string]schema.Attribute { + return pluginFrameworkResourceTypeToSchema(reflect.ValueOf(v)) } diff --git a/common/reflect_resource_plugin_framework_test.go b/common/reflect_resource_plugin_framework_test.go index 31d1ffa0c6..010a0ed1cc 100644 --- a/common/reflect_resource_plugin_framework_test.go +++ b/common/reflect_resource_plugin_framework_test.go @@ -129,7 +129,7 @@ var tfSdkStruct = DummyTfSdk{ func TestGetAndSetPluginFramework(t *testing.T) { // Also test StructToSchema. 
- scm := PluginFrameworkStructToSchema(DummyTfSdk{}) + scm := PluginFrameworkResourceStructToSchema(DummyTfSdk{}) state := tfsdk.State{ Schema: scm, } diff --git a/pluginframework/resource_lakehouse_monitor.go b/pluginframework/resource_quality_monitor.go similarity index 76% rename from pluginframework/resource_lakehouse_monitor.go rename to pluginframework/resource_quality_monitor.go index f96e391634..a6b6d7d702 100644 --- a/pluginframework/resource_lakehouse_monitor.go +++ b/pluginframework/resource_quality_monitor.go @@ -13,10 +13,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" ) -const lakehouseMonitorDefaultProvisionTimeout = 15 * time.Minute +const qualityMonitorDefaultProvisionTimeout = 15 * time.Minute func WaitForMonitor(w *databricks.WorkspaceClient, ctx context.Context, monitorName string) error { - return retry.RetryContext(ctx, lakehouseMonitorDefaultProvisionTimeout, func() *retry.RetryError { + return retry.RetryContext(ctx, qualityMonitorDefaultProvisionTimeout, func() *retry.RetryError { endpoint, err := w.QualityMonitors.GetByTableName(ctx, monitorName) if err != nil { return retry.NonRetryableError(err) @@ -32,24 +32,25 @@ func WaitForMonitor(w *databricks.WorkspaceClient, ctx context.Context, monitorN }) } -func ResourceLakehouseMonitor() resource.Resource { - return &LakehouseMonitorResource{} +func ResourceQualityMonitor() resource.Resource { + return &QualityMonitorResource{} } -type LakehouseMonitorResource struct{} +type QualityMonitorResource struct{} -func (r *LakehouseMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { +func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { resp.TypeName = req.ProviderTypeName + "_lakehouse_monitor_plugin_framework" } -func (r *LakehouseMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { +func 
(r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Lakehouse Monitor. MonitorInfo struct is used to create the schema", - Attributes: common.PluginFrameworkStructToSchemaMap(catalog.MonitorInfo{}), + // Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog.MonitorInfo{}), + Attributes: map[string]schema.Attribute{}, } } -func (r *LakehouseMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { +func (r *QualityMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { c := common.DatabricksClient{} w, err := c.WorkspaceClient() if err != nil { @@ -74,7 +75,7 @@ func (r *LakehouseMonitorResource) Create(ctx context.Context, req resource.Crea } } -func (r *LakehouseMonitorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { +func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { c := common.DatabricksClient{} w, err := c.WorkspaceClient() if err != nil { @@ -99,7 +100,7 @@ func (r *LakehouseMonitorResource) Read(ctx context.Context, req resource.ReadRe } } -func (r *LakehouseMonitorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { +func (r *QualityMonitorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { c := common.DatabricksClient{} w, err := c.WorkspaceClient() if err != nil { @@ -124,7 +125,7 @@ func (r *LakehouseMonitorResource) Update(ctx context.Context, req resource.Upda } } -func (r *LakehouseMonitorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { +func (r *QualityMonitorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { 
c := common.DatabricksClient{} w, err := c.WorkspaceClient() if err != nil { diff --git a/pluginframework/test/plugin_framework_poc_test.tf b/pluginframework/test/data/volumes/volumes.tf similarity index 52% rename from pluginframework/test/plugin_framework_poc_test.tf rename to pluginframework/test/data/volumes/volumes.tf index af12a8d8cf..145af6ae3c 100644 --- a/pluginframework/test/plugin_framework_poc_test.tf +++ b/pluginframework/test/data/volumes/volumes.tf @@ -1,20 +1,3 @@ -# TESTING SOP -# ----------- -# Please make sure development overrides are in effect before running this manually -# https://github.com/databricks/terraform-provider-databricks/blob/main/CONTRIBUTING.md#developing-provider -# 1. touch ~/.terraformrc -# 2. Add the following to the file (update to your user.name in the path): -# provider_installation { -# dev_overrides { -# "databricks/databricks" = "/Users//terraform-provider-databricks" -# } -# direct {} -# } -# 3. run $ make in terraform-provider-databricks root directory to build the binary -# 4. cd terraform-provider-databricks/pluginframework/test -# 5. terraform init -upgrade -# 6. 
terraform apply - terraform { required_providers { databricks = { @@ -44,23 +27,6 @@ resource "databricks_schema" "testSchema" { } } -resource "databricks_sql_table" "testTable" { - catalog_name = databricks_catalog.testCatalog.name - schema_name = databricks_schema.testSchema.name - name = "testTable" - table_type = "MANAGED" - data_source_format = "DELTA" - - column { - name = "timestamp" - type = "int" - } -} - -resource "databricks_lakehouse_monitor_plugin_framework" "testMonitor" { - table_name = "${databricks_catalog.testCatalog.name}.${databricks_schema.testSchema.name}.${databricks_sql_table.testTable.name}" -} - resource "databricks_volume" "testVolume1" { name = "testVolume1" catalog_name = databricks_catalog.testCatalog.name @@ -84,4 +50,6 @@ data "databricks_volumes_plugin_framework" "testVolumes" { output "all_volumes" { value = data.databricks_volumes_plugin_framework.testVolumes -} \ No newline at end of file +} + + diff --git a/pluginframework/test/resource/quality-monitor/quality_monitor.tf b/pluginframework/test/resource/quality-monitor/quality_monitor.tf new file mode 100644 index 0000000000..2eb51923a7 --- /dev/null +++ b/pluginframework/test/resource/quality-monitor/quality_monitor.tf @@ -0,0 +1,46 @@ +terraform { + required_providers { + databricks = { + source = "databricks/databricks" + } + } +} + +provider "databricks" { + profile = "aws-prod-ucws" +} + +resource "databricks_catalog" "testCatalog" { + name = "testCatalog" + comment = "Plugin Framework PoC" + properties = { + purpose = "testing" + } +} + +resource "databricks_schema" "testSchema" { + catalog_name = databricks_catalog.testCatalog.name + name = "testSchema" + comment = "Plugin Framework PoC" + properties = { + purpose = "testing" + } +} + +resource "databricks_sql_table" "testTable" { + catalog_name = databricks_catalog.testCatalog.name + schema_name = databricks_schema.testSchema.name + name = "testTable" + table_type = "MANAGED" + data_source_format = "DELTA" + + column { + 
name = "timestamp" + type = "int" + } +} + +resource "databricks_lakehouse_monitor_plugin_framework" "testMonitor" { + table_name = "${databricks_catalog.testCatalog.name}.${databricks_schema.testSchema.name}.${databricks_sql_table.testTable.name}" +} + diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 621fc7f9e6..286537ac6f 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -45,7 +45,7 @@ var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ - pluginframework.ResourceLakehouseMonitor, + pluginframework.ResourceQualityMonitor, } } From 60aacbc388d15dd97d8333078b7d3d08edd022c3 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 23 Jul 2024 15:46:38 +0200 Subject: [PATCH 33/39] - --- pluginframework/resource_quality_monitor.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pluginframework/resource_quality_monitor.go b/pluginframework/resource_quality_monitor.go index a6b6d7d702..fb57acb087 100644 --- a/pluginframework/resource_quality_monitor.go +++ b/pluginframework/resource_quality_monitor.go @@ -45,8 +45,7 @@ func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.Meta func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Lakehouse Monitor. 
MonitorInfo struct is used to create the schema", - // Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog.MonitorInfo{}), - Attributes: map[string]schema.Attribute{}, + Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog.MonitorInfo{}), } } From 5b8a9b013c75deed589bccf425be175e19ab9b27 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Tue, 23 Jul 2024 16:26:29 +0200 Subject: [PATCH 34/39] debug --- pluginframework/resource_quality_monitor.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pluginframework/resource_quality_monitor.go b/pluginframework/resource_quality_monitor.go index fb57acb087..da4be24053 100644 --- a/pluginframework/resource_quality_monitor.go +++ b/pluginframework/resource_quality_monitor.go @@ -45,7 +45,8 @@ func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.Meta func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Lakehouse Monitor. 
MonitorInfo struct is used to create the schema", - Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog.MonitorInfo{}), + // Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog_tf.MonitorInfo{}), + Attributes: map[string]schema.Attribute{}, } } From 9e915b456d040f666ab1cdfa92ad673d7682a2d0 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Wed, 24 Jul 2024 14:53:17 +0200 Subject: [PATCH 35/39] - --- pluginframework/resource_quality_monitor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pluginframework/resource_quality_monitor.go b/pluginframework/resource_quality_monitor.go index da4be24053..136c5fb396 100644 --- a/pluginframework/resource_quality_monitor.go +++ b/pluginframework/resource_quality_monitor.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/common" + "github.com/databricks/terraform-provider-databricks/service/catalog_tf" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -45,8 +46,7 @@ func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.Meta func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Lakehouse Monitor. 
MonitorInfo struct is used to create the schema", - // Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog_tf.MonitorInfo{}), - Attributes: map[string]schema.Attribute{}, + Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog_tf.MonitorInfo{}), } } From b165dcf570544f6d8a6870ca1780dac5820b2a5e Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Fri, 26 Jul 2024 17:27:30 +0200 Subject: [PATCH 36/39] apply working --- pluginframework/data_volumes.go | 54 +++++++++++++------ pluginframework/resource_quality_monitor.go | 41 +++++++++----- pluginframework/test/data/volumes/volumes.tf | 34 ++++++------ .../quality-monitor/quality_monitor.tf | 2 +- provider/provider_plugin_framework.go | 9 ++-- 5 files changed, 89 insertions(+), 51 deletions(-) diff --git a/pluginframework/data_volumes.go b/pluginframework/data_volumes.go index 460c2e8d9d..e3ba7a82c9 100644 --- a/pluginframework/data_volumes.go +++ b/pluginframework/data_volumes.go @@ -2,6 +2,7 @@ package pluginframework import ( "context" + "fmt" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/common" @@ -10,24 +11,31 @@ import ( "github.com/hashicorp/terraform-plugin-framework/types" ) -func DataSourceVolumes() datasource.DataSource { - return &VolumesDataSource{} +func DataSourceVolumes() func() datasource.DataSource { + return func() datasource.DataSource { + return &VolumesDataSource{} + } } -type VolumesDataSource struct{} +var _ datasource.DataSource = &VolumesDataSource{} + +type VolumesDataSource struct { + Client *common.DatabricksClient +} type VolumesList struct { - CatalogName string `json:"catalog_name"` - SchemaName string `json:"schema_name"` - Ids []string `json:"ids,omitempty"` + CatalogName types.String `tfsdk:"catalog_name"` + SchemaName types.String `tfsdk:"schema_name"` + Ids []types.String `tfsdk:"ids" tf:"optional"` } func (d *VolumesDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, 
resp *datasource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_volumes_plugin_framework" + resp.TypeName = "databricks_volumes_pluginframework" } func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { resp.Schema = schema.Schema{ + // TODO: Use StructToSchemaMap to generate the schema once it supports schema for data sources Attributes: map[string]schema.Attribute{ "catalog_name": schema.StringAttribute{ Required: true, @@ -43,30 +51,44 @@ func (d *VolumesDataSource) Schema(ctx context.Context, req datasource.SchemaReq } } +func (d *VolumesDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*common.DatabricksClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *common.DatabricksClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.Client = client +} + func (d *VolumesDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { - c := common.DatabricksClient{} - w, err := c.WorkspaceClient() + client := d.Client + w, err := client.WorkspaceClient() if err != nil { resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return } - var volumeInfo catalog.VolumeInfo - diags := req.Config.Get(ctx, &volumeInfo) + var volumesList VolumesList + diags := req.Config.Get(ctx, &volumesList) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return } volumes, err := w.Volumes.ListAll(ctx, catalog.ListVolumesRequest{ - CatalogName: volumeInfo.CatalogName, - SchemaName: volumeInfo.SchemaName, + CatalogName: volumesList.CatalogName.ValueString(), + SchemaName: volumesList.SchemaName.ValueString(), }) if err != nil { resp.Diagnostics.AddError("Failed to get volumes for the catalog and schema", err.Error()) return } - volumeList := VolumesList{} for _, v := range volumes { - volumeList.Ids = append(volumeList.Ids, v.FullName) + volumesList.Ids = append(volumesList.Ids, types.StringValue(v.FullName)) } - resp.State.Set(ctx, volumeList) + resp.State.Set(ctx, volumesList) } diff --git a/pluginframework/resource_quality_monitor.go b/pluginframework/resource_quality_monitor.go index 136c5fb396..6ef4398b3f 100644 --- a/pluginframework/resource_quality_monitor.go +++ b/pluginframework/resource_quality_monitor.go @@ -33,14 +33,20 @@ func WaitForMonitor(w *databricks.WorkspaceClient, ctx context.Context, monitorN }) } -func ResourceQualityMonitor() resource.Resource { - return &QualityMonitorResource{} +var _ resource.Resource = &QualityMonitorResource{} + +func ResourceQualityMonitor() func() resource.Resource { + return func() resource.Resource { + return &QualityMonitorResource{} + } } -type QualityMonitorResource struct{} +type QualityMonitorResource struct { + Client *common.DatabricksClient +} func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_lakehouse_monitor_plugin_framework" + resp.TypeName = "databricks_lakehouse_monitor_pluginframework" } func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { @@ -50,9 +56,23 @@ func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.Schema } } +func (d *QualityMonitorResource) Configure(ctx context.Context, req resource.ConfigureRequest, 
resp *resource.ConfigureResponse) { + if req.ProviderData == nil { + return + } + client, ok := req.ProviderData.(*common.DatabricksClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *common.DatabricksClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + d.Client = client +} + func (r *QualityMonitorResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - c := common.DatabricksClient{} - w, err := c.WorkspaceClient() + w, err := r.Client.WorkspaceClient() if err != nil { resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return @@ -76,8 +96,7 @@ func (r *QualityMonitorResource) Create(ctx context.Context, req resource.Create } func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - c := common.DatabricksClient{} - w, err := c.WorkspaceClient() + w, err := r.Client.WorkspaceClient() if err != nil { resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return @@ -101,8 +120,7 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ } func (r *QualityMonitorResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - c := common.DatabricksClient{} - w, err := c.WorkspaceClient() + w, err := r.Client.WorkspaceClient() if err != nil { resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return @@ -126,8 +144,7 @@ func (r *QualityMonitorResource) Update(ctx context.Context, req resource.Update } func (r *QualityMonitorResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - c := common.DatabricksClient{} - w, err := c.WorkspaceClient() + w, err := r.Client.WorkspaceClient() if err != nil { resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return diff --git 
a/pluginframework/test/data/volumes/volumes.tf b/pluginframework/test/data/volumes/volumes.tf index 145af6ae3c..3ff475d826 100644 --- a/pluginframework/test/data/volumes/volumes.tf +++ b/pluginframework/test/data/volumes/volumes.tf @@ -10,46 +10,46 @@ provider "databricks" { profile = "aws-prod-ucws" } -resource "databricks_catalog" "testCatalog" { - name = "testCatalog" +resource "databricks_catalog" "testcatalog" { + name = "testcatalog-25july" comment = "Plugin Framework PoC" properties = { purpose = "testing" } } -resource "databricks_schema" "testSchema" { - catalog_name = databricks_catalog.testCatalog.name - name = "testSchema" +resource "databricks_schema" "testschema" { + catalog_name = databricks_catalog.testcatalog.name + name = "testschema-25july" comment = "Plugin Framework PoC" properties = { purpose = "testing" } } -resource "databricks_volume" "testVolume1" { - name = "testVolume1" - catalog_name = databricks_catalog.testCatalog.name - schema_name = databricks_schema.testSchema.name +resource "databricks_volume" "testvolume1" { + name = "testvolume1-25july" + catalog_name = databricks_catalog.testcatalog.name + schema_name = databricks_schema.testschema.name volume_type = "MANAGED" comment = "Plugin Framework PoC" } -resource "databricks_volume" "testVolume2" { - name = "testVolume2" - catalog_name = databricks_catalog.testCatalog.name - schema_name = databricks_schema.testSchema.name +resource "databricks_volume" "testvolume2" { + name = "testvolume2-25july" + catalog_name = databricks_catalog.testcatalog.name + schema_name = databricks_schema.testschema.name volume_type = "MANAGED" comment = "Plugin Framework PoC" } -data "databricks_volumes_plugin_framework" "testVolumes" { - catalog_name = databricks_catalog.testCatalog.name - schema_name = databricks_schema.testSchema.name +data "databricks_volumes_pluginframework" "testvolumes" { + catalog_name = databricks_catalog.testcatalog.name + schema_name = databricks_schema.testschema.name } output 
"all_volumes" { - value = data.databricks_volumes_plugin_framework.testVolumes + value = data.databricks_volumes_pluginframework.testvolumes } diff --git a/pluginframework/test/resource/quality-monitor/quality_monitor.tf b/pluginframework/test/resource/quality-monitor/quality_monitor.tf index 2eb51923a7..307f9e01fc 100644 --- a/pluginframework/test/resource/quality-monitor/quality_monitor.tf +++ b/pluginframework/test/resource/quality-monitor/quality_monitor.tf @@ -40,7 +40,7 @@ resource "databricks_sql_table" "testTable" { } } -resource "databricks_lakehouse_monitor_plugin_framework" "testMonitor" { +resource "databricks_lakehouse_monitor_pluginframework" "testMonitor" { table_name = "${databricks_catalog.testCatalog.name}.${databricks_schema.testSchema.name}.${databricks_sql_table.testTable.name}" } diff --git a/provider/provider_plugin_framework.go b/provider/provider_plugin_framework.go index 286537ac6f..acfc115354 100644 --- a/provider/provider_plugin_framework.go +++ b/provider/provider_plugin_framework.go @@ -25,7 +25,7 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" ) -var pluginFrameworkProviderName = "databricks-tf-provider-plugin-framework" +var pluginFrameworkProviderName = "databricks-tf-provider-pluginframework" func init() { // IMPORTANT: this line cannot be changed, because it's used for @@ -45,13 +45,13 @@ var _ provider.Provider = (*DatabricksProviderPluginFramework)(nil) func (p *DatabricksProviderPluginFramework) Resources(ctx context.Context) []func() resource.Resource { return []func() resource.Resource{ - pluginframework.ResourceQualityMonitor, + pluginframework.ResourceQualityMonitor(), } } func (p *DatabricksProviderPluginFramework) DataSources(ctx context.Context) []func() datasource.DataSource { return []func() datasource.DataSource{ - pluginframework.DataSourceVolumes, + pluginframework.DataSourceVolumes(), } } @@ -92,8 +92,7 @@ func providerSchemaPluginFramework() schema.Schema { } } return schema.Schema{ - Description: 
"Databricks provider schema for plugin framework", - Attributes: ps, + Attributes: ps, } } From d6428800427227a55ebd988be56b7f4cc74a14c3 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Mon, 29 Jul 2024 01:45:15 +0200 Subject: [PATCH 37/39] TF Apply works --- pluginframework/resource_quality_monitor.go | 46 +++++++++++++------ .../quality-monitor/quality_monitor.tf | 18 ++++---- 2 files changed, 42 insertions(+), 22 deletions(-) diff --git a/pluginframework/resource_quality_monitor.go b/pluginframework/resource_quality_monitor.go index 6ef4398b3f..01f021faca 100644 --- a/pluginframework/resource_quality_monitor.go +++ b/pluginframework/resource_quality_monitor.go @@ -52,7 +52,8 @@ func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.Meta func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Lakehouse Monitor. MonitorInfo struct is used to create the schema", - Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog_tf.MonitorInfo{}), + // TODO: Add CustomizeSchemaPath once it is supported in the plugin framework + Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog_tf.MonitorInfo{}), } } @@ -77,13 +78,20 @@ func (r *QualityMonitorResource) Create(ctx context.Context, req resource.Create resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return } - var create catalog.CreateMonitor - diags := req.Plan.Get(ctx, &create) + var createMonitorTfSDK catalog_tf.CreateMonitor + diags := req.Plan.Get(ctx, &createMonitorTfSDK) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return } - endpoint, err := w.QualityMonitors.Create(ctx, create) + + var createMonitorGoSDK catalog.CreateMonitor + err = common.TfSdkToGoSdkStruct(createMonitorTfSDK, &createMonitorGoSDK, ctx) + if err != nil { + resp.Diagnostics.AddError("Failed to convert Tf SDK struct to Go SDK struct", err.Error()) + return + } + endpoint, err := w.QualityMonitors.Create(ctx, createMonitorGoSDK) if err != nil { resp.Diagnostics.AddError("Failed to get create monitor", err.Error()) return @@ -101,18 +109,23 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return } - var getMonitor catalog.GetQualityMonitorRequest + var getMonitor catalog_tf.GetQualityMonitorRequest diags := req.State.Get(ctx, &getMonitor) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return } - endpoint, err := w.QualityMonitors.GetByTableName(ctx, getMonitor.TableName) + endpoint, err := w.QualityMonitors.GetByTableName(ctx, getMonitor.TableName.ValueString()) if err != nil { resp.Diagnostics.AddError("Failed to get monitor", err.Error()) return } - diags = resp.State.Set(ctx, endpoint) + var monitorInfoTfSDK catalog_tf.MonitorInfo + err = common.GoSdkToTfSdkStruct(endpoint, &monitorInfoTfSDK, ctx) + if err != nil { + resp.Diagnostics.AddError("Failed to convert Go SDK struct to TF SDK struct", err.Error()) + } + diags = resp.State.Set(ctx, monitorInfoTfSDK) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return @@ -125,18 +138,25 @@ func (r *QualityMonitorResource) Update(ctx context.Context, req resource.Update resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return } - var updateRequest catalog.UpdateMonitor - diags := req.Plan.Get(ctx, &updateRequest) + var updateMonitorTfSDK catalog_tf.UpdateMonitor + diags := req.Plan.Get(ctx, &updateMonitorTfSDK) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return } - _, err = w.QualityMonitors.Update(ctx, updateRequest) + + var updateMonitorGoSDK catalog.UpdateMonitor + err = common.TfSdkToGoSdkStruct(updateMonitorTfSDK, &updateMonitorGoSDK, ctx) + if err != nil { + resp.Diagnostics.AddError("Failed to convert Tf SDK struct to Go SDK struct", err.Error()) + return + } + _, err = w.QualityMonitors.Update(ctx, updateMonitorGoSDK) if err != nil { resp.Diagnostics.AddError("Failed to update monitor", err.Error()) return } - err = WaitForMonitor(w, ctx, updateRequest.TableName) + err = WaitForMonitor(w, ctx, updateMonitorGoSDK.TableName) if err != nil { resp.Diagnostics.AddError("Failed to get updated monitor", err.Error()) return @@ -149,13 +169,13 @@ func (r *QualityMonitorResource) Delete(ctx context.Context, req resource.Delete resp.Diagnostics.AddError("Failed to get workspace client", err.Error()) return } - var deleteRequest catalog.DeleteQualityMonitorRequest + var deleteRequest catalog_tf.DeleteQualityMonitorRequest diags := req.State.Get(ctx, &deleteRequest) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return } - err = w.QualityMonitors.DeleteByTableName(ctx, deleteRequest.TableName) + err = w.QualityMonitors.DeleteByTableName(ctx, deleteRequest.TableName.ValueString()) if err != nil { resp.Diagnostics.AddError("Failed to delete monitor", err.Error()) return diff --git a/pluginframework/test/resource/quality-monitor/quality_monitor.tf b/pluginframework/test/resource/quality-monitor/quality_monitor.tf index 307f9e01fc..665a982d20 100644 --- a/pluginframework/test/resource/quality-monitor/quality_monitor.tf +++ b/pluginframework/test/resource/quality-monitor/quality_monitor.tf @@ -10,16 +10,16 @@ provider "databricks" { profile = "aws-prod-ucws" } -resource "databricks_catalog" "testCatalog" { - name = "testCatalog" +resource "databricks_catalog" "testcatalog" { + name = "testcatalog" comment = "Plugin Framework PoC" properties = { purpose = "testing" } } -resource "databricks_schema" "testSchema" { - catalog_name = databricks_catalog.testCatalog.name +resource "databricks_schema" "testschema" { + catalog_name = databricks_catalog.testcatalog.name name = "testSchema" comment = "Plugin Framework PoC" properties = { @@ -27,9 +27,9 @@ resource "databricks_schema" "testSchema" { } } -resource "databricks_sql_table" "testTable" { - catalog_name = databricks_catalog.testCatalog.name - schema_name = databricks_schema.testSchema.name +resource "databricks_sql_table" "testtable" { + catalog_name = databricks_catalog.testcatalog.name + schema_name = databricks_schema.testschema.name name = "testTable" table_type = "MANAGED" data_source_format = "DELTA" @@ -40,7 +40,7 @@ resource "databricks_sql_table" "testTable" { } } -resource "databricks_lakehouse_monitor_pluginframework" "testMonitor" { - table_name = "${databricks_catalog.testCatalog.name}.${databricks_schema.testSchema.name}.${databricks_sql_table.testTable.name}" +resource "databricks_lakehouse_monitor_pluginframework" "testmonitor" { + table_name = 
"${databricks_catalog.testcatalog.name}.${databricks_schema.testschema.name}.${databricks_sql_table.testtable.name}" } From 4f3b923a03381ac268ab9dda6e8ee770eb671540 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Mon, 29 Jul 2024 02:01:08 +0200 Subject: [PATCH 38/39] - --- common/customizable_schema_plugin_framework_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/customizable_schema_plugin_framework_test.go b/common/customizable_schema_plugin_framework_test.go index b84dfb90e2..fc49e6f1c9 100644 --- a/common/customizable_schema_plugin_framework_test.go +++ b/common/customizable_schema_plugin_framework_test.go @@ -5,7 +5,7 @@ import ( "fmt" "testing" - "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/stretchr/testify/assert" ) From b35195cd3492c11957bb9fe9004c235068c6fe23 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Mon, 29 Jul 2024 02:29:25 +0200 Subject: [PATCH 39/39] - --- common/reflect_resource_plugin_framework.go | 13 +++++++------ pluginframework/resource_quality_monitor.go | 17 +++++++++++++++-- .../resource/quality-monitor/quality_monitor.tf | 7 ++++--- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/common/reflect_resource_plugin_framework.go b/common/reflect_resource_plugin_framework.go index 58917c2e78..ea0a2f3ae5 100644 --- a/common/reflect_resource_plugin_framework.go +++ b/common/reflect_resource_plugin_framework.go @@ -403,16 +403,17 @@ func fieldIsOptional(field reflect.StructField) bool { } func PluginFrameworkResourceStructToSchema(v any, customizeSchema func(CustomizableSchemaPluginFramework) CustomizableSchemaPluginFramework) schema.Schema { + attributes := PluginFrameworkResourceStructToSchemaMap(v, customizeSchema) + return schema.Schema{Attributes: attributes} +} + +func PluginFrameworkResourceStructToSchemaMap(v any, 
customizeSchema func(CustomizableSchemaPluginFramework) CustomizableSchemaPluginFramework) map[string]schema.Attribute { attributes := pluginFrameworkResourceTypeToSchema(reflect.ValueOf(v)) if customizeSchema != nil { cs := customizeSchema(*ConstructCustomizableSchema(attributes)) - return schema.Schema{Attributes: cs.ToAttributeMap()} + return cs.ToAttributeMap() } else { - return schema.Schema{Attributes: attributes} + return attributes } } - -func PluginFrameworkResourceStructToSchemaMap(v any) map[string]schema.Attribute { - return pluginFrameworkResourceTypeToSchema(reflect.ValueOf(v)) -} diff --git a/pluginframework/resource_quality_monitor.go b/pluginframework/resource_quality_monitor.go index 01f021faca..5dbc1bf1c2 100644 --- a/pluginframework/resource_quality_monitor.go +++ b/pluginframework/resource_quality_monitor.go @@ -52,8 +52,21 @@ func (r *QualityMonitorResource) Metadata(ctx context.Context, req resource.Meta func (r *QualityMonitorResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Description: "Terraform schema for Databricks Lakehouse Monitor. 
MonitorInfo struct is used to create the schema", - // TODO: Add CustomizeSchemaPath once it is supported in the plugin framework - Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog_tf.MonitorInfo{}), + Attributes: common.PluginFrameworkResourceStructToSchemaMap(catalog_tf.MonitorInfo{}, func(c common.CustomizableSchemaPluginFramework) common.CustomizableSchemaPluginFramework { + c.AddNewField("skip_builtin_dashboard", schema.BoolAttribute{Optional: true, Required: false}) + c.AddNewField("warehouse_id", schema.StringAttribute{Optional: true, Required: false}) + c.SetRequired("assets_dir") + c.SetRequired("output_schema_name") + c.SetRequired("table_name") + // TODO: Uncomment this once SetReadOnly is supported in the plugin framework + // c.SetReadOnly("monitor_version") + // c.SetReadOnly("drift_metrics_table_name") + // c.SetReadOnly("profile_metrics_table_name") + // c.SetReadOnly("status") + // c.SetReadOnly("dashboard_id") + // c.SetReadOnly("schedule", "pause_status") + return c + }), } } diff --git a/pluginframework/test/resource/quality-monitor/quality_monitor.tf b/pluginframework/test/resource/quality-monitor/quality_monitor.tf index 665a982d20..e98b118ebb 100644 --- a/pluginframework/test/resource/quality-monitor/quality_monitor.tf +++ b/pluginframework/test/resource/quality-monitor/quality_monitor.tf @@ -40,7 +40,8 @@ resource "databricks_sql_table" "testtable" { } } -resource "databricks_lakehouse_monitor_pluginframework" "testmonitor" { - table_name = "${databricks_catalog.testcatalog.name}.${databricks_schema.testschema.name}.${databricks_sql_table.testtable.name}" -} +# TODO: Uncomment once SetReadOnly is supported in the plugin framework and add the necessary fields +# resource "databricks_lakehouse_monitor_pluginframework" "testmonitor" { +# table_name = "${databricks_catalog.testcatalog.name}.${databricks_schema.testschema.name}.${databricks_sql_table.testtable.name}" +# }