diff --git a/charts/spdk-csi/templates/controller.yaml b/charts/spdk-csi/templates/controller.yaml index 2d68f1f..9e7132d 100644 --- a/charts/spdk-csi/templates/controller.yaml +++ b/charts/spdk-csi/templates/controller.yaml @@ -54,9 +54,6 @@ spec: - name: spdkcsi-config mountPath: /etc/spdkcsi-config/ readOnly: true - - name: spdkcsi-secret - mountPath: /etc/spdkcsi-secret/ - readOnly: true - name: spdkcsi-snapshotter image: "{{ .Values.image.csiSnapshotter.repository }}:{{ .Values.image.csiSnapshotter.tag }}" args: @@ -78,6 +75,3 @@ spec: - name: spdkcsi-config configMap: name: spdkcsi-cm - - name: spdkcsi-secret - secret: - secretName: spdkcsi-secret diff --git a/charts/spdk-csi/templates/storageclass.yaml b/charts/spdk-csi/templates/storageclass.yaml index b0ffc6c..27b4e57 100644 --- a/charts/spdk-csi/templates/storageclass.yaml +++ b/charts/spdk-csi/templates/storageclass.yaml @@ -11,6 +11,8 @@ metadata: provisioner: csi.spdk.io parameters: fsType: ext4 + csi.storage.k8s.io/provisioner-secret-name: spdkcsi-secret + csi.storage.k8s.io/provisioner-secret-namespace: default reclaimPolicy: Delete volumeBindingMode: Immediate {{- end -}} diff --git a/deploy/kubernetes/controller.yaml b/deploy/kubernetes/controller.yaml index d5fd917..f845473 100644 --- a/deploy/kubernetes/controller.yaml +++ b/deploy/kubernetes/controller.yaml @@ -62,9 +62,6 @@ spec: - name: spdkcsi-config mountPath: /etc/spdkcsi-config/ readOnly: true - - name: spdkcsi-secret - mountPath: /etc/spdkcsi-secret/ - readOnly: true volumes: - name: socket-dir emptyDir: @@ -72,6 +69,3 @@ spec: - name: spdkcsi-config configMap: name: spdkcsi-cm - - name: spdkcsi-secret - secret: - secretName: spdkcsi-secret diff --git a/deploy/kubernetes/snapshotclass.yaml b/deploy/kubernetes/snapshotclass.yaml index 385301b..0d8c1e8 100644 --- a/deploy/kubernetes/snapshotclass.yaml +++ b/deploy/kubernetes/snapshotclass.yaml @@ -10,4 +10,6 @@ metadata: driver: csi.spdk.io parameters: fsType: ext4 + 
csi.storage.k8s.io/snapshotter-secret-name: spdkcsi-secret + csi.storage.k8s.io/snapshotter-secret-namespace: default deletionPolicy: Delete diff --git a/deploy/kubernetes/storageclass.yaml b/deploy/kubernetes/storageclass.yaml index 6317fcf..cff9f85 100644 --- a/deploy/kubernetes/storageclass.yaml +++ b/deploy/kubernetes/storageclass.yaml @@ -9,5 +9,7 @@ metadata: provisioner: csi.spdk.io parameters: fsType: ext4 + csi.storage.k8s.io/provisioner-secret-name: spdkcsi-secret + csi.storage.k8s.io/provisioner-secret-namespace: default reclaimPolicy: Delete volumeBindingMode: Immediate diff --git a/pkg/spdk/controllerserver.go b/pkg/spdk/controllerserver.go index 1e6f801..32225fe 100644 --- a/pkg/spdk/controllerserver.go +++ b/pkg/spdk/controllerserver.go @@ -37,8 +37,8 @@ var errVolumeInCreation = status.Error(codes.Internal, "volume in creation") type controllerServer struct { *csicommon.DefaultControllerServer - spdkNodes map[string]util.SpdkNode // all spdk nodes in cluster - volumeLocks *util.VolumeLocks + spdkNodeConfigs map[string]*util.SpdkNodeConfig + volumeLocks *util.VolumeLocks } type spdkVolume struct { @@ -57,10 +57,10 @@ func (cs *controllerServer) CreateVolume(_ context.Context, req *csi.CreateVolum return nil, status.Error(codes.Internal, err.Error()) } - volumeInfo, err := cs.publishVolume(csiVolume.GetVolumeId()) + volumeInfo, err := cs.publishVolume(csiVolume.GetVolumeId(), req.Secrets) if err != nil { klog.Errorf("failed to publish volume, volumeID: %s err: %v", volumeID, err) - cs.deleteVolume(csiVolume.GetVolumeId()) //nolint:errcheck // we can do little + cs.deleteVolume(csiVolume.GetVolumeId(), req.Secrets) //nolint:errcheck // we can do little return nil, status.Error(codes.Internal, err.Error()) } // copy volume info. node needs these info to contact target(ip, port, nqn, ...) 
@@ -80,7 +80,7 @@ func (cs *controllerServer) DeleteVolume(_ context.Context, req *csi.DeleteVolum unlock := cs.volumeLocks.Lock(volumeID) defer unlock() // no harm if volume already unpublished - err := cs.unpublishVolume(volumeID) + err := cs.unpublishVolume(volumeID, req.Secrets) switch { case errors.Is(err, util.ErrVolumeUnpublished): // unpublished but not deleted in last request? @@ -95,7 +95,7 @@ func (cs *controllerServer) DeleteVolume(_ context.Context, req *csi.DeleteVolum } // no harm if volume already deleted - err = cs.deleteVolume(volumeID) + err = cs.deleteVolume(volumeID, req.Secrets) if errors.Is(err, util.ErrJSONNoSuchDevice) { // deleted in previous request? klog.Warningf("volume not exists: %s", volumeID) @@ -140,13 +140,17 @@ func (cs *controllerServer) CreateSnapshot(_ context.Context, req *csi.CreateSna return nil, err } - snapshotID, err := cs.spdkNodes[spdkVol.nodeName].CreateSnapshot(spdkVol.lvolID, snapshotName) + node, err := cs.getSpdkNode(spdkVol.nodeName, req.Secrets) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + snapshotID, err := node.CreateSnapshot(spdkVol.lvolID, snapshotName) if err != nil { klog.Errorf("failed to create snapshot, volumeID: %s snapshotName: %s err: %v", volumeID, snapshotName, err) return nil, status.Error(codes.Internal, err.Error()) } - volInfo, err := cs.spdkNodes[spdkVol.nodeName].VolumeInfo(spdkVol.lvolID) + volInfo, err := node.VolumeInfo(spdkVol.lvolID) if err != nil { klog.Errorf("failed to get volume info, volumeID: %s err: %v", volumeID, err) return nil, status.Error(codes.Internal, err.Error()) @@ -181,7 +185,11 @@ func (cs *controllerServer) DeleteSnapshot(_ context.Context, req *csi.DeleteSna return nil, err } - err = cs.spdkNodes[spdkVol.nodeName].DeleteVolume(spdkVol.lvolID) + node, err := cs.getSpdkNode(spdkVol.nodeName, req.Secrets) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + err = node.DeleteVolume(spdkVol.lvolID) if err != 
nil { klog.Errorf("failed to delete snapshot, snapshotID: %s err: %v", snapshotID, err) return nil, status.Error(codes.Internal, err.Error()) @@ -203,54 +211,76 @@ func (cs *controllerServer) createVolume(req *csi.CreateVolumeRequest) (*csi.Vol ContentSource: req.GetVolumeContentSource(), } - // check all SPDK nodes to see if the volume has already been created - for nodeName, node := range cs.spdkNodes { - lvStores, err := node.LvStores() - if err != nil { - return nil, fmt.Errorf("get lvstores of node:%s failed: %w", nodeName, err) - } - for lvsIdx := range lvStores { - volumeID, err := node.GetVolume(req.GetName(), lvStores[lvsIdx].Name) - if err == nil { - vol.VolumeId = fmt.Sprintf("%s:%s", nodeName, volumeID) - return &vol, nil - } - } + volumeID, err := cs.getVolume(req) + if err == nil { + vol.VolumeId = volumeID + return &vol, nil } - // schedule a SPDK node/lvstore to create the volume. - // if volume content source is specified, using the same node/lvstore as the source volume. - var err error - var volumeID string - if req.GetVolumeContentSource() == nil { - // schedule suitable node:lvstore - nodeName, lvstore, err2 := cs.schedule(sizeMiB) + var lvolID string + + if req.GetVolumeContentSource() != nil { + // if volume content source is specified, using the same node/lvstore as the source volume. + // find the node/lvstore of the specified content source volume + nodeName, lvstore, sourceLvolID, err2 := cs.getSnapshotInfo(req.GetVolumeContentSource(), req.Secrets) if err2 != nil { return nil, err2 } - // TODO: re-schedule on ErrJSONNoSpaceLeft per optimistic concurrency control - // create a new volume - volumeID, err = cs.spdkNodes[nodeName].CreateVolume(req.GetName(), lvstore, sizeMiB) - // in the subsequent DeleteVolume() request, a nodeName needs to be specified, - // but the current CSI mechanism only passes the VolumeId to DeleteVolume(). - // therefore, the nodeName is included as part of the VolumeId. 
- vol.VolumeId = fmt.Sprintf("%s:%s", nodeName, volumeID) - } else { - // find the node/lvstore of the specified content source volume - nodeName, lvstore, sourceLvolID, err2 := cs.getSnapshotInfo(req.GetVolumeContentSource()) + node, err2 := cs.getSpdkNode(nodeName, req.Secrets) if err2 != nil { - return nil, err2 + return nil, status.Error(codes.Internal, err2.Error()) } // create a volume cloned from the source volume - volumeID, err = cs.spdkNodes[nodeName].CloneVolume(req.GetName(), lvstore, sourceLvolID) - vol.VolumeId = fmt.Sprintf("%s:%s", nodeName, volumeID) + lvolID, err = node.CloneVolume(req.GetName(), lvstore, sourceLvolID) + vol.VolumeId = fmt.Sprintf("%s:%s", nodeName, lvolID) + if err != nil { + return nil, err + } + return &vol, nil } - + // schedule a SPDK node/lvstore to create the volume. + // schedule suitable node:lvstore + nodeName, lvstore, err2 := cs.schedule(sizeMiB, req.Secrets) + if err2 != nil { + return nil, err2 + } + node, err2 := cs.getSpdkNode(nodeName, req.Secrets) + if err2 != nil { + return nil, status.Error(codes.Internal, err2.Error()) + } + // TODO: re-schedule on ErrJSONNoSpaceLeft per optimistic concurrency control + // create a new volume + lvolID, err = node.CreateVolume(req.GetName(), lvstore, sizeMiB) + // in the subsequent DeleteVolume() request, a nodeName needs to be specified, + // but the current CSI mechanism only passes the VolumeId to DeleteVolume(). + // therefore, the nodeName is included as part of the VolumeId. 
+ vol.VolumeId = fmt.Sprintf("%s:%s", nodeName, lvolID) if err != nil { return nil, err } return &vol, nil } +func (cs *controllerServer) getVolume(req *csi.CreateVolumeRequest) (string, error) { + // check all SPDK nodes to see if the volume has already been created + for _, cfg := range cs.spdkNodeConfigs { + node, err := cs.getSpdkNode(cfg.Name, req.Secrets) + if err != nil { + return "", fmt.Errorf("failed to get spdkNode %s: %s", cfg.Name, err.Error()) + } + lvStores, err := node.LvStores() + if err != nil { + return "", fmt.Errorf("get lvstores of node:%s failed: %w", cfg.Name, err) + } + for lvsIdx := range lvStores { + volumeID, err := node.GetVolume(req.GetName(), lvStores[lvsIdx].Name) + if err == nil { + return fmt.Sprintf("%s:%s", cfg.Name, volumeID), nil + } + } + } + return "", fmt.Errorf("volume not found") +} + func getSPDKVol(csiVolumeID string) (*spdkVolume, error) { // extract spdkNodeName and spdkLvolID from csiVolumeID // csiVolumeID: node001:8e2dcb9d-3a79-4362-965e-fdb0cd3f4b8d @@ -267,41 +297,53 @@ func getSPDKVol(csiVolumeID string) (*spdkVolume, error) { return nil, fmt.Errorf("missing nodeName in volume: %s", csiVolumeID) } -func (cs *controllerServer) publishVolume(volumeID string) (map[string]string, error) { +func (cs *controllerServer) publishVolume(volumeID string, secrets map[string]string) (map[string]string, error) { spdkVol, err := getSPDKVol(volumeID) if err != nil { return nil, err } - err = cs.spdkNodes[spdkVol.nodeName].PublishVolume(spdkVol.lvolID) + node, err := cs.getSpdkNode(spdkVol.nodeName, secrets) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + err = node.PublishVolume(spdkVol.lvolID) if err != nil { return nil, err } - volumeInfo, err := cs.spdkNodes[spdkVol.nodeName].VolumeInfo(spdkVol.lvolID) + volumeInfo, err := node.VolumeInfo(spdkVol.lvolID) if err != nil { - cs.unpublishVolume(volumeID) //nolint:errcheck // we can do little + cs.unpublishVolume(volumeID, secrets) 
//nolint:errcheck // we can do little return nil, err } return volumeInfo, nil } -func (cs *controllerServer) deleteVolume(volumeID string) error { +func (cs *controllerServer) deleteVolume(volumeID string, secrets map[string]string) error { spdkVol, err := getSPDKVol(volumeID) if err != nil { return err } - return cs.spdkNodes[spdkVol.nodeName].DeleteVolume(spdkVol.lvolID) + node, err := cs.getSpdkNode(spdkVol.nodeName, secrets) + if err != nil { + return err + } + return node.DeleteVolume(spdkVol.lvolID) } -func (cs *controllerServer) unpublishVolume(volumeID string) error { +func (cs *controllerServer) unpublishVolume(volumeID string, secrets map[string]string) error { spdkVol, err := getSPDKVol(volumeID) if err != nil { return err } - return cs.spdkNodes[spdkVol.nodeName].UnpublishVolume(spdkVol.lvolID) + node, err := cs.getSpdkNode(spdkVol.nodeName, secrets) + if err != nil { + return err + } + return node.UnpublishVolume(spdkVol.lvolID) } -func (cs *controllerServer) getSnapshotInfo(vcs *csi.VolumeContentSource) ( +func (cs *controllerServer) getSnapshotInfo(vcs *csi.VolumeContentSource, secrets map[string]string) ( nodeName, lvstore, sourceLvolID string, err error, ) { snapshotSource := vcs.GetSnapshot() @@ -317,7 +359,12 @@ func (cs *controllerServer) getSnapshotInfo(vcs *csi.VolumeContentSource) ( nodeName = snapSpdkVol.nodeName sourceLvolID = snapSpdkVol.lvolID - sourceLvolInfo, err := cs.spdkNodes[nodeName].VolumeInfo(sourceLvolID) + node, err := cs.getSpdkNode(nodeName, secrets) + if err != nil { + return + } + + sourceLvolInfo, err := node.VolumeInfo(sourceLvolID) if err != nil { return } @@ -326,9 +373,13 @@ func (cs *controllerServer) getSnapshotInfo(vcs *csi.VolumeContentSource) ( } // simplest volume scheduler: find first node:lvstore with enough free space -func (cs *controllerServer) schedule(sizeMiB int64) (nodeName, lvstore string, err error) { - for name, spdkNode := range cs.spdkNodes { - // retrieve latest lvstore info from spdk node +func 
(cs *controllerServer) schedule(sizeMiB int64, secrets map[string]string) (nodeName, lvstore string, err error) { + for _, cfg := range cs.spdkNodeConfigs { + spdkNode, err := cs.getSpdkNode(cfg.Name, secrets) + if err != nil { + klog.Errorf("failed to get spdkNode %s: %s", cfg.Name, err.Error()) + continue + } lvstores, err := spdkNode.LvStores() if err != nil { klog.Errorf("failed to get lvstores from node %s: %s", spdkNode.Info(), err.Error()) @@ -338,7 +389,7 @@ func (cs *controllerServer) schedule(sizeMiB int64) (nodeName, lvstore string, e for i := range lvstores { lvstore := &lvstores[i] if lvstore.FreeSizeMiB > sizeMiB { - return name, lvstore.Name, nil + return cfg.Name, lvstore.Name, nil } } klog.Infof("not enough free space from node %s", spdkNode.Info()) @@ -347,68 +398,44 @@ func (cs *controllerServer) schedule(sizeMiB int64) (nodeName, lvstore string, e return "", "", fmt.Errorf("failed to find node with enough free space") } +func (cs *controllerServer) getSpdkNode(nodeName string, secrets map[string]string) (util.SpdkNode, error) { + spdkSecrets, err := util.NewSpdkSecrets(secrets["secret.json"]) + if err != nil { + return nil, err + } + node, ok := cs.spdkNodeConfigs[nodeName] + if !ok { + return nil, fmt.Errorf("%s spdknode not exists", nodeName) + } + for i := range spdkSecrets.Tokens { + token := spdkSecrets.Tokens[i] + if token.Name == nodeName { + spdkNode, err := util.NewSpdkNode(node.URL, token.UserName, token.Password, node.TargetType, node.TargetAddr) + if err != nil { + return nil, fmt.Errorf("failed to create spdk node %s: %w", node.Name, err) + } + return spdkNode, nil + } + } + return nil, fmt.Errorf("failed to find secret for spdk node %s", node.Name) +} + func newControllerServer(d *csicommon.CSIDriver) (*controllerServer, error) { server := controllerServer{ DefaultControllerServer: csicommon.NewDefaultControllerServer(d), - spdkNodes: map[string]util.SpdkNode{}, + spdkNodeConfigs: map[string]*util.SpdkNodeConfig{}, volumeLocks: 
util.NewVolumeLocks(), } - // get spdk node configs, see deploy/kubernetes/config-map.yaml - //nolint:tagliatelle // not using json:snake case - var config struct { - Nodes []struct { - Name string `json:"name"` - URL string `json:"rpcURL"` - TargetType string `json:"targetType"` - TargetAddr string `json:"targetAddr"` - } `json:"Nodes"` - } - configFile := util.FromEnv("SPDKCSI_CONFIG", "/etc/spdkcsi-config/config.json") - err := util.ParseJSONFile(configFile, &config) + config, err := util.NewCSIControllerConfig("SPDKCSI_CONFIG", "/etc/spdkcsi-config/config.json") if err != nil { return nil, err } - - // get spdk node secrets, see deploy/kubernetes/secret.yaml - //nolint:tagliatelle // not using json:snake case - var secret struct { - Tokens []struct { - Name string `json:"name"` - UserName string `json:"username"` - Password string `json:"password"` - } `json:"rpcTokens"` - } - secretFile := util.FromEnv("SPDKCSI_SECRET", "/etc/spdkcsi-secret/secret.json") - err = util.ParseJSONFile(secretFile, &secret) - if err != nil { - return nil, err - } - - // create spdk nodes for i := range config.Nodes { - node := &config.Nodes[i] - tokenFound := false - // find secret per node - for j := range secret.Tokens { - token := &secret.Tokens[j] - if token.Name == node.Name { - tokenFound = true - spdkNode, err := util.NewSpdkNode(node.URL, token.UserName, token.Password, node.TargetType, node.TargetAddr) - if err != nil { - klog.Errorf("failed to create spdk node %s: %s", node.Name, err.Error()) - } else { - klog.Infof("spdk node created: name=%s, url=%s", node.Name, node.URL) - server.spdkNodes[node.Name] = spdkNode - } - break - } - } - if !tokenFound { - klog.Errorf("failed to find secret for spdk node %s", node.Name) - } + server.spdkNodeConfigs[config.Nodes[i].Name] = &config.Nodes[i] } - if len(server.spdkNodes) == 0 { + + if len(server.spdkNodeConfigs) == 0 { return nil, fmt.Errorf("no valid spdk node found") } diff --git a/pkg/spdk/controllerserver_test.go 
b/pkg/spdk/controllerserver_test.go index 3368780..cd24c7f 100644 --- a/pkg/spdk/controllerserver_test.go +++ b/pkg/spdk/controllerserver_test.go @@ -155,7 +155,6 @@ func createTestController(targetType string) (cs *controllerServer, lvss [][]uti } defer func() { os.Remove(os.Getenv("SPDKCSI_CONFIG")) - os.Remove(os.Getenv("SPDKCSI_SECRET")) }() cd := csicommon.NewCSIDriver("test-driver", "test-version", "test-node") @@ -211,13 +210,10 @@ func createConfigFiles(targetType string) error { return err } os.Setenv("SPDKCSI_CONFIG", configFile.Name()) + return nil +} - secretFile, err := os.CreateTemp("", "spdkcsi-secret*.json") - if err != nil { - os.Remove(configFile.Name()) - return err - } - defer secretFile.Close() +func getSpdkSecrets() map[string]string { //nolint:gosec // only for test secret := ` { @@ -229,21 +225,16 @@ func createConfigFiles(targetType string) error { } ] }` - _, err = secretFile.WriteString(secret) - if err != nil { - os.Remove(configFile.Name()) - os.Remove(secretFile.Name()) - return err + return map[string]string{ + "secret.json": secret, } - os.Setenv("SPDKCSI_SECRET", secretFile.Name()) - - return nil } func createTestVolume(cs *controllerServer, name string, size int64) (string, error) { reqCreate := csi.CreateVolumeRequest{ Name: name, CapacityRange: &csi.CapacityRange{RequiredBytes: size}, + Secrets: getSpdkSecrets(), } resp, err := cs.CreateVolume(context.TODO(), &reqCreate) @@ -260,7 +251,10 @@ func createTestVolume(cs *controllerServer, name string, size int64) (string, er } func deleteTestVolume(cs *controllerServer, volumeID string) error { - reqDelete := csi.DeleteVolumeRequest{VolumeId: volumeID} + reqDelete := csi.DeleteVolumeRequest{ + VolumeId: volumeID, + Secrets: getSpdkSecrets(), + } _, err := cs.DeleteVolume(context.TODO(), &reqDelete) return err } @@ -274,6 +268,7 @@ func createSameVolumeInParallel(cs *controllerServer, name string, count int, si reqCreate := csi.CreateVolumeRequest{ Name: name, CapacityRange: 
&csi.CapacityRange{RequiredBytes: size}, + Secrets: getSpdkSecrets(), } for i := 0; i < count; i++ { wg.Add(1) @@ -316,7 +311,10 @@ func deleteSameVolumeInParallel(cs *controllerServer, volumeID string, count int var errCount int32 // issue delete requests to *same* volume in parallel - reqDelete := csi.DeleteVolumeRequest{VolumeId: volumeID} + reqDelete := csi.DeleteVolumeRequest{ + VolumeId: volumeID, + Secrets: getSpdkSecrets(), + } for i := 0; i < count; i++ { wg.Add(1) go func(wg *sync.WaitGroup) { @@ -337,7 +335,11 @@ func deleteSameVolumeInParallel(cs *controllerServer, volumeID string, count int func getLVSS(cs *controllerServer) ([][]util.LvStore, error) { var lvss [][]util.LvStore - for _, spdkNode := range cs.spdkNodes { + for _, cfg := range cs.spdkNodeConfigs { + spdkNode, err := cs.getSpdkNode(cfg.Name, getSpdkSecrets()) + if err != nil { + return nil, err + } lvs, err := spdkNode.LvStores() if err != nil { return nil, err diff --git a/pkg/util/config.go b/pkg/util/config.go index c997974..f811d2d 100644 --- a/pkg/util/config.go +++ b/pkg/util/config.go @@ -16,6 +16,8 @@ limitations under the License. 
package util +import "encoding/json" + const ( // TODO: move hardcoded settings to config map cfgRPCTimeoutSeconds = 20 @@ -37,3 +39,50 @@ type Config struct { IsControllerServer bool IsNodeServer bool } + +// CSIControllerConfig config for csi driver controller server, see deploy/kubernetes/config-map.yaml +// +//nolint:tagliatelle // not using json:snake case +type CSIControllerConfig struct { + Nodes []SpdkNodeConfig `json:"Nodes"` +} + +// SpdkNodeConfig config for spdk storage cluster +// +//nolint:tagliatelle // not using json:snake case +type SpdkNodeConfig struct { + Name string `json:"name"` + URL string `json:"rpcURL"` + TargetType string `json:"targetType"` + TargetAddr string `json:"targetAddr"` +} + +func NewCSIControllerConfig(env, def string) (*CSIControllerConfig, error) { + var config CSIControllerConfig + configFile := FromEnv(env, def) + err := ParseJSONFile(configFile, &config) + if err != nil { + return nil, err + } + return &config, nil +} + +// SpdkSecrets spdk storage cluster connection secrets, see deploy/kubernetes/secrets.yaml +// +//nolint:tagliatelle // not using json:snake case +type SpdkSecrets struct { + Tokens []struct { + Name string `json:"name"` + UserName string `json:"username"` + Password string `json:"password"` + } `json:"rpcTokens"` +} + +func NewSpdkSecrets(jsonSecrets string) (*SpdkSecrets, error) { + var secs SpdkSecrets + err := json.Unmarshal([]byte(jsonSecrets), &secs) + if err != nil { + return nil, err + } + return &secs, nil +}