Commit 864d91b

Merge branch 'main' into add-interface-filters

delldubey authored Jul 12, 2024
2 parents c0e2f6e + e42531f

Showing 8 changed files with 156 additions and 55 deletions.
4 changes: 2 additions & 2 deletions go.mod
@@ -16,7 +16,7 @@ require (
github.com/dell/gocsi v1.10.2-0.20240613132901-554b1fc3ec9a
github.com/dell/gofsutil v1.15.1-0.20240619132316-450188ec89b1
github.com/dell/goiscsi v1.9.1-0.20240521081708-989490f27824
github.com/dell/gonvme v1.7.0
github.com/dell/gonvme v1.7.1-0.20240627125407-0c79a312fff0
github.com/dell/gopowermax/v2 v2.6.1-0.20240625084712-d7b15e82834e
github.com/fsnotify/fsnotify v1.4.9
github.com/gorilla/mux v1.7.3
@@ -26,7 +26,7 @@ require (
github.com/stretchr/testify v1.9.0
github.com/vmware/govmomi v0.29.0
golang.org/x/net v0.26.0
google.golang.org/grpc v1.64.0
google.golang.org/grpc v1.64.1
google.golang.org/protobuf v1.34.2
k8s.io/api v0.20.0
k8s.io/apimachinery v0.20.0
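
One note on the bumped requirements (not part of the diff itself): the new gonvme version is a Go pseudo-version rather than a tagged release. Its three components encode the base tag, the commit time, and the commit hash:

v1.7.1-0.  → pre-release of the next tag after v1.7.0 (sorts above v1.7.0, below any real v1.7.1)
20240627125407  → UTC commit timestamp (2024-06-27 12:54:07)
0c79a312fff0  → 12-character commit hash prefix

The grpc bump, by contrast, moves between two tagged releases (v1.64.0 to v1.64.1).
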
10 changes: 4 additions & 6 deletions go.sum
@@ -119,10 +119,8 @@ github.com/dell/gofsutil v1.15.1-0.20240619132316-450188ec89b1 h1:BzempSG5VqE8VZ
github.com/dell/gofsutil v1.15.1-0.20240619132316-450188ec89b1/go.mod h1:bZ43qAOqKzGJxCRvkTVD7GCFMNkK37ur84mmMuxQshE=
github.com/dell/goiscsi v1.9.1-0.20240521081708-989490f27824 h1:HuOjkApdKKACWuwBIz1jRVF4QjAvKaYEfowRBV1s2n8=
github.com/dell/goiscsi v1.9.1-0.20240521081708-989490f27824/go.mod h1:NI/W/0O1UrMW2zVdMxy4z395Jn0r7utH6RQDFSZiFyQ=
github.com/dell/gonvme v1.7.0 h1:ztJFhKQehZjfaoNv+hTbGbdhLWCAhPE44k1v7x5o2c0=
github.com/dell/gonvme v1.7.0/go.mod h1:ajbuF+fswq+ty2tRTG5FN4ecIMJsG7aDu/bkMynTKAs=
github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075 h1:7GpPo1UrO2hJV2+H2EXPL6MJJ8IS37GkiJuuXAaqwa0=
github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075/go.mod h1:Z/DqRsmKztpvgkWnMzm/aHBvdbnoTfpzYhpsSQnLX7k=
github.com/dell/gonvme v1.7.1-0.20240627125407-0c79a312fff0 h1:xDu8SFch3tC2kf2KVTF1o6v3nAkcNaJMEUDUXLRo7tk=
github.com/dell/gonvme v1.7.1-0.20240627125407-0c79a312fff0/go.mod h1:ajbuF+fswq+ty2tRTG5FN4ecIMJsG7aDu/bkMynTKAs=
github.com/dell/gopowermax/v2 v2.6.1-0.20240625084712-d7b15e82834e h1:JP4XrsfDjOjckZDN/3LMyoyzgEWKfVYmgRRnJwD7rrc=
github.com/dell/gopowermax/v2 v2.6.1-0.20240625084712-d7b15e82834e/go.mod h1:Z/DqRsmKztpvgkWnMzm/aHBvdbnoTfpzYhpsSQnLX7k=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -754,8 +752,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
90 changes: 90 additions & 0 deletions samples/storageclass/powermax_nvmetcp.yaml
@@ -0,0 +1,90 @@
---
# This is a sample manifest for utilizing the topology feature.
# PVCs created using this storage class will be scheduled
# only on the nodes with NVMe/TCP access to the PowerMax.

# Change all instances of <SYMID> to the Symmetrix ID of the PowerMax array.
# In case you installed the driver with a custom name, then replace
# all instances of "csi-powermax.dellemc.com" with the custom provisioner name.

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: powermax-nvmetcp
parameters:
  # "csi.storage.k8s.io/fstype" sets the filesystem type used to format the new volume
  # Allowed values:
  #   "ext4" - EXT4 File system
  #   "xfs"  - XFS File system
  # Optional: true
  # Default value: None if defaultFsType is not mentioned in values.yaml;
  # else the defaultFsType value mentioned in values.yaml is used as the default
  csi.storage.k8s.io/fstype: xfs
  # Name of the SRP on the PowerMax array that should be used for provisioning
  # Optional: false
  # Examples: "DEFAULT_SRP", "SRP_1"
  SRP: <SRP Name>
  # Serial ID of the array that is used for provisioning
  # Optional: false
  # Example: "000000000001"
  SYMID: <SYMID>
  # Name of the Service Level on the PowerMax array that should be used for provisioning
  # Optional: true, Default value: Optimized
  # Examples: "Diamond", "Bronze"
  ServiceLevel: <Service Level>
  # The following params are for HostLimits; set them only if you want to set IO limits.
  # HostLimitName uniquely identifies a given set of limits on a storage class.
  # It is used in naming the storage group; maximum of 3 letters.
  # Optional: true
  # Examples: "HL1", "HL2"
  # HostLimitName: "HL1"
  # The MBs per second Host IO limit for the storage class
  # Optional: true, Default: ""
  # Examples: 100, 200, NOLIMIT
  # HostIOLimitMBSec: ""
  # The IOs per second Host IO limit for the storage class
  # Optional: true, Default: ""
  # Examples: 100, 200, NOLIMIT
  # HostIOLimitIOSec: ""
  # Distribution of the Host IO limits for the storage class
  # Optional: true, Default: ""
  # Allowed values: "Never", "Always" or "OnFailure" only
  # DynamicDistribution: ""
# If using a custom driver name, change the following to point to the custom name
# Optional: true, Default value: csi-powermax.dellemc.com
# Examples: "csi-driver-powermax", "csi-powermax.dellemc.com"
provisioner: csi-powermax.dellemc.com
# Configure what happens to a Persistent Volume when the PVC
# it is bound to is deleted
# Allowed values:
#   Delete: the underlying persistent volume will be deleted along with the PVC
#   Retain: the underlying persistent volume remains
# Optional: true, Default value: None
reclaimPolicy: Delete
# volumeBindingMode controls when volume binding
# and dynamic provisioning should occur
# Allowed values:
#   Immediate: volume binding and dynamic provisioning
#   occur once the PersistentVolumeClaim is created
#   WaitForFirstConsumer: delays the binding and provisioning of a PV
#   until a Pod using the PersistentVolumeClaim is created
# Optional: true, Default value: None
volumeBindingMode: WaitForFirstConsumer
# Attribute to allow volume expansion
# Allowed values:
#   "true"  - Volume can be resized
#   "false" - Volume cannot be resized
# Optional: true, Default value: "true"
allowVolumeExpansion: true
# Restrict provisioning to specific topologies
# Allowed values: map of key-value pairs
# Optional: true, Default value: None
allowedTopologies:
  - matchLabelExpressions:
      - key: csi-powermax.dellemc.com/<SYMID>
        values:
          - csi-powermax.dellemc.com
      - key: csi-powermax.dellemc.com/<SYMID>.nvmetcp
        values:
          - csi-powermax.dellemc.com
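
For context, and not part of this commit, a minimal PersistentVolumeClaim consuming this class could look like the following sketch; the claim name, namespace, and size are illustrative:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  # Illustrative name and namespace, not prescribed by the sample above
  name: pmax-nvmetcp-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi  # illustrative size
  # Must match metadata.name of the StorageClass above
  storageClassName: powermax-nvmetcp
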
27 changes: 12 additions & 15 deletions service/controller.go
@@ -1695,13 +1695,11 @@ func (s *service) DeleteVolume(
log.Error("Failed to probe with erro: " + err.Error())
return nil, err
}
// Check if devID is a file system
_, err = pmaxClient.GetFileSystemByID(ctx, symID, devID)
// Check if devID is a non-fileSystem
_, err = pmaxClient.GetVolumeByID(ctx, symID, devID)
if err != nil {
log.Debugf("Error:(%s) fetching file system with ID %s", err.Error(), devID)
log.Debugf("GetfileSystem failed, continuing with GetVolumeID...")
} else {
// found file system
log.Debugf("Error:(%s) fetching volume with ID %s", err.Error(), devID)
log.Debugf("checking for fileSystem...")
err := s.deleteFileSystem(ctx, reqID, symID, volName, devID, id, pmaxClient)
if err != nil {
return nil, err
@@ -2467,11 +2465,10 @@ func (s *service) ControllerUnpublishVolume(
}

// Check if devID is a file system
_, err = pmaxClient.GetFileSystemByID(ctx, symID, devID)
_, err = pmaxClient.GetVolumeByID(ctx, symID, devID)
if err != nil {
log.Debugf("Error:(%s) fetching file system with ID %s", err.Error(), devID)
log.Debugf("GetfileSystem failed, continuing with GetVolumeID...")
} else {
log.Debugf("Error:(%s) fetching volume with ID %s", err.Error(), devID)
log.Debugf("continuing with fileSystem...")
// found file system
return file.DeleteNFSExport(ctx, reqID, symID, devID, pmaxClient)
}
@@ -2554,15 +2551,15 @@ func (s *service) unpublishVolume(ctx context.Context, reqID string, vol *types.
return nil
}
var tgtStorageGroupID, tgtMaskingViewID string
if !isISCSI {
tgtStorageGroupID = tgtFCStorageGroupID
tgtMaskingViewID = tgtFCMaskingViewID
if isISCSI {
tgtStorageGroupID = tgtISCSIStorageGroupID
tgtMaskingViewID = tgtISCSIMaskingViewID
} else if isNVMeTCP {
tgtStorageGroupID = tgtNVMeTCPStorageGroupID
tgtMaskingViewID = tgtNVMeTCPMaskingViewID
} else {
tgtStorageGroupID = tgtISCSIStorageGroupID
tgtMaskingViewID = tgtISCSIMaskingViewID
tgtStorageGroupID = tgtFCStorageGroupID
tgtMaskingViewID = tgtFCMaskingViewID
}
waitChan, lockChan, err := s.sgSvc.requestRemoveVolumeFromSGMV(ctx, tgtStorageGroupID, tgtMaskingViewID, reqID, clientSymID, symID, devID)
if err != nil {
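
Both controller.go hunks above make the same reordering: the driver now resolves the ID as a block volume first and treats a failed lookup as the file-system case, instead of probing GetFileSystemByID first. A condensed sketch of the shared flow, using the gopowermax client calls from the hunks (not the verbatim driver code; the response plumbing is illustrative):

// Probe order after this change: block volume first, file system on failure.
_, err = pmaxClient.GetVolumeByID(ctx, symID, devID)
if err != nil {
	// devID did not resolve as a block volume, so treat it as a file system.
	// (DeleteVolume hands off to s.deleteFileSystem as shown above;
	// ControllerUnpublishVolume hands off to file.DeleteNFSExport.)
	if err := s.deleteFileSystem(ctx, reqID, symID, volName, devID, id, pmaxClient); err != nil {
		return nil, err
	}
	return &csi.DeleteVolumeResponse{}, nil
}
// Otherwise devID is a block volume and the regular teardown continues.
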
25 changes: 10 additions & 15 deletions service/csi_extension_server.go
@@ -156,31 +156,26 @@ func (s *service) IsIOInProgress(ctx context.Context, volID, symID string) (err
}
}
startTime := endTime - OneHour
_, err = pmaxClient.GetFileSystemByID(ctx, symID, volID)
resp, err := pmaxClient.GetVolumesMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
if err != nil {
resp, err := pmaxClient.GetVolumesMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
// nfs volume type logic volId may be fsID
resp, err := pmaxClient.GetFileSystemMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
if err != nil {
log.Errorf("Error %v while checking IsIOInProgress for array having symID %s for volumeId %s", err.Error(), symID, volID)
log.Errorf("Error %v while checking IsIOInProgress for array having symID %s for volumeID/fileSystemID %s", err.Error(), symID, volID)
return fmt.Errorf("error %v while while checking IsIOInProgress", err.Error())
}
// check last four entries status received in the response
for i := len(resp.ResultList.Result[0].VolumeResult) - 1; i >= (len(resp.ResultList.Result[0].VolumeResult)-4) && i >= 0; i-- {
if resp.ResultList.Result[0].VolumeResult[i].IoRate > 0.0 && checkIfEntryIsLatest(resp.ResultList.Result[0].VolumeResult[i].Timestamp) {
fileMetrics := resp.ResultList.Result
for i := 0; i < len(fileMetrics); i++ {
if fileMetrics[i].PercentBusy > 0.0 && checkIfEntryIsLatest(fileMetrics[i].Timestamp) {
return nil
}
}
return fmt.Errorf("no IOInProgress")
}
// nfs volume type logic
resp, err := pmaxClient.GetFileSystemMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
if err != nil {
log.Errorf("Error %v while checking IsIOInProgress for array having symID %s for volumeId %s", err.Error(), symID, volID)
return fmt.Errorf("error %v while while checking IsIOInProgress", err.Error())
}
// check last four entries status recieved in the response
fileMetrics := resp.ResultList.Result
for i := 0; i < len(fileMetrics); i++ {
if fileMetrics[i].PercentBusy > 0.0 && checkIfEntryIsLatest(fileMetrics[i].Timestamp) {
// check last four entries status received in the response
for i := len(resp.ResultList.Result[0].VolumeResult) - 1; i >= (len(resp.ResultList.Result[0].VolumeResult)-4) && i >= 0; i-- {
if resp.ResultList.Result[0].VolumeResult[i].IoRate > 0.0 && checkIfEntryIsLatest(resp.ResultList.Result[0].VolumeResult[i].Timestamp) {
return nil
}
}
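
Reassembled, the new IsIOInProgress flow is roughly the following (a sketch of the hunk above, not the verbatim function; the gopowermax calls and helper names are the ones shown in the diff):

resp, err := pmaxClient.GetVolumesMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
if err != nil {
	// volID may actually be a file-system ID (NFS volume): fall back to file metrics.
	resp, err := pmaxClient.GetFileSystemMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
	if err != nil {
		return fmt.Errorf("error %v while checking IsIOInProgress", err.Error())
	}
	for _, m := range resp.ResultList.Result {
		if m.PercentBusy > 0.0 && checkIfEntryIsLatest(m.Timestamp) {
			return nil // recent file-system I/O observed
		}
	}
	return fmt.Errorf("no IOInProgress")
}
// Block volume: walk the last four samples looking for nonzero I/O.
vols := resp.ResultList.Result[0].VolumeResult
for i := len(vols) - 1; i >= len(vols)-4 && i >= 0; i-- {
	if vols[i].IoRate > 0.0 && checkIfEntryIsLatest(vols[i].Timestamp) {
		return nil
	}
}
return fmt.Errorf("no IOInProgress")
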
23 changes: 21 additions & 2 deletions service/features/csi_extension.feature
@@ -68,7 +68,6 @@ Feature: PowerMax CSI interface
| "none" | "no IOInProgress" |
| "InvalidSymID" | "not found" |
| "GetArrayPerfKeyError" | "getting keys" |
| "GetVolumesMetricsError" | "error" |
| "GetFreshMetrics" | "none" |

@resiliency
@@ -83,4 +82,24 @@
Examples:
| induced | error |
| "none" | "no IOInProgress" |
| "GetFileSysMetricsError" | "error" |

@resiliency
@v2.11.0
Scenario: call IsIOInProgress and get fileSystem metric
Given a PowerMax service
And I call fileSystem CreateVolume "volume1"
Then a valid CreateVolumeResponse is returned
And I induce error "GetVolumesMetricsError"
When I call IsIOInProgress
Then the error contains "no IOInProgress"

@resiliency
@v2.11.0
Scenario: call IsIOInProgress and get Metric error
Given a PowerMax service
And I call fileSystem CreateVolume "volume1"
Then a valid CreateVolumeResponse is returned
And I induce error "GetFileSysMetricsError"
And I induce error "GetVolumesMetricsError"
When I call IsIOInProgress
Then the error contains "error"
4 changes: 2 additions & 2 deletions service/features/delete_volume.feature
@@ -89,7 +89,7 @@ Feature: PowerMax CSI interface
| "InvalidVolumeID" | "Could not parse" |
| "UpdateVolumeError" | "Failed to rename volume" |
| "GetStorageGroupError" | "Unable to find storage group" |
| "GetVolumeError" | "Could not retrieve volume" |
| "GetVolumeError" | "cannot be found" |

@delete
@v1.0.0
@@ -185,4 +185,4 @@
And a valid CreateVolumeResponse is returned
And I induce error "GetFileSystemError"
When I call fileSystem DeleteVolume
Then no error was received
Then no error was received
28 changes: 15 additions & 13 deletions service/node.go
@@ -1003,15 +1003,15 @@ func (s *service) nodeProbeBySymID(ctx context.Context, symID string) error {
for _, target := range s.nvmeTargets[symID] {
for _, session := range sessions {
log.Debugf("matching %v with %v", target, session)
if session.Target == target && session.NVMESessionState == gonvme.NVMESessionStateLive {
if strings.HasPrefix(target, session.Target) && session.NVMESessionState == gonvme.NVMESessionStateLive {
if s.useNFS {
s.useNFS = false
}
return nil
}
}
}
return fmt.Errorf("no active iscsi sessions")
return fmt.Errorf("no active nvme sessions")
}
return fmt.Errorf("no active sessions")
}
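
The session-matching change in this hunk replaces an exact comparison with strings.HasPrefix, which tolerates a cached target NQN that carries a trailing qualifier the live session's target string omits. A self-contained illustration (the NQN values are made up for the example):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical NQNs: the cached target carries an extra qualifier.
	target := "nqn.1988-11.com.dell:powermax:00:000000000001:node1"
	sessionTarget := "nqn.1988-11.com.dell:powermax:00:000000000001"

	// The old exact comparison misses the live session...
	fmt.Println(target == sessionTarget) // false

	// ...while the prefix check from the hunk above matches it.
	fmt.Println(strings.HasPrefix(target, sessionTarget)) // true
}
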
@@ -1756,24 +1756,26 @@ func (s *service) nodeHostSetup(ctx context.Context, portWWNs []string, IQNs []s
s.useFC = true
}

validNVMeTCPs, err := s.verifyAndUpdateInitiatorsInADiffHost(ctx, symID, NQNs, hostIDNVMeTCP, pmaxClient)
if err != nil {
log.Error("Could not validate NVMeTCP initiators " + err.Error())
} else if len(validNVMeTCPs) > 0 && s.opts.TransportProtocol == "" || s.opts.TransportProtocol == NvmeTCPTransportProtocol {
// If pre-existing NVMeTCP initiators are not found, initiators/host should be created
s.useNVMeTCP = true
}
log.Infof("valid NVMeTCP initiators: %v", validNVMeTCPs)

validIscsis, err := s.verifyAndUpdateInitiatorsInADiffHost(ctx, symID, IQNs, hostIDIscsi, pmaxClient)
if err != nil {
log.Error("Could not validate iSCSI initiators" + err.Error())
} else if s.opts.TransportProtocol == "" || s.opts.TransportProtocol == IscsiTransportProtocol {
// We do not have to have pre-existing initiators to use Iscsi (we can create them)
s.useIscsi = true
}
log.Infof("valid ISCSI initiators: %v", validIscsis)

validNVMeTCPs, err := s.verifyAndUpdateInitiatorsInADiffHost(ctx, symID, NQNs, hostIDNVMeTCP, pmaxClient)
if err != nil {
log.Error("Could not validate NVMeTCP initiators " + err.Error())
}
log.Infof("valid NVMeTCP initiators: %v", validNVMeTCPs)

if s.opts.TransportProtocol == "" || s.opts.TransportProtocol == NvmeTCPTransportProtocol {
// If pre-existing NVMeTCP initiators are not found, initiators/host should be created
s.useNVMeTCP = true
log.Infof("valid (existing) iSCSI initiators (must be manually created): %v", validIscsis)
if len(validIscsis) == 0 {
// IQNs are not yet part of any host on Unisphere
validIscsis = IQNs
}

if !s.useFC && !s.useIscsi && !s.useNVMeTCP {
