diff --git a/aws/aws.go b/aws/aws.go
index 243f4134d..284d37ded 100644
--- a/aws/aws.go
+++ b/aws/aws.go
@@ -29,11 +29,11 @@ import (
 )
 
 const (
-	awsDevicePrefix      = "/dev/sd"
-	awsDevicePrefixWithX = "/dev/xvd"
-	awsDevicePrefixWithH = "/dev/hd"
-	awsDevicePrefixNvme  = "/dev/nvme"
-	contextTimeout       = 30 * time.Second
+	awsDevicePrefix              = "/dev/sd"
+	awsDevicePrefixWithX         = "/dev/xvd"
+	awsDevicePrefixWithH         = "/dev/hd"
+	awsDevicePrefixNvme          = "/dev/nvme"
+	contextTimeout               = 30 * time.Second
 	awsErrorModificationNotFound = "InvalidVolumeModification.NotFound"
 )
 
@@ -72,7 +72,9 @@ func NewClient() (cloudops.Ops, error) {
 			return nil, err
 		}
 	}
-
+	if len(zone) == 0 {
+		return nil, fmt.Errorf("env does not have information about zone")
+	}
 	region := zone[:len(zone)-1]
 	awsCreds, err := awscredentials.NewAWSCredentials("", "", "", runningOnEc2)
 	if err != nil {
@@ -215,6 +217,9 @@ func (s *awsOps) waitAttachmentStatus(
 
 			var actual string
 			vol := awsVols.Volumes[0]
+			if vol == nil {
+				return nil, false, fmt.Errorf("nil volume for %s", volumeID)
+			}
 			awsAttachment := vol.Attachments
 			if awsAttachment == nil || len(awsAttachment) == 0 {
 				// We have encountered scenarios where AWS returns a nil attachment state
@@ -298,6 +303,9 @@ func (s *awsOps) InspectInstanceGroupForInstance(instanceID string) (*cloudops.I
 	}
 
 	group := result.AutoScalingGroups[0]
+	if group == nil {
+		return nil, fmt.Errorf("nil AutoScalingGroups group")
+	}
+
 	zones := make([]string, 0)
 	for _, z := range group.AvailabilityZones {
 		zones = append(zones, *z)
@@ -759,6 +768,9 @@ func (s *awsOps) Enumerate(
 
 	// Volume sets are identified by volumes with the same setIdentifer.
 	for _, vol := range awsVols.Volumes {
+		if vol == nil {
+			continue
+		}
 		if s.deleted(vol) {
 			continue
 		}
@@ -796,7 +808,18 @@ func (s *awsOps) Create(
 		return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
 			"Drive type not specified in the storage spec", "")
 	}
-
+	if vol.AvailabilityZone == nil {
+		return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
+			"AvailabilityZone not specified in the storage spec", "")
+	}
+	if vol.Size == nil {
+		return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
+			"Size not specified in the storage spec", "")
+	}
+	if vol.SnapshotId == nil {
+		return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
+			"SnapshotId not specified in the storage spec", "")
+	}
 	req := &ec2.CreateVolumeInput{
 		AvailabilityZone: vol.AvailabilityZone,
 		Encrypted:        vol.Encrypted,
@@ -965,7 +988,9 @@ func (s *awsOps) AreVolumesReadyToExpand(volumeIDs []*string) (bool, error) {
 		}
 	}
 	states := describeOutput.VolumesModifications
-
+	if states == nil {
+		return false, fmt.Errorf("information about the volume modifications is nil")
+	}
 	var state string
 	for i := 0; i < len(states); i++ {
 		if states[i] == nil || states[i].ModificationState == nil {
@@ -977,7 +1002,7 @@ func (s *awsOps) AreVolumesReadyToExpand(volumeIDs []*string) (bool, error) {
 		logrus.Infof("retrived volume modification state: %s for volume id: %s", state, *volumeIDs[i])
 		if state == ec2.VolumeModificationStateModifying ||
 			state == ec2.VolumeModificationStateOptimizing {
-			return false, fmt.Errorf("aws has not fully completed the last modification: " +
+			return false, fmt.Errorf("aws has not fully completed the last modification: "+
 				"volume %s is in %s state. please retry later", *volumeIDs[i], state)
 		}
 	}
@@ -993,6 +1018,14 @@ func (s *awsOps) Expand(
 	if err != nil {
 		return 0, err
 	}
+	if vol.Size == nil {
+		return 0, cloudops.NewStorageError(cloudops.ErrVolInval,
+			"Size not specified in the storage spec", "")
+	}
+	if vol.VolumeId == nil {
+		return 0, cloudops.NewStorageError(cloudops.ErrVolInval,
+			"VolumeId not specified in the storage spec", "")
+	}
 	currentSizeInGiB := uint64(*vol.Size)
 	if currentSizeInGiB >= newSizeInGiB {
 		return currentSizeInGiB, cloudops.NewStorageError(cloudops.ErrDiskGreaterOrEqualToExpandSize,
@@ -1011,7 +1044,17 @@ func (s *awsOps) Expand(
 		return currentSizeInGiB, fmt.Errorf("failed to modify AWS volume for %v: %v", volumeID, err)
 	}
-
+	if output.VolumeModification == nil {
+		return currentSizeInGiB, fmt.Errorf("failed to modify AWS volume for %v: %v", volumeID, "VolumeModification is nil")
+	}
+	if output.VolumeModification.ModificationState == nil {
+		return currentSizeInGiB, fmt.Errorf("failed to modify AWS volume for %v: %v", volumeID, "ModificationState is nil")
+	}
+
 	if string(*output.VolumeModification.ModificationState) == ec2.VolumeModificationStateCompleted {
+		if output.VolumeModification.TargetSize == nil {
+			return currentSizeInGiB, fmt.Errorf("failed to modify AWS volume for %v: %v", volumeID, "TargetSize is nil")
+		}
 		return uint64(*output.VolumeModification.TargetSize), nil
 	}
@@ -1180,6 +1223,9 @@ func DescribeInstanceByID(service *ec2.EC2, id string) (*ec2.Instance, error) {
 	if err != nil {
 		return nil, err
 	}
+	if out.Reservations == nil {
+		return nil, fmt.Errorf("DescribeInstances(%v) returned nil Reservations", id)
+	}
 	if len(out.Reservations) != 1 {
 		return nil, fmt.Errorf("DescribeInstances(%v) returned %v reservations, expect 1",
 			id, len(out.Reservations))
diff --git a/azure/azure.go b/azure/azure.go
index bb7a53b2c..0a8b1b05b 100644
--- a/azure/azure.go
+++ b/azure/azure.go
@@ -371,14 +371,20 @@ func (a *azureOps) Create(
 	options map[string]string,
 ) (interface{}, error) {
 	d, ok := template.(*compute.Disk)
-	if !ok || d.DiskProperties == nil || d.DiskProperties.DiskSizeGB == nil {
+	if !ok || d.DiskProperties == nil {
 		return nil, cloudops.NewStorageError(
 			cloudops.ErrVolInval,
 			"Invalid volume template given",
 			a.instance,
 		)
 	}
-
+	if d.DiskProperties.DiskSizeGB == nil {
+		return nil, cloudops.NewStorageError(
+			cloudops.ErrVolInval,
+			"DiskSizeGB not specified in the storage spec",
+			a.instance,
+		)
+	}
 	// Check if the disk already exists; return err if it does
 	_, err := a.disksClient.Get(
 		context.Background(),
@@ -707,6 +713,13 @@ func (a *azureOps) DeviceMappings() (map[string]string, error) {
 
 	devMap := make(map[string]string)
 	for _, d := range dataDisks {
+		if d.Lun == nil {
+			return nil, cloudops.NewStorageError(
+				cloudops.ErrInvalidDevicePath,
+				"lun is nil for disk",
+				a.instance,
+			)
+		}
 		devPath, err := lunToBlockDevPath(*d.Lun)
 		if err != nil {
 			return nil, cloudops.NewStorageError(
@@ -715,6 +728,9 @@ func (a *azureOps) DeviceMappings() (map[string]string, error) {
 				a.instance,
 			)
 		}
+		if d.Name == nil {
+			continue
+		}
 		devMap[devPath] = *d.Name
 	}
 
@@ -809,9 +825,19 @@ func (a *azureOps) devicePath(diskName string) (string, error) {
 	}
 
 	for _, d := range dataDisks {
+		if d.Name == nil {
+			continue
+		}
 		if *d.Name == diskName {
 			// Retry to get the block dev path as it may take few seconds for the path
 			// to be created even after the disk shows attached.
+			if d.Lun == nil {
+				return "", cloudops.NewStorageError(
+					cloudops.ErrInvalidDevicePath,
+					"Lun is nil for disk",
+					a.instance,
+				)
+			}
 			devPath, err := lunToBlockDevPathWithRetry(*d.Lun)
 			if err == nil {
 				return devPath, nil
@@ -1046,6 +1072,9 @@ func (a *azureOps) waitForDetach(diskName, instance string) error {
 			}
 
 			for _, d := range dataDisks {
+				if d.Name == nil {
+					continue
+				}
 				if *d.Name == diskName {
 					return nil, true,
 						fmt.Errorf("disk %s is still attached to instance %s",
diff --git a/gce/gce.go b/gce/gce.go
index a85061112..b10d5cc41 100644
--- a/gce/gce.go
+++ b/gce/gce.go
@@ -467,6 +467,9 @@ func (s *gceOps) Delete(id string, options map[string]string) error {
 	if err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error {
 		for _, diskScopedList := range page.Items {
 			for _, disk := range diskScopedList.Disks {
+				if disk == nil {
+					continue
+				}
 				if disk.Name == id {
 					found = true
 					operation, err := s.computeService.Disks.Delete(s.inst.project, path.Base(disk.Zone), id).Do()
@@ -550,6 +553,9 @@ func (s *gceOps) DeviceMappings() (map[string]string, error) {
 	}
 	m := make(map[string]string)
 	for _, d := range instance.Disks {
+		if d == nil {
+			continue
+		}
 		if d.Boot {
 			continue
 		}
@@ -626,6 +632,9 @@ func (s *gceOps) Enumerate(
 	}
 
 	for _, disk := range allDisks {
+		if disk == nil {
+			continue
+		}
 		if len(setIdentifier) == 0 {
 			cloudops.AddElementToMap(sets, disk, cloudops.SetIdentifierNone)
 		} else {
@@ -675,7 +684,6 @@ func (s *gceOps) Expand(
 	newSizeInGiB uint64,
 	options map[string]string,
 ) (uint64, error) {
-
 	vol, err := s.computeService.Disks.Get(s.inst.project, s.inst.zone, volumeID).Do()
 	if err != nil {
 		return 0, err
 	}
@@ -727,6 +735,9 @@ func (s *gceOps) Inspect(diskNames []*string, options map[string]string) ([]inte
 
 	var disks []interface{}
 	for _, id := range diskNames {
+		if id == nil {
+			continue
+		}
 		if d, ok := allDisks[*id]; ok {
 			disks = append(disks, d)
 		} else {
diff --git a/oracle/oracle.go b/oracle/oracle.go
index 5af0dd5e8..16ec4cbe8 100644
--- a/oracle/oracle.go
+++ b/oracle/oracle.go
@@ -312,8 +312,14 @@ func (o *oracleOps) InspectInstanceGroupForInstance(instanceID string) (*cloudop
 
 	zones := []string{}
 	for _, placementConfig := range nodePoolDetails.NodePool.NodeConfigDetails.PlacementConfigs {
+		if placementConfig.AvailabilityDomain == nil {
+			continue
+		}
 		zones = append(zones, *placementConfig.AvailabilityDomain)
 	}
+	if nodePoolDetails.NodeConfigDetails.Size == nil {
+		return nil, fmt.Errorf("node pool size is nil")
+	}
 	size := int64(*nodePoolDetails.NodeConfigDetails.Size)
 
 	return &cloudops.InstanceGroupInfo{
@@ -431,7 +437,6 @@ func (o *oracleOps) Create(template interface{}, labels map[string]string, optio
 		return nil, cloudops.NewStorageError(cloudops.ErrVolInval,
 			"Invalid volume template given", "")
 	}
-
 	createVolReq := core.CreateVolumeRequest{
 		CreateVolumeDetails: core.CreateVolumeDetails{
 			CompartmentId: &o.compartmentID,
@@ -763,6 +768,9 @@ func (o *oracleOps) FreeDevices(
 
 func (o *oracleOps) GetDeviceID(vol interface{}) (string, error) {
 	if d, ok := vol.(*core.Volume); ok {
+		if d == nil {
+			return "", fmt.Errorf("volume is nil")
+		}
 		return *d.Id, nil
 	}
 	return "", fmt.Errorf("invalid type: %v given to GetDeviceID", vol)
@@ -823,6 +831,9 @@ func (o *oracleOps) DeleteInstance(instanceID string, zone string, timeout time.
 func nodePoolContainsNode(s []containerengine.Node, e string) bool {
 	for _, v := range s {
+		if v.Id == nil {
+			continue
+		}
 		if *v.Id == e {
 			return true
 		}
@@ -844,6 +855,9 @@ func (o *oracleOps) Expand(volumeID string, newSizeInGiB uint64, options map[str
 		return 0, err
 	}
 
+	if volume.SizeInGBs == nil {
+		return 0, errors.New("volume size is nil")
+	}
 	currentsize := uint64(*volume.SizeInGBs)
 
 	if (currentsize > newSizeInGiB) || (currentsize == newSizeInGiB) {
@@ -862,6 +876,9 @@ func (o *oracleOps) Expand(volumeID string, newSizeInGiB uint64, options map[str
 		return 0, err
 	}
+	if updateVolResp.Id == nil {
+		return 0, errors.New("the OCID of the volume is nil")
+	}
 	oracleVol, err := o.waitVolumeStatus(*updateVolResp.Id, core.VolumeLifecycleStateAvailable)
 	if err != nil {
 		return 0, err
 	}
@@ -928,7 +945,9 @@ func (o *oracleOps) SetInstanceGroupVersion(instanceGroupName string, version st
 	//To apply changes on existing worker nodes, first, scale the node pool to 0 .
 	//Then scale up to original nodepool size with upgraded version
 	//https://docs.oracle.com/en-us/iaas/Content/knownissues.htm#contengworkernodepropertiesoutofsync
-
+	if instanceGroupID == nil {
+		return errors.New("the OCID of the node pool is nil")
+	}
 	updateResp, err := o.scaleDownToZeroThenScaleUp(instanceGroupName, *instanceGroupID, nodePools, timeout)
 	if err != nil {
 		return err
@@ -998,9 +1017,15 @@ func (o *oracleOps) Enumerate(volumeIds []*string,
 	}
 	volIDsMap := map[string]string{}
 	for _, volIds := range volumeIds {
+		if volIds == nil {
+			continue
+		}
 		volIDsMap[*volIds] = *volIds
 	}
 	for _, vol := range resp.Items {
+		if vol.Id == nil {
+			continue
+		}
 		_, ok := volIDsMap[*vol.Id]
 		if !ok {
 			continue