Allow setting storage mappings for OVA disks
* Exposes OVA appliance disks as mappable storage
  in the OVA inventory collector.
* Permits mapping any or all OVA appliance disks
  to their own destination storage classes.
* Any disks that are not explicitly mapped are
  assigned to a default storage class, which must
  itself have an entry in the storage map (see the
  sketch below).

Signed-off-by: Sam Lucidi <slucidi@redhat.com>
mansam committed Dec 16, 2024
1 parent b0b9ae7 commit f4e7850
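To make the new mapping behavior concrete, here is a minimal, hypothetical sketch of the storage map entries a plan author might provide after this change: one appliance disk is pinned to its own storage class, and the reserved "default" source catches every other disk. The pair type name, the disk ID, and the storage class names are assumptions inferred from the diff below (a Source ref with an ID, and a Destination with StorageClass/AccessMode/VolumeMode), not verbatim from this commit.

    package example

    import (
        "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
        "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref"
    )

    // exampleStorageMapEntries sketches a storage map for an OVA plan:
    // one disk mapped explicitly, the rest falling through to "default".
    // The StoragePair type name, disk ID, and class names are illustrative only.
    func exampleStorageMapEntries() []v1beta1.StoragePair {
        return []v1beta1.StoragePair{
            {
                // One appliance disk mapped to its own storage class.
                Source:      ref.Ref{ID: "vm1-disk1.vmdk"},
                Destination: v1beta1.DestinationStorage{StorageClass: "fast-ssd"},
            },
            {
                // Catch-all entry for every disk that is not mapped explicitly;
                // "default" is the reserved source ID introduced by this commit.
                Source:      ref.Ref{ID: "default"},
                Destination: v1beta1.DestinationStorage{StorageClass: "standard"},
            },
        }
    }

Any disk without an explicit entry would then receive a DataVolume built from the "default" destination, per the builder changes below.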
Showing 6 changed files with 182 additions and 46 deletions.
pkg/controller/plan/adapter/ova/builder.go (77 additions, 31 deletions)
@@ -10,6 +10,7 @@ import (
     "strconv"
     "strings"
 
+    "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
     "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/plan"
     "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1/ref"
     planbase "github.com/konveyor/forklift-controller/pkg/controller/plan/adapter/base"
@@ -61,6 +62,11 @@ const (
     Unknown = "unknown"
 )
 
+// Default Storage
+const (
+    DefaultStorageID = "default"
+)
+
 // Regex which matches the snapshot identifier suffix of a
 // OVA disk backing file.
 var backingFilePattern = regexp.MustCompile(`-\d\d\d\d\d\d.vmdk`)
@@ -154,48 +160,88 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, _ *core.Config
         return
     }
 
-    // For OVA provider we are assuming a single storage mapping.
-    dsMapIn := r.Context.Map.Storage.Spec.Map
-    for _, mapped := range dsMapIn {
+    var defaultMapping *v1beta1.DestinationStorage
+    mappedDiskIds := make(map[string]bool)
+    storageMapIn := r.Context.Map.Storage.Spec.Map
+    for i := range storageMapIn {
+        mapped := &storageMapIn[i]
         ref := mapped.Source
         storage := &model.Storage{}
         fErr := r.Source.Inventory.Find(storage, ref)
         if fErr != nil {
             err = fErr
             return
         }
+        if storage.ID == DefaultStorageID {
+            defaultMapping = &mapped.Destination
+            continue
+        }
         for _, disk := range vm.Disks {
-            diskSize, err := getResourceCapacity(disk.Capacity, disk.CapacityAllocationUnits)
-            if err != nil {
-                return nil, err
-            }
-            storageClass := mapped.Destination.StorageClass
-            dvSource := cdi.DataVolumeSource{
-                Blank: &cdi.DataVolumeBlankImage{},
-            }
-            dvSpec := cdi.DataVolumeSpec{
-                Source: &dvSource,
-                Storage: &cdi.StorageSpec{
-                    Resources: core.ResourceRequirements{
-                        Requests: core.ResourceList{
-                            core.ResourceStorage: *resource.NewQuantity(diskSize, resource.BinarySI),
-                        },
-                    },
-                    StorageClassName: &storageClass,
-                },
-            }
-            // set the access mode and volume mode if they were specified in the storage map.
-            // otherwise, let the storage profile decide the default values.
-            if mapped.Destination.AccessMode != "" {
-                dvSpec.Storage.AccessModes = []core.PersistentVolumeAccessMode{mapped.Destination.AccessMode}
-            }
-            if mapped.Destination.VolumeMode != "" {
-                dvSpec.Storage.VolumeMode = &mapped.Destination.VolumeMode
-            }
-
-            dv := dvTemplate.DeepCopy()
-            dv.Spec = dvSpec
-            updateDataVolumeAnnotations(dv, &disk)
-            dvs = append(dvs, *dv)
+            if disk.ID == storage.ID {
+                var dv *cdi.DataVolume
+                dv, err = r.mapDataVolume(disk, mapped.Destination, dvTemplate)
+                if err != nil {
+                    return
+                }
+                dvs = append(dvs, *dv)
+                mappedDiskIds[disk.ID] = true
+            }
         }
     }
 
+    for _, disk := range vm.Disks {
+        if !mappedDiskIds[disk.ID] {
+            if defaultMapping == nil {
+                err = liberr.New("VM has unmapped disks and no default disk mapping is set.", "vm", vm.ID)
+                return
+            }
+            var dv *cdi.DataVolume
+            dv, err = r.mapDataVolume(disk, *defaultMapping, dvTemplate)
+            if err != nil {
+                return
+            }
+            dvs = append(dvs, *dv)
+        }
+    }
+
     return
 }
 
+func (r *Builder) mapDataVolume(disk ova.Disk, destination v1beta1.DestinationStorage, dvTemplate *cdi.DataVolume) (dv *cdi.DataVolume, err error) {
+    diskSize, err := getResourceCapacity(disk.Capacity, disk.CapacityAllocationUnits)
+    if err != nil {
+        return
+    }
+    storageClass := destination.StorageClass
+    dvSource := cdi.DataVolumeSource{
+        Blank: &cdi.DataVolumeBlankImage{},
+    }
+    dvSpec := cdi.DataVolumeSpec{
+        Source: &dvSource,
+        Storage: &cdi.StorageSpec{
+            Resources: core.ResourceRequirements{
+                Requests: core.ResourceList{
+                    core.ResourceStorage: *resource.NewQuantity(diskSize, resource.BinarySI),
+                },
+            },
+            StorageClassName: &storageClass,
+        },
+    }
+    // set the access mode and volume mode if they were specified in the storage map.
+    // otherwise, let the storage profile decide the default values.
+    if destination.AccessMode != "" {
+        dvSpec.Storage.AccessModes = []core.PersistentVolumeAccessMode{destination.AccessMode}
+    }
+    if destination.VolumeMode != "" {
+        dvSpec.Storage.VolumeMode = &destination.VolumeMode
+    }
+
+    dv = dvTemplate.DeepCopy()
+    dv.Spec = dvSpec
+    updateDataVolumeAnnotations(dv, &disk)
+    return
+}
+
 func updateDataVolumeAnnotations(dv *cdi.DataVolume, disk *ova.Disk) {
     if dv.ObjectMeta.Annotations == nil {
         dv.ObjectMeta.Annotations = make(map[string]string)
pkg/controller/plan/adapter/ova/validator.go (21 additions, 2 deletions)
@@ -84,8 +84,27 @@ func (r *Validator) PodNetwork(vmRef ref.Ref) (ok bool, err error) {
 
 // Validate that a VM's disk backing storage has been mapped.
 func (r *Validator) StorageMapped(vmRef ref.Ref) (ok bool, err error) {
-    //For OVA providers, we don't have an actual storage connected,
-    // since we use a dummy storage for mapping the function should always return true.
+    if r.plan.Referenced.Map.Storage == nil {
+        return
+    }
+    vm := &model.VM{}
+    err = r.inventory.Find(vm, vmRef)
+    if err != nil {
+        err = liberr.Wrap(err, "vm", vmRef.String())
+        return
+    }
+
+    // If a default mapping is defined, that satisfies the requirement.
+    if r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: DefaultStorageID}) {
+        ok = true
+        return
+    }
+
+    for _, disk := range vm.Disks {
+        if !r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: disk.ID}) {
+            return
+        }
+    }
     ok = true
     return
 }
pkg/controller/provider/container/ova/model.go (81 additions, 10 deletions)
@@ -3,7 +3,6 @@ package ova
 import (
     "context"
     "errors"
-    "fmt"
 
     api "github.com/konveyor/forklift-controller/pkg/apis/forklift/v1beta1"
     model "github.com/konveyor/forklift-controller/pkg/controller/provider/model/ova"
@@ -12,6 +11,12 @@ import (
     "github.com/konveyor/forklift-controller/pkg/lib/logging"
 )
 
+// Default Storage
+const (
+    DefaultStorageID   = "default"
+    DefaultStorageName = "Default"
+)
+
 // All adapters.
 var adapterList []Adapter
 
@@ -356,33 +361,99 @@ type StorageAdapter struct {
 }
 
 func (r *StorageAdapter) GetUpdates(ctx *Context) (updates []Updater, err error) {
+    disks := []Disk{}
+    err = ctx.client.list("disks", &disks)
+    if err != nil {
+        return
+    }
+    for i := range disks {
+        disk := &disks[i]
+        updater := func(tx *libmodel.Tx) (err error) {
+            m := &model.Storage{
+                Base: model.Base{
+                    ID: disk.ID,
+                },
+            }
+            err = tx.Get(m)
+            if err != nil {
+                if errors.Is(err, libmodel.NotFound) {
+                    m.Name = disk.Name
+                    err = tx.Insert(m)
+                }
+                return
+            }
+            m.Name = disk.Name
+            err = tx.Update(m)
+            return
+        }
+        updates = append(updates, updater)
+    }
     return
 }
 
 // List the collection.
 func (r *StorageAdapter) List(ctx *Context, provider *api.Provider) (itr fb.Iterator, err error) {
-    storageName := fmt.Sprintf("Dummy storage for source provider %s", provider.Name)
-    dummyStorge := Storage{
-        Name: storageName,
-        ID:   string(provider.UID),
+    diskList := []Disk{}
+    err = ctx.client.list("disks", &diskList)
+    if err != nil {
+        return
     }
     list := fb.NewList()
     m := &model.Storage{
         Base: model.Base{
-            ID:   dummyStorge.ID,
-            Name: dummyStorge.Name,
+            ID:   DefaultStorageID,
+            Name: DefaultStorageName,
         },
     }
-    dummyStorge.ApplyTo(m)
     list.Append(m)
+
+    for _, object := range diskList {
+        m := &model.Storage{
+            Base: model.Base{
+                ID:   object.ID,
+                Name: object.Name,
+            },
+        }
+        list.Append(m)
+    }
+
     itr = list.Iter()
+
     return
 }
 
 func (r *StorageAdapter) DeleteUnexisting(ctx *Context) (deletions []Updater, err error) {
-    // Each provider have only one storage hence it can't be changed,
-    // Will be removed only if the provider deleted.
+    storageList := []model.Storage{}
+    err = ctx.db.List(&storageList, libmodel.FilterOptions{})
+    if err != nil {
+        if errors.Is(err, libmodel.NotFound) {
+            err = nil
+        }
+        return
+    }
+    inventory := make(map[string]bool)
+    for _, storage := range storageList {
+        inventory[storage.ID] = true
+    }
+    disks := []Disk{}
+    err = ctx.client.list("disks", &disks)
+    if err != nil {
+        return
+    }
+    gone := []string{}
+    for _, disk := range disks {
+        if _, found := inventory[disk.ID]; !found {
+            gone = append(gone, disk.ID)
+        }
+    }
+    for _, id := range gone {
+        updater := func(tx *libmodel.Tx) (err error) {
+            m := &model.Storage{
+                Base: model.Base{ID: id},
+            }
+            return tx.Delete(m)
+        }
+        deletions = append(deletions, updater)
+    }
     return
 }
pkg/controller/provider/web/ova/client.go (1 addition, 0 deletions)
@@ -298,6 +298,7 @@ func (r *Finder) Storage(ref *base.Ref) (object interface{}, err error) {
     storage := &Storage{}
     err = r.ByRef(storage, *ref)
     if err == nil {
+        ref.ID = storage.ID
         ref.Name = storage.Name
         object = storage
     }
tests/suit/framework/ova.go (2 additions, 2 deletions)
@@ -19,7 +19,7 @@ func (r *OvaClient) LoadSourceDetails() (vm *OvaVM, err error) {
 
     r.vmData.testVMId = "c5686650854d1e69b4123f4bf2e70fe1ed2a"
     r.vmData.testNetworkID = "ae1badc8c693926f492a01e2f357d6af321b"
-    r.vmData.testStorageName = "Dummy storage for source provider ova-provider"
+    r.vmData.testStorageName = "centos44_new-disk1.vmdk"
     return &r.vmData, nil
 }

@@ -39,7 +39,7 @@ func (r *OvaClient) GetNfsServerForOva(k8sClient *kubernetes.Clientset) (string,
     }
     nfsShare := server + ":" + share
     if nfsShare == "" {
-        return "", errors.New("failed to fatch NFS settings")
+        return "", errors.New("failed to fetch NFS settings")
     }
 
     r.nfsPath = nfsShare
tests/suit/ova_test.go (0 additions, 1 deletion)
@@ -80,6 +80,5 @@ var _ = Describe("[level:component]Migration tests for OVA provider", func() {
         Expect(err).ToNot(HaveOccurred())
         err = utils.WaitForMigrationSucceededWithTimeout(f.CrClient, provider.Namespace, test_migration_name, 900*time.Second)
         Expect(err).ToNot(HaveOccurred())
-
     })
 })
