2 changes: 2 additions & 0 deletions api/src/main/java/com/cloud/agent/api/to/DiskTO.java
@@ -29,6 +29,8 @@ public class DiskTO {
public static final String CHAP_TARGET_SECRET = "chapTargetSecret";
public static final String SCSI_NAA_DEVICE_ID = "scsiNaaDeviceId";
public static final String MANAGED = "managed";
public static final String SCOPE = "scope";
public static final String INTER_CLUSTER_MIGRATION = "interClusterMigration";
public static final String IQN = "iqn";
public static final String STORAGE_HOST = "storageHost";
public static final String STORAGE_PORT = "storagePort";
UnmountDatastoresCommand.java (new file)
@@ -0,0 +1,42 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.agent.api;

import java.util.List;

public class UnmountDatastoresCommand extends Command {
private List<String> datastorePools;

public UnmountDatastoresCommand() {

}

public UnmountDatastoresCommand(List<String> datastorePools) {
this.datastorePools = datastorePools;
}

public List<String> getDatastorePools() {
return datastorePools;
}

@Override
public boolean executeInSequence() {
return false;
}
}
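
For orientation, a minimal usage sketch of the new command (the IQN value is hypothetical; the dispatch mirrors the postMigrationRelease helper added in VirtualMachineManagerImpl below):

// Sketch: collect the per-volume iSCSI names and ask the origin host's agent
// to unmount the backing datastores after a successful migration.
List<String> volumeIqns = Collections.singletonList("iqn.2010-01.com.solidfire:example-target"); // hypothetical IQN
UnmountDatastoresCommand cmd = new UnmountDatastoresCommand(volumeIqns);
Answer answer = _agentMgr.send(srcHostId, cmd); // may throw AgentUnavailableException or OperationTimedoutException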
VolumeOrchestrationService.java
@@ -124,7 +124,7 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon

boolean storageMigration(VirtualMachineProfile vm, Map<Volume, StoragePool> volumeToPool) throws StorageUnavailableException;

-void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest);
+void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest, Long srcHostId);

void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException;

@@ -178,4 +178,6 @@ DiskProfile updateImportedVolume(Type type, DiskOffering offering, VirtualMachin
* Unmanage VM volumes
*/
void unmanageVolumes(long vmId);

List<String> postMigrationReleaseDatastoresOnOriginHost(VirtualMachineProfile profile, long vmId);
}
PrimaryDataStoreDriver.java
@@ -183,4 +183,15 @@ default boolean volumesRequireGrantAccessWhenUsed() {
default boolean zoneWideVolumesAvailableWithoutClusterMotion() {
return false;
}

/**
* Disabled by default. Set to true if the data store driver needs to unmount/revoke the volumes' datastore on the
* origin host when all of the following apply:
* - the storage is zone-wide
* - the VM migration is inter-cluster (without storage motion)
* - the hypervisor restricts the number of mounted datastores per host/cluster
*/
default boolean zoneWideVolumesDatastoreCleanupOnOriginHostAfterInterClusterMigration() {
return false;
}
}
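
A driver that needs this cleanup would override the new default. A sketch, with a hypothetical driver class name and every other interface method elided:

// Hypothetical driver opting in to origin-host datastore cleanup.
public class ExamplePrimaryDataStoreDriver implements PrimaryDataStoreDriver {
    @Override
    public boolean zoneWideVolumesDatastoreCleanupOnOriginHostAfterInterClusterMigration() {
        // Per-volume datastores on zone-wide managed storage must be
        // unmounted on the origin host after an inter-cluster migration.
        return true;
    }
    // ... remaining PrimaryDataStoreDriver methods elided for brevity
}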
VirtualMachineManagerImpl.java
@@ -47,6 +47,7 @@
import javax.naming.ConfigurationException;
import javax.persistence.EntityExistsException;

import com.cloud.agent.api.UnmountDatastoresCommand;
import com.cloud.event.ActionEventUtils;
import com.google.gson.Gson;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@@ -2754,7 +2755,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
profile.setHost(dest.getHost());

_networkMgr.prepareNicForMigration(profile, dest);
-volumeMgr.prepareForMigration(profile, dest);
+volumeMgr.prepareForMigration(profile, dest, srcHostId);
profile.setConfigDriveLabel(VmConfigDriveLabel.value());
updateOverCommitRatioForVmProfile(profile, dest.getHost().getClusterId());

@@ -2883,8 +2884,8 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
} else {
_networkMgr.commitNicForMigration(vmSrc, profile);
volumeMgr.release(vm.getId(), srcHostId);
postMigrationRelease(profile, vm.getId(), srcHostId);
_networkMgr.setHypervisorHostname(profile, dest, true);

updateVmPod(vm, dstHostId);
}

@@ -2893,6 +2894,20 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
}
}

private void postMigrationRelease(VirtualMachineProfile profile, long vmId, long srcHostId) {
List<String> poolsToReleaseOnOrigin = volumeMgr.postMigrationReleaseDatastoresOnOriginHost(profile, vmId);
if (CollectionUtils.isEmpty(poolsToReleaseOnOrigin)) {
return;
}
// Unmount the datastores from this host only (useful for SolidFire 1:1 plugin zone-wide storage migrations)
UnmountDatastoresCommand cmd = new UnmountDatastoresCommand(poolsToReleaseOnOrigin);
try {
_agentMgr.send(srcHostId, cmd);
} catch (AgentUnavailableException | OperationTimedoutException e) {
throw new RuntimeException(e);
[Review comment from Copilot AI, Jan 26, 2026]
Throwing a RuntimeException wrapping AgentUnavailableException or OperationTimedoutException may cause the entire migration to fail unexpectedly. Consider logging the error and handling it gracefully instead of propagating it as a RuntimeException, since this is a cleanup operation that happens after the VM has successfully migrated. The migration has already succeeded at this point (line 2887), so failing here could leave the system in an inconsistent state.

Suggested change:
-            throw new RuntimeException(e);
+            s_logger.warn("Failed to unmount datastores on origin host " + srcHostId +
+                    " after migration of VM id=" + vmId + ". Datastores to unmount: " + poolsToReleaseOnOrigin, e);
}
}

/**
* Create and set parameters for the {@link MigrateCommand} used in the migration and scaling of VMs.
*/
@@ -3243,7 +3258,7 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo
}

_networkMgr.prepareNicForMigration(profile, destination);
-volumeMgr.prepareForMigration(profile, destination);
+volumeMgr.prepareForMigration(profile, destination, srcHostId);
final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType());
final VirtualMachineTO to = hvGuru.implement(profile);

@@ -4410,7 +4425,7 @@ private void orchestrateMigrateForScale(final String vmUuid, final long srcHostI
final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm);
_networkMgr.prepareNicForMigration(profile, dest);

-volumeMgr.prepareForMigration(profile, dest);
+volumeMgr.prepareForMigration(profile, dest, srcHostId);

final VirtualMachineTO to = toVmTO(profile);
final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to);
VolumeOrchestrator.java
@@ -84,6 +84,7 @@
import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
@@ -1496,7 +1497,7 @@ public boolean storageMigration(VirtualMachineProfile vm, Map<Volume, StoragePoo
}

@Override
-public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) {
+public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest, Long srcHostId) {
List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
if (s_logger.isDebugEnabled()) {
s_logger.debug(String.format("Preparing to migrate [%s] volumes for VM [%s].", vols.size(), vm.getVirtualMachine()));
@@ -1514,7 +1515,7 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
// make sure this is done AFTER grantAccess, as grantAccess may change the volume's state
DataTO volTO = volumeInfo.getTO();
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
-disk.setDetails(getDetails(volumeInfo, dataStore));
+disk.setDetails(getDetails(volumeInfo, dataStore, vm, srcHostId));
vm.addDisk(disk);
}

@@ -1527,7 +1528,7 @@ }
}
}

-private Map<String, String> getDetails(VolumeInfo volumeInfo, DataStore dataStore) {
+private Map<String, String> getDetails(VolumeInfo volumeInfo, DataStore dataStore, VirtualMachineProfile vmProfile, Long srcHostId) {
Map<String, String> details = new HashMap<String, String>();

StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId());
@@ -1560,9 +1561,33 @@ private Map<String, String> getDetails(VolumeInfo volumeInfo, DataStore dataStor
details.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret());
}

// Zone-wide managed storage (e.g. SolidFire): inter-cluster VM migrations need the destination host to mount the LUN before migrating
if (storagePool.isManaged() && ScopeType.ZONE.equals(storagePool.getScope()) &&
isCleanupNeededOnOriginHostAfterInterClusterMigration(dataStore)) {
details.put(DiskTO.SCOPE, storagePool.getScope().name());
if (vmProfile.getHostId() != null && srcHostId != null) {
HostVO host = _hostDao.findById(vmProfile.getHostId());
HostVO lastHost = _hostDao.findById(srcHostId);
boolean interClusterMigration = isVmwareInterClusterMigration(lastHost, host);
details.put(DiskTO.INTER_CLUSTER_MIGRATION, BooleanUtils.toStringTrueFalse(interClusterMigration));
}
}

return details;
}

private boolean isCleanupNeededOnOriginHostAfterInterClusterMigration(DataStore dataStore) {
if (dataStore == null || dataStore.getDriver() == null) {
return false;
}

DataStoreDriver driver = dataStore.getDriver();
if (driver instanceof PrimaryDataStoreDriver) {
return ((PrimaryDataStoreDriver)driver).zoneWideVolumesDatastoreCleanupOnOriginHostAfterInterClusterMigration();
}
return false;
}

private void setIoDriverPolicy(Map<String, String> details, StoragePoolVO storagePool, VolumeVO volume) {
if (volume.getInstanceId() != null) {
UserVmDetailVO ioDriverPolicy = userVmDetailsDao.findDetail(volume.getInstanceId(),
@@ -1937,7 +1962,7 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId());
DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);

-disk.setDetails(getDetails(volumeInfo, dataStore));
+disk.setDetails(getDetails(volumeInfo, dataStore, vm, null));

vm.addDisk(disk);

@@ -2308,4 +2333,52 @@ public void doInTransactionWithoutResult(TransactionStatus status) {
}
});
}

protected boolean isVmwareInterClusterMigration(Host lastHost, Host host) {
if (ObjectUtils.anyNull(lastHost, host)) {
return false;
}
if (host.getHypervisorType() != HypervisorType.VMware) {
return false;
}
Long lastHostClusterId = lastHost.getClusterId();
Long clusterId = host.getClusterId();
if (ObjectUtils.anyNull(lastHostClusterId, clusterId)) {
return false;
}
return lastHostClusterId.compareTo(clusterId) != 0;
}

@Override
public List<String> postMigrationReleaseDatastoresOnOriginHost(VirtualMachineProfile profile, long vmId) {
List<String> pools = new ArrayList<>();
if (profile.getVirtualMachine() == null) {
s_logger.debug("Cannot release from source as the virtual machine profile is not set");
return pools;
}
HostVO lastHost = _hostDao.findById(profile.getVirtualMachine().getLastHostId());
HostVO host = _hostDao.findById(profile.getHostId());

// Consider only VMware inter-cluster migrations
if (!isVmwareInterClusterMigration(lastHost, host)) {
return pools;
}

List<VolumeVO> volumesForVm = _volsDao.findUsableVolumesForInstance(vmId);
for (VolumeVO volumeForVm : volumesForVm) {
if (volumeForVm.getPoolId() != null) {
DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary);
PrimaryDataStore primaryDataStore = (PrimaryDataStore) dataStore;
if (primaryDataStore.getScope() != null && primaryDataStore.getScope().getScopeType() == ScopeType.ZONE) {
// Consider zone-wide storage
PrimaryDataStoreDriver driver = (PrimaryDataStoreDriver) primaryDataStore.getDriver();
if (driver.zoneWideVolumesDatastoreCleanupOnOriginHostAfterInterClusterMigration()) {
// Currently enabled for SolidFire only on inter-cluster migrations on zone-wide pools.
pools.add(volumeForVm.get_iScsiName());
}
}
}
}
return pools;
}
}
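
To make the handshake between the orchestrator and the VMware agent concrete: for a managed, zone-wide volume migrated across clusters, getDetails() above populates the DiskTO details roughly as follows (all values hypothetical), and prepareDatastoreForZoneWideManagedStorageInterClusterMigration() in VmwareResource below reads the same keys on the destination host:

// Illustrative DiskTO details for one such volume (values are made up):
Map<String, String> details = new HashMap<>();
details.put(DiskTO.MANAGED, "true");
details.put(DiskTO.SCOPE, "ZONE");                    // storagePool.getScope().name()
details.put(DiskTO.INTER_CLUSTER_MIGRATION, "true");  // source and destination clusters differ
details.put(DiskTO.IQN, "iqn.2010-01.com.solidfire:example");
details.put(DiskTO.STORAGE_HOST, "192.0.2.10");
details.put(DiskTO.STORAGE_PORT, "3260");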
VmwareResource.java
@@ -49,8 +49,10 @@
import javax.naming.ConfigurationException;
import javax.xml.datatype.XMLGregorianCalendar;

import com.cloud.agent.api.UnmountDatastoresCommand;
import com.cloud.capacity.CapacityManager;
import com.cloud.hypervisor.vmware.mo.HostDatastoreBrowserMO;
import com.cloud.storage.ScopeType;
import com.vmware.vim25.FileInfo;
import com.vmware.vim25.FileQueryFlags;
import com.vmware.vim25.FolderFileInfo;
@@ -74,6 +76,7 @@
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.BooleanUtils;
import org.apache.commons.lang.math.NumberUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
@@ -619,6 +622,8 @@ public Answer executeRequest(Command cmd) {
answer = execute((ListDataStoreObjectsCommand) cmd);
} else if (clz == PrepareForBackupRestorationCommand.class) {
answer = execute((PrepareForBackupRestorationCommand) cmd);
} else if (clz == UnmountDatastoresCommand.class) {
answer = execute((UnmountDatastoresCommand) cmd);
} else {
answer = Answer.createUnsupportedCommandAnswer(cmd);
}
@@ -4624,6 +4629,11 @@ protected Answer execute(PrepareForMigrationCommand cmd) {
prepareNetworkFromNicInfo(new HostMO(getServiceContext(), _morHyperHost), nic, false, cmd.getVirtualMachine().getType());
}

DiskTO[] disks = vm.getDisks();
for (DiskTO disk : disks) {
prepareDatastoreForZoneWideManagedStorageInterClusterMigration(disk, hyperHost);
}

List<Pair<String, Long>> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(_dcId));
for (Pair<String, Long> secStoreUrlAndId : secStoreUrlAndIdList) {
String secStoreUrl = secStoreUrlAndId.first();
@@ -4645,6 +4655,29 @@ protected Answer execute(PrepareForMigrationCommand cmd) {
}
}

private void prepareDatastoreForZoneWideManagedStorageInterClusterMigration(DiskTO disk, VmwareHypervisorHost hyperHost) throws Exception {
Map<String, String> details = disk.getDetails();
if (MapUtils.isEmpty(details) || !details.containsKey(DiskTO.SCOPE) ||
!details.containsKey(DiskTO.MANAGED) && !details.containsKey(DiskTO.INTER_CLUSTER_MIGRATION)) {
[Review comment from Copilot AI, Jan 26, 2026]
The boolean logic in this condition is incorrect. The expression !details.containsKey(DiskTO.MANAGED) && !details.containsKey(DiskTO.INTER_CLUSTER_MIGRATION) should be (!details.containsKey(DiskTO.MANAGED) || !details.containsKey(DiskTO.INTER_CLUSTER_MIGRATION)) with OR instead of AND.

The current logic means the method returns only when BOTH MANAGED and INTER_CLUSTER_MIGRATION are missing, but the subsequent checks on lines 4666-4668 explicitly check for the presence of these keys. This means if either key is present (but not both), the method won't return early but will later evaluate false in the checks below, making the early return ineffective.

The intended logic should be: return early if SCOPE is missing OR if either MANAGED or INTER_CLUSTER_MIGRATION is missing.

Suggested change:
-                !details.containsKey(DiskTO.MANAGED) && !details.containsKey(DiskTO.INTER_CLUSTER_MIGRATION)) {
+                (!details.containsKey(DiskTO.MANAGED) || !details.containsKey(DiskTO.INTER_CLUSTER_MIGRATION))) {
return;
}

VmwareContext context = hyperHost.getContext();
boolean isManaged = details.containsKey(DiskTO.MANAGED) && BooleanUtils.toBoolean(details.get(DiskTO.MANAGED));
boolean isZoneWideStorage = details.containsKey(DiskTO.SCOPE) && details.get(DiskTO.SCOPE).equalsIgnoreCase(ScopeType.ZONE.name());
boolean isInterClusterMigration = details.containsKey(DiskTO.INTER_CLUSTER_MIGRATION) && BooleanUtils.toBoolean(details.get(DiskTO.INTER_CLUSTER_MIGRATION));

if (isManaged && isZoneWideStorage && isInterClusterMigration) {
s_logger.debug(String.format("Preparing storage on destination cluster for host %s", hyperHost.getHyperHostName()));
String iScsiName = details.get(DiskTO.IQN); // details should not be null for managed storage (it may or may not be null for non-managed storage)
String datastoreName = VmwareResource.getDatastoreName(iScsiName);
s_logger.debug(String.format("Ensuring datastore %s is mounted on destination cluster", datastoreName));
_storageProcessor.prepareManagedDatastore(context, hyperHost, datastoreName,
details.get(DiskTO.IQN), details.get(DiskTO.STORAGE_HOST),
Integer.parseInt(details.get(DiskTO.STORAGE_PORT)));
[Review comment from Copilot AI, Jan 26, 2026, on lines +4675 to +4677]
There's a potential NumberFormatException if details.get(DiskTO.STORAGE_PORT) returns null or a non-numeric value. While the comment on line 4672 suggests details should not be null for managed storage, there's no explicit validation that STORAGE_PORT is present or valid before parsing. Consider adding a null check and validation before calling Integer.parseInt().

Suggested change:
-            _storageProcessor.prepareManagedDatastore(context, hyperHost, datastoreName,
-                    details.get(DiskTO.IQN), details.get(DiskTO.STORAGE_HOST),
-                    Integer.parseInt(details.get(DiskTO.STORAGE_PORT)));
+            String storagePortString = details.get(DiskTO.STORAGE_PORT);
+            if (StringUtils.isBlank(storagePortString) || !NumberUtils.isDigits(storagePortString)) {
+                s_logger.warn(String.format("Skipping preparation of managed datastore %s due to invalid storage port value: %s",
+                        datastoreName, storagePortString));
+                return;
+            }
+            int storagePort = Integer.parseInt(storagePortString);
+            _storageProcessor.prepareManagedDatastore(context, hyperHost, datastoreName,
+                    details.get(DiskTO.IQN), details.get(DiskTO.STORAGE_HOST), storagePort);
}
}
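
Taking the reviewer's point above, one way to make the early return agree with the checks that follow is to require all three keys up front; a sketch of an equivalent guard:

// Sketch: bail out unless SCOPE, MANAGED and INTER_CLUSTER_MIGRATION are all present.
if (MapUtils.isEmpty(details) || !details.containsKey(DiskTO.SCOPE)
        || !details.containsKey(DiskTO.MANAGED) || !details.containsKey(DiskTO.INTER_CLUSTER_MIGRATION)) {
    return;
}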

protected Answer execute(MigrateVmToPoolCommand cmd) {
final String vmName = cmd.getVmName();

@@ -5339,6 +5372,34 @@ private void handleTargets(boolean add, ModifyTargetsCommand.TargetTypeToRemove
}
}

private Answer execute(UnmountDatastoresCommand cmd) {
VmwareContext context = getServiceContext(cmd);
VmwareHypervisorHost hyperHost = getHyperHost(context, cmd);
if (hyperHost == null) {
throw new CloudRuntimeException("No hypervisor host found to unmount datastore");
}
try {
List<String> datastorePools = cmd.getDatastorePools();
if (CollectionUtils.isNotEmpty(datastorePools)) {
ManagedObjectReference clusterMor = hyperHost.getHyperHostCluster();
if (clusterMor == null) {
return new Answer(cmd, false, "Cannot unmount datastore pools as the cluster is not found");
}
ClusterMO clusterMO = new ClusterMO(context, clusterMor);
List<Pair<ManagedObjectReference, String>> clusterHosts = clusterMO.getClusterHosts();
for (String datastorePool : datastorePools) {
String datastoreName = VmwareResource.getDatastoreName(datastorePool);
s_logger.debug(String.format("Unmounting datastore %s from cluster %s hosts", datastoreName, clusterMO.getName()));
_storageProcessor.unmountVmfsDatastore(context, hyperHost, datastoreName, clusterHosts);
}
}
} catch (Exception e) {
s_logger.error("Error unmounting datastores", e);
return new Answer(cmd, e);
}
return new Answer(cmd, true, "success");
}

protected Answer execute(DeleteStoragePoolCommand cmd) {
try {
if (cmd.getRemoveDatastore()) {
VmwareStorageProcessor.java
@@ -3038,7 +3038,7 @@ private void mountVmfsDatastore2(DatastoreMO dsMO, List<HostMO> hosts) throws Ex
}
}

-private void unmountVmfsDatastore(VmwareContext context, VmwareHypervisorHost hyperHost, String datastoreName,
+public void unmountVmfsDatastore(VmwareContext context, VmwareHypervisorHost hyperHost, String datastoreName,
List<Pair<ManagedObjectReference, String>> hosts) throws Exception {
for (Pair<ManagedObjectReference, String> host : hosts) {
HostMO hostMO = new HostMO(context, host.first());