Use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
The class SnapshotService, method deleteSnapshot.
/**
* Delete a specific snapshot
*
* @prereq none
*
* @param tenant_id
* the URN of the tenant
* @param snapshot_id
* the URN of the snapshot
*
* @brief Delete Snapshot
* @return Task result
*/
@DELETE
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{snapshot_id}")
@CheckPermission(roles = { Role.SYSTEM_MONITOR, Role.TENANT_ADMIN }, acls = { ACL.ANY })
public Response deleteSnapshot(@PathParam("tenant_id") String openstack_tenant_id, @PathParam("snapshot_id") String snapshot_id) {
_log.info("Delete Snapshot: id = {}", snapshot_id);
BlockSnapshot snap = findSnapshot(snapshot_id, openstack_tenant_id);
if (snap == null) {
_log.error("Not Found : Invalid volume snapshot id");
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid volume snapshot id");
} else if (snap.hasConsistencyGroup()) {
_log.error("Not Found : Snapshot belongs to a consistency group");
return CinderApiUtils.createErrorResponse(400, "Invalid snapshot: Snapshot belongs to consistency group");
}
URI snapshotURI = snap.getId();
String task = UUID.randomUUID().toString();
TaskList response = new TaskList();
ArgValidator.checkReference(BlockSnapshot.class, snapshotURI, checkForDelete(snap));
// Not an error if the snapshot we try to delete is already deleted
if (snap.getInactive()) {
Operation op = new Operation();
op.ready("The snapshot has already been deleted");
op.setResourceType(ResourceOperationTypeEnum.DELETE_VOLUME_SNAPSHOT);
_dbClient.createTaskOpStatus(BlockSnapshot.class, snap.getId(), task, op);
response.getTaskList().add(toTask(snap, task, op));
return Response.status(202).build();
}
StorageSystem device = _dbClient.queryObject(StorageSystem.class, snap.getStorageController());
List<BlockSnapshot> snapshots = new ArrayList<BlockSnapshot>();
final URI cgId = snap.getConsistencyGroup();
if (!NullColumnValueGetter.isNullURI(cgId)) {
// Collect all the BlockSnapshots if part of a CG.
URIQueryResultList results = new URIQueryResultList();
_dbClient.queryByConstraint(AlternateIdConstraint.Factory.getBlockSnapshotsBySnapsetLabel(snap.getSnapsetLabel()), results);
Iterator<URI> snapshotUriIter = results.iterator();
while (snapshotUriIter.hasNext()) {
URI uri = snapshotUriIter.next();
_log.info("BlockSnapshot being deactivated: " + uri);
BlockSnapshot snapshot = _dbClient.queryObject(BlockSnapshot.class, uri);
if (snapshot != null) {
snapshots.add(snapshot);
}
}
} else {
// Snap is not part of a CG so only delete the snap
snapshots.add(snap);
}
for (BlockSnapshot snapshot : snapshots) {
Operation snapOp = _dbClient.createTaskOpStatus(BlockSnapshot.class, snapshot.getId(), task, ResourceOperationTypeEnum.DELETE_VOLUME_SNAPSHOT);
response.getTaskList().add(toTask(snapshot, task, snapOp));
}
// Note that for snapshots of VPLEX volumes, the parent volume for the
// snapshot is the source side backend volume, which will have the same
// vpool as the VPLEX volume and therefore, the correct implementation
// should be returned.
Volume volume = _permissionsHelper.getObjectById(snap.getParent(), Volume.class);
BlockServiceApi blockServiceApiImpl = BlockService.getBlockServiceImpl(volume, _dbClient);
blockServiceApiImpl.deleteSnapshot(snap, snapshots, task, VolumeDeleteTypeEnum.FULL.name());
StringMap extensions = snap.getExtensions();
if (extensions == null) {
extensions = new StringMap();
}
for (TaskResourceRep rep : response.getTaskList()) {
extensions.put("taskid", rep.getId().toString());
break;
}
snap.setExtensions(extensions);
_dbClient.updateObject(snap);
auditOp(OperationTypeEnum.DELETE_VOLUME_SNAPSHOT, true, AuditLogManager.AUDITOP_BEGIN, snapshot_id, snap.getLabel(), snap.getParent().getName(), device.getId().toString());
return Response.status(202).build();
}
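The deletion path above records only the first task id in the snapshot's extensions. Below is a minimal sketch of that pattern as a reusable helper; it assumes only the TaskList and TaskResourceRep accessors already used in the excerpt (getTaskList(), getId()), and the method name is illustrative.

    private static String firstTaskId(TaskList tasks) {
        // Mirrors the extensions.put("taskid", ...) loop in deleteSnapshot(): the
        // first TaskResourceRep in the list is the one whose id gets persisted.
        for (TaskResourceRep rep : tasks.getTaskList()) {
            if (rep.getId() != null) {
                return rep.getId().toString();
            }
        }
        return null; // no tasks were created for this request
    }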
Use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
The class VPlexBlockServiceApiImpl, method createVPlexVolumeDescriptors.
/**
* Create the necessary volume descriptors for Vplex volumes, including the backend
* volume descriptors and the virtual volume descriptors.
*
* @param param
* - the VolumeCreate parameters
* @param project
* -- user's project
* @param vArray
* -- virtual array volumes are created in
* @param vPool
* -- virtual pool (ROOT) used to create the volumes
* @param recommendations
* -- recommendations received from placement
* @param task
* -- a task identifier
* @param vPoolCapabilities
* - VirtualPoolCapabilitiesWrapper
* @param blockConsistencyGroupURI
* - the block consistency group URI
* @param taskList
* - OUTPUT list of tasks created
* @param allVolumes
* - OUTPUT - list of volumes created
* @param createTask
* -- boolean flag indicating to create tasks
* @return The list of volume descriptors for the VPlex virtual volumes and their backend volumes
*/
public List<VolumeDescriptor> createVPlexVolumeDescriptors(VolumeCreate param, Project project, final VirtualArray vArray, final VirtualPool vPool, List<Recommendation> recommendations, String task, VirtualPoolCapabilityValuesWrapper vPoolCapabilities, URI blockConsistencyGroupURI, TaskList taskList, List<URI> allVolumes, boolean createTask) {
s_logger.info("Request to create {} VPlex virtual volume(s)", vPoolCapabilities.getResourceCount());
// Determine if we're processing an SRDF copy so we can set appropriate name.
boolean srdfCopy = false;
if (recommendations.get(0).getRecommendation() != null && recommendations.get(0).getRecommendation() instanceof SRDFCopyRecommendation) {
srdfCopy = true;
}
// Sort the recommendations by VirtualArray. There can be up to two
// VirtualArrays, the requested VirtualArray and the HA VirtualArray
// either passed or determined by the placement when HA virtual volumes
// are being created. We also set the VPlex storage system, which
// should be the same for all recommendations.
URI vplexStorageSystemURI = null;
URI[] vplexSystemURIOut = new URI[1];
Map<String, List<VPlexRecommendation>> varrayRecommendationsMap = sortRecommendationsByVarray(recommendations, vplexSystemURIOut);
vplexStorageSystemURI = vplexSystemURIOut[0];
// Skip volume label validation for the SRDF copies, since they are already pre-created.
if (!srdfCopy) {
validateVolumeLabels(param.getName(), project, vPoolCapabilities, varrayRecommendationsMap);
}
// Determine the project to be used for the VPlex's artifacts
StorageSystem vplexStorageSystem = _dbClient.queryObject(StorageSystem.class, vplexStorageSystemURI);
Project vplexProject = getVplexProject(vplexStorageSystem, _dbClient, _tenantsService);
// The volume size.
long size = SizeUtil.translateSize(param.getSize());
// The consistency group or null when not specified.
final BlockConsistencyGroup consistencyGroup = blockConsistencyGroupURI == null ? null : _dbClient.queryObject(BlockConsistencyGroup.class, blockConsistencyGroupURI);
// Find all volumes assigned to the group
boolean cgContainsVolumes = false;
if (consistencyGroup != null) {
final List<Volume> activeCGVolumes = getActiveCGVolumes(consistencyGroup);
cgContainsVolumes = (activeCGVolumes != null && !activeCGVolumes.isEmpty());
}
// If the consistency group is created but does not specify the LOCAL
// type, the CG must be a CG created prior to 2.2 or an ingested CG. In
// this case, we don't want a volume creation to result in backend CGs.
// The only exception is if the CG does not reference any volumes. In
// this case, if the LOCAL type isn't specified, we can create backend
// CGs.
BlockConsistencyGroup backendCG = null;
if (consistencyGroup != null && (!consistencyGroup.created() || !cgContainsVolumes || consistencyGroup.getTypes().contains(Types.LOCAL.toString()))) {
backendCG = consistencyGroup;
}
// Prepare Bourne volumes to represent the backend volumes for the
// recommendations in each VirtualArray.
int varrayCount = 0;
String volumeLabel = param.getName();
List<VolumeDescriptor> descriptors = new ArrayList<VolumeDescriptor>();
URI[][] varrayVolumeURIs = new URI[2][vPoolCapabilities.getResourceCount()];
Iterator<String> varrayIter = varrayRecommendationsMap.keySet().iterator();
while (varrayIter.hasNext()) {
String varrayId = varrayIter.next();
s_logger.info("Processing backend recommendations for Virtual Array {}", varrayId);
List<VPlexRecommendation> vplexRecommendations = varrayRecommendationsMap.get(varrayId);
List<VolumeDescriptor> varrayDescriptors = makeBackendVolumeDescriptors(vplexRecommendations, project, vplexProject, vPool, volumeLabel, varrayCount, size, backendCG, vPoolCapabilities, createTask, task);
descriptors.addAll(varrayDescriptors);
List<URI> varrayURIs = VolumeDescriptor.getVolumeURIs(varrayDescriptors);
allVolumes.addAll(varrayURIs);
for (int i = 0; i < varrayURIs.size(); i++) {
varrayVolumeURIs[varrayCount][i] = varrayURIs.get(i);
}
varrayCount++;
}
// Prepare Bourne volumes to represent the highly available virtual
// volumes and associate the virtual volumes with their associated
// backend volumes.
s_logger.info("Preparing virtual volumes");
List<URI> virtualVolumeURIs = new ArrayList<URI>();
URI nullPoolURI = NullColumnValueGetter.getNullURI();
vPoolCapabilities.put(VirtualPoolCapabilityValuesWrapper.AUTO_TIER__POLICY_NAME, null);
for (int i = 0; i < vPoolCapabilities.getResourceCount(); i++) {
// Compute the volume label based on the label of the underlying volume
String volumeLabelBuilt = null;
Volume associatedVolume = _dbClient.queryObject(Volume.class, varrayVolumeURIs[0][i]);
// Get the virtual volume backing replication group instance name, if available.
String backingReplicationGroupInstance = null;
if (associatedVolume != null) {
volumeLabelBuilt = generateLabelFromAssociatedVolume(volumeLabel, associatedVolume);
backingReplicationGroupInstance = NullColumnValueGetter.isNotNullValue(associatedVolume.getReplicationGroupInstance()) ? associatedVolume.getReplicationGroupInstance() : NullColumnValueGetter.getNullStr();
} else {
volumeLabelBuilt = AbstractBlockServiceApiImpl.generateDefaultVolumeLabel(volumeLabel, i, vPoolCapabilities.getResourceCount());
}
s_logger.info("Volume label is {}", volumeLabelBuilt);
Volume volume = StorageScheduler.getPrecreatedVolume(_dbClient, taskList, volumeLabelBuilt);
boolean volumePrecreated = false;
if (volume != null) {
volumePrecreated = true;
}
long thinVolumePreAllocationSize = 0;
if (null != vPool.getThinVolumePreAllocationPercentage()) {
thinVolumePreAllocationSize = VirtualPoolUtil.getThinVolumePreAllocationSize(vPool.getThinVolumePreAllocationPercentage(), size);
}
volume = prepareVolume(VolumeType.VPLEX_VIRTUAL_VOLUME, volume, size, thinVolumePreAllocationSize, project, vArray, vPool, vplexStorageSystemURI, nullPoolURI, volumeLabelBuilt, consistencyGroup, vPoolCapabilities);
StringSet associatedVolumes = new StringSet();
associatedVolumes.add(varrayVolumeURIs[0][i].toString());
s_logger.info("Associating volume {}", varrayVolumeURIs[0][i].toString());
// For distributed (HA) virtual volumes, the backend volume in the HA virtual array must also be associated with the virtual volume.
if (varrayCount > 1) {
associatedVolumes.add(varrayVolumeURIs[1][i].toString());
s_logger.info("Associating volume {}", varrayVolumeURIs[1][i].toString());
}
volume.setAssociatedVolumes(associatedVolumes);
if (null != backingReplicationGroupInstance) {
s_logger.info("Setting virtual volume backingReplicationGroupInstance to {}", backingReplicationGroupInstance);
volume.setBackingReplicationGroupInstance(backingReplicationGroupInstance);
}
_dbClient.updateObject(volume);
URI volumeId = volume.getId();
s_logger.info("Prepared virtual volume {}", volumeId);
virtualVolumeURIs.add(volumeId);
allVolumes.add(volumeId);
if (createTask && !volumePrecreated) {
Operation op = _dbClient.createTaskOpStatus(Volume.class, volume.getId(), task, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME);
TaskResourceRep volumeTask = toTask(volume, task, op);
taskList.getTaskList().add(volumeTask);
}
VolumeDescriptor descriptor = new VolumeDescriptor(VolumeDescriptor.Type.VPLEX_VIRT_VOLUME, vplexStorageSystemURI, volumeId, null, consistencyGroup == null ? null : consistencyGroup.getId(), vPoolCapabilities, volume.getCapacity());
// Set the compute resource in the descriptor if the volume to be created will be exported
// to a host/cluster after it has been created so that the compute resource name can be
// included in the volume name if the custom volume naming is so configured. Do not set the
// compute resource if the descriptor is for an SRDF target as the target is not exported
// to the compute resource.
URI computeResourceURI = param.getComputeResource();
if ((computeResourceURI != null) && (!srdfCopy)) {
s_logger.info(String.format("Volume %s - will be exported to Host/Cluster: %s", volume.getLabel(), computeResourceURI.toString()));
descriptor.setComputeResource(computeResourceURI);
}
descriptors.add(descriptor);
}
return descriptors;
}
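A hypothetical caller of createVPlexVolumeDescriptors, based only on the signature above; the recommendations, capability wrapper, and URIs are assumed to come from earlier placement steps, and all variable names are illustrative.

    List<URI> allVolumes = new ArrayList<URI>();
    TaskList taskList = new TaskList();
    List<VolumeDescriptor> descriptors = vplexBlockServiceApi.createVPlexVolumeDescriptors(
            param, project, vArray, vPool, recommendations, task,
            vPoolCapabilities, blockConsistencyGroupURI, taskList, allVolumes, true);
    // With createTask == true, one TaskResourceRep is added per virtual volume that
    // was not pre-created by the scheduler.
    for (TaskResourceRep volumeTask : taskList.getTaskList()) {
        s_logger.info("Task {} tracks creation of volume {}",
                volumeTask.getId(), volumeTask.getResource().getId());
    }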
Use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
The class VolumeGroupService, method detachVolumeGroupFullCopy.
/**
* Detach the specified Volume group full copy.
* - Detaches full copy for all the array replication groups within this Application.
* - If partial flag is specified, it detaches full copy only for set of array replication groups.
* A Full Copy from each array replication group can be provided to indicate which array replication
* groups' full copies need to be detached.
*
* @prereq Create Volume group full copy as active.
*
* @param volumeGroupId The URI of the Volume group.
* @param fullCopyURI The URI of the full copy.
*
* @brief Detach Volume group full copy.
*
* @return TaskList
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/{id}/protection/full-copies/detach")
@CheckPermission(roles = { Role.TENANT_ADMIN }, acls = { ACL.ANY })
public TaskList detachVolumeGroupFullCopy(@PathParam("id") final URI volumeGroupId, final VolumeGroupFullCopyDetachParam param) {
ArgValidator.checkFieldUriType(volumeGroupId, VolumeGroup.class, "id");
// Query Volume Group
final VolumeGroup volumeGroup = (VolumeGroup) queryResource(volumeGroupId);
TaskList taskList = new TaskList();
// validate replica operation for volume group
validateCopyOperationForVolumeGroup(volumeGroup, ReplicaTypeEnum.FULL_COPY);
// validate the requested full copies
List<Volume> fullCopyVolumesInRequest = new ArrayList<Volume>();
boolean partial = validateFullCopiesInRequest(fullCopyVolumesInRequest, param.getFullCopies(), param.getCopySetName(), param.getSubGroups(), volumeGroupId);
/**
* 1. VolumeGroupService Clone API accepts a Clone URI (to identify clone set and RG)
* - then get All full copies belonging to same full copy set
* - get full copy set name from the requested full copy
* 2. If partial, there will be a List of Clone URIs (one from each RG)
* 3. Group the full copies by Replication Group(RG)
* 4. For each RG, invoke the ConsistencyGroup full copy API (CG uri, clone uri)
* - a. Skip a CG/RG call when it throws an error and continue with the other entries; create an 'ERROR' task for that call
* - b. Finally return the Task List (RG tasks may finish at different times as they are different calls)
*/
if (!partial) {
Volume fullCopy = fullCopyVolumesInRequest.get(0);
log.info("Full Copy operation requested for entire Application, Considering full copy {} in request.", fullCopy.getLabel());
fullCopyVolumesInRequest.clear();
fullCopyVolumesInRequest.addAll(getClonesBySetName(fullCopy.getFullCopySetName(), volumeGroup.getId()));
} else {
log.info("Full Copy operation requested for subset of array replication groups in Application.");
}
checkForApplicationPendingTasks(volumeGroup, _dbClient, true);
Map<String, Volume> repGroupToFullCopyMap = groupVolumesByReplicationGroup(fullCopyVolumesInRequest);
for (Map.Entry<String, Volume> entry : repGroupToFullCopyMap.entrySet()) {
String replicationGroup = entry.getKey();
Volume fullCopy = entry.getValue();
log.info("Processing Array Replication Group {}, Full Copy {}", replicationGroup, fullCopy.getLabel());
try {
// get CG URI
URI cgURI = getConsistencyGroupForFullCopy(fullCopy);
// Detach the full copy. Note that it will take into account the
// fact that the volume is in a ReplicationGroup
// and all volumes in that ReplicationGroup will be detached.
taskList.getTaskList().addAll(_blockConsistencyGroupService.detachConsistencyGroupFullCopy(cgURI, fullCopy.getId()).getTaskList());
} catch (InternalException | APIException e) {
String errMsg = String.format("Error detaching Array Replication Group %s, Full Copy %s", replicationGroup, fullCopy.getLabel());
log.error(errMsg, e);
TaskResourceRep task = BlockServiceUtils.createFailedTaskOnVolume(_dbClient, fullCopy, ResourceOperationTypeEnum.DETACH_VOLUME_FULL_COPY, e);
taskList.addTask(task);
}
}
if (!partial) {
auditOp(OperationTypeEnum.DETACH_VOLUME_GROUP_FULL_COPY, true, AuditLogManager.AUDITOP_BEGIN, volumeGroup.getId().toString(), fullCopyVolumesInRequest.get(0).getLabel());
}
return taskList;
}
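A minimal client-side sketch of consuming the returned TaskList, assuming TaskResourceRep exposes its state as a plain string (e.g. "error") for the failed tasks produced by createFailedTaskOnVolume; names other than detachVolumeGroupFullCopy are illustrative.

    TaskList tasks = volumeGroupService.detachVolumeGroupFullCopy(volumeGroupId, detachParam);
    for (TaskResourceRep rep : tasks.getTaskList()) {
        if ("error".equalsIgnoreCase(rep.getState())) {
            // A replication-group detach that failed and was recorded as an ERROR task.
            log.warn("Detach failed for {} (task {})", rep.getResource().getId(), rep.getId());
        } else {
            log.info("Detach started for {} (task {})", rep.getResource().getId(), rep.getId());
        }
    }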
Use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
The class SnapshotService, method createSnapshot.
/**
* The snapshot of a volume in Block Store is a point in time copy of the
* volume. This API allows the user to create a snapshot of a volume.
* NOTE: This is an asynchronous operation.
*
* @prereq none
*
* @param param
* POST data containing the snapshot creation information.
*
* @brief Create snapshot
* @return Details of the newly created snapshot
* @throws InternalException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public Response createSnapshot(@PathParam("tenant_id") String openstack_tenant_id, SnapshotCreateRequestGen param, @Context HttpHeaders header, @HeaderParam("X-Cinder-V1-Call") String isV1Call) throws InternalException {
// Step 1: Parameter validation
String snapshotName = null;
String snapshotDescription = null;
if (isV1Call != null) {
snapshotName = param.snapshot.display_name;
snapshotDescription = param.snapshot.display_description;
} else {
snapshotName = param.snapshot.name;
snapshotDescription = param.snapshot.description;
}
// if snapshot name is empty create random name
if (snapshotName == null) {
snapshotName = "snapshot-" + RandomStringUtils.random(10);
}
if (snapshotName == null || (snapshotName.length() <= 2)) {
throw APIException.badRequests.parameterIsNotValid(param.snapshot.name);
}
URI volumeUri = null;
Volume volume = null;
volumeUri = URI.create(param.snapshot.volume_id);
volume = queryVolumeResource(volumeUri, openstack_tenant_id);
if (volume == null) {
_log.error("Invalid source volume id to create snapshot ={} ", param.snapshot.volume_id);
return CinderApiUtils.createErrorResponse(404, "Not Found : Invalid source volume id " + param.snapshot.volume_id);
}
VirtualPool pool = _dbClient.queryObject(VirtualPool.class, volume.getVirtualPool());
if (pool == null) {
_log.info("Virtual Pool corresponding to the volume does not exist.");
throw APIException.badRequests.parameterIsNotValid(volume.getVirtualPool().toString());
}
if (!validateSnapshotCreate(openstack_tenant_id, pool, volume.getProvisionedCapacity())) {
_log.info("The volume can not be created because of insufficient quota for virtual pool.");
throw APIException.badRequests.insufficientQuotaForVirtualPool(pool.getLabel(), "virtual pool");
}
if (!validateSnapshotCreate(openstack_tenant_id, null, volume.getProvisionedCapacity())) {
_log.info("The volume can not be created because of insufficient quota for Project.");
throw APIException.badRequests.insufficientQuotaForProject(pool.getLabel(), "project");
}
BlockFullCopyManager fcManager = new BlockFullCopyManager(_dbClient, _permissionsHelper, _auditMgr, _coordinator, _placementManager, sc, uriInfo, _request, _tenantsService);
VolumeIngestionUtil.checkOperationSupportedOnIngestedVolume(volume, ResourceOperationTypeEnum.CREATE_VOLUME_SNAPSHOT, _dbClient);
// Don't operate on VPLEX backend volumes or RP journal volumes.
BlockServiceUtils.validateNotAnInternalBlockObject(volume, false);
validateSourceVolumeHasExported(volume);
String snapshotType = TechnologyType.NATIVE.toString();
Boolean createInactive = Boolean.FALSE;
Boolean readOnly = Boolean.FALSE;
BlockServiceApi api = getBlockServiceImpl(pool, _dbClient);
List<Volume> volumesToSnap = new ArrayList<Volume>();
volumesToSnap.addAll(api.getVolumesToSnap(volume, snapshotType));
api.validateCreateSnapshot(volume, volumesToSnap, snapshotType, snapshotName, readOnly, fcManager);
String taskId = UUID.randomUUID().toString();
List<URI> snapshotURIs = new ArrayList<URI>();
List<BlockSnapshot> snapshots = api.prepareSnapshots(volumesToSnap, snapshotType, snapshotName, snapshotURIs, taskId);
TaskList response = new TaskList();
for (BlockSnapshot snapshot : snapshots) {
response.getTaskList().add(toTask(snapshot, taskId));
}
// Update the task status for the volumes task.
_dbClient.createTaskOpStatus(Volume.class, volume.getId(), taskId, ResourceOperationTypeEnum.CREATE_VOLUME_SNAPSHOT);
// Invoke the block service API implementation to create the snapshot
api.createSnapshot(volume, snapshotURIs, snapshotType, createInactive, readOnly, taskId);
SnapshotCreateResponse snapCreateResp = new SnapshotCreateResponse();
for (TaskResourceRep rep : response.getTaskList()) {
URI snapshotUri = rep.getResource().getId();
BlockSnapshot snap = _dbClient.queryObject(BlockSnapshot.class, snapshotUri);
if (snap != null) {
StringMap extensions = snap.getExtensions();
if (extensions == null)
extensions = new StringMap();
extensions.put("display_description", (snapshotDescription == null) ? "" : snapshotDescription);
extensions.put("taskid", rep.getId().toString());
_log.debug("Create snapshot : stored description");
snap.setExtensions(extensions);
ScopedLabelSet tagSet = new ScopedLabelSet();
snap.setTag(tagSet);
String[] splits = snapshotUri.toString().split(":");
String tagName = splits[3];
// this check will verify whether retrieved data is not corrupted
if (tagName == null || tagName.isEmpty() || tagName.length() < 2) {
throw APIException.badRequests.parameterTooShortOrEmpty("Tag", 2);
}
Volume parentVol = _permissionsHelper.getObjectById(snap.getParent(), Volume.class);
URI tenantOwner = parentVol.getTenant().getURI();
ScopedLabel tagLabel = new ScopedLabel(tenantOwner.toString(), tagName);
tagSet.add(tagLabel);
_dbClient.updateObject(snap);
if (isV1Call != null) {
_log.debug("Inside V1 call");
return CinderApiUtils.getCinderResponse(getSnapshotDetail(snap, isV1Call, openstack_tenant_id), header, true, CinderConstants.STATUS_OK);
} else {
return CinderApiUtils.getCinderResponse(getSnapshotDetail(snap, isV1Call, openstack_tenant_id), header, true, CinderConstants.STATUS_ACCEPT);
}
}
}
return CinderApiUtils.getCinderResponse(new CinderSnapshot(), header, true, CinderConstants.STATUS_ACCEPT);
}
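The loop over response.getTaskList() above pairs each snapshot with the task tracking it. An illustrative helper capturing that pairing, assuming only the accessors already used in the excerpt (getResource().getId(), getId()) plus java.util.Map/HashMap; the method name is hypothetical.

    private static Map<URI, String> snapshotTaskIds(TaskList response) {
        Map<URI, String> taskBySnapshot = new HashMap<URI, String>();
        for (TaskResourceRep rep : response.getTaskList()) {
            // The resource id is the BlockSnapshot URI; the task id is what
            // createSnapshot() stores as the "taskid" extension.
            taskBySnapshot.put(rep.getResource().getId(), rep.getId().toString());
        }
        return taskBySnapshot;
    }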
Use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
The class VolumeService, method createTaskList.
private TaskList createTaskList(long size, Project project, VirtualArray varray, VirtualPool vpool, String label, String task, Integer volumeCount) {
TaskList taskList = new TaskList();
// long lsize = SizeUtil.translateSize(size);
for (int i = 0; i < volumeCount; i++) {
Volume volume = StorageScheduler.prepareEmptyVolume(_dbClient, size, project, varray, vpool, label, i, volumeCount);
Operation op = _dbClient.createTaskOpStatus(Volume.class, volume.getId(), task, ResourceOperationTypeEnum.CREATE_BLOCK_VOLUME);
volume.getOpStatus().put(task, op);
TaskResourceRep volumeTask = toTask(volume, task, op);
taskList.getTaskList().add(volumeTask);
_log.info(String.format("Volume and Task Pre-creation Objects [Init]-- Source Volume: %s, Task: %s, Op: %s", volume.getId(), volumeTask.getId(), task));
}
return taskList;
}
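A hypothetical use of the helper above: pre-create the requested number of volume placeholders and log the TaskResourceRep produced for each. Variable names other than createTaskList are illustrative.

    TaskList precreated = createTaskList(sizeInBytes, project, varray, vpool, label, taskId, volumeCount);
    for (TaskResourceRep volumeTask : precreated.getTaskList()) {
        _log.info(String.format("Pre-created volume %s is tracked by task %s",
                volumeTask.getResource().getId(), volumeTask.getId()));
    }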