use of com.emc.storageos.vplexcontroller.job.VPlexMigrationJob in project coprhd-controller by CoprHD.
the class QueueJobTracker method run.
public void run() {
    HashMap<String, HashMap<String, Integer>> jobProgressMap = new HashMap<String, HashMap<String, Integer>>();
    while (true) {
        JobWrapper jobWrapper = null;
        _logger.debug("Tracker: Will check job status after {} ms...", _trackingPeriodInMillis);
        try {
            ArrayList<String> completedJobs = new ArrayList<String>();
            Thread.sleep(_trackingPeriodInMillis);
            _logger.debug("Tracker: Checking status of {} jobs", _activeJobs.size());
            for (Iterator<JobWrapper> iter = _activeJobs.iterator(); iter.hasNext(); ) {
                jobWrapper = iter.next();
                Job job = jobWrapper.getJob();
                try {
                    setPollingStartTime(job);
                    JobPollResult result = job.poll(_jobContext, _trackingPeriodInMillis);
                    updateJobProgress(jobProgressMap, result);
                    boolean stopJobTracking = false;
                    String msg = null;
                    // Check if we have to stop job tracking.
                    if (result.isJobInTerminalState()) {
                        // stop tracking jobs in final status and final post processing status
                        msg = String.format("Tracker: Stopping tracking job %s with status: %s and post-processing status %s",
                                result.getJobId(), result.getJobStatus(), result.getJobPostProcessingStatus());
                        stopJobTracking = true;
                    } else {
                        long trackingTime = System.currentTimeMillis() - job.getPollingStartTime();
                        if (trackingTime > job.getTimeoutTimeMsec()) {
                            // Stop tracking job if maximum job tracking time was reached.
                            msg = String.format("Tracker: Stopping tracking job %s with status: %s and post-processing status %s .\n"
                                    + "The job tracking time reached job tracking time limit %d hours, job tracking time %d hours.",
                                    result.getJobId(), result.getJobStatus(), result.getJobPostProcessingStatus(),
                                    job.getTimeoutTimeMsec() / (60 * 60 * 1000), trackingTime / (60 * 60 * 1000));
                            _logger.info(msg);
                            String errorMsg = String.format("Could not execute job %s on backend device. Exceeded time limit for job status tracking.",
                                    result.getJobName());
                            if (job instanceof VPlexMigrationJob) {
                                errorMsg = String.format("Could not execute VPlex Migration Job %s on backend device. Exceeded time limit for VPLEX migration timeout.",
                                        result.getJobName());
                            }
                            ServiceError error = DeviceControllerException.errors.unableToExecuteJob(errorMsg);
                            job.getTaskCompleter().error(_jobContext.getDbClient(), error);
                            stopJobTracking = true;
                        }
                    }
                    if (stopJobTracking) {
                        _logger.info(msg);
                        stopTrackingJob(jobWrapper);
                        completedJobs.add(result.getJobId());
                    }
                } catch (Exception ex) {
                    _logger.error("Tracker: Unexpected exception.", ex);
                }
            }
            if (!jobProgressMap.isEmpty()) {
                _logger.info(String.format("Progress of jobs - %n %s", jobProgressMap.toString()));
            }
            removeCompletedJobProgressItems(jobProgressMap, completedJobs);
        } catch (InterruptedException ie) {
            _logger.info("Tracker: Unexpected Interrupted exception.", ie);
        } catch (Exception e) {
            _logger.info("Tracker: Unexpected exception.", e);
        }
    }
}
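The hours reported in the timeout message above come from integer division of milliseconds, so partial hours are truncated. Below is a minimal, self-contained sketch of the same timeout arithmetic; the 4-hour limit and 5-hour tracking time are assumed example values, not values taken from the source.

// Illustrative sketch only: mirrors the tracker's timeout check and its hour reporting.
public class TrackerTimeoutExample {
    public static void main(String[] args) {
        long hourMsec = 60 * 60 * 1000L;
        long timeoutTimeMsec = 4 * hourMsec;                                // assumed 4-hour tracking limit
        long pollingStartTime = System.currentTimeMillis() - 5 * hourMsec;  // assume polling began 5 hours ago
        long trackingTime = System.currentTimeMillis() - pollingStartTime;
        if (trackingTime > timeoutTimeMsec) {
            // Integer division truncates partial hours, matching the log message above.
            System.out.printf("Tracking time limit %d hours exceeded; job tracked for %d hours%n",
                    timeoutTimeMsec / hourMsec, trackingTime / hourMsec);
        }
    }
}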
use of com.emc.storageos.vplexcontroller.job.VPlexMigrationJob in project coprhd-controller by CoprHD.
the class VPlexDeviceController method migrateVirtualVolume.
/**
* Creates and starts a VPlex data migration for the passed virtual volume
* on the passed VPlex storage system. The passed target is a newly created
* backend volume to which the data will be migrated. The source for the
* data migration is the current backend volume for the virtual volume that
* is in the same varray as the passed target. The method also creates
* a migration job to monitor the progress of the migration. The workflow
* step will complete when the migration completes, at which point the
* migration is automatically committed.
*
* @param vplexURI
* The URI of the VPlex storage system.
* @param virtualVolumeURI
* The URI of the virtual volume.
* @param targetVolumeURI
* The URI of the migration target.
* @param migrationURI
* The URI of the migration.
* @param newNhURI
* The URI of the new varray for the virtual volume
* when a local virtual volume is being migrated to the other
* cluster, or null.
* @param stepId
* The workflow step identifier.
* @throws WorkflowException
*/
public void migrateVirtualVolume(URI vplexURI, URI virtualVolumeURI, URI targetVolumeURI,
        URI migrationURI, URI newNhURI, String stepId) throws WorkflowException {
    _log.info("Migration {} using target {}", migrationURI, targetVolumeURI);
    try {
        // Update step state to executing.
        WorkflowStepCompleter.stepExecuting(stepId);
        // Initialize the step data. The step data indicates if we
        // successfully started the migration and is used in
        // rollback.
        _workflowService.storeStepData(stepId, Boolean.FALSE);
        // Get the virtual volume.
        Volume virtualVolume = getDataObject(Volume.class, virtualVolumeURI, _dbClient);
        String virtualVolumeName = virtualVolume.getDeviceLabel();
        _log.info("Virtual volume name is {}", virtualVolumeName);
        // Setup the native volume info for the migration target.
        Volume migrationTarget = getDataObject(Volume.class, targetVolumeURI, _dbClient);
        StorageSystem targetStorageSystem = getDataObject(StorageSystem.class, migrationTarget.getStorageController(), _dbClient);
        _log.info("Storage system for migration target is {}", migrationTarget.getStorageController());
        List<String> itls = VPlexControllerUtils.getVolumeITLs(migrationTarget);
        VolumeInfo nativeVolumeInfo = new VolumeInfo(targetStorageSystem.getNativeGuid(), targetStorageSystem.getSystemType(),
                migrationTarget.getWWN().toUpperCase().replaceAll(":", ""), migrationTarget.getNativeId(),
                migrationTarget.getThinlyProvisioned().booleanValue(), itls);
        // Get the migration associated with the target.
        Migration migration = getDataObject(Migration.class, migrationURI, _dbClient);
        // Determine the unique name for the migration. We used to identify
        // the migration source and target, using the array serial number
        // and volume native id, in the migration name. This was fine for
        // VPlex extent migration, where the migration name has a maximum
        // length of 63 characters. However, for remote migrations, which
        // require VPlex device migration, the maximum length is much more
        // restrictive, around 20 characters. So, we switched over to
        // timestamps.
        StringBuilder migrationNameBuilder = new StringBuilder(MIGRATION_NAME_PREFIX);
        DateFormat dateFormatter = new SimpleDateFormat(MIGRATION_NAME_DATE_FORMAT);
        migrationNameBuilder.append(dateFormatter.format(new Date()));
        String migrationName = migrationNameBuilder.toString();
        migration.setLabel(migrationName);
        _dbClient.updateObject(migration);
        _log.info("Migration name is {}", migrationName);
        // Get the VPlex API client.
        StorageSystem vplexSystem = getDataObject(StorageSystem.class, vplexURI, _dbClient);
        VPlexApiClient client = getVPlexAPIClient(_vplexApiFactory, vplexSystem, _dbClient);
        _log.info("Got VPlex API client for VPlex {}", vplexURI);
        // Get the configured migration speed.
        String speed = customConfigHandler.getComputedCustomConfigValue(CustomConfigConstants.MIGRATION_SPEED,
                vplexSystem.getSystemType(), null);
        _log.info("Migration speed is {}", speed);
        String transferSize = migrationSpeedToTransferSizeMap.get(speed);
        // Make a call to the VPlex API client to migrate the virtual
        // volume. Note that we need to do a remote migration when a
        // local virtual volume is being migrated to the other VPlex
        // cluster. If the passed new varray is not null, then
        // this is the case.
        Boolean isRemoteMigration = newNhURI != null;
        // We support both device and extent migrations, however,
        // when we don't know anything about the backend volumes
        // we must use device migration.
        Boolean useDeviceMigration = migration.getSource() == null;
        List<VPlexMigrationInfo> migrationInfoList = client.migrateVirtualVolume(migrationName, virtualVolumeName,
                Arrays.asList(nativeVolumeInfo), isRemoteMigration, useDeviceMigration, true, true, transferSize);
        _log.info("Started VPlex migration");
        // We store step data indicating that the migration was successfully
        // created and started. We will use this to determine the behavior
        // on rollback. If we never got to the point that the migration
        // was created and started, then there is no rollback to attempt
        // on the VPLEX, as the migrate API already tried to clean everything
        // up on the VPLEX.
        _workflowService.storeStepData(stepId, Boolean.TRUE);
        // Initialize the migration info in the database.
        VPlexMigrationInfo migrationInfo = migrationInfoList.get(0);
        migration.setMigrationStatus(VPlexMigrationInfo.MigrationStatus.READY.getStatusValue());
        migration.setPercentDone("0");
        migration.setStartTime(migrationInfo.getStartTime());
        _dbClient.updateObject(migration);
        _log.info("Update migration info");
        // Create a migration task completer and queue a job to monitor
        // the migration progress. The completer will be invoked by the
        // job when the migration completes.
        MigrationTaskCompleter migrationCompleter = new MigrationTaskCompleter(migrationURI, stepId);
        VPlexMigrationJob migrationJob = new VPlexMigrationJob(migrationCompleter);
        migrationJob.setTimeoutTimeMsec(MINUTE_TO_MILLISECONDS
                * Long.valueOf(ControllerUtils.getPropertyValueFromCoordinator(coordinator, CONTROLLER_VPLEX_MIGRATION_TIMEOUT_MINUTES)));
        ControllerServiceImpl.enqueueJob(new QueueJob(migrationJob));
        _log.info("Queued job to monitor migration progress.");
    } catch (VPlexApiException vae) {
        _log.error("Exception migrating VPlex virtual volume: " + vae.getMessage(), vae);
        WorkflowStepCompleter.stepFailed(stepId, vae);
    } catch (Exception ex) {
        _log.error("Exception migrating VPlex virtual volume: " + ex.getMessage(), ex);
        String opName = ResourceOperationTypeEnum.MIGRATE_VIRTUAL_VOLUME.getName();
        ServiceError serviceError = VPlexApiException.errors.migrateVirtualVolume(opName, ex);
        WorkflowStepCompleter.stepFailed(stepId, serviceError);
    }
}
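The comment above about switching to timestamp-based migration names refers to the roughly 20 character limit on VPlex device migration names. Below is a rough, self-contained sketch of how such a name can stay under that limit; the "M_" prefix and the "yyMMdd-HHmmss-SSS" pattern are assumed stand-ins for MIGRATION_NAME_PREFIX and MIGRATION_NAME_DATE_FORMAT, whose actual values are not shown in this snippet.

import java.text.SimpleDateFormat;
import java.util.Date;

// Illustrative sketch only: a timestamp-based migration name like the one built above.
public class MigrationNameExample {
    public static void main(String[] args) {
        // "M_" and "yyMMdd-HHmmss-SSS" are assumed values; the real constants are defined elsewhere.
        StringBuilder name = new StringBuilder("M_");
        name.append(new SimpleDateFormat("yyMMdd-HHmmss-SSS").format(new Date()));
        // e.g. "M_240131-154501-127": 19 characters, which fits the roughly
        // 20 character limit mentioned for VPlex device migration names.
        System.out.println(name + " (" + name.length() + " chars)");
    }
}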