use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class MigrationHandlerImpl method run.
/**
 * Runs the database schema migration to the target service version.
 *
 * @return true if migration completed or is not required on this node, false on failure
 */
@Override
public boolean run() throws DatabaseException {
    Date startTime = new Date();
    // set state to migration_init and wait for all nodes to reach this state
    setDbConfig(DbConfigConstants.MIGRATION_INIT);
    targetVersion = service.getVersion();
    statusChecker.setVersion(targetVersion);
    statusChecker.setServiceName(service.getName());
    // dbsvc will wait for all dbsvc nodes, and geodbsvc waits for all geodbsvc nodes.
    statusChecker.waitForAllNodesMigrationInit();
    if (schemaUtil.isStandby()) {
        String currentSchemaVersion = coordinator.getCurrentDbSchemaVersion();
        if (!StringUtils.equals(currentSchemaVersion, targetVersion)) {
            // no migration on a standby site
            log.info("Migration does not run on standby. Change current version to {}", targetVersion);
            schemaUtil.setCurrentVersion(targetVersion);
        }
        return true;
    }
    if (schemaUtil.isGeoDbsvc()) {
        boolean schemaVersionChanged = isDbSchemaVersionChanged();
        // scan and update the Cassandra schema
        checkGeoDbSchema();
        // no migration procedure for geodbsvc; just wait till migration is done on one of the dbsvcs
        log.warn("Migration is not supported for geodbsvc. Wait till migration is done");
        statusChecker.waitForMigrationDone();
        // update the vdc version
        if (schemaVersionChanged) {
            schemaUtil.insertOrUpdateVdcVersion(dbClient, true);
        }
        return true;
    } else {
        // for dbsvc, we have to wait till all geodbsvc nodes reach migration_init since we might
        // need to copy geo-replicated resources from the local db to the geo db.
        statusChecker.waitForAllNodesMigrationInit(Constants.GEODBSVC_NAME);
    }
    InterProcessLock lock = null;
    String currentSchemaVersion = null;
    int retryCount = 0;
    while (retryCount < MAX_MIGRATION_RETRY) {
        log.debug("Migration handlers - Start. Trying to grab lock ...");
        try {
            // grab the global lock for migration
            lock = getLock(DB_MIGRATION_LOCK);
            // make sure we haven't finished the migration on another node already
            MigrationStatus status = coordinator.getMigrationStatus();
            if (status != null) {
                if (status == MigrationStatus.DONE) {
                    log.info("DB migration is done already. Skipping...");
                    if (null == getPersistedSchema(targetVersion)) {
                        persistSchema(targetVersion, DbSchemaChecker.marshalSchemas(currentSchema, null));
                    }
                    return true;
                } else if (status == MigrationStatus.FAILED) {
                    log.error("DB migration has already ended with status: {}.", status);
                    return false;
                }
            }
            schemaUtil.setMigrationStatus(MigrationStatus.RUNNING);
            // we expect currentSchemaVersion to be set
            currentSchemaVersion = coordinator.getCurrentDbSchemaVersion();
            if (currentSchemaVersion == null) {
                throw new IllegalStateException("Schema version not set");
            }
            // figure out our source and target versions
            DbSchemas persistedSchema = getPersistedSchema(currentSchemaVersion);
            if (isSchemaMissed(persistedSchema, currentSchemaVersion, targetVersion)) {
                throw new IllegalStateException("Schema definition not found for version " + currentSchemaVersion);
            }
            if (isFreshInstall(persistedSchema, currentSchemaVersion, targetVersion)) {
                log.info("saving schema of version {} to db", currentSchemaVersion);
                persistedSchema = currentSchema;
                persistSchema(currentSchemaVersion, DbSchemaChecker.marshalSchemas(persistedSchema, null));
            }
            // check if we have a schema upgrade to deal with
            if (!currentSchemaVersion.equals(targetVersion)) {
                log.info("Start scanning and creating new column families");
                schemaUtil.checkCf(true);
                log.info("Scanning and creating new column families succeeded");
                DbSchemasDiff diff = new DbSchemasDiff(persistedSchema, currentSchema, ignoredPkgs);
                if (diff.isChanged()) {
                    // log the changes
                    dumpChanges(diff);
                    if (!diff.isUpgradable()) {
                        // we should never be here, but if we are, throw an IllegalStateException and stop
                        // TODO: dump the problematic diffs here
                        log.error("schema diff details: {}", DbSchemaChecker.marshalSchemasDiff(diff));
                        throw new IllegalStateException("schema not upgradable.");
                    }
                }
                log.info("Starting migration callbacks from {} to {}", currentSchemaVersion, targetVersion);
                // we need to checkpoint the progress of these callbacks as they are run,
                // so we can resume from where we left off in case of restarts/errors
                String checkpoint = schemaUtil.getMigrationCheckpoint();
                if (checkpoint != null) {
                    log.info("Migration checkpoint found for {}", checkpoint);
                }
                // run all migration callbacks
                runMigrationCallbacks(diff, checkpoint);
                log.info("Done migration callbacks");
                persistSchema(targetVersion, DbSchemaChecker.marshalSchemas(currentSchema, null));
                schemaUtil.dropUnusedCfsIfExists();
                // set the current version in zk
                schemaUtil.setCurrentVersion(targetVersion);
                log.info("current schema version is updated to {}", targetVersion);
            }
            schemaUtil.setMigrationStatus(MigrationStatus.DONE);
            // remove the migration checkpoint once migration is done
            schemaUtil.removeMigrationCheckpoint();
            removeMigrationFailInfoIfExist();
            log.debug("Migration handler - Done.");
            return true;
        } catch (Exception e) {
            if (e instanceof MigrationCallbackException) {
                markMigrationFailure(startTime, currentSchemaVersion, e);
            } else if (isUnRetryableException(e)) {
                markMigrationFailure(startTime, currentSchemaVersion, e);
                return false;
            } else {
                log.warn("Retryable exception during migration", e);
                retryCount++;
                lastException = e;
            }
        } finally {
            if (lock != null) {
                try {
                    lock.release();
                } catch (Exception ignore) {
                    log.debug("lock release failed");
                }
            }
        }
        sleepBeforeRetry();
    }
    // while loop ended without success
    markMigrationFailure(startTime, currentSchemaVersion, lastException);
    return false;
}
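The retry loop above only aborts immediately when an exception is classified as non-retryable; anything else increments the retry counter and sleeps before the next attempt. The helper below is a minimal, illustrative sketch of how such a classification could look. It is an assumption for illustration, not the project's actual isUnRetryableException implementation: it treats the IllegalStateException cases raised above (missing schema version, non-upgradable schema diff) as fatal and everything else, including transient DatabaseException failures, as retryable.

// Illustrative sketch only -- the real classification in CoprHD may differ.
private boolean isUnRetryableException(Exception e) {
    // A missing schema definition or a non-upgradable schema diff is raised as
    // IllegalStateException above; retrying cannot fix either condition.
    if (e instanceof IllegalStateException) {
        return true;
    }
    // Everything else (for example a transient DatabaseException from a temporarily
    // unreachable node) is assumed to be worth another attempt.
    return false;
}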
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class WorkflowService method logWorkflow.
/**
 * Persist the Cassandra logging record for the Workflow.
 *
 * @param workflow
 *            the in-memory Workflow whose state should be persisted
 * @param completed
 *            - If true, assumes the Workflow has been completed
 *            (reached a terminal state).
 */
void logWorkflow(Workflow workflow, boolean completed) {
    try {
        boolean created = false;
        com.emc.storageos.db.client.model.Workflow logWorkflow = null;
        if (workflow._workflowURI != null) {
            logWorkflow = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow._workflowURI);
        } else {
            workflow._workflowURI = URIUtil.createId(com.emc.storageos.db.client.model.Workflow.class);
        }
        // Are we updating or adding?
        if (logWorkflow == null) {
            created = true;
            logWorkflow = new com.emc.storageos.db.client.model.Workflow();
            logWorkflow.setId(workflow._workflowURI);
            logWorkflow.setCreationTime(Calendar.getInstance());
            logWorkflow.setCompleted(false);
        }
        logWorkflow.setOrchControllerName(workflow._orchControllerName);
        logWorkflow.setOrchMethod(workflow._orchMethod);
        logWorkflow.setOrchTaskId(workflow._orchTaskId);
        logWorkflow.setCompleted(completed);
        if (completed) {
            // If completed, log the final state and error message.
            try {
                Map<String, StepStatus> statusMap = workflow.getAllStepStatus();
                String[] errorMessage = new String[] { workflow._successMessage };
                Workflow.getOverallState(statusMap, errorMessage);
                WorkflowState state = workflow.getWorkflowState();
                logWorkflow.setCompletionState(state.name());
                logWorkflow.setCompletionMessage(errorMessage[0]);
            } catch (WorkflowException ex) {
                _log.error(ex.getMessage(), ex);
            }
        }
        if (created) {
            _dbClient.createObject(logWorkflow);
        } else {
            _dbClient.updateObject(logWorkflow);
        }
        if (workflow.getOrchTaskId() != null) {
            List<Task> tasks = new ArrayList<>();
            if (workflow._taskCompleter != null && workflow._taskCompleter.getId() != null) {
                Set<URI> taskIds = new HashSet<>();
                // The completer may carry several resource ids for a single request,
                // such as migrating a non-CG virtual volume.
                for (URI resourceId : workflow._taskCompleter.getIds()) {
                    Task task = TaskUtils.findTaskForRequestId(_dbClient, resourceId, workflow.getOrchTaskId());
                    if (task != null && !taskIds.contains(task.getId())) {
                        tasks.add(task);
                        taskIds.add(task.getId());
                    }
                }
                // Also pick up tasks where the resource is recorded as an associated
                // resource (rather than the primary resource instance).
                for (URI resourceId : workflow._taskCompleter.getIds()) {
                    Task task = TaskUtils.findTaskForRequestIdAssociatedResource(_dbClient, resourceId, workflow.getOrchTaskId());
                    if (task != null && !taskIds.contains(task.getId())) {
                        tasks.add(task);
                        taskIds.add(task.getId());
                    }
                }
            } else {
                List<Task> foundTasks = TaskUtils.findTasksForRequestId(_dbClient, workflow.getOrchTaskId());
                if (foundTasks != null && !foundTasks.isEmpty()) {
                    tasks.addAll(foundTasks);
                }
            }
            if (!tasks.isEmpty()) {
                for (Task task : tasks) {
                    task.setWorkflow(workflow.getWorkflowURI());
                }
                _dbClient.updateObject(tasks);
            }
        }
    } catch (DatabaseException ex) {
        _log.error("Cannot persist Cassandra Workflow record " + workflow.getWorkflowURI().toString(), ex);
    }
}
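A hedged usage sketch of the method above: from within WorkflowService, logWorkflow is typically called once when a workflow starts executing (completed = false) to create the Cassandra record, and again when it reaches a terminal state (completed = true) to capture the final state, completion message, and task links. The surrounding method name here is illustrative only and not part of the actual WorkflowService API.

// Illustrative sketch, assuming 'workflow' has already been built and its steps queued.
void runAndLogExample(Workflow workflow) {
    logWorkflow(workflow, false); // persist the initial, incomplete record
    try {
        // ... dispatch the workflow steps and wait for them to finish ...
    } finally {
        logWorkflow(workflow, true); // persist the terminal state once execution ends
    }
}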
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class Cassandraforplugin method query.
public static long query(DbClient dbClient) {
    ExecutorService executor = Executors.newFixedThreadPool(10);
    CountDownLatch latch = new CountDownLatch(1);
    DummyQueryResult result = new DummyQueryResult(latch);
    DateTime dateTime = new DateTime(DateTimeZone.UTC);
    try {
        dbClient.queryTimeSeries(StatTimeSeries.class, dateTime, result, executor);
    } catch (DatabaseException e) {
        _logger.error("Exception while querying time series", e);
    }
    try {
        latch.await(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        _logger.error(e.getMessage(), e);
    }
    return latch.getCount();
}
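The method hands the latch to DummyQueryResult and then waits on it, so the returned count is 0 when the query result arrived within the 60-second window and 1 when the wait timed out. A minimal caller sketch, assuming a started DbClient is available from the surrounding plugin context:

// Illustrative caller; obtaining and starting the DbClient is environment-specific.
long remaining = Cassandraforplugin.query(dbClient);
// remaining == 0 : the stat time-series query completed within the 60-second window
// remaining == 1 : the wait timed out before the query callback fired
System.out.println("Latch count after query: " + remaining);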
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class SmisMetaVolumeOperations method createMetaVolumeHead.
/**
* Create meta volume head device. Meta volume is represented by its head.
* We create it as a regular bound volume.
*
* @param storageSystem
* @param storagePool
* @param metaHead
* @param capacity
* @param capabilities
* @param metaVolumeTaskCompleter
* @throws Exception
*/
@Override
public void createMetaVolumeHead(StorageSystem storageSystem, StoragePool storagePool, Volume metaHead, long capacity,
        VirtualPoolCapabilityValuesWrapper capabilities, MetaVolumeTaskCompleter metaVolumeTaskCompleter) throws Exception {
    String label;
    _log.info(String.format("Create Meta Volume Head Start - Array: %s, Pool: %s, %n Head: %s, IsThinlyProvisioned: %s, Capacity: %s",
            storageSystem.getSerialNumber(), storagePool.getNativeId(), metaHead.getLabel(), metaHead.getThinlyProvisioned(), capacity));
    String tenantName = "";
    try {
        TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, metaHead.getTenant().getURI());
        tenantName = tenant.getLabel();
    } catch (DatabaseException e) {
        _log.error("Error looking up TenantOrg object", e);
    }
    label = _nameGenerator.generate(tenantName, metaHead.getLabel(), metaHead.getId().toString(), '-', SmisConstants.MAX_VOLUME_NAME_LENGTH);
    boolean isThinlyProvisioned = metaHead.getThinlyProvisioned();
    // Thin striped meta heads should be created unbound from the pool on VMAX.
    // Thin concatenated meta heads are created unbound from the pool on VMAX as well.
    // This is done so capacity can be preallocated later, when the meta volume is bound to the pool.
    boolean isBoundToPool = !(isThinlyProvisioned && DiscoveredDataObject.Type.vmax.toString().equalsIgnoreCase(storageSystem.getSystemType()));
    try {
        CIMObjectPath configSvcPath = _cimPath.getConfigSvcPath(storageSystem);
        CIMArgument[] inArgs;
        // only for vnxblock do we need to associate a StoragePool Setting as the Goal
        if (DiscoveredDataObject.Type.vnxblock.toString().equalsIgnoreCase(storageSystem.getSystemType())) {
            inArgs = _helper.getCreateVolumesInputArgumentsOnFastEnabledPool(storageSystem, storagePool, label, capacity, 1,
                    isThinlyProvisioned, capabilities.getAutoTierPolicyName());
        } else {
            inArgs = _helper.getCreateVolumesInputArguments(storageSystem, storagePool, label, capacity, 1, isThinlyProvisioned, null, isBoundToPool);
        }
        CIMArgument[] outArgs = new CIMArgument[5];
        StorageSystem forProvider = _helper.getStorageSystemForProvider(storageSystem, metaHead);
        _log.info("Selected Provider : {}", forProvider.getNativeGuid());
        SmisCreateMetaVolumeHeadJob smisJobCompleter = new SmisCreateMetaVolumeHeadJob(null, forProvider.getId(), metaVolumeTaskCompleter, metaHead.getId());
        _helper.invokeMethodSynchronously(forProvider, configSvcPath, _helper.createVolumesMethodName(forProvider), inArgs, outArgs, smisJobCompleter);
    } catch (WBEMException e) {
        _log.error("Problem making SMI-S call: ", e);
        ServiceError error = DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
        metaVolumeTaskCompleter.getVolumeTaskCompleter().error(_dbClient, error);
        throw e;
    } catch (Exception e) {
        _log.error("Problem in createMetaVolumeHead: " + metaHead.getLabel(), e);
        ServiceError error = DeviceControllerErrors.smis.methodFailed("createMetaVolumeHead", e.getMessage());
        metaVolumeTaskCompleter.getVolumeTaskCompleter().error(_dbClient, error);
        throw e;
    } finally {
        _log.info(String.format("Create Meta Volume Head End - Array:%s, Pool: %s, %n Head: %s",
                storageSystem.getSerialNumber(), storagePool.getNativeId(), metaHead.getLabel()));
    }
}
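Note the error-handling pattern above: both catch blocks fail the volume task completer before rethrowing, so a caller usually only needs to log or translate the exception rather than completing the task again. A hedged caller sketch follows; the variable names are assumptions and not taken from the project.

// Illustrative caller sketch; the operations object and its arguments are assumed to exist.
try {
    metaVolumeOperations.createMetaVolumeHead(storageSystem, storagePool, metaHead,
            capacity, capabilities, metaVolumeTaskCompleter);
} catch (Exception e) {
    // The task completer was already marked failed inside createMetaVolumeHead,
    // so simply propagate (or wrap) the exception here.
    throw e;
}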
use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.
the class ScheduledEventService method deactivateScheduledEvent.
/**
* Deactivates the scheduled event and its orders
*
* @param id the URN of a scheduled event to be deactivated
* @return OK if deactivation completed successfully
* @throws DatabaseException when a DB error occurs
*/
@POST
@Path("/{id}/deactivate")
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@CheckPermission(roles = { Role.TENANT_ADMIN })
public Response deactivateScheduledEvent(@PathParam("id") String id) throws DatabaseException {
    ScheduledEvent scheduledEvent = queryResource(uri(id));
    ArgValidator.checkEntity(scheduledEvent, uri(id), true);
    // deactivate all the orders from the scheduled event
    URIQueryResultList resultList = new URIQueryResultList();
    _dbClient.queryByConstraint(ContainmentConstraint.Factory.getScheduledEventOrderConstraint(uri(id)), resultList);
    for (URI uri : resultList) {
        log.info("deleting order: {}", uri);
        Order order = _dbClient.queryObject(Order.class, uri);
        client.delete(order);
    }
    try {
        log.info("Deleting a scheduledEvent {}:{}", scheduledEvent.getId(),
                ScheduleInfo.deserialize(org.apache.commons.codec.binary.Base64.decodeBase64(scheduledEvent.getScheduleInfo().getBytes(UTF_8))).toString());
    } catch (Exception e) {
        log.error("Failed to parse scheduledEvent.");
    }
    // deactivate the scheduled event
    client.delete(scheduledEvent);
    return Response.ok().build();
}