Example 36 with DatabaseException

Use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.

From the class MigrationHandlerImpl, method run.

/**
 * Runs the DB schema migration: waits for all nodes to reach the
 * migration-init state, then applies migration callbacks under a
 * global migration lock, retrying on transient failures.
 */
@Override
public boolean run() throws DatabaseException {
    Date startTime = new Date();
    // set state to migration_init and wait for all nodes to reach this state
    setDbConfig(DbConfigConstants.MIGRATION_INIT);
    targetVersion = service.getVersion();
    statusChecker.setVersion(targetVersion);
    statusChecker.setServiceName(service.getName());
    // each dbsvc waits for all dbsvc nodes, and each geodbsvc waits for all geodbsvc nodes
    statusChecker.waitForAllNodesMigrationInit();
    if (schemaUtil.isStandby()) {
        String currentSchemaVersion = coordinator.getCurrentDbSchemaVersion();
        if (!StringUtils.equals(currentSchemaVersion, targetVersion)) {
            // no migration on standby site
            log.info("Migration does not run on standby. Change current version to {}", targetVersion);
            schemaUtil.setCurrentVersion(targetVersion);
        }
        return true;
    }
    if (schemaUtil.isGeoDbsvc()) {
        boolean schemaVersionChanged = isDbSchemaVersionChanged();
        // scan and update cassandra schema
        checkGeoDbSchema();
        // no migration procedure for geodbsvc; just wait till migration is done on one of the dbsvcs
        log.warn("Migration is not supported for Geodbsvc. Wait till migration is done");
        statusChecker.waitForMigrationDone();
        // Update vdc version
        if (schemaVersionChanged) {
            schemaUtil.insertOrUpdateVdcVersion(dbClient, true);
        }
        return true;
    } else {
        // for dbsvc, we have to wait till all geodbsvc becomes migration_init since we might
        // need to copy geo-replicated resources from local to geo db.
        statusChecker.waitForAllNodesMigrationInit(Constants.GEODBSVC_NAME);
    }
    InterProcessLock lock = null;
    String currentSchemaVersion = null;
    int retryCount = 0;
    while (retryCount < MAX_MIGRATION_RETRY) {
        log.debug("Migration handlers - Start. Trying to grab lock ...");
        try {
            // grab global lock for migration
            lock = getLock(DB_MIGRATION_LOCK);
            // make sure we haven't finished the migration on another node already
            MigrationStatus status = coordinator.getMigrationStatus();
            if (status != null) {
                if (status == MigrationStatus.DONE) {
                    log.info("DB migration is done already. Skipping...");
                    if (null == getPersistedSchema(targetVersion)) {
                        persistSchema(targetVersion, DbSchemaChecker.marshalSchemas(currentSchema, null));
                    }
                    return true;
                } else if (status == MigrationStatus.FAILED) {
                    log.error("DB migration is done already with status:{}. ", status);
                    return false;
                }
            }
            schemaUtil.setMigrationStatus(MigrationStatus.RUNNING);
            // we expect currentSchemaVersion to be set
            currentSchemaVersion = coordinator.getCurrentDbSchemaVersion();
            if (currentSchemaVersion == null) {
                throw new IllegalStateException("Schema version not set");
            }
            // figure out our source and target versions
            DbSchemas persistedSchema = getPersistedSchema(currentSchemaVersion);
            if (isSchemaMissed(persistedSchema, currentSchemaVersion, targetVersion)) {
                throw new IllegalStateException("Schema definition not found for version " + currentSchemaVersion);
            }
            if (isFreshInstall(persistedSchema, currentSchemaVersion, targetVersion)) {
                log.info("saving schema of version {} to db", currentSchemaVersion);
                persistedSchema = currentSchema;
                persistSchema(currentSchemaVersion, DbSchemaChecker.marshalSchemas(persistedSchema, null));
            }
            // check if we have a schema upgrade to deal with
            if (!currentSchemaVersion.equals(targetVersion)) {
                log.info("Start scanning and creating new column families");
                schemaUtil.checkCf(true);
                log.info("Scanning and creating new column families succeed");
                DbSchemasDiff diff = new DbSchemasDiff(persistedSchema, currentSchema, ignoredPkgs);
                if (diff.isChanged()) {
                    // log the changes
                    dumpChanges(diff);
                    if (!diff.isUpgradable()) {
                        // We should never get here; if we do, throw an IllegalStateException and stop.
                        // TODO: dump the problematic diffs here
                        log.error("schema diff details: {}", DbSchemaChecker.marshalSchemasDiff(diff));
                        throw new IllegalStateException("schema not upgradable.");
                    }
                }
                log.info("Starting migration callbacks from {} to {}", currentSchemaVersion, targetVersion);
                // we need to check point the progress of these callbacks as they are run,
                // so we can resume from where we left off in case of restarts/errors
                String checkpoint = schemaUtil.getMigrationCheckpoint();
                if (checkpoint != null) {
                    log.info("Migration checkpoint found for {}", checkpoint);
                }
                // run all migration callbacks
                runMigrationCallbacks(diff, checkpoint);
                log.info("Done migration callbacks");
                persistSchema(targetVersion, DbSchemaChecker.marshalSchemas(currentSchema, null));
                schemaUtil.dropUnusedCfsIfExists();
                // set current version in zk
                schemaUtil.setCurrentVersion(targetVersion);
                log.info("current schema version is updated to {}", targetVersion);
            }
            schemaUtil.setMigrationStatus(MigrationStatus.DONE);
            // Remove migration checkpoint after done
            schemaUtil.removeMigrationCheckpoint();
            removeMigrationFailInfoIfExist();
            log.debug("Migration handler - Done.");
            return true;
        } catch (Exception e) {
            if (e instanceof MigrationCallbackException) {
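                // Record the failure but do not return here: the loop continues,
                // and the MigrationStatus.FAILED check above can end it on the next pass.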
                markMigrationFailure(startTime, currentSchemaVersion, e);
            } else if (isUnRetryableException(e)) {
                markMigrationFailure(startTime, currentSchemaVersion, e);
                return false;
            } else {
                log.warn("Retryable exception during migration ", e);
                retryCount++;
                lastException = e;
            }
        } finally {
            if (lock != null) {
                try {
                    lock.release();
                } catch (Exception ignore) {
                    log.debug("lock release failed");
                }
            }
        }
        sleepBeforeRetry();
    } // while -- not done
    markMigrationFailure(startTime, currentSchemaVersion, lastException);
    return false;
}
Also used: MigrationCallbackException(com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException) DbSchemasDiff(com.emc.storageos.db.common.diff.DbSchemasDiff) InterProcessLock(org.apache.curator.framework.recipes.locks.InterProcessLock) DbSchemas(com.emc.storageos.db.common.schema.DbSchemas) MigrationStatus(com.emc.storageos.coordinator.client.model.MigrationStatus) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) FatalCoordinatorException(com.emc.storageos.coordinator.exceptions.FatalCoordinatorException) FatalDatabaseException(com.emc.storageos.db.exceptions.FatalDatabaseException)
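
The shape of run() above — take a global lock, re-check the shared migration status, retry on transient failures, and always release the lock before sleeping — can be reduced to a small sketch. This is an illustrative pattern, not CoprHD code: the class, the method names, and the in-process ReentrantLock (standing in for Curator's distributed InterProcessLock) are all hypothetical.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Minimal sketch of a lock-guarded, retryable migration step.
public class RetryableMigrationSketch {

    private static final int MAX_RETRY = 3;
    // Stand-in for the distributed Curator InterProcessLock used above.
    private final Lock migrationLock = new ReentrantLock();

    public boolean run() {
        int retryCount = 0;
        Exception lastException = null;
        while (retryCount < MAX_RETRY) {
            migrationLock.lock();
            try {
                doMigrationStep(); // may throw a transient (retryable) exception
                return true;       // success; finally still releases the lock
            } catch (Exception e) {
                if (!isRetryable(e)) {
                    return false;  // fatal: give up immediately
                }
                lastException = e; // transient: remember and retry
                retryCount++;
            } finally {
                migrationLock.unlock(); // always release before sleeping/retrying
            }
            sleepBeforeRetry();
        }
        System.err.println("Migration failed after " + MAX_RETRY + " retries: " + lastException);
        return false;
    }

    private void doMigrationStep() throws Exception { /* hypothetical work */ }

    private boolean isRetryable(Exception e) { return true; /* placeholder policy */ }

    private void sleepBeforeRetry() {
        try {
            Thread.sleep(1000L);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
    }
}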

Example 37 with DatabaseException

Use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.

From the class WorkflowService, method logWorkflow.

/**
 * Persist the Cassandra logging record for the Workflow.
 *
 * @param workflow
 *            the Workflow to persist
 * @param completed
 *            if true, assumes the Workflow has completed
 *            (reached a terminal state)
 */
void logWorkflow(Workflow workflow, boolean completed) {
    try {
        boolean created = false;
        com.emc.storageos.db.client.model.Workflow logWorkflow = null;
        if (workflow._workflowURI != null) {
            logWorkflow = _dbClient.queryObject(com.emc.storageos.db.client.model.Workflow.class, workflow._workflowURI);
        } else {
            workflow._workflowURI = URIUtil.createId(com.emc.storageos.db.client.model.Workflow.class);
        }
        // Are we updating or adding?
        if (logWorkflow == null) {
            created = true;
            logWorkflow = new com.emc.storageos.db.client.model.Workflow();
            logWorkflow.setId(workflow._workflowURI);
            logWorkflow.setCreationTime(Calendar.getInstance());
            logWorkflow.setCompleted(false);
        }
        logWorkflow.setOrchControllerName(workflow._orchControllerName);
        logWorkflow.setOrchMethod(workflow._orchMethod);
        logWorkflow.setOrchTaskId(workflow._orchTaskId);
        logWorkflow.setCompleted(completed);
        if (completed) {
            // If completed, log the final state and error message.
            try {
                Map<String, StepStatus> statusMap = workflow.getAllStepStatus();
                String[] errorMessage = new String[] { workflow._successMessage };
                Workflow.getOverallState(statusMap, errorMessage);
                WorkflowState state = workflow.getWorkflowState();
                logWorkflow.setCompletionState(state.name());
                logWorkflow.setCompletionMessage(errorMessage[0]);
            } catch (WorkflowException ex) {
                _log.error(ex.getMessage(), ex);
            }
        }
        if (created) {
            _dbClient.createObject(logWorkflow);
        } else {
            _dbClient.updateObject(logWorkflow);
        }
        if (workflow.getOrchTaskId() != null) {
            List<Task> tasks = new ArrayList<>();
            if (workflow._taskCompleter != null && workflow._taskCompleter.getId() != null) {
                Set<URI> taskIds = new HashSet<>();
                // A single completer may span multiple resources, such as when migrating a non-CG virtual volume.
                for (URI resourceId : workflow._taskCompleter.getIds()) {
                    Task task = TaskUtils.findTaskForRequestId(_dbClient, resourceId, workflow.getOrchTaskId());
                    if (task != null && !taskIds.contains(task.getId())) {
                        tasks.add(task);
                        taskIds.add(task.getId());
                    }
                }
                // Also find tasks tied to an associated resource (e.g. a volume associated with a migration instance).
                for (URI resourceId : workflow._taskCompleter.getIds()) {
                    Task task = TaskUtils.findTaskForRequestIdAssociatedResource(_dbClient, resourceId, workflow.getOrchTaskId());
                    if (task != null && !taskIds.contains(task.getId())) {
                        tasks.add(task);
                        taskIds.add(task.getId());
                    }
                }
            } else {
                List<Task> foundTasks = TaskUtils.findTasksForRequestId(_dbClient, workflow.getOrchTaskId());
                if (foundTasks != null && !foundTasks.isEmpty()) {
                    tasks.addAll(foundTasks);
                }
            }
            if (tasks != null && !tasks.isEmpty()) {
                for (Task task : tasks) {
                    task.setWorkflow(workflow.getWorkflowURI());
                }
                _dbClient.updateObject(tasks);
            }
        }
    } catch (DatabaseException ex) {
        _log.error("Cannot persist Cassandra Workflow record " + workflow.getWorkflowURI().toString(), ex);
    }
}
Also used: Task(com.emc.storageos.db.client.model.Task) ArrayList(java.util.ArrayList) StepStatus(com.emc.storageos.workflow.Workflow.StepStatus) URI(java.net.URI) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) HashSet(java.util.HashSet)
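
Note how logWorkflow catches DatabaseException at the top level and only logs it: a failed bookkeeping write must not fail the workflow itself. Below is a minimal sketch of that best-effort pattern, using hypothetical names and a generic RuntimeException standing in for DatabaseException.

import java.util.logging.Level;
import java.util.logging.Logger;

// Minimal sketch: best-effort persistence that logs failures instead of rethrowing.
public class BestEffortLoggerSketch {

    private static final Logger log = Logger.getLogger(BestEffortLoggerSketch.class.getName());

    void persistRecord(Object record) {
        try {
            save(record); // may fail with a persistence exception
        } catch (RuntimeException ex) {
            // Swallow and log: an audit/log write failure must not fail the caller.
            log.log(Level.SEVERE, "Cannot persist record " + record, ex);
        }
    }

    private void save(Object record) { /* hypothetical DB write */ }
}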

Example 38 with DatabaseException

Use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.

From the class Cassandraforplugin, method query.

public static long query(DbClient dbClient) {
    ExecutorService executor = Executors.newFixedThreadPool(10);
    CountDownLatch latch = new CountDownLatch(1);
    DummyQueryResult result = new DummyQueryResult(latch);
    DateTime dateTime = new DateTime(DateTimeZone.UTC);
    try {
        dbClient.queryTimeSeries(StatTimeSeries.class, dateTime, result, executor);
    } catch (DatabaseException e) {
        _logger.error("Exception Query" + e);
    }
    try {
        latch.await(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        _logger.error(e.getMessage(), e);
    }
    // 0 if the latch was counted down within the timeout, 1 otherwise
    return latch.getCount();
}
Also used: ExecutorService(java.util.concurrent.ExecutorService) CountDownLatch(java.util.concurrent.CountDownLatch) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) DateTime(org.joda.time.DateTime)
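
The query method shows a common shape for asynchronous reads: hand the executor and a latch-backed result callback to the client, then block with a bounded await. A self-contained sketch of just that latch pattern follows; the names and the simulated callback are hypothetical.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Minimal sketch of the async-query-plus-latch pattern.
public class LatchedQuerySketch {

    public static long runQuery() {
        ExecutorService executor = Executors.newFixedThreadPool(10);
        CountDownLatch latch = new CountDownLatch(1);
        executor.submit(() -> {
            try {
                // Stand-in for the async query; the real result callback
                // counts the latch down when the query completes.
            } finally {
                latch.countDown();
            }
        });
        try {
            // Bounded wait: await returns false on timeout instead of hanging.
            if (!latch.await(60, TimeUnit.SECONDS)) {
                System.err.println("query timed out");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            executor.shutdown();
        }
        return latch.getCount(); // 0 if the callback fired, 1 otherwise
    }
}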

Example 39 with DatabaseException

Use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.

From the class SmisMetaVolumeOperations, method createMetaVolumeHead.

/**
 * Create the meta volume head device. A meta volume is represented by its
 * head; we create it as a regular bound volume.
 *
 * @param storageSystem the array on which to create the head
 * @param storagePool the pool from which to allocate the head
 * @param metaHead the Volume object representing the meta volume head
 * @param capacity the requested capacity
 * @param capabilities virtual pool capability values
 * @param metaVolumeTaskCompleter completer updated on success or failure
 * @throws Exception
 */
@Override
public void createMetaVolumeHead(StorageSystem storageSystem, StoragePool storagePool, Volume metaHead, long capacity, VirtualPoolCapabilityValuesWrapper capabilities, MetaVolumeTaskCompleter metaVolumeTaskCompleter) throws Exception {
    String label;
    _log.info(String.format("Create Meta Volume Head Start - Array: %s, Pool: %s, %n   Head: %s, IsThinlyProvisioned: %s, Capacity: %s", storageSystem.getSerialNumber(), storagePool.getNativeId(), metaHead.getLabel(), metaHead.getThinlyProvisioned(), capacity));
    String tenantName = "";
    try {
        TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, metaHead.getTenant().getURI());
        tenantName = tenant.getLabel();
    } catch (DatabaseException e) {
        _log.error("Error lookup TenantOrb object", e);
    }
    label = _nameGenerator.generate(tenantName, metaHead.getLabel(), metaHead.getId().toString(), '-', SmisConstants.MAX_VOLUME_NAME_LENGTH);
    boolean isThinlyProvisioned = metaHead.getThinlyProvisioned();
    // Thin striped meta heads should be created unbound from pool on VMAX.
    // Thin concatenated meta heads are created unbound from pool on VMAX as well.
    // This is done to preallocate capacity later, when the meta volume is bound to the pool.
    boolean isBoundToPool = !(isThinlyProvisioned && DiscoveredDataObject.Type.vmax.toString().equalsIgnoreCase(storageSystem.getSystemType()));
    try {
        CIMObjectPath configSvcPath = _cimPath.getConfigSvcPath(storageSystem);
        CIMArgument[] inArgs;
        // only for vnxBlock, we need to associate StoragePool Setting as Goal
        if (DiscoveredDataObject.Type.vnxblock.toString().equalsIgnoreCase(storageSystem.getSystemType())) {
            inArgs = _helper.getCreateVolumesInputArgumentsOnFastEnabledPool(storageSystem, storagePool, label, capacity, 1, isThinlyProvisioned, capabilities.getAutoTierPolicyName());
        } else {
            inArgs = _helper.getCreateVolumesInputArguments(storageSystem, storagePool, label, capacity, 1, isThinlyProvisioned, null, isBoundToPool);
        }
        CIMArgument[] outArgs = new CIMArgument[5];
        StorageSystem forProvider = _helper.getStorageSystemForProvider(storageSystem, metaHead);
        _log.info("Selected Provider : {}", forProvider.getNativeGuid());
        SmisCreateMetaVolumeHeadJob smisJobCompleter = new SmisCreateMetaVolumeHeadJob(null, forProvider.getId(), metaVolumeTaskCompleter, metaHead.getId());
        _helper.invokeMethodSynchronously(forProvider, configSvcPath, _helper.createVolumesMethodName(forProvider), inArgs, outArgs, smisJobCompleter);
    } catch (WBEMException e) {
        _log.error("Problem making SMI-S call: ", e);
        ServiceError error = DeviceControllerErrors.smis.unableToCallStorageProvider(e.getMessage());
        metaVolumeTaskCompleter.getVolumeTaskCompleter().error(_dbClient, error);
        throw e;
    } catch (Exception e) {
        _log.error("Problem in createMetaVolumeHead: " + metaHead.getLabel(), e);
        ServiceError error = DeviceControllerErrors.smis.methodFailed("createMetaVolumeHead", e.getMessage());
        metaVolumeTaskCompleter.getVolumeTaskCompleter().error(_dbClient, error);
        throw e;
    } finally {
        _log.info(String.format("Create Meta Volume Head End - Array:%s, Pool: %s, %n   Head: %s", storageSystem.getSerialNumber(), storagePool.getNativeId(), metaHead.getLabel()));
    }
}
Also used: ServiceError(com.emc.storageos.svcs.errorhandling.model.ServiceError) TenantOrg(com.emc.storageos.db.client.model.TenantOrg) CIMObjectPath(javax.cim.CIMObjectPath) WBEMException(javax.wbem.WBEMException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) SmisCreateMetaVolumeHeadJob(com.emc.storageos.volumecontroller.impl.smis.job.SmisCreateMetaVolumeHeadJob) DeviceControllerException(com.emc.storageos.exceptions.DeviceControllerException) CIMArgument(javax.cim.CIMArgument) StorageSystem(com.emc.storageos.db.client.model.StorageSystem)
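
A pattern worth noting in createMetaVolumeHead is the error handling: each failure is translated into a ServiceError, the task completer is failed first, and only then is the exception rethrown, with matched start/end logging in finally. Here is a stripped-down sketch of that shape; all names in it are hypothetical.

// Minimal sketch: fail the task completer before rethrowing, log start/end symmetrically.
public class ErrorTranslationSketch {

    interface TaskCompleter {
        void error(String message);
    }

    void createDevice(TaskCompleter completer) throws Exception {
        System.out.println("createDevice start");
        try {
            invokeProvider(); // may throw a provider-level exception
        } catch (Exception e) {
            // Mark the task failed for the caller/UI first, then propagate.
            completer.error("createDevice failed: " + e.getMessage());
            throw e;
        } finally {
            System.out.println("createDevice end"); // runs on success and failure
        }
    }

    private void invokeProvider() throws Exception { /* hypothetical SMI-S call */ }
}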

Example 40 with DatabaseException

Use of com.emc.storageos.db.exceptions.DatabaseException in project coprhd-controller by CoprHD.

From the class ScheduledEventService, method deactivateScheduledEvent.

/**
 * Deactivates the scheduled event and its orders
 *
 * @param id the URN of a scheduled event to be deactivated
 * @return OK if deactivation completed successfully
 * @throws DatabaseException when a DB error occurs
 */
@POST
@Path("/{id}/deactivate")
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@CheckPermission(roles = { Role.TENANT_ADMIN })
public Response deactivateScheduledEvent(@PathParam("id") String id) throws DatabaseException {
    ScheduledEvent scheduledEvent = queryResource(uri(id));
    ArgValidator.checkEntity(scheduledEvent, uri(id), true);
    // deactivate all the orders from the scheduled event
    URIQueryResultList resultList = new URIQueryResultList();
    _dbClient.queryByConstraint(ContainmentConstraint.Factory.getScheduledEventOrderConstraint(uri(id)), resultList);
    for (URI uri : resultList) {
        log.info("deleting order: {}", uri);
        Order order = _dbClient.queryObject(Order.class, uri);
        client.delete(order);
    }
    try {
        log.info("Deleting a scheduledEvent {}:{}", scheduledEvent.getId(), ScheduleInfo.deserialize(org.apache.commons.codec.binary.Base64.decodeBase64(scheduledEvent.getScheduleInfo().getBytes(UTF_8))).toString());
    } catch (Exception e) {
        log.error("Failed to parse scheduledEvent.");
    }
    // deactivate the scheduled event
    client.delete(scheduledEvent);
    return Response.ok().build();
}
Also used: NamedURI(com.emc.storageos.db.client.model.NamedURI) URI(java.net.URI) URIQueryResultList(com.emc.storageos.db.client.constraint.URIQueryResultList) APIException(com.emc.storageos.svcs.errorhandling.resources.APIException) BadRequestException(com.emc.storageos.svcs.errorhandling.resources.BadRequestException) DatabaseException(com.emc.storageos.db.exceptions.DatabaseException) ParseException(java.text.ParseException) CheckPermission(com.emc.storageos.security.authorization.CheckPermission)
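
deactivateScheduledEvent follows a simple cascading-delete order: query the dependent orders through a containment constraint, delete each one, and only then delete the parent event. A generic sketch of that ordering follows; the Store interface and names are hypothetical stand-ins for the DbClient and its constraint query.

import java.net.URI;
import java.util.List;

// Minimal sketch: delete dependents before the parent record.
public class CascadingDeleteSketch {

    interface Store {
        List<URI> findDependents(URI parent); // stand-in for a containment-constraint query
        void delete(URI id);
    }

    public void deactivate(Store store, URI parent) {
        // Delete children first so the parent never points at live dependents.
        for (URI child : store.findDependents(parent)) {
            store.delete(child);
        }
        store.delete(parent);
    }
}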

Aggregations

DatabaseException (com.emc.storageos.db.exceptions.DatabaseException): 109 usages
URI (java.net.URI): 70 usages
ArrayList (java.util.ArrayList): 29 usages
URIQueryResultList (com.emc.storageos.db.client.constraint.URIQueryResultList): 22 usages
IOException (java.io.IOException): 21 usages
StorageSystem (com.emc.storageos.db.client.model.StorageSystem): 20 usages
DeviceControllerException (com.emc.storageos.exceptions.DeviceControllerException): 19 usages
ServiceError (com.emc.storageos.svcs.errorhandling.model.ServiceError): 18 usages
BlockSnapshot (com.emc.storageos.db.client.model.BlockSnapshot): 17 usages
BaseCollectionException (com.emc.storageos.plugins.BaseCollectionException): 16 usages
NamedURI (com.emc.storageos.db.client.model.NamedURI): 14 usages
ControllerException (com.emc.storageos.volumecontroller.ControllerException): 13 usages
TenantOrg (com.emc.storageos.db.client.model.TenantOrg): 12 usages
HashSet (java.util.HashSet): 12 usages
StoragePool (com.emc.storageos.db.client.model.StoragePool): 11 usages
StoragePort (com.emc.storageos.db.client.model.StoragePort): 11 usages
Volume (com.emc.storageos.db.client.model.Volume): 11 usages
HashMap (java.util.HashMap): 11 usages
List (java.util.List): 11 usages
WBEMException (javax.wbem.WBEMException): 11 usages