Use of com.emc.storageos.db.common.diff.DbSchemasDiff in project coprhd-controller by CoprHD.
In class DbSchemaCheckerTest, the method testGeoNewAnnotationOnNewField:
@Test
public void testGeoNewAnnotationOnNewField() {
    DbSchema srcGeoSchema = new DataObjectSchema(GeoClassUT.class);
    srcSchemas.addSchema(srcGeoSchema);

    tgtSchema = new DataObjectSchema(GeoNewField.class);
    tgtSchema.setType(srcSchema.getType());
    tgtSchemas.addSchema(tgtSchema);
    tgtSchemas.addSchema(srcSchema);

    DbSchemasDiff diff = new DbSchemasDiff(srcSchemas, tgtSchemas);

    // Adding index on new field of Geo object is allowed
    Assert.assertTrue(diff.isUpgradable());
    Assert.assertTrue(diff.isChanged());
}
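This test leans on srcSchema, srcSchemas, tgtSchema, and tgtSchemas fixtures that the listing does not show. A minimal sketch of how such a setup could look, assuming a no-arg DbSchemas constructor and a hypothetical ClassUT baseline model (neither is taken from the listing above):

private DbSchema srcSchema;
private DbSchema tgtSchema;
private DbSchemas srcSchemas;
private DbSchemas tgtSchemas;

@Before
public void setUp() {
    // the "source" side stands in for the schema already persisted in the db
    srcSchema = new DataObjectSchema(ClassUT.class); // hypothetical baseline model class
    srcSchemas = new DbSchemas();
    srcSchemas.addSchema(srcSchema);
    // the "target" side is populated by each test with the changed model class
    tgtSchemas = new DbSchemas();
}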
Use of com.emc.storageos.db.common.diff.DbSchemasDiff in project coprhd-controller by CoprHD.
In class MigrationHandlerImpl, the method run:
/**
 * Runs the db schema migration: waits for all nodes to reach migration init, then
 * upgrades the persisted schema to the target version under a global migration lock.
 */
@Override
public boolean run() throws DatabaseException {
    Date startTime = new Date();
    // set state to migration_init and wait for all nodes to reach this state
    setDbConfig(DbConfigConstants.MIGRATION_INIT);
    targetVersion = service.getVersion();
    statusChecker.setVersion(targetVersion);
    statusChecker.setServiceName(service.getName());
    // dbsvc will wait for all dbsvc, and geodbsvc waits for all geodbsvc.
    statusChecker.waitForAllNodesMigrationInit();
    if (schemaUtil.isStandby()) {
        String currentSchemaVersion = coordinator.getCurrentDbSchemaVersion();
        if (!StringUtils.equals(currentSchemaVersion, targetVersion)) {
            // no migration on standby site
            log.info("Migration does not run on standby. Change current version to {}", targetVersion);
            schemaUtil.setCurrentVersion(targetVersion);
        }
        return true;
    }
    if (schemaUtil.isGeoDbsvc()) {
        boolean schemaVersionChanged = isDbSchemaVersionChanged();
        // scan and update cassandra schema
        checkGeoDbSchema();
        // no migration procedure for geodbsvc, just wait till migration is done on one of the dbsvcs
        log.warn("Migration is not supported for Geodbsvc. Wait till migration is done");
        statusChecker.waitForMigrationDone();
        // Update vdc version
        if (schemaVersionChanged) {
            schemaUtil.insertOrUpdateVdcVersion(dbClient, true);
        }
        return true;
    } else {
        // for dbsvc, we have to wait till all geodbsvc becomes migration_init since we might
        // need to copy geo-replicated resources from local to geo db.
        statusChecker.waitForAllNodesMigrationInit(Constants.GEODBSVC_NAME);
    }
    InterProcessLock lock = null;
    String currentSchemaVersion = null;
    int retryCount = 0;
    while (retryCount < MAX_MIGRATION_RETRY) {
        log.debug("Migration handlers - Start. Trying to grab lock ...");
        try {
            // grab global lock for migration
            lock = getLock(DB_MIGRATION_LOCK);
            // make sure we haven't finished the migration on another node already
            MigrationStatus status = coordinator.getMigrationStatus();
            if (status != null) {
                if (status == MigrationStatus.DONE) {
                    log.info("DB migration is done already. Skipping...");
                    if (null == getPersistedSchema(targetVersion)) {
                        persistSchema(targetVersion, DbSchemaChecker.marshalSchemas(currentSchema, null));
                    }
                    return true;
                } else if (status == MigrationStatus.FAILED) {
                    log.error("DB migration has already failed with status: {}.", status);
                    return false;
                }
            }
            schemaUtil.setMigrationStatus(MigrationStatus.RUNNING);
            // we expect currentSchemaVersion to be set
            currentSchemaVersion = coordinator.getCurrentDbSchemaVersion();
            if (currentSchemaVersion == null) {
                throw new IllegalStateException("Schema version not set");
            }
            // figure out our source and target versions
            DbSchemas persistedSchema = getPersistedSchema(currentSchemaVersion);
            if (isSchemaMissed(persistedSchema, currentSchemaVersion, targetVersion)) {
                throw new IllegalStateException("Schema definition not found for version " + currentSchemaVersion);
            }
            if (isFreshInstall(persistedSchema, currentSchemaVersion, targetVersion)) {
                log.info("saving schema of version {} to db", currentSchemaVersion);
                persistedSchema = currentSchema;
                persistSchema(currentSchemaVersion, DbSchemaChecker.marshalSchemas(persistedSchema, null));
            }
            // check if we have a schema upgrade to deal with
            if (!currentSchemaVersion.equals(targetVersion)) {
                log.info("Start scanning and creating new column families");
                schemaUtil.checkCf(true);
                log.info("Scanning and creating new column families succeeded");
                DbSchemasDiff diff = new DbSchemasDiff(persistedSchema, currentSchema, ignoredPkgs);
                if (diff.isChanged()) {
                    // log the changes
                    dumpChanges(diff);
                    if (!diff.isUpgradable()) {
                        // we should never get here; if we do, dump the problematic diff and stop
                        log.error("schema diff details: {}", DbSchemaChecker.marshalSchemasDiff(diff));
                        throw new IllegalStateException("schema not upgradable.");
                    }
                }
                log.info("Starting migration callbacks from {} to {}", currentSchemaVersion, targetVersion);
                // we need to checkpoint the progress of these callbacks as they run,
                // so we can resume from where we left off in case of restarts/errors
                String checkpoint = schemaUtil.getMigrationCheckpoint();
                if (checkpoint != null) {
                    log.info("Migration checkpoint found for {}", checkpoint);
                }
                // run all migration callbacks
                runMigrationCallbacks(diff, checkpoint);
                log.info("Done migration callbacks");
                persistSchema(targetVersion, DbSchemaChecker.marshalSchemas(currentSchema, null));
                schemaUtil.dropUnusedCfsIfExists();
                // set current version in zk
                schemaUtil.setCurrentVersion(targetVersion);
                log.info("current schema version is updated to {}", targetVersion);
            }
            schemaUtil.setMigrationStatus(MigrationStatus.DONE);
            // Remove migration checkpoint after done
            schemaUtil.removeMigrationCheckpoint();
            removeMigrationFailInfoIfExist();
            log.debug("Migration handler - Done.");
            return true;
        } catch (Exception e) {
            if (e instanceof MigrationCallbackException) {
                markMigrationFailure(startTime, currentSchemaVersion, e);
            } else if (isUnRetryableException(e)) {
                markMigrationFailure(startTime, currentSchemaVersion, e);
                return false;
            } else {
                log.warn("Retryable exception during migration ", e);
                retryCount++;
                lastException = e;
            }
        } finally {
            if (lock != null) {
                try {
                    lock.release();
                } catch (Exception ignore) {
                    log.debug("lock release failed");
                }
            }
        }
        sleepBeforeRetry();
    } // while -- not done
    markMigrationFailure(startTime, currentSchemaVersion, lastException);
    return false;
}
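The checkpoint read before runMigrationCallbacks is what lets a restarted node resume a half-finished upgrade instead of re-running every callback. A minimal sketch of that resume pattern, assuming hypothetical helpers (getMigrationCallbacks, MigrationCallback.getName()/process(), schemaUtil.setMigrationCheckpoint) that are not taken from the listing above:

private void runCallbacksWithCheckpointSketch(DbSchemasDiff diff, String checkpoint) {
    // until the checkpointed callback is seen again, everything before it is skipped
    boolean resumed = (checkpoint == null);
    for (MigrationCallback callback : getMigrationCallbacks(diff)) { // hypothetical helper
        String name = callback.getName();
        if (!resumed) {
            // skip callbacks that already completed before the restart
            resumed = name.equals(checkpoint);
            continue;
        }
        callback.process();
        // record progress so a later restart resumes after this callback
        schemaUtil.setMigrationCheckpoint(name); // assumed counterpart of getMigrationCheckpoint()
    }
}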
Use of com.emc.storageos.db.common.diff.DbSchemasDiff in project coprhd-controller by CoprHD.
In class DbSchemaChecker, the method main:
public static void main(String[] args) throws Exception {
    String schemaFile = null;
    String[] pkgs = null;
    String[] ignoredPkgs = null;
    String dbSchemaVersion = null;
    String baseCallbackFile = null;
    String currentCallbackFile = null;
    SchemaLockType schemaLock = null;
    for (int i = 0; i < args.length; i++) {
        if (args[i].equals("-i")) {
            ignoredPkgs = args[++i].split(":");
            if (ignoredPkgs.length == 0) {
                usage();
                throw new IllegalArgumentException("no ignored packages provided");
            }
            continue;
        }
        if (args[i].equals("-v")) {
            dbSchemaVersion = args[++i];
            continue;
        }
        if (args[i].equals("-l")) {
            String lock = null;
            try {
                lock = args[++i].trim();
                schemaLock = SchemaLockType.valueOf(lock.toUpperCase());
                log.info("Schema lock:{}", schemaLock);
            } catch (IllegalArgumentException e) {
                usage();
                throw new IllegalArgumentException("Invalid schema lock: " + lock);
            }
            continue;
        }
        if (args[i].equals("-b")) {
            baseCallbackFile = args[++i];
            continue;
        }
        if (args[i].equals("-c")) {
            currentCallbackFile = args[++i];
            continue;
        }
        // positional arguments: <schema file> <colon-separated packages>
        schemaFile = args[i++];
        pkgs = args[i].split(":");
    }
    if (baseCallbackFile == null || currentCallbackFile == null) {
        usage();
        throw new IllegalArgumentException("no migration callback file provided");
    }
    if (schemaFile == null || pkgs.length == 0) {
        usage();
        throw new IllegalArgumentException("no schema file or packages provided");
    }
    DbMigrationCallbackChecker migrationCallbackChecker = new DbMigrationCallbackChecker(baseCallbackFile, currentCallbackFile);
    if (SchemaLockType.ALL.equals(schemaLock) && migrationCallbackChecker.hasDiff()) {
        Map<String, List<MigrationCallbackDiff>> versionedDiffs = migrationCallbackChecker.getDiff();
        dumpMigrationCallbackDiff(versionedDiffs);
        log.warn("All migration callbacks have been locked");
        System.exit(-1);
    }
    DbSchemaScanner scanner = new DbSchemaScanner(pkgs);
    scanner.setScannerInterceptor(new DbSchemaInterceptorImpl());
    scanner.scan();
    log.info("Check the integrity of DataObject classes in packages {}", pkgs);
    checkSourceSchema(pkgs);
    DbSchemas currentSchemas = scanner.getSchemas();
    if (currentSchemas.hasDuplicateField()) {
        Map<String, List<FieldInfo>> schemaDuplicateFields = currentSchemas.getDuplicateFields();
        dumpDuplicateColumns(schemaDuplicateFields);
        System.exit(-1);
    }
    log.info("Check db schemas of the packages: {}\nagainst schema file: {}", pkgs, schemaFile);
    try (BufferedReader reader = new BufferedReader(new FileReader(schemaFile))) {
        DbSchemas spec = unmarshalSchemas(dbSchemaVersion, reader);
        DbSchemasDiff diff = new DbSchemasDiff(spec, currentSchemas, ignoredPkgs);
        if (diff.isChanged()) {
            log.info("schema diffs: {}", marshalSchemasDiff(diff));
            switch (schemaLock) {
                case ALL:
                    log.error("All the db schemas have been locked");
                    System.exit(-1);
                    break;
                case GEO:
                    if (genGeoDiffs(spec, scanner.getGeoSchemas()).isChanged()) {
                        log.error("The geo db schemas have been locked");
                        System.exit(-1);
                    }
                    // falls through to the upgradability check
                case NONE:
                default:
                    if (diff.isUpgradable()) {
                        log.warn("The db schemas are changed but upgradable");
                    } else {
                        log.error("The db schemas are changed and not upgradable");
                        System.exit(-1);
                    }
            }
        } else {
            log.info("The Db schemas are the SAME");
        }
    }
}
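Put together, the argument parsing above expects five optional flags plus two positional arguments (the schema file and a colon-separated package list). A hedged example of driving the checker programmatically; every path, version, and package below is a placeholder, only the flag names come from the code above:

public class DbSchemaCheckerExample {
    public static void main(String[] args) throws Exception {
        DbSchemaChecker.main(new String[] {
                "-v", "3.6",                        // version used to unmarshal the baseline schema file
                "-l", "none",                       // schema lock type: ALL, GEO or NONE
                "-b", "/tmp/base-callbacks.txt",    // baseline migration callback file
                "-c", "/tmp/current-callbacks.txt", // current migration callback file
                "-i", "com.example.ignored",        // colon-separated packages to ignore
                "/tmp/db-schema-baseline.xml",      // positional: baseline schema file
                "com.emc.storageos.db.client.model" // positional: colon-separated packages to scan
        });
    }
}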
Use of com.emc.storageos.db.common.diff.DbSchemasDiff in project coprhd-controller by CoprHD.
In class BaseDbSchemaCheckerTest, the method testCustomMigrationExistingField:
@Test
public void testCustomMigrationExistingField() {
    tgtSchema = new DataObjectSchema(CustomMigrationExistingField.class);
    tgtSchema.setType(tgtSchema.getType());
    tgtSchemas.addSchema(tgtSchema);

    diff = new DbSchemasDiff(srcSchemas, tgtSchemas);

    Assert.assertTrue(diff.isUpgradable());
    Assert.assertFalse(diff.isChanged());
}
Use of com.emc.storageos.db.common.diff.DbSchemasDiff in project coprhd-controller by CoprHD.
In class DbSchemaCheckerTest, the method testNewNotPermittedFieldAnnotation:
@Test
public void testNewNotPermittedFieldAnnotation() {
    tgtSchema = new DataObjectSchema(NewNotPermittedFieldAnnotation.class);
    tgtSchema.setType(srcSchema.getType());
    tgtSchemas.addSchema(tgtSchema);

    DbSchemasDiff diff = new DbSchemasDiff(srcSchemas, tgtSchemas);

    Assert.assertFalse(diff.isUpgradable());
    Assert.assertTrue(diff.isChanged());
}
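The annotated fixture classes these tests compare (GeoNewField, NewNotPermittedFieldAnnotation, and so on) are defined elsewhere in the test sources. A hedged sketch of what such a fixture could look like; the column family name, field, and the choice of @Ttl as the "not permitted" annotation are illustrative assumptions, not the actual CoprHD fixtures:

@Cf("class_ut")
public static class NewNotPermittedFieldAnnotation extends DataObject {
    private String label;

    @Ttl(60) // annotation assumed to be disallowed on an existing field, for illustration
    @Name("label")
    public String getLabel() {
        return label;
    }

    public void setLabel(String label) {
        this.label = label;
        setChanged("label");
    }
}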