use of com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException in project coprhd-controller by CoprHD.
the class FileSnapshotPolicyMigration method process.
@Override
public void process() throws MigrationCallbackException {
    logger.info("File snapshot schedule policy to file policy migration START");
    DbClient dbClient = getDbClient();
    try {
        List<URI> schedulePolicyURIs = dbClient.queryByType(SchedulePolicy.class, true);
        Iterator<SchedulePolicy> schedulePolicies = dbClient.queryIterativeObjects(SchedulePolicy.class, schedulePolicyURIs, true);
        List<FilePolicy> filePolicies = new ArrayList<FilePolicy>();
        List<VirtualPool> modifiedVpools = new ArrayList<VirtualPool>();
        while (schedulePolicies.hasNext()) {
            SchedulePolicy schedulePolicy = schedulePolicies.next();
            FilePolicy fileSnapshotPolicy = new FilePolicy();
            VirtualPool associatedVP = new VirtualPool();
            fileSnapshotPolicy.setId(URIUtil.createId(FilePolicy.class));
            if (schedulePolicy.getAssignedResources() != null && !schedulePolicy.getAssignedResources().isEmpty()) {
                for (String assignedResource : schedulePolicy.getAssignedResources()) {
                    logger.info("assigning resource to fileSnapshotPolicy from schedulePolicy : {}", schedulePolicy.getAssignedResources());
                    fileSnapshotPolicy.addAssignedResources(resourceURI(assignedResource));
                    logger.info("Assigned resources from fileSnapshotPolicy : {}", fileSnapshotPolicy.getAssignedResources());
                }
            }
            fileSnapshotPolicy.setFilePolicyDescription("Policy created from Schedule Policy " + schedulePolicy.getLabel() + " while system upgrade");
            String polName = schedulePolicy.getLabel() + "_File_Snapshot_Policy";
            fileSnapshotPolicy.setLabel(polName);
            fileSnapshotPolicy.setFilePolicyName(schedulePolicy.getLabel());
            fileSnapshotPolicy.setFilePolicyType(FilePolicyType.file_snapshot.name());
            fileSnapshotPolicy.setScheduleFrequency(schedulePolicy.getScheduleFrequency());
            fileSnapshotPolicy.setScheduleRepeat(schedulePolicy.getScheduleRepeat());
            fileSnapshotPolicy.setScheduleTime(schedulePolicy.getScheduleTime());
            fileSnapshotPolicy.setScheduleDayOfWeek(schedulePolicy.getScheduleDayOfWeek());
            fileSnapshotPolicy.setScheduleDayOfMonth(schedulePolicy.getScheduleDayOfMonth());
            fileSnapshotPolicy.setSnapshotExpireTime(schedulePolicy.getSnapshotExpireTime());
            fileSnapshotPolicy.setSnapshotExpireType(schedulePolicy.getSnapshotExpireType());
            // The snapshot policy applies at the file system level.
            fileSnapshotPolicy.setApplyAt(FilePolicyApplyLevel.file_system.name());
            if (schedulePolicy.getAssignedResources() != null && !schedulePolicy.getAssignedResources().isEmpty()) {
                List<URI> fileShareURIs = getAssignedResourcesURIs(schedulePolicy.getAssignedResources());
                for (URI fsURI : fileShareURIs) {
                    FileShare fs = dbClient.queryObject(FileShare.class, fsURI);
                    if (!fs.getInactive()) {
                        StorageSystem system = dbClient.queryObject(StorageSystem.class, fs.getStorageDevice());
                        updatePolicyStorageResouce(system, fileSnapshotPolicy, fs);
                        // Remove the existing schedule policy from the file system
                        // and add the new file policy to it.
                        StringSet fsExistingPolicies = fs.getFilePolicies();
                        if (fsExistingPolicies != null && !fsExistingPolicies.isEmpty()) {
                            Set<String> snapSchedulesToRemove = new HashSet<String>();
                            for (String existingSnapPolicyId : fsExistingPolicies) {
                                if (URIUtil.isType(URI.create(existingSnapPolicyId), SchedulePolicy.class)) {
                                    snapSchedulesToRemove.add(existingSnapPolicyId);
                                }
                            }
                            if (!snapSchedulesToRemove.isEmpty()) {
                                /*
                                 * StringSet.removeAll() does not work if the set has only one entry.
                                 * Hence the logic below.
                                 */
                                if (fsExistingPolicies.size() == 1 && snapSchedulesToRemove.size() == 1) {
                                    fsExistingPolicies.clear();
                                } else {
                                    fsExistingPolicies.removeAll(snapSchedulesToRemove);
                                }
                            }
                        } else {
                            fsExistingPolicies = new StringSet();
                        }
                        fsExistingPolicies.add(fileSnapshotPolicy.getId().toString());
                        fs.setFilePolicies(fsExistingPolicies);
                        dbClient.updateObject(fs);
                        URI associatedVPId = fs.getVirtualPool();
                        associatedVP = dbClient.queryObject(VirtualPool.class, associatedVPId);
                        associatedVP.setAllowFilePolicyAtFSLevel(true);
                        modifiedVpools.add(associatedVP);
                    }
                }
            }
            filePolicies.add(fileSnapshotPolicy);
        }
        // Update DB
        if (!filePolicies.isEmpty()) {
            logger.info("Created {} file snapshot policies", filePolicies.size());
            dbClient.createObject(filePolicies);
        }
        if (!modifiedVpools.isEmpty()) {
            logger.info("Modified {} vpools ", modifiedVpools.size());
            dbClient.updateObject(modifiedVpools);
        }
    } catch (Exception ex) {
        logger.error("Exception occured while migrating file replication policy for Virtual pools");
        logger.error(ex.getMessage(), ex);
    }
}
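The in-line comment about StringSet.removeAll() explains why the loop clears the set when it holds a single entry. As a generic illustration of that guard, the pattern can be isolated in a small helper. The sketch below is written against plain java.util.Set and the removeAllSafely name is hypothetical; it is not CoprHD code, only a restatement of the workaround used above.

import java.util.Set;

public final class SetRemovalWorkaround {

    private SetRemovalWorkaround() {
    }

    /**
     * Generic illustration of the guard used in the migration above: when both
     * sets hold a single entry, clear() is used instead of removeAll(), which
     * the original authors found unreliable on a single-entry StringSet.
     */
    public static void removeAllSafely(Set<String> target, Set<String> toRemove) {
        if (target == null || toRemove == null || toRemove.isEmpty()) {
            return; // Nothing to remove.
        }
        if (target.size() == 1 && toRemove.size() == 1 && target.containsAll(toRemove)) {
            target.clear(); // The single-entry case called out in the migration comment.
        } else {
            target.removeAll(toRemove);
        }
    }
}

Keeping the guard in one place like this would avoid repeating the single-entry special case wherever schedule policies are stripped from a file system.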
use of com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException in project coprhd-controller by CoprHD.
the class FileSystemExportToFileSystemExportRuleMigration method process.
@Override
public void process() throws MigrationCallbackException {
    log.info("FileSystemExport to FileSystem export rule migration: start");
    DbClient dbClient = getDbClient();
    try {
        List<URI> fileExpRuleURIList = dbClient.queryByType(FileExportRule.class, true);
        int exisitingExportRuleCount = 0;
        for (Iterator<URI> iterator = fileExpRuleURIList.iterator(); iterator.hasNext();) {
            URI uri = (URI) iterator.next();
            log.debug("Existing export rule URI: {}", uri);
            exisitingExportRuleCount++;
        }
        if (exisitingExportRuleCount > 0) {
            log.info("There are exisiting export rule(s). Skipping migration.");
            return;
        }
        // FileSystems
        List<URI> fileSystemURIList = dbClient.queryByType(FileShare.class, true);
        Iterator<FileShare> fileShareListIterator = dbClient.queryIterativeObjects(FileShare.class, fileSystemURIList);
        while (fileShareListIterator.hasNext()) {
            FileShare fileShare = fileShareListIterator.next();
            // Create FS Export Rule for export Map
            List<FileExportRule> fsExpRules = createFSExportRules(fileShare);
            if (null != fsExpRules && !fsExpRules.isEmpty()) {
                log.debug("Persisting new File Export rule(s): {}", fsExpRules);
                dbClient.createObject(fsExpRules);
            }
        }
        // Snapshots
        List<URI> snapshotURIList = dbClient.queryByType(Snapshot.class, true);
        Iterator<Snapshot> snapshotListIterator = dbClient.queryIterativeObjects(Snapshot.class, snapshotURIList);
        while (snapshotListIterator.hasNext()) {
            Snapshot snapshot = snapshotListIterator.next();
            // Create FS Export Rule for export Map
            List<FileExportRule> snapshotExpRules = createSnapshotExportRules(snapshot);
            if (null != snapshotExpRules && !snapshotExpRules.isEmpty()) {
                log.debug("Persisting new Snapshot Export rule(s): {}", snapshotExpRules);
                dbClient.createObject(snapshotExpRules);
            }
        }
        log.info("FileSystemExport to FileSystem export rule migration: end");
    } catch (Exception e) {
        log.error("Exception occured while migrating FileShare/Snapshot Export Map CF to FileExportRule CF");
        log.error(e.getMessage(), e);
    }
}
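The counting loop at the top of this callback only needs to know whether any FileExportRule already exists; the count itself is never used beyond the zero check. A hedged sketch of that intent, reusing the same DbClient query seen above, could look like the helper below. The method name and its placement are illustrative, not taken from the project.

// Sketch only: returns true if any active FileExportRule URI is already present,
// which is the condition the migration uses to skip re-creating export rules.
private boolean exportRulesAlreadyMigrated(DbClient dbClient) {
    List<URI> existingRuleUris = dbClient.queryByType(FileExportRule.class, true);
    return existingRuleUris != null && existingRuleUris.iterator().hasNext();
}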
use of com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException in project coprhd-controller by CoprHD.
the class SMISProviderToStorageProviderMigration method process.
/**
 * 1. Create a new StorageProvider instance for each SMIS provider instance available in the db.
 * 2. Populate all existing fields into the new StorageProvider instance except id and interfaceType.
 * 3. Generate a new id for the new instance; interfaceType will be "smis" in this case.
 * 4. Persist the new instance into the db.
 * 5. Update the newly created StorageProvider id reference on all storage systems managed by the SMIS provider:
 *    a) Fetch the storage systems using provider.getStorageSystems().
 *    b) Iterate over each storage system.
 *    c) Update storageSystem.activeProviderURI and storageSystem.getProviders() with the newly created
 *       StorageProvider id, i.e. remove the existing SMIS provider id and add the new StorageProvider id.
 */
@Override
public void process() throws MigrationCallbackException {
    DbClient dbClient = getDbClient();
    try {
        List<URI> smisProviderURIList = dbClient.queryByType(SMISProvider.class, true);
        Iterator<SMISProvider> smisProviderListIterator = dbClient.queryIterativeObjects(SMISProvider.class, smisProviderURIList);
        while (smisProviderListIterator.hasNext()) {
            SMISProvider smisProvider = smisProviderListIterator.next();
            StorageProvider newStorageProvider = createNewStorageProviderInstance(smisProvider);
            dbClient.createObject(newStorageProvider);
            StringSet storageSystemSet = smisProvider.getStorageSystems();
            if (storageSystemSet != null) {
                for (String strStorageSystem : storageSystemSet) {
                    URI storageSystemURI = URI.create(strStorageSystem);
                    StorageSystem storageSystem = dbClient.queryObject(StorageSystem.class, storageSystemURI);
                    updateStorageProvidersforStorageSystems(dbClient, storageSystem, smisProvider, newStorageProvider);
                    smisProvider.setInactive(true);
                    dbClient.persistObject(smisProvider);
                }
            }
        }
        // Handle VPLEX storage systems, which are now discovered using the
        // StorageProvider model.
        List<URI> storageSystemURIs = dbClient.queryByType(StorageSystem.class, true);
        Iterator<StorageSystem> storageSystemIter = dbClient.queryIterativeObjects(StorageSystem.class, storageSystemURIs);
        while (storageSystemIter.hasNext()) {
            StorageSystem storageSystem = storageSystemIter.next();
            if (DiscoveredDataObject.Type.vplex.name().equals(storageSystem.getSystemType())) {
                createStorageProviderForVPlexSystem(storageSystem);
            }
        }
    } catch (Exception e) {
        log.error("Exception occured while migrating SMISProvider CF to StorageProvider");
        log.error(e.getMessage(), e);
    }
}
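Steps 1 through 4 of the Javadoc are performed by createNewStorageProviderInstance(...), which is not shown in this listing. The sketch below is a hedged guess at what such a method could do: the set of copied fields and several accessor names (for example setIPAddress, setUseSSL, setVersionString, setStorageSystems and the StorageProvider.InterfaceType enum) are assumptions based on typical provider models, not verified against the project.

// Hedged sketch, not the project's implementation.
private StorageProvider createNewStorageProviderInstanceSketch(SMISProvider smisProvider) {
    StorageProvider provider = new StorageProvider();
    // Step 3: generate a new id; interfaceType is "smis" for providers migrated from SMISProvider.
    provider.setId(URIUtil.createId(StorageProvider.class));
    provider.setInterfaceType(StorageProvider.InterfaceType.smis.name()); // assumed enum constant
    // Step 2: copy the existing fields (accessor names below are assumed).
    provider.setLabel(smisProvider.getLabel());
    provider.setIPAddress(smisProvider.getIPAddress());
    provider.setPortNumber(smisProvider.getPortNumber());
    provider.setUserName(smisProvider.getUserName());
    provider.setPassword(smisProvider.getPassword());
    provider.setUseSSL(smisProvider.getUseSSL());
    provider.setVersionString(smisProvider.getVersionString());
    provider.setStorageSystems(smisProvider.getStorageSystems());
    // Step 4 (persisting the new instance) happens in process() via dbClient.createObject(...).
    return provider;
}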
use of com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException in project coprhd-controller by CoprHD.
the class MigrationHandlerImpl method runMigrationCallbacks.
/**
 * Figure out all migration callbacks and run them, starting from the given checkpoint.
 *
 * @param diff the schema diff from which default migration callbacks are generated
 * @param checkpoint name of the last successfully executed callback; it and all callbacks
 *            before it are skipped, or null to run every callback
 * @throws MigrationCallbackException if a callback fails
 */
private void runMigrationCallbacks(DbSchemasDiff diff, String checkpoint) throws MigrationCallbackException {
    List<MigrationCallback> callbacks = new ArrayList<>();
    // TODO: we put class annotations first since that's where @Keyspace belongs,
    // but we probably need some explicit ordering to make sure that the geo
    // resources get migrated into the geodb first.
    callbacks.addAll(generateDefaultMigrationCallbacks(diff.getNewClassAnnotations()));
    callbacks.addAll(generateDefaultMigrationCallbacks(diff.getNewFieldAnnotations()));
    // Now see if there are any extra callbacks we need to run from the specified
    // source version.
    callbacks.addAll(generateCustomMigrationCallbacks());
    log.info("Total {} migration callbacks ", callbacks.size());
    DbClientContext geoContext = disableGeoAccess();
    boolean startProcessing = false;
    try {
        for (MigrationCallback callback : callbacks) {
            // Ignore the callback if it comes before the given checkpoint.
            if (!startProcessing && checkpoint != null) {
                if (!callback.getName().equals(checkpoint)) {
                    log.info("Ignore migration callback: " + callback.getName());
                    continue;
                } else {
                    // Start from the next callback.
                    startProcessing = true;
                    continue;
                }
            }
            long beginTime = System.currentTimeMillis();
            log.info("Invoking migration callback: {}", callback.getName());
            try {
                callback.process();
            } catch (MigrationCallbackException ex) {
                throw ex;
            } catch (Exception e) {
                String msg = String.format("%s fail,Please contract the EMC support team", callback.getName());
                throw new MigrationCallbackException(msg, e);
            } finally {
                log.info("Migration callback {} finished with time: {}", callback.getName(),
                        DurationFormatUtils.formatDurationHMS(System.currentTimeMillis() - beginTime));
            }
            // Update checkpoint
            schemaUtil.setMigrationCheckpoint(callback.getName());
        }
    } finally {
        enableGeoAccess(geoContext);
    }
}
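The checkpoint handling above skips every callback up to and including the one named by the checkpoint, then resumes with the next one; if the checkpoint name never matches, every callback is ignored. The self-contained sketch below isolates just that selection logic. It uses a stand-in Callback interface rather than the real MigrationCallback, which carries more responsibilities, so treat it as an illustration of the behavior, not project code.

import java.util.List;

public final class CheckpointFilter {

    /** Stand-in for MigrationCallback; the real interface has more methods. */
    public interface Callback {
        String getName();
    }

    private CheckpointFilter() {
    }

    /**
     * Returns the index of the first callback that should run, mirroring the loop above:
     * a null checkpoint runs everything, a matched checkpoint resumes with the next
     * callback, and an unmatched checkpoint skips the whole list.
     */
    public static int firstToRun(List<Callback> callbacks, String checkpoint) {
        if (checkpoint == null) {
            return 0;
        }
        for (int i = 0; i < callbacks.size(); i++) {
            if (callbacks.get(i).getName().equals(checkpoint)) {
                return i + 1; // Resume with the callback after the checkpoint.
            }
        }
        return callbacks.size(); // Checkpoint never matched: every callback is ignored.
    }
}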
use of com.emc.storageos.svcs.errorhandling.resources.MigrationCallbackException in project coprhd-controller by CoprHD.
the class StatsCleanupMigration method process.
@Override
public void process() throws MigrationCallbackException {
    log.info("begin to cleanup stats CF and change it compaction strategy");
    DbClientImpl dbClient = (DbClientImpl) getDbClient();
    TimeSeriesType<Stat> doType = TypeMap.getTimeSeriesType(StatTimeSeries.class);
    try {
        dbClient.getLocalContext().getKeyspace().prepareQuery(doType.getCf())
                .setConsistencyLevel(ConsistencyLevel.CL_ALL)
                .withCql(String.format("TRUNCATE TABLE \"%s\"", doType.getCf().getName()))
                .execute();
        dbClient.getLocalContext().getKeyspace().prepareQuery(doType.getCf())
                .withCql(String.format("ALTER TABLE \"%s\" WITH compaction = {'class': 'SizeTieredCompactionStrategy'}", doType.getCf().getName()))
                .execute();
    } catch (Exception e) {
        log.error("Failed to cleanup stats CF {}", e);
        throw new MigrationCallbackException("Failed to cleanup stats CF", e);
    }
}
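Concretely, the two statements issued above are a TRUNCATE followed by an ALTER TABLE that switches the column family to SizeTieredCompactionStrategy. The snippet below only renders those CQL strings for inspection; the table name "Stats" is an assumed value standing in for whatever doType.getCf().getName() actually returns.

// Illustration only: prints the CQL that the migration builds via String.format.
public class StatsCleanupCqlPreview {
    public static void main(String[] args) {
        String cfName = "Stats"; // assumed column family name
        String truncate = String.format("TRUNCATE TABLE \"%s\"", cfName);
        String alter = String.format(
                "ALTER TABLE \"%s\" WITH compaction = {'class': 'SizeTieredCompactionStrategy'}", cfName);
        System.out.println(truncate);
        System.out.println(alter);
    }
}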