use of com.emc.storageos.db.client.model.ComputeSystem in project coprhd-controller by CoprHD.
Usage in class ComputeImageServerMigration, method process().
@Override
public void process() throws MigrationCallbackException {
    try {
        // Retrieve the legacy image-server settings from the ZK db using the coordinator client.
        Configuration config = coordinatorClient.queryConfiguration(PropertyInfoExt.TARGET_PROPERTY, PropertyInfoExt.TARGET_PROPERTY_ID);
        log.info("imageServerIP:" + config.getConfig(IMAGE_SERVER_ADDRESS));
        PropertyInfo p = coordinatorClient.getPropertyInfo();
        // Bootstrap an encryption provider so the persisted image-server password can be decrypted.
        EncryptionProviderImpl encryptionProvider1 = new EncryptionProviderImpl();
        encryptionProvider1.setCoordinator(coordinatorClient);
        encryptionProvider1.start();
        this.encryptionProvider = encryptionProvider1;
        // Use the shared property-key constant instead of the duplicated literal
        // "image_server_address" so this lookup cannot drift from the reads below.
        if (!StringUtils.isBlank(p.getProperty(IMAGE_SERVER_ADDRESS))) {
            ComputeImageServer imageServer = new ComputeImageServer();
            imageServer.setId(URIUtil.createId(ComputeImageServer.class));
            imageServer.setImageServerIp(p.getProperty(IMAGE_SERVER_ADDRESS));
            imageServer.setLabel(p.getProperty(IMAGE_SERVER_ADDRESS));
            imageServer.setImageServerUser(p.getProperty(IMAGE_SERVER_USERNAME));
            imageServer.setTftpBootDir(p.getProperty(IMAGE_SERVER_TFTPBOOT_DIR));
            imageServer.setImageServerSecondIp(p.getProperty(IMAGE_SERVER_OS_NETWORK_IP));
            imageServer.setImageServerHttpPort(p.getProperty(IMAGE_SERVER_HTTP_PORT));
            imageServer.setImageDir(p.getProperty(IMAGE_SERVER_IMAGEDIR));
            String encryptedPassword = p.getProperty(IMAGE_SERVER_ENC_PWD);
            try {
                imageServer.setImageServerPassword(encryptionProvider.decrypt(Base64.decodeBase64(encryptedPassword)));
            } catch (Exception e) {
                log.error("Can't decrypt image server password :" + e.getLocalizedMessage());
                log.error("Failed to save image server details into database during migration", e);
                throw e;
            }
            associateComputeImages(imageServer);
            dbClient.createObject(imageServer);
            log.info("Saved imageServer info into cassandra db");
            // Associate all existing Compute Systems to this image server
            List<URI> computeSystemURIs = dbClient.queryByType(ComputeSystem.class, true);
            Iterator<ComputeSystem> computeSystemListIterator = dbClient.queryIterativeObjects(ComputeSystem.class, computeSystemURIs);
            while (computeSystemListIterator.hasNext()) {
                ComputeSystem computeSystem = computeSystemListIterator.next();
                computeSystem.setComputeImageServer(imageServer.getId());
                dbClient.persistObject(computeSystem);
            }
            // Delete the migrated image-server keys from the ZK db. Reuse the Configuration
            // fetched above instead of issuing an identical second query to the coordinator.
            config.removeConfig(IMAGE_SERVER_ADDRESS);
            config.removeConfig(IMAGE_SERVER_USERNAME);
            config.removeConfig(IMAGE_SERVER_ENC_PWD);
            config.removeConfig(IMAGE_SERVER_TFTPBOOT_DIR);
            config.removeConfig(IMAGE_SERVER_HTTP_PORT);
            config.removeConfig(IMAGE_SERVER_OS_NETWORK_IP);
            config.removeConfig(IMAGE_SERVER_IMAGEDIR);
            coordinatorClient.persistServiceConfiguration(config);
        } else {
            log.info("No image server configuration found in Zookeeper db");
        }
    } catch (Exception e) {
        // NOTE(review): failures are logged and swallowed, so the migration framework never
        // receives the declared MigrationCallbackException. Confirm this best-effort
        // behavior is intentional before tightening it.
        log.error("Exception occured while migrating compute image server information");
        log.error(e.getMessage(), e);
    }
}
use of com.emc.storageos.db.client.model.ComputeSystem in project coprhd-controller by CoprHD.
Usage in class DataCollectionJobScheduler, method isDataCollectionJobSchedulingNeeded().
/**
 * Decides whether a data-collection job (discovery/metering/scan) for the given
 * system needs to be scheduled now.
 *
 * @param <T> the discovered-system type
 * @param system the system the job applies to
 * @param job the data-collection job; carries the type, namespace, and whether it
 *            was initiated automatically by the scheduler or requested by a user
 * @return true if the job should be scheduled
 */
private <T extends DiscoveredSystemObject> boolean isDataCollectionJobSchedulingNeeded(T system, DataCollectionJob job) {
    String type = job.getType();
    boolean scheduler = job.isSchedulerJob();
    String namespace = job.getNamespace();
    // COP-20052 if an unmanaged-object discovery (volumes, snapshots, filesystems, CGs)
    // is requested by a user, just run it
    if (!scheduler && (Discovery_Namespaces.UNMANAGED_VOLUMES.name().equalsIgnoreCase(namespace) || Discovery_Namespaces.BLOCK_SNAPSHOTS.name().equalsIgnoreCase(namespace) || Discovery_Namespaces.UNMANAGED_FILESYSTEMS.name().equalsIgnoreCase(namespace) || Discovery_Namespaces.UNMANAGED_CGS.name().equalsIgnoreCase(namespace))) {
        _logger.info(namespace + " discovery has been requested by the user, scheduling now...");
        return true;
    }
    // Metering applies only to registered systems.
    if (ControllerServiceImpl.METERING.equalsIgnoreCase(type) && !DiscoveredDataObject.RegistrationStatus.REGISTERED.toString().equalsIgnoreCase(system.getRegistrationStatus())) {
        return false;
    }
    // Scan triggered the discovery of this new System found, and discovery was in progress
    // in the mean time, UI triggered the discovery again, the last Run time will be 0
    // as we depend on the last run time to calculate next run time, the value will be
    // always 3600 seconds in this case, which is lower than the maximum idle interval which is 4200 sec.
    // hence a new Job will again get rescheduled.
    // This fix, calculates next time from last Run time , only if its not 0.
    long lastTime = getLastRunTime(system, type);
    long nextTime = getNextRunTime(system, type);
    if (lastTime > 0) {
        // Intervals are in seconds; run times are millisecond timestamps.
        nextTime = lastTime + JobIntervals.get(type).getInterval() * 1000;
    }
    // Map generic DISCOVERY onto the device-specific discovery type. The three checks
    // were mutually exclusive (each reassignment falsifies the next test), so chain them.
    if (ControllerServiceImpl.DISCOVERY.equalsIgnoreCase(type)) {
        if (system instanceof NetworkSystem) {
            type = ControllerServiceImpl.NS_DISCOVERY;
        } else if (system instanceof ComputeSystem) {
            type = ControllerServiceImpl.COMPUTE_DISCOVERY;
        } else if (system instanceof Host || system instanceof Vcenter) {
            type = ControllerServiceImpl.CS_DISCOVERY;
        }
    }
    // check directly on the queue to determine if the job is in progress
    boolean inProgress = ControllerServiceImpl.isDataCollectionJobInProgress(job);
    boolean queued = ControllerServiceImpl.isDataCollectionJobQueued(job);
    if (!queued && !inProgress) {
        // the job does not appear on the queue in either active or queued state
        // check the storage system database status; if it shows that it's scheduled or in progress, something
        // went wrong with a previous discovery. Set it to error and allow it to be rescheduled.
        boolean dbInProgressStatus = isInProgress(system, type);
        if (dbInProgressStatus) {
            _logger.warn(type + " job for " + system.getLabel() + " is not queued or in progress; correcting the ViPR DB status");
            updateDataCollectionStatus(system, type, DiscoveredDataObject.DataCollectionJobStatus.ERROR);
        }
        // check for any pending tasks; if there are any, they're orphaned and should be cleaned up;
        // the cutoff excludes the discovery job currently being scheduled.
        // NOTE(review): the original variable was named "oneHourAgo" but the code looks back
        // one *day* (minusDays(1)); behavior is preserved here — confirm which window is intended.
        Calendar pendingTaskCutoff = Calendar.getInstance();
        pendingTaskCutoff.setTime(Date.from(LocalDateTime.now().minusDays(1).atZone(ZoneId.systemDefault()).toInstant()));
        if (ControllerServiceImpl.DISCOVERY.equalsIgnoreCase(type)) {
            TaskUtils.cleanupPendingTasks(_dbClient, system.getId(), ResourceOperationTypeEnum.DISCOVER_STORAGE_SYSTEM.getName(), URI.create(SYSTEM_TENANT_ID), pendingTaskCutoff);
        } else if (ControllerServiceImpl.METERING.equalsIgnoreCase(type)) {
            TaskUtils.cleanupPendingTasks(_dbClient, system.getId(), ResourceOperationTypeEnum.METERING_STORAGE_SYSTEM.getName(), URI.create(SYSTEM_TENANT_ID), pendingTaskCutoff);
        }
    } else {
        // log a message if the discovery job has been running for longer than expected
        long currentTime = System.currentTimeMillis();
        long maxIdleTime = JobIntervals.getMaxIdleInterval() * 1000;
        // Fix: convert the interval to milliseconds like maxIdleTime and the timestamps;
        // the raw seconds value made this comparison effectively ignore the interval.
        long jobInterval = JobIntervals.get(job.getType()).getInterval() * 1000;
        // next time is the time the job was picked up from the queue plus the job interval
        // so the start time of the currently running job is next time minus job interval
        boolean longRunningDiscovery = inProgress && (currentTime - nextTime - jobInterval >= maxIdleTime);
        if (longRunningDiscovery) {
            _logger.warn(type + " job for " + system.getLabel() + " has been running for longer than expected; this could indicate a problem with the storage system");
        }
    }
    return isJobSchedulingNeeded(system.getId(), type, (queued || inProgress), isError(system, type), scheduler, lastTime, nextTime);
}
use of com.emc.storageos.db.client.model.ComputeSystem in project coprhd-controller by CoprHD.
Usage in class DataCollectionJobUtil, method getAccessProfile().
/**
 * Create AccessProfile from DiscoveryJob
 *
 * TODO create subClasses for Accessprofile based on deviceType and Profile.
 * i.e. Metering-isilon accessProfile - a subclass under AccessProfile
 *
 * @param clazz the DataObject subclass of the target system/provider
 * @param objectID id of the object to build the profile for
 * @param jobProfile the job profile name (discovery, metering, scan, ...)
 * @param nameSpace discovery namespace, consumed by storage-system/VPLEX profiles
 * @return AccessProfile populated for the object's device type
 * @throws DatabaseException if the object cannot be read from the DB
 * @throws DeviceControllerException on profile population failures
 */
public AccessProfile getAccessProfile(Class<? extends DataObject> clazz, URI objectID, String jobProfile, String nameSpace) throws DatabaseException, DeviceControllerException {
    DataObject taskObject = _dbClient.queryObject(clazz, objectID);
    // Fail fast with a descriptive error instead of an NPE on the first cast/call below.
    if (taskObject == null) {
        throw new RuntimeException("getAccessProfile: no object of type " + clazz.getSimpleName() + " found for id " + objectID);
    }
    AccessProfile profile = new AccessProfile();
    profile.setProfileName(jobProfile);
    profile.setRecordableEventManager(_eventManager);
    if (clazz == StorageProvider.class) {
        // Cast once and dispatch on the provider's interface type instead of
        // re-casting and re-reading it in every branch.
        StorageProvider provider = (StorageProvider) taskObject;
        String interfaceType = provider.getInterfaceType();
        if (StorageProvider.InterfaceType.smis.name().equalsIgnoreCase(interfaceType)) {
            populateSMISAccessProfile(profile, provider);
        } else if (StorageProvider.InterfaceType.hicommand.name().equalsIgnoreCase(interfaceType)) {
            populateHDSAccessProfile(profile, provider);
        } else if (StorageProvider.InterfaceType.cinder.name().equalsIgnoreCase(interfaceType)) {
            populateCinderAccessProfile(profile, provider);
        } else if (StorageProvider.InterfaceType.vplex.name().equalsIgnoreCase(interfaceType)) {
            populateVPLEXAccessProfile(profile, taskObject, nameSpace);
        } else if (StorageProvider.InterfaceType.scaleioapi.name().equalsIgnoreCase(interfaceType)) {
            populateScaleIOAccessProfile(profile, provider);
        } else if (StorageProvider.InterfaceType.ddmc.name().equalsIgnoreCase(interfaceType)) {
            populateDataDomainAccessProfile(profile, provider);
        } else if (StorageProvider.InterfaceType.ibmxiv.name().equalsIgnoreCase(interfaceType)) {
            // IBM XIV uses the SMI-S profile with an IBM-specific namespace.
            populateSMISAccessProfile(profile, provider);
            profile.setnamespace(Constants.IBM_NAMESPACE);
        } else if (StorageProvider.InterfaceType.xtremio.name().equalsIgnoreCase(interfaceType)) {
            populateXtremIOAccessProfile(profile, provider);
        } else if (StorageProvider.InterfaceType.ceph.name().equalsIgnoreCase(interfaceType)) {
            populateCephAccessProfile(profile, provider);
        } else if (StorageProvider.InterfaceType.unity.name().equalsIgnoreCase(interfaceType)) {
            populateUnityAccessProfile(profile, provider);
        } else if (StorageSystem.Type.isDriverManagedStorageProvider(interfaceType)) {
            // Catch-all for providers managed by external drivers; must remain after
            // the specific interface-type checks above.
            populateExternalProviderAccessProfile(profile, provider);
        } else {
            throw new RuntimeException("getAccessProfile: profile is unknown for objects of type : " + taskObject.getClass());
        }
    } else if (clazz == StorageSystem.class) {
        StorageSystem storageSystem = (StorageSystem) taskObject;
        // VPLEX storage systems get the dedicated VPLEX profile.
        if (DiscoveredDataObject.Type.vplex.name().equalsIgnoreCase(storageSystem.getSystemType())) {
            populateVPLEXAccessProfile(profile, taskObject, nameSpace);
        } else {
            populateAccessProfile(profile, storageSystem, nameSpace);
        }
    } else if (clazz == ProtectionSystem.class) {
        populateAccessProfile(profile, (ProtectionSystem) taskObject, nameSpace);
    } else if (clazz == ComputeSystem.class) {
        populateAccessProfile(profile, (ComputeSystem) taskObject);
    } else if (clazz == NetworkSystem.class) {
        populateAccessProfile(profile, (NetworkSystem) taskObject);
    } else if (clazz == Host.class) {
        populateAccessProfile(profile, (Host) taskObject);
    } else if (clazz == Vcenter.class) {
        populateAccessProfile(profile, (Vcenter) taskObject);
    } else {
        throw new RuntimeException("getAccessProfile: profile is unknown for objects of type : " + taskObject.getClass());
    }
    return profile;
}
use of com.emc.storageos.db.client.model.ComputeSystem in project coprhd-controller by CoprHD.
Usage in class ComputeControllerImpl, method clearDeviceSession().
@Override
public void clearDeviceSession(URI computeSystemId) throws InternalException {
    // Resolve the compute system from the DB and dispatch the device call to it.
    final ComputeSystem computeSystem = _dbClient.queryObject(ComputeSystem.class, computeSystemId);
    execCompute("clearDeviceSession", computeSystem.getId());
}
use of com.emc.storageos.db.client.model.ComputeSystem in project coprhd-controller by CoprHD.
Usage in class ComputeDeviceControllerImpl, method setLanBootTarget().
/**
 * Sets the LAN boot target on the given compute element via the device layer
 * for the compute system's type.
 *
 * @param computeSystemId id of the compute system that owns the element
 * @param computeElementId id of the compute element to configure
 * @param hostId id of the host associated with the boot target
 * @param waitForServerRestart whether the device call should wait for the server to restart
 * @throws InternalException on device-layer failures
 */
private void setLanBootTarget(URI computeSystemId, URI computeElementId, URI hostId, boolean waitForServerRestart) throws InternalException {
    log.info("setLanBootTarget");
    final ComputeSystem computeSystem = _dbClient.queryObject(ComputeSystem.class, computeSystemId);
    getDevice(computeSystem.getSystemType()).setLanBootTarget(computeSystem, computeElementId, hostId, waitForServerRestart);
}
Aggregations