use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
the class RpExportGroupInternalFlagMigration method updateFlagsForExportGroups.
/**
 * Update export groups that need to have the internal flags set.
 */
private void updateFlagsForExportGroups() {
    DbClient dbClient = getDbClient();
    List<URI> exportGroupURIs = dbClient.queryByType(ExportGroup.class, false);
    Iterator<ExportGroup> exportGroups = dbClient.queryIterativeObjects(ExportGroup.class, exportGroupURIs);
    while (exportGroups.hasNext()) {
        ExportGroup exportGroup = exportGroups.next();
        log.debug("Examining export group (id={}) for upgrade", exportGroup.getId().toString());
        // Check to see if this export group has RP initiators
        if (checkIfInitiatorsForRPAfterMigration(exportGroup.getInitiators())) {
            log.info("Export group (id={}) must be upgraded", exportGroup.getId().toString());
            exportGroup.addInternalFlags(Flag.RECOVERPOINT);
            dbClient.persistObject(exportGroup);
            log.info("Marked export group (id={}) as RecoverPoint", exportGroup.getId().toString());
        }
    }
}
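
The helper checkIfInitiatorsForRPAfterMigration is not shown on this page. Below is a minimal sketch of what such a check could look like, assuming getInitiators() returns the initiator URIs as a StringSet and using a hypothetical isRecoverPointInitiator predicate in place of the project's actual RP-detection logic; the real method in coprhd-controller may differ.

// Sketch only: the actual checkIfInitiatorsForRPAfterMigration may be implemented differently.
private boolean checkIfInitiatorsForRPAfterMigration(StringSet initiatorIds) {
    if (initiatorIds == null || initiatorIds.isEmpty()) {
        return false;
    }
    DbClient dbClient = getDbClient();
    for (String initiatorId : initiatorIds) {
        Initiator initiator = dbClient.queryObject(Initiator.class, URI.create(initiatorId));
        // isRecoverPointInitiator is a hypothetical predicate standing in for the real
        // RP-detection logic (e.g. matching the initiator against RecoverPoint appliance ports).
        if (initiator != null && isRecoverPointInitiator(initiator)) {
            return true;
        }
    }
    return false;
}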
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
the class SRDFTargetVolumeRDFGroupMigration method process.
@Override
public void process() throws MigrationCallbackException {
    log.info("Updating SRDF Target volume rdfGroup information.");
    DbClient dbClient = this.getDbClient();
    List<URI> volumeURIs = dbClient.queryByType(Volume.class, true);
    Map<URI, RemoteDirectorGroup> rdfGroupCache = new HashMap<URI, RemoteDirectorGroup>();
    Map<URI, StorageSystem> systemCache = new HashMap<URI, StorageSystem>();
    List<Volume> volumesToUpdate = new ArrayList<Volume>();
    Iterator<Volume> volumes = dbClient.queryIterativeObjects(Volume.class, volumeURIs);
    while (volumes.hasNext()) {
        Volume volume = volumes.next();
        try {
            if (null != volume.getSrdfParent() && !NullColumnValueGetter.isNullNamedURI(volume.getSrdfParent())) {
                if (null != volume.getSrdfGroup() && !NullColumnValueGetter.isNullURI(volume.getSrdfGroup())
                        && !NullColumnValueGetter.isNullURI(volume.getStorageController())) {
                    log.info("Determining SRDF Target volume {} to update rdf group", volume.getLabel());
                    RemoteDirectorGroup volumeSrdfGroup = fetchRDFGroupFromCache(rdfGroupCache, volume.getSrdfGroup());
                    StorageSystem system = fetchSystemFromCache(systemCache, volume.getStorageController());
                    // Found a target volume with the target SRDF group URI
                    if (URIUtil.identical(volumeSrdfGroup.getSourceStorageSystemUri(), volume.getStorageController())) {
                        // Set the source SRDF group URI
                        RemoteDirectorGroup sourceRDFGroup = getAssociatedTargetRemoteDirectorGroup(system.getUsingSmis80(),
                                volumeSrdfGroup.getNativeGuid());
                        if (null == sourceRDFGroup) {
                            log.info("Source RDFGroup not found in DB. Hence skipping.");
                            continue;
                        }
                        volume.setSrdfGroup(sourceRDFGroup.getId());
                        volumesToUpdate.add(volume);
                        if (volumesToUpdate.size() > 100) {
                            this.dbClient.updateObject(volumesToUpdate);
                            log.info("Updated {} SRDF Target volumes in db", volumesToUpdate.size());
                            volumesToUpdate.clear();
                        }
                    } else {
                        log.info("No need to update the rdfgroup for volume {} as it already has the right source RDFGroup {}",
                                volume.getLabel(), volume.getSrdfGroup());
                    }
                }
            }
        } catch (Exception ex) {
            log.error("Exception occurred while updating the SRDFGroup for the target volume {}. Proceeding to the next volume.",
                    volume.getLabel(), ex);
        }
    }
    // Update the remaining volumes
    if (volumesToUpdate.size() > 0) {
        this.dbClient.updateObject(volumesToUpdate);
        log.info("Updated {} SRDF Target volumes in db", volumesToUpdate.size());
    }
}
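
fetchRDFGroupFromCache and fetchSystemFromCache are simple memoizing lookups that avoid re-querying the same RemoteDirectorGroup or StorageSystem for every volume. A minimal sketch, assuming the helpers only wrap dbClient.queryObject with a HashMap-backed cache (the real helpers may differ):

// Sketch only: fetch each RDF group / storage system from the DB once and reuse it.
private RemoteDirectorGroup fetchRDFGroupFromCache(Map<URI, RemoteDirectorGroup> rdfGroupCache, URI rdfGroupUri) {
    RemoteDirectorGroup rdfGroup = rdfGroupCache.get(rdfGroupUri);
    if (rdfGroup == null) {
        rdfGroup = getDbClient().queryObject(RemoteDirectorGroup.class, rdfGroupUri);
        rdfGroupCache.put(rdfGroupUri, rdfGroup);
    }
    return rdfGroup;
}

private StorageSystem fetchSystemFromCache(Map<URI, StorageSystem> systemCache, URI systemUri) {
    StorageSystem system = systemCache.get(systemUri);
    if (system == null) {
        system = getDbClient().queryObject(StorageSystem.class, systemUri);
        systemCache.put(systemUri, system);
    }
    return system;
}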
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
the class SRDFVolumesInCGMigration method updateVolumesInConsistencyGroup.
/**
 * Migrate the SRDF volumes in CG
 */
private void updateVolumesInConsistencyGroup() {
    log.info("Migrating SRDF volumes in CG");
    DbClient dbClient = getDbClient();
    List<URI> volumeURIs = dbClient.queryByType(Volume.class, true);
    Iterator<Volume> volumes = dbClient.queryIterativeObjects(Volume.class, volumeURIs);
    int totalVolumes = 0;
    int volumesUpdated = 0;
    while (volumes.hasNext()) {
        totalVolumes++;
        Volume volume = volumes.next();
        if (!NullColumnValueGetter.isNullURI(volume.getStorageController())
                && !NullColumnValueGetter.isNullURI(volume.getConsistencyGroup())) {
            URI cgUri = volume.getConsistencyGroup();
            URI storageUri = volume.getStorageController();
            BlockConsistencyGroup cg = dbClient.queryObject(BlockConsistencyGroup.class, cgUri);
            StorageSystem system = dbClient.queryObject(StorageSystem.class, storageUri);
            if (cg == null || system == null) {
                log.warn(String.format("Volume %s is being skipped because the referenced CG or storage system is null; cgUri: %s; storageUri: %s",
                        volume.getId().toString(), cgUri.toString(), storageUri.toString()));
                continue;
            }
            if (volume.getSrdfParent() != null || volume.getSrdfTargets() != null) {
                String replicationGroupName = cg.getCgNameOnStorageSystem(volume.getStorageController());
                if (replicationGroupName != null && !replicationGroupName.isEmpty()
                        && NullColumnValueGetter.isNullValue(volume.getReplicationGroupInstance())) {
                    log.info("Updating the SRDF volume {} replication group {}", volume.getLabel(), replicationGroupName);
                    volume.setReplicationGroupInstance(replicationGroupName);
                    dbClient.updateObject(volume);
                    volumesUpdated++;
                }
            }
        }
    }
    log.info(String.format("%d volumes updated out of a total of %d volumes", volumesUpdated, totalVolumes));
}
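
Unlike SRDFTargetVolumeRDFGroupMigration above, this migration calls dbClient.updateObject(volume) once per volume. If the same batching pattern were wanted here, it could be factored into small helpers such as the sketch below; addForUpdate, flushRemaining, and BATCH_SIZE are illustrative names, not part of the original class.

// Illustrative batching helpers (not part of the original migration): collect updated
// volumes and flush them to the DB in groups instead of one updateObject call per volume.
private static final int BATCH_SIZE = 100;

private void addForUpdate(DbClient dbClient, List<Volume> pending, Volume volume) {
    pending.add(volume);
    if (pending.size() >= BATCH_SIZE) {
        dbClient.updateObject(pending);
        pending.clear();
    }
}

private void flushRemaining(DbClient dbClient, List<Volume> pending) {
    if (!pending.isEmpty()) {
        dbClient.updateObject(pending);
    }
}

In updateVolumesInConsistencyGroup, the per-volume dbClient.updateObject(volume) call would then become addForUpdate(dbClient, volumesToUpdate, volume), with flushRemaining(dbClient, volumesToUpdate) called once after the loop.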
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
the class DbClientTest method testTimeSeries.
@Test
public void testTimeSeries() throws Exception {
    _logger.info("Starting testTimeSeries");
    final int perThreadCount = 100000;
    final int batchCount = 100;
    final int numThreads = 5;
    final DbClient dbClient = _dbClient;
    // write
    final Event e = new Event();
    e.setEventType("randomstuff");
    e.setVirtualPool(URI.create("urn:storageos:VirtualPool:random"));
    e.setEventId("abc");
    e.setProjectId(URI.create("urn:storageos:Project:abcrandom"));
    e.setResourceId(URI.create("urn:storageos:FileShare:random"));
    e.setSeverity("REALLY BAD");
    e.setUserId(URI.create("urn:storageos:User:foobar"));
    e.setTimeInMillis(-1);
    DateTime dateTime = new DateTime(DateTimeZone.UTC);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    long duration = System.currentTimeMillis();
    for (int index = 0; index < numThreads; index++) {
        executor.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                for (int index = 0; index < perThreadCount / batchCount; index++) {
                    Event[] batch = new Event[batchCount];
                    for (int j = 0; j < batchCount; j++) {
                        batch[j] = e;
                    }
                    dbClient.insertTimeSeries(EventTimeSeries.class, batch);
                }
                return null;
            }
        });
    }
    executor.shutdown();
    Assert.assertTrue(executor.awaitTermination(60, TimeUnit.SECONDS));
    duration = System.currentTimeMillis() - duration;
    _logger.info("Insertion throughput with batch size {} is {} records per second", batchCount,
            (perThreadCount * numThreads) / (duration / 1000f));
    // read at default granularity
    executor = Executors.newFixedThreadPool(numThreads);
    CountDownLatch latch = new CountDownLatch(numThreads * perThreadCount);
    DummyQueryResult result = new DummyQueryResult(latch);
    _logger.info("Starting query with default granularity");
    duration = System.currentTimeMillis();
    dbClient.queryTimeSeries(EventTimeSeries.class, dateTime, result, executor);
    Assert.assertTrue(latch.await(60, TimeUnit.SECONDS));
    duration = System.currentTimeMillis() - duration;
    _logger.info("Read throughput (HOUR) is {} records per second", (perThreadCount * numThreads) / (duration / 1000f));
    // read at minute granularity
    int total = numThreads * perThreadCount;
    latch = new CountDownLatch(total);
    result = new DummyQueryResult(latch, dateTime.getMinuteOfHour());
    _logger.info("Starting query with MINUTE bucket");
    duration = System.currentTimeMillis();
    dbClient.queryTimeSeries(EventTimeSeries.class, dateTime, TimeSeriesMetadata.TimeBucket.MINUTE, result, executor);
    Assert.assertTrue(latch.getCount() >= 0);
    _logger.info("Records at time {}: {}", dateTime.toString(), total - latch.getCount());
    duration = System.currentTimeMillis() - duration;
    _logger.info("Read throughput (MINUTE) is {} records per second", total / (duration / 1000f));
    // read at second granularity
    latch = new CountDownLatch(total);
    result = new DummyQueryResult(latch, dateTime.getMinuteOfHour(), dateTime.getSecondOfMinute());
    _logger.info("Starting query with SECOND bucket");
    duration = System.currentTimeMillis();
    dbClient.queryTimeSeries(EventTimeSeries.class, dateTime, TimeSeriesMetadata.TimeBucket.SECOND, result, executor);
    Assert.assertTrue(latch.getCount() >= 0);
    _logger.info("Records at time {}: {}", dateTime.toString(), total - latch.getCount());
    duration = System.currentTimeMillis() - duration;
    _logger.info("Read throughput (SECOND) is {} records per second", total / (duration / 1000f));
    _logger.info("Finished testTimeSeries");
}
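
DummyQueryResult is a test-local callback that counts down the latch as matching records arrive; its source is not shown on this page. A rough sketch is below, assuming the query-result callback interface exposes data/done/error methods and that the optional minute/second constructor arguments filter which records are counted; the real class in DbClientTest may differ.

// Rough sketch of a latch-driven query callback; the actual DummyQueryResult may differ.
private static class DummyQueryResult implements TimeSeriesQueryResult<Event> {
    private final CountDownLatch latch;
    private final Integer minute; // optional filters; null means "count every record"
    private final Integer second;

    DummyQueryResult(CountDownLatch latch) {
        this(latch, null, null);
    }

    DummyQueryResult(CountDownLatch latch, Integer minute) {
        this(latch, minute, null);
    }

    DummyQueryResult(CountDownLatch latch, Integer minute, Integer second) {
        this.latch = latch;
        this.minute = minute;
        this.second = second;
    }

    @Override
    public void data(Event data, long insertionTimeMs) {
        DateTime when = new DateTime(insertionTimeMs, DateTimeZone.UTC);
        // Count only records that fall into the requested minute/second bucket.
        if ((minute == null || minute == when.getMinuteOfHour())
                && (second == null || second == when.getSecondOfMinute())) {
            latch.countDown();
        }
    }

    @Override
    public void done() {
        // No-op: the test only waits on the latch.
    }

    @Override
    public void error(Throwable e) {
        // Ignored in this sketch; a real test might record the failure.
    }
}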
use of com.emc.storageos.db.client.DbClient in project coprhd-controller by CoprHD.
the class DbClientTest method testEncryption.
@Test
public void testEncryption() throws Exception {
    _logger.info("Starting encryption test");
    final int count = 100;
    Map<URI, UserSecretKey> expected = new HashMap<>();
    DbClient dbClient = _dbClient;
    TypeMap.setEncryptionProviders(_encryptionProvider, _encryptionProvider);
    for (int index = 0; index < count; index++) {
        UserSecretKey key = new UserSecretKey();
        key.setId(URIUtil.createId(UserSecretKey.class));
        key.setFirstKey(UUID.randomUUID().toString());
        key.setSecondKey("");
        expected.put(key.getId(), key);
        dbClient.persistObject(key);
    }
    Iterator<URI> it = expected.keySet().iterator();
    while (it.hasNext()) {
        URI id = it.next();
        UserSecretKey original = expected.get(id);
        UserSecretKey queried = dbClient.queryObject(UserSecretKey.class, id);
        Assert.assertEquals(original.getFirstKey(), queried.getFirstKey());
        Assert.assertEquals(original.getSecondKey(), queried.getSecondKey());
    }
    // Set the encryption providers to null so the encrypted columns are read and written as plain text
    TypeMap.setEncryptionProviders(null, null);
    UserSecretKey queried = null;
    it = expected.keySet().iterator();
    while (it.hasNext()) {
        URI id = it.next();
        UserSecretKey original = expected.get(id);
        queried = dbClient.queryObject(UserSecretKey.class, id);
        Assert.assertFalse(original.getFirstKey().equals(queried.getFirstKey()));
        Assert.assertFalse(original.getSecondKey().equals(queried.getSecondKey()));
    }
    queried.setSecondKey("");
    dbClient.persistObject(queried);
    TypeMap.setEncryptionProviders(_encryptionProvider, _encryptionProvider);
    // With the encryption provider set again, try to read plain-text data through the provider.
    // The provider must reject it: this is a state the system should never be in.
    boolean good = false;
    try {
        queried = dbClient.queryObject(UserSecretKey.class, queried.getId());
    } catch (IllegalStateException ex) {
        good = true;
    }
    Assert.assertTrue(good);
    // Set encryption back so that the objects can be deleted
    queried.setSecondKey("");
    dbClient.persistObject(queried);
    _logger.info("Ended encryption test");
}
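
The round-trip above relies on the encrypted fields of UserSecretKey being declared with the db client's @Encrypt annotation, so their columns are transparently encrypted whenever a provider has been registered through TypeMap.setEncryptionProviders and are written and read as-is when the providers are null. A sketch of such a model class follows; the column-family and field names are illustrative, and the real UserSecretKey in coprhd-controller may be declared differently.

// Illustrative model class showing @Encrypt on the getters; the real UserSecretKey may differ.
@Cf("UserSecretKey")
public class UserSecretKey extends DataObject {

    private String firstKey;
    private String secondKey;

    @Encrypt
    @Name("firstKey")
    public String getFirstKey() {
        return firstKey;
    }

    public void setFirstKey(String firstKey) {
        this.firstKey = firstKey;
        setChanged("firstKey");
    }

    @Encrypt
    @Name("secondKey")
    public String getSecondKey() {
        return secondKey;
    }

    public void setSecondKey(String secondKey) {
        this.secondKey = secondKey;
        setChanged("secondKey");
    }
}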