Use of com.emc.storageos.db.client.model.Event in project coprhd-controller by CoprHD.
In class XMLEventMarshallerTest, method testXmlEventMarshallingForNullEvent.
/**
 * Verifies XML marshalling of a null Event: marshalling a null event must not
 * produce the XML output file, so a subsequent unmarshal attempt is expected
 * to fail with a FileNotFoundException.
 */
@Test
public void testXmlEventMarshallingForNullEvent() throws URISyntaxException, IOException, MarshallingExcetion, JAXBException {
    deleteIfExists(XmlTestOutputFile);
    XMLEventMarshaller xmlMarshaller = new XMLEventMarshaller();
    // In-memory sink that captures every byte written; its content is never
    // inspected — the test only cares that no file is created on disk.
    OutputStream sink = new OutputStream() {
        private final StringBuilder captured = new StringBuilder();

        @Override
        public void write(int b) throws IOException {
            captured.append((char) b);
        }

        @Override
        public String toString() {
            return captured.toString();
        }
    };
    BufferedWriter bufferedWriter = new BufferedWriter(new OutputStreamWriter(sink));
    Event nullEvent = null;
    xmlMarshaller.header(bufferedWriter);
    xmlMarshaller.marshal(nullEvent, bufferedWriter);
    xmlMarshaller.tailer(bufferedWriter);
    bufferedWriter.close();
    // Unmarshal from the expected output path; since no event was marshalled,
    // the file should be absent and the unmarshal should throw.
    Unmarshaller unmarshaller = JAXBContext.newInstance(Event.class).createUnmarshaller();
    File outputFile = new File(XmlTestOutputFile);
    try {
        @SuppressWarnings("unused")
        Event unmarshalled = (Event) unmarshaller.unmarshal(outputFile);
    } catch (Exception e) {
        Assert.assertTrue(e.toString().contains("java.io.FileNotFoundException"));
    }
    deleteIfExists(XmlTestOutputFile);
}
Use of com.emc.storageos.db.client.model.Event in project coprhd-controller by CoprHD.
In class ControllerUtils, method convertToEvent.
/**
 * Converts a RecordableEvent into its database model representation.
 *
 * @param event the recordable event to convert
 * @return a new {@link Event} populated field-by-field from the given event
 */
public static Event convertToEvent(RecordableEvent event) {
    Event result = new Event();
    // Timing and classification
    result.setTimeInMillis(event.getTimestamp());
    result.setEventType(event.getType());
    result.setRecordType(event.getRecordType());
    result.setAlertType(event.getAlertType());
    result.setSeverity(event.getSeverity());
    // Ownership and scoping
    result.setTenantId(event.getTenantId());
    result.setProjectId(event.getProjectId());
    result.setUserId(event.getUserId());
    // Resource identification
    result.setVirtualPool(event.getVirtualPool());
    result.setService(event.getService());
    result.setResourceId(event.getResourceId());
    result.setNativeGuid(event.getNativeGuid());
    // Descriptive payload
    result.setEventId(event.getEventId());
    result.setDescription(event.getDescription());
    result.setExtensions(event.getExtensions());
    result.setOperationalStatusCodes(event.getOperationalStatusCodes());
    result.setOperationalStatusDescriptions(event.getOperationalStatusDescriptions());
    result.setEventSource(event.getSource());
    return result;
}
Use of com.emc.storageos.db.client.model.Event in project coprhd-controller by CoprHD.
In class RecordableEventManager, method recordEvents.
/**
 * Called to record events in the database.
 *
 * @param events references to recordable events.
 * @throws DatabaseException thrown when inserting the events into the database fails
 */
public void recordEvents(RecordableEvent... events) throws DatabaseException {
    List<Event> dbEventsList = new ArrayList<Event>();
    for (RecordableEvent event : events) {
        // We need to drop these indications: external (non-Bourne) events
        // without a resource id are not persisted. Filter before converting
        // so we don't do conversion work for events that will be dropped.
        if (event.getResourceId() == null && !(event instanceof RecordableBourneEvent)) {
            continue;
        }
        dbEventsList.add(ControllerUtils.convertToEvent(event));
    }
    if (dbEventsList.isEmpty()) {
        s_logger.info("Event list is empty");
        return;
    }
    Event[] dbEvents = dbEventsList.toArray(new Event[dbEventsList.size()]);
    // Now insert the events into the database.
    try {
        String bucketId = _dbClient.insertTimeSeries(EventTimeSeries.class, dbEvents);
        s_logger.debug("Event(s) persisted into Cassandra with bucketId/rowId : {}", bucketId);
    } catch (DatabaseException e) {
        // Log with full stack trace, then propagate to the caller.
        s_logger.error("Error inserting events into the database", e);
        throw e;
    }
}
Use of com.emc.storageos.db.client.model.Event in project coprhd-controller by CoprHD.
In class DbClientTest, method testTimeSeries.
/**
 * Throughput smoke test for the time-series API: 5 threads each insert
 * 100,000 copies of a single shared Event in batches of 100, then the data
 * is read back at the default (HOUR), MINUTE, and SECOND bucket
 * granularities, logging the measured rates.
 */
@Test
public void testTimeSeries() throws Exception {
    _logger.info("Starting testTimeSeries");
    final int perThreadCount = 100000;
    final int batchCount = 100;
    final int numThreads = 5;
    final DbClient dbClient = _dbClient;
    // write
    // One shared Event instance is reused for every record; only the
    // insertion time distinguishes the stored rows.
    final Event e = new Event();
    e.setEventType("randomstuff");
    e.setVirtualPool(URI.create("urn:storageos:VirtualPool:random"));
    e.setEventId("abc");
    e.setProjectId(URI.create("urn:storageos:Project:abcrandom"));
    e.setResourceId(URI.create("urn:storageos:FileShare:random"));
    e.setSeverity("REALLY BAD");
    e.setUserId(URI.create("urn:storageos:User:foobar"));
    e.setTimeInMillis(-1);
    // Remember "now" so the reads below query the same time bucket the
    // writes landed in.
    DateTime dateTime = new DateTime(DateTimeZone.UTC);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    long duration = System.currentTimeMillis();
    for (int index = 0; index < numThreads; index++) {
        executor.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                // Each thread writes perThreadCount records as
                // perThreadCount/batchCount batched inserts.
                for (int index = 0; index < perThreadCount / batchCount; index++) {
                    Event[] batch = new Event[batchCount];
                    for (int j = 0; j < batchCount; j++) {
                        batch[j] = e;
                    }
                    dbClient.insertTimeSeries(EventTimeSeries.class, batch);
                }
                return null;
            }
        });
    }
    executor.shutdown();
    Assert.assertTrue(executor.awaitTermination(60, TimeUnit.SECONDS));
    duration = System.currentTimeMillis() - duration;
    _logger.info("Insertion throughput with batch size {} is {} records per second", batchCount, (perThreadCount * numThreads) / (duration / 1000f));
    // read at default granularity
    // The latch counts down once per record returned; awaiting it verifies
    // every written record is read back within the timeout.
    executor = Executors.newFixedThreadPool(numThreads);
    CountDownLatch latch = new CountDownLatch(numThreads * perThreadCount);
    DummyQueryResult result = new DummyQueryResult(latch);
    _logger.info("Starting query with default granularity");
    duration = System.currentTimeMillis();
    dbClient.queryTimeSeries(EventTimeSeries.class, dateTime, result, executor);
    Assert.assertTrue(latch.await(60, TimeUnit.SECONDS));
    duration = System.currentTimeMillis() - duration;
    _logger.info("Read throughput(HOUR) is {} records per second", (perThreadCount * numThreads) / (duration / 1000f));
    // read at minute granularity
    // Only the records written within this specific minute are expected, so
    // the latch may legitimately not reach zero; the log line below reports
    // how many records matched.
    int total = numThreads * perThreadCount;
    latch = new CountDownLatch(total);
    result = new DummyQueryResult(latch, dateTime.getMinuteOfHour());
    _logger.info("Starting query with MINUTE bucket");
    duration = System.currentTimeMillis();
    dbClient.queryTimeSeries(EventTimeSeries.class, dateTime, TimeSeriesMetadata.TimeBucket.MINUTE, result, executor);
    // NOTE(review): getCount() >= 0 always holds, so this assertion cannot
    // fail — confirm whether a latch.await(...) was intended here.
    Assert.assertTrue(latch.getCount() >= 0);
    _logger.info("Records at time {}: {}", dateTime.toString(), total - latch.getCount());
    duration = System.currentTimeMillis() - duration;
    _logger.info("Read throughput(MINUTE) is {} records per second", total / (duration / 1000f));
    // read at second granularity
    // Same pattern as the MINUTE read, narrowed to one second's bucket.
    latch = new CountDownLatch(total);
    result = new DummyQueryResult(latch, dateTime.getMinuteOfHour(), dateTime.getSecondOfMinute());
    _logger.info("Starting query with SECOND bucket");
    duration = System.currentTimeMillis();
    dbClient.queryTimeSeries(EventTimeSeries.class, dateTime, TimeSeriesMetadata.TimeBucket.SECOND, result, executor);
    // NOTE(review): trivially-true assertion, as above — verify intent.
    Assert.assertTrue(latch.getCount() >= 0);
    _logger.info("Records at time {}: {}", dateTime.toString(), total - latch.getCount());
    duration = System.currentTimeMillis() - duration;
    _logger.info("Read throughput(SECOND) is {} records per second", total / (duration / 1000f));
    _logger.info("Finished testTimeSeries");
}
Use of com.emc.storageos.db.client.model.Event in project coprhd-controller by CoprHD.
In class DbClientTest, method testTimeSeriesWithTimestamp.
/**
 * Exercises the explicit-timestamp insert path: 5 threads each write 10
 * copies of a single shared Event, every insert stamped with a distinct,
 * deterministic time (one second apart, starting at 2000-01-01T00:00Z),
 * then reads all of them back at the default granularity.
 */
@Test
public void testTimeSeriesWithTimestamp() throws Exception {
    // org.apache.log4j.Logger.getRootLogger().setLevel(org.apache.log4j.Level.INFO);
    _logger.info("Starting testTimeSeriesWithTimestamp");
    final int perThreadCount = 10;
    final int numThreads = 5;
    final DbClient dbClient = _dbClient;
    // write
    // One shared Event instance is reused for every record; the per-insert
    // timestamp passed to insertTimeSeries distinguishes the rows.
    final Event e = new Event();
    e.setEventType("randomstuff");
    e.setVirtualPool(URI.create("urn:storageos:VirtualPool:random"));
    e.setEventId("abc");
    e.setProjectId(URI.create("urn:storageos:Project:abcrandom"));
    e.setResourceId(URI.create("urn:storageos:FileShare:random"));
    e.setSeverity("REALLY BAD");
    e.setUserId(URI.create("urn:storageos:User:foobar"));
    e.setTimeInMillis(-1);
    // Given time for data points of time series
    final DateTime dateTime = new DateTime(2000, 1, 1, 0, 0, DateTimeZone.UTC);
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
    long duration = System.currentTimeMillis();
    for (int threadIndex = 0; threadIndex < numThreads; threadIndex++) {
        final int threadId = threadIndex;
        executor.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                for (int index = 0; index < perThreadCount; index++) {
                    // Each (threadId, index) pair maps to a unique second
                    // offset, so no two inserts share a timestamp.
                    long millis = dateTime.getMillis() + (long) ((perThreadCount) * threadId + index) * 1000;
                    DateTime time = new DateTime(millis, DateTimeZone.UTC);
                    dbClient.insertTimeSeries(EventTimeSeries.class, time, e);
                }
                return null;
            }
        });
    }
    executor.shutdown();
    Assert.assertTrue(executor.awaitTermination(60, TimeUnit.SECONDS));
    duration = System.currentTimeMillis() - duration;
    _logger.info("Insertion throughput with batch size {} is {} records per second", 1, (perThreadCount * numThreads) / (duration / 1000f));
    // read at default granularity
    // The latch counts down once per record returned; awaiting it verifies
    // every written record is read back within the timeout.
    executor = Executors.newFixedThreadPool(numThreads);
    CountDownLatch latch = new CountDownLatch(numThreads * perThreadCount);
    DummyQueryResult result = new DummyQueryResult(latch);
    _logger.info("Starting query with default granularity");
    duration = System.currentTimeMillis();
    dbClient.queryTimeSeries(EventTimeSeries.class, dateTime, result, executor);
    Assert.assertTrue(latch.await(60, TimeUnit.SECONDS));
    duration = System.currentTimeMillis() - duration;
    _logger.info("Read throughput(HOUR) is {} records per second", (perThreadCount * numThreads) / (duration / 1000f));
    _logger.info("Finished testTimeSeriesWithTimestamp");
}
Aggregations