Use of com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector in project databus by linkedin.
The class RelayContainerStatsRequestProcessor, method processStats.
private void processStats(StatsCollectors<DbusEventsStatisticsCollector> globalStatsCollector, DbusEventStatsCollectorsPartitioner resourceGroupStatsCollector, String prefix, DatabusRequest request) throws IOException, RequestProcessingException {
  String reqPathStr = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
  String reqPathSuffix = reqPathStr.substring(prefix.length());
  // allow DBNAME/partitionid for REST api
  reqPathSuffix = reqPathSuffix.replace('/', ':');
  DbusEventsTotalStats sourceStats = null;
  if (reqPathSuffix.contains(":")) {
    // This is a request for a specific partition
    if (null != globalStatsCollector) {
      DbusEventsStatisticsCollector s = globalStatsCollector.getStatsCollector(reqPathSuffix);
      sourceStats = (s == null) ? null : s.getTotalStats();
    }
  } else {
    // This is a request at DB aggregate level
    if (null != resourceGroupStatsCollector) {
      StatsCollectors<DbusEventsStatisticsCollector> c = resourceGroupStatsCollector.getDBStatsCollector(reqPathSuffix);
      if (null != c)
        sourceStats = c.getStatsCollector().getTotalStats();
    }
  }
  if (null == sourceStats) {
    LOG.warn("No Stats for this source=" + request.getName() + ", prefix=" + prefix + ", DB/Physical Partition String=" + reqPathSuffix);
    sourceStats = new DbusEventsTotalStats(0, reqPathSuffix, false, false, null);
  }
  writeJsonObjectToResponse(sourceStats, request);
  if (request.getRequestType() == HttpMethod.PUT || request.getRequestType() == HttpMethod.POST) {
    enableOrResetStatsMBean(sourceStats, request);
  }
}
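The handler above derives its lookup key from the request path: it strips the registered prefix and maps the REST form DBNAME/partitionId to the collector key DBNAME:partitionId. A minimal sketch of that normalization and lookup; the prefix, DB name, and partition id below are hypothetical, and only getStatsCollector and getTotalStats come from the classes shown above.

DbusEventsTotalStats lookupPartitionStats(StatsCollectors<DbusEventsStatisticsCollector> perPartitionCollectors) {
  String prefix = "/relayStats/psource/";            // hypothetical registered prefix
  String reqPath = prefix + "ExampleDB/3";           // hypothetical REST path
  // same normalization as the handler: drop the prefix, then DBNAME/partitionId -> DBNAME:partitionId
  String key = reqPath.substring(prefix.length()).replace('/', ':');  // -> "ExampleDB:3"
  DbusEventsStatisticsCollector collector = perPartitionCollectors.getStatsCollector(key);
  return (collector == null) ? null : collector.getTotalStats();
}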
Use of com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector in project databus by linkedin.
The class BootstrapRequestProcessor, method doProcess.
/*
 * (non-Javadoc)
 *
 * @see com.linkedin.databus.container.request.RequestProcessor#process(com.linkedin.databus.container.request.DatabusRequest)
 */
@Override
protected DatabusRequest doProcess(DatabusRequest request) throws IOException, RequestProcessingException {
  BootstrapProcessor processor = null;
  BootstrapHttpStatsCollector bootstrapStatsCollector = _bootstrapServer.getBootstrapStatsCollector();
  long startTime = System.currentTimeMillis();
  boolean isDebug = LOG.isDebugEnabled();
  try {
    try {
      String threadName = Thread.currentThread().getName();
      DbusEventsStatisticsCollector threadCollector = _bootstrapServer.getOutBoundStatsCollectors().getStatsCollector(threadName);
      if (null == threadCollector) {
        threadCollector = new DbusEventsStatisticsCollector(_bootstrapServer.getContainerStaticConfig().getId(), threadName, true, false, _bootstrapServer.getMbeanServer());
        StatsCollectors<DbusEventsStatisticsCollector> ds = _bootstrapServer.getOutBoundStatsCollectors();
        ds.addStatsCollector(threadName, threadCollector);
      }
      processor = new BootstrapProcessor(_config, threadCollector);
    } catch (Exception e) {
      if (null != bootstrapStatsCollector) {
        bootstrapStatsCollector.registerErrBootstrap();
      }
      throw new RequestProcessingException(e);
    }
    DatabusComponentStatus componentStatus = _componentStatus.getStatusSnapshot();
    if (!componentStatus.isRunningStatus()) {
      if (null != bootstrapStatsCollector)
        bootstrapStatsCollector.registerErrBootstrap();
      throw new RequestProcessingException(componentStatus.getMessage());
    }
    String partitionInfoString = request.getParams().getProperty(PARTITION_INFO_PARAM);
    DbusKeyFilter keyFilter = null;
    if ((null != partitionInfoString) && (!partitionInfoString.isEmpty())) {
      try {
        keyFilter = KeyFilterConfigJSONFactory.parseDbusKeyFilter(partitionInfoString);
        if (isDebug)
          LOG.debug("ServerSideFilter is :" + keyFilter);
      } catch (Exception ex) {
        String msg = "Unable to parse partitionInfo from request. PartitionInfo was :" + partitionInfoString;
        LOG.error(msg, ex);
        throw new RequestProcessingException(msg, ex);
      }
    }
    String outputFormat = request.getParams().getProperty(OUTPUT_PARAM);
    Encoding enc = Encoding.BINARY;
    if (null != outputFormat) {
      try {
        enc = Encoding.valueOf(outputFormat.toUpperCase());
      } catch (Exception ex) {
        LOG.error("Unable to find the output format for bootstrap request for " + outputFormat + ". Using Binary!!", ex);
      }
    }
    processor.setKeyFilter(keyFilter);
    String checkpointString = request.getRequiredStringParam(CHECKPOINT_PARAM);
    int bufferMarginSpace = DEFAULT_BUFFER_MARGIN_SPACE;
    if (null != _serverHostPort) {
      bufferMarginSpace = Math.max(bufferMarginSpace, (_serverHostPort.length() + Checkpoint.BOOTSTRAP_SERVER_INFO.length() + DEFAULT_JSON_OVERHEAD_BYTES));
    }
    int clientFreeBufferSize = request.getRequiredIntParam(BATCHSIZE_PARAM) - checkpointString.length() - bufferMarginSpace;
    BootstrapEventWriter writer = null;
    if (_config.getPredicatePushDown())
      writer = createEventWriter(request, clientFreeBufferSize, null, enc);
    else
      writer = createEventWriter(request, clientFreeBufferSize, keyFilter, enc);
    Checkpoint cp = new Checkpoint(checkpointString);
    DbusClientMode consumptionMode = cp.getConsumptionMode();
    LOG.info("Bootstrap request received: " + "fetchSize=" + clientFreeBufferSize + ", consumptionMode=" + consumptionMode + ", checkpoint=" + checkpointString + ", predicatePushDown=" + _config.getPredicatePushDown());
    try {
      boolean phaseCompleted = false;
      switch (consumptionMode) {
        case BOOTSTRAP_SNAPSHOT:
          phaseCompleted = processor.streamSnapShotRows(new Checkpoint(checkpointString), writer);
          break;
        case BOOTSTRAP_CATCHUP:
          phaseCompleted = processor.streamCatchupRows(new Checkpoint(checkpointString), writer);
          break;
        default:
          if (null != bootstrapStatsCollector)
            bootstrapStatsCollector.registerErrBootstrap();
          throw new RequestProcessingException("Unexpected mode: " + consumptionMode);
      }
      if (null != bootstrapStatsCollector)
        bootstrapStatsCollector.registerBootStrapReq(cp, System.currentTimeMillis() - startTime, clientFreeBufferSize);
      if (writer.getNumRowsWritten() == 0 && writer.getSizeOfPendingEvent() > 0) {
        // Append a header to indicate to the client that we do have at least one event to
        // send, but it is too large to fit into client's offered buffer.
        request.getResponseContent().addMetadata(DatabusHttpHeaders.DATABUS_PENDING_EVENT_SIZE, writer.getSizeOfPendingEvent());
        if (isDebug) {
          LOG.debug("Returning 0 events but have pending event of size " + writer.getSizeOfPendingEvent());
        }
      }
      if (phaseCompleted) {
        request.getResponseContent().setMetadata(BootstrapProcessor.PHASE_COMPLETED_HEADER_NAME, BootstrapProcessor.PHASE_COMPLETED_HEADER_TRUE);
      }
    } catch (BootstrapDatabaseTooOldException e) {
      if (null != bootstrapStatsCollector)
        bootstrapStatsCollector.registerErrDatabaseTooOld();
      LOG.error("Bootstrap database is too old!", e);
      throw new RequestProcessingException(e);
    } catch (BootstrapDBException e) {
      if (null != bootstrapStatsCollector)
        bootstrapStatsCollector.registerErrBootstrap();
      throw new RequestProcessingException(e);
    } catch (SQLException e) {
      if (null != bootstrapStatsCollector)
        bootstrapStatsCollector.registerErrSqlException();
      throw new RequestProcessingException(e);
    } catch (BootstrapProcessingException e) {
      if (null != bootstrapStatsCollector)
        bootstrapStatsCollector.registerErrBootstrap();
      throw new RequestProcessingException(e);
    }
  } finally {
    if (null != processor)
      processor.shutdown();
  }
  return request;
}
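The collector setup at the top of doProcess is a get-or-create pattern keyed by the handler thread's name, so each Netty worker thread accumulates its own outbound event statistics. A minimal sketch of that pattern in isolation; the helper name and its ownerId/mbeanServer parameters are assumptions, while the constructor arguments and the StatsCollectors calls mirror the code above.

// Assumed imports: the project's DbusEventsStatisticsCollector and StatsCollectors classes,
// plus javax.management.MBeanServer.
DbusEventsStatisticsCollector getOrCreatePerThreadCollector(
    StatsCollectors<DbusEventsStatisticsCollector> outboundCollectors,
    int ownerId, MBeanServer mbeanServer) {
  String threadName = Thread.currentThread().getName();
  DbusEventsStatisticsCollector collector = outboundCollectors.getStatsCollector(threadName);
  if (null == collector) {
    // arguments mirror the call above: owner id, collector name keyed by thread,
    // the two flags, and the MBean server to register the stats MBean with
    collector = new DbusEventsStatisticsCollector(ownerId, threadName, true, false, mbeanServer);
    outboundCollectors.addStatsCollector(threadName, collector);
  }
  return collector;
}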
Use of com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector in project databus by linkedin.
The class ContainerStatsRequestProcessor, method processPhysicalPartitionStats.
private void processPhysicalPartitionStats(StatsCollectors<DbusEventsStatisticsCollector> statsCollectors, String prefix, DatabusRequest request) throws IOException, RequestProcessingException {
  if (null == statsCollectors)
    return;
  String category = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
  String sourceIdStr = category.substring(prefix.length());
  // allow DBNAME/partitionid for REST api
  sourceIdStr = sourceIdStr.replace('/', ':');
  DbusEventsStatisticsCollector s = statsCollectors.getStatsCollector(sourceIdStr);
  DbusEventsTotalStats sourceStats = (s == null) ? null : s.getTotalStats();
  if (null == sourceStats) {
    LOG.warn("No stats for this srcId: " + request.getName() + ", prefix=" + prefix + ", source ids=" + sourceIdStr);
    sourceStats = new DbusEventsTotalStats(0, sourceIdStr, false, false, null);
  }
  writeJsonObjectToResponse(sourceStats, request);
  if (request.getRequestType() == HttpMethod.PUT || request.getRequestType() == HttpMethod.POST) {
    enableOrResetStatsMBean(sourceStats, request);
  }
}
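Note that both this handler and the relay variant above fall back to an empty DbusEventsTotalStats rather than failing the request, so the caller always receives a well-formed JSON body even for an unknown partition. A minimal sketch of that fallback; the helper name is illustrative, and the two boolean flags and the null MBean server are passed through exactly as the handlers do above.

DbusEventsTotalStats statsOrEmpty(StatsCollectors<DbusEventsStatisticsCollector> collectors, String key) {
  DbusEventsStatisticsCollector c = collectors.getStatsCollector(key);
  if (null != c) {
    return c.getTotalStats();
  }
  // same placeholder the handlers build: owner id 0, the request key as the stats name,
  // flags and MBean server as in the code above (no MBean registration)
  return new DbusEventsTotalStats(0, key, false, false, null);
}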
Use of com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector in project databus by linkedin.
The class TestAggregatedDbusEventStatsCollector, method runAggregateTestStats.
void runAggregateTestStats(int n) {
  try {
    DbusEventsStatisticsCollector aggregateEventStatsCollectors = new AggregatedDbusEventsStatisticsCollector(0, "eventsInbound", true, true, null);
    // collection of n+1 stats collectors: the aggregate plus n individual collectors
    StatsCollectors<DbusEventsStatisticsCollector> eventStatsCollectors = new StatsCollectors<DbusEventsStatisticsCollector>(aggregateEventStatsCollectors);
    // add new individual stats collectors
    int maxEventsInWindow = 10;
    StatsWriter[] nStatsWriters = createStatsWriters(n, maxEventsInWindow);
    for (StatsWriter sw : nStatsWriters) {
      eventStatsCollectors.addStatsCollector(sw.getStatsName(), sw.getEventsStatsCollector());
    }
    // aggregator thread; GlobalStatsCalc is constructed with its poll interval
    GlobalStatsCalc agg = new GlobalStatsCalc(10);
    agg.registerStatsCollector(eventStatsCollectors);
    Thread aggThread = new Thread(agg);
    aggThread.start();
    // start writers
    for (StatsWriter sw : nStatsWriters) {
      sw.start();
    }
    // let the writers start
    Thread.sleep(1000);
    long startTimeMs = System.currentTimeMillis();
    // run for 5s
    long durationInMs = 5 * 1000;
    DbusEventsTotalStats globalStats = aggregateEventStatsCollectors.getTotalStats();
    long prevValue = 0, prevSize = 0;
    while (System.currentTimeMillis() < (startTimeMs + durationInMs)) {
      // constraint checks:
      // readers must never see partial updates or re-initialized values
      long value = globalStats.getNumDataEvents();
      long size = globalStats.getSizeDataEvents();
      Assert.assertTrue(value > 0);
      if (prevValue > 0 && (value != prevValue)) {
        Assert.assertTrue(size != prevSize);
      }
      // track the last observed values so the consistency check above can fire
      prevValue = value;
      prevSize = size;
      Assert.assertTrue(globalStats.getMaxSeenWinScn() > 0);
      Thread.sleep(RngUtils.randomPositiveInt() % 10 + 1);
    }
    // shut down
    for (StatsWriter sw : nStatsWriters) {
      sw.shutdown();
      sw.interrupt();
    }
    // give the aggregator a chance to catch up
    Thread.sleep(1000);
    agg.halt();
    aggThread.interrupt();
    // final tally: sum of all individual StatsWriter collectors, computed single-threaded;
    // the test asserts that the concurrently maintained aggregate matches this sum
    AggregatedDbusEventsTotalStats myTotalStats = new AggregatedDbusEventsTotalStats(StatsWriter.OWNERID, "mytotal", true, false, null);
    for (DbusEventsStatisticsCollector s : eventStatsCollectors.getStatsCollectors()) {
      DbusEventsTotalStats writerStat = s.getTotalStats();
      myTotalStats.mergeStats(writerStat);
    }
    LOG.info("global = " + globalStats.getNumDataEvents() + " Sigma writers=" + myTotalStats.getNumDataEvents());
    Assert.assertEquals("NumDataEvents mismatch for n = " + n, globalStats.getNumDataEvents(), myTotalStats.getNumDataEvents());
    Assert.assertEquals("MaxSeenWinScn mismatch for n = " + n, globalStats.getMaxSeenWinScn(), myTotalStats.getMaxSeenWinScn());
  } catch (InterruptedException e) {
    Assert.fail("Test interrupted");
  }
}
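The test exercises the same pattern the containers use at runtime: one AggregatedDbusEventsStatisticsCollector fronts many per-source collectors through a StatsCollectors container, and a GlobalStatsCalc thread periodically folds the individual totals into the aggregate. A condensed sketch of just that wiring, without the writer threads; the collector names and the thread name are illustrative, while every call used comes from the test above.

// aggregate that all per-source numbers roll up into
DbusEventsStatisticsCollector aggregate =
    new AggregatedDbusEventsStatisticsCollector(0, "eventsInbound", true, true, null);
StatsCollectors<DbusEventsStatisticsCollector> collectors =
    new StatsCollectors<DbusEventsStatisticsCollector>(aggregate);

// one collector per source/partition; names are illustrative
collectors.addStatsCollector("ExampleDB:1",
    new DbusEventsStatisticsCollector(0, "ExampleDB:1", true, false, null));
collectors.addStatsCollector("ExampleDB:2",
    new DbusEventsStatisticsCollector(0, "ExampleDB:2", true, false, null));

// background merger, constructed with its poll interval as in the test
GlobalStatsCalc merger = new GlobalStatsCalc(10);
merger.registerStatsCollector(collectors);
Thread mergerThread = new Thread(merger, "global-stats-merger");
mergerThread.start();

// ... event writers update the individual collectors concurrently ...

merger.halt();
mergerThread.interrupt();
DbusEventsTotalStats rolledUp = aggregate.getTotalStats();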
Use of com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector in project databus by linkedin.
The class JmxShutdownThread, method doShutdown.
protected void doShutdown() {
  unregisterShutdownHook();
  LOG.info("Initializing shutdown for serving container: " + _containerStaticConfig.getId());
  if (null != _jmxShutdownThread && Thread.State.NEW == _jmxShutdownThread.getState()) {
    try {
      Runtime.getRuntime().removeShutdownHook(_jmxShutdownThread);
      _jmxShutdownThread.start();
    } catch (IllegalStateException ise) {
      LOG.error("Error removing shutdown hook", ise);
    }
  }
  if (null != _nettyShutdownThread && Thread.State.NEW == _nettyShutdownThread.getState()) {
    try {
      Runtime.getRuntime().removeShutdownHook(_nettyShutdownThread);
      _nettyShutdownThread.start();
    } catch (IllegalStateException ise) {
      LOG.error("Error removing shutdown hook", ise);
    }
  }
  if (_globalStatsMerger != null && !_globalStatsMerger.isHalted()) {
    _globalStatsMerger.halt();
    _globalStatsThread.interrupt();
  }
  // unregister all mbeans
  getContainerStatsCollector().unregisterMBeans();
  getInboundEventStatisticsCollector().unregisterMBeans();
  getOutboundEventStatisticsCollector().unregisterMBeans();
  for (DbusEventsStatisticsCollector coll : _inBoundStatsCollectors.getStatsCollectors()) {
    coll.unregisterMBeans();
  }
  for (DbusEventsStatisticsCollector coll : _outBoundStatsCollectors.getStatsCollectors()) {
    coll.unregisterMBeans();
  }
  _componentAdmin.unregisterAsMBeans();
  LOG.info("joining shutdown threads");
  long startTime = System.currentTimeMillis();
  long timeRemaining = SHUTDOWN_TIMEOUT_MS;
  while (null != _jmxShutdownThread && _jmxShutdownThread.isAlive() && timeRemaining > 0) {
    try {
      _jmxShutdownThread.join(timeRemaining);
    } catch (InterruptedException ie) {
    }
    timeRemaining = SHUTDOWN_TIMEOUT_MS - (System.currentTimeMillis() - startTime);
  }
  LOG.info("JMX shutdown for container " + _containerStaticConfig.getId() + ":" + (null != _jmxShutdownThread ? !_jmxShutdownThread.isAlive() : true) + "; ms remaining: " + timeRemaining);
  while (null != _nettyShutdownThread && _nettyShutdownThread.isAlive() && timeRemaining > 0) {
    try {
      _nettyShutdownThread.join(timeRemaining);
    } catch (InterruptedException ie) {
    }
    timeRemaining = SHUTDOWN_TIMEOUT_MS - (System.currentTimeMillis() - startTime);
  }
  LOG.info("Netty shutdown for container " + _containerStaticConfig.getId() + ":" + (null != _nettyShutdownThread ? !_nettyShutdownThread.isAlive() : true) + "; ms remaining: " + timeRemaining);
  LOG.info("Done with shutdown for serving container: " + _containerStaticConfig.getId());
}
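The shutdown sequence joins the JMX and Netty helper threads against a single deadline: timeRemaining is always recomputed from the same startTime, so the total wait across both loops is bounded by SHUTDOWN_TIMEOUT_MS. A minimal sketch of that bounded-join idiom extracted into a helper; the helper name and signature are not from the project, only the join-with-remaining-budget pattern is.

// Waits for a thread to die, but never longer than the remaining budget.
// Returns the budget left over for subsequent joins.
static long joinWithBudget(Thread t, long budgetMs) {
  long start = System.currentTimeMillis();
  long remaining = budgetMs;
  while (null != t && t.isAlive() && remaining > 0) {
    try {
      t.join(remaining);
    } catch (InterruptedException ie) {
      // fall through and recompute the remaining budget
    }
    remaining = budgetMs - (System.currentTimeMillis() - start);
  }
  return Math.max(0, remaining);
}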