Use of com.linkedin.databus.core.DbusEventInternalReadable in project databus by linkedin.
The class RngUtils, method randomEvent:
public static DbusEvent randomEvent(short srcId) {
  ByteBuffer serBuf = ByteBuffer.allocate(1000).order(_eventFactory.getByteOrder());
  try {
    DbusEventInfo eventInfo = new DbusEventInfo(DbusOpcode.UPSERT,
                                                0L, // sequence number
                                                (short) 0, // physical partition
                                                randomPositiveShort(),
                                                System.currentTimeMillis(),
                                                srcId,
                                                schemaMd5,
                                                randomString(20).getBytes(Charset.defaultCharset()),
                                                randomPositiveShort() % 100 <= 1,
                                                false);
    // make the serialization version explicit
    eventInfo.setEventSerializationVersion(DbusEventFactory.DBUS_EVENT_V1);
    DbusEventFactory.serializeEvent(new DbusEventKey(randomLong()), serBuf, eventInfo);
  } catch (KeyTypeNotImplementedException e1) {
    throw new RuntimeException(e1);
  }
  serBuf.rewind();
  DbusEventInternalReadable e = _eventFactory.createReadOnlyDbusEventFromBuffer(serBuf, serBuf.position());
  return e;
}
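A minimal sketch of how this helper might be exercised from a test, assuming RngUtils lives in com.linkedin.databus.core.util as in the databus test utilities; the source id 42 and the loop count are arbitrary illustration values, and the class name is made up:

import com.linkedin.databus.core.DbusEvent;
import com.linkedin.databus.core.util.RngUtils;

public class RandomEventSketch {
  public static void main(String[] args) {
    // Generate a handful of random events for an arbitrary source id.
    for (int i = 0; i < 5; i++) {
      DbusEvent ev = RngUtils.randomEvent((short) 42);
      // Each event is serialized into a fresh buffer and exposed read-only.
      System.out.println("srcId=" + ev.srcId() + " size=" + ev.size()
          + " control=" + ev.isControlMessage());
    }
  }
}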
Use of com.linkedin.databus.core.DbusEventInternalReadable in project databus by linkedin.
The class RemoteExceptionHandler, method suspendConnectionOnError:
private void suspendConnectionOnError(Throwable exception) throws InvalidEventException, InterruptedException {
  // suspend pull threads
  _sourcesConn.getConnectionStatus().suspendOnError(exception);
  // send an error event to dispatcher through dbusEventBuffer
  DbusEventInternalReadable errorEvent = null;
  if (exception instanceof BootstrapDatabaseTooOldException) {
    errorEvent = _eventFactory.createErrorEvent(new DbusErrorEvent(exception, DbusEventInternalWritable.BOOTSTRAPTOOOLD_ERROR_SRCID));
  } else if (exception instanceof PullerRetriesExhaustedException) {
    errorEvent = _eventFactory.createErrorEvent(new DbusErrorEvent(exception, DbusEventInternalWritable.PULLER_RETRIES_EXPIRED));
  } else {
    throw new InvalidEventException("Got an unrecognizable exception ");
  }
  byte[] errorEventBytes = new byte[errorEvent.getRawBytes().limit()];
  if (LOG.isDebugEnabled()) {
    LOG.debug("error event size: " + errorEventBytes.length);
    LOG.debug("error event:" + errorEvent.toString());
  }
  errorEvent.getRawBytes().get(errorEventBytes);
  ByteArrayInputStream errIs = new ByteArrayInputStream(errorEventBytes);
  ReadableByteChannel errRbc = Channels.newChannel(errIs);
  boolean success = false;
  int retryCounter = 0;
  while (!success && retryCounter < 10) {
    String errMsg = "Sending an internal system event to dispatcher. Retry count = " + retryCounter;
    DbusPrettyLogUtils.logExceptionAtInfo(errMsg, exception, LOG);
    success = _dbusEventBuffer.readEvents(errRbc) > 0;
    if (!success) {
      LOG.warn("Unable to send an internal system event to dispatcher. Will retry later " + retryCounter);
      retryCounter++;
      Thread.sleep(1000);
    }
  }
}
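The retry loop above boils down to a reusable pattern: wrap the already-serialized event bytes in a ReadableByteChannel and call DbusEventBuffer.readEvents until it accepts at least one event. A rough sketch of that pattern in isolation follows; the helper class name, retry count, and sleep interval are illustrative and not part of databus, and the import packages are assumed from the class names used above:

import java.io.ByteArrayInputStream;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import com.linkedin.databus.core.DbusEventBuffer;
import com.linkedin.databus.core.InvalidEventException;

final class EventBufferWriteHelper {
  // Try to append pre-serialized event bytes to the buffer, retrying a bounded number of times.
  static boolean writeWithRetries(DbusEventBuffer buffer, byte[] eventBytes, int maxRetries, long sleepMs)
      throws InvalidEventException, InterruptedException {
    ReadableByteChannel channel = Channels.newChannel(new ByteArrayInputStream(eventBytes));
    for (int attempt = 0; attempt < maxRetries; attempt++) {
      // readEvents() reports how many events were read from the channel into the buffer.
      if (buffer.readEvents(channel) > 0) {
        return true;
      }
      Thread.sleep(sleepMs); // buffer not ready; back off before the next attempt
    }
    return false;
  }
}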
Use of com.linkedin.databus.core.DbusEventInternalReadable in project databus by linkedin.
The class GenericDispatcher, method doDispatchEvents:
protected void doDispatchEvents() {
  boolean debugEnabled = _log.isDebugEnabled();
  boolean traceEnabled = _log.isTraceEnabled();
  // need to remove eventually but for now I want to avoid a nasty diff
  final DispatcherState curState = _internalState;
  if (!_stopDispatch.get() && !curState.getEventsIterator().hasNext() && !checkForShutdownRequest()) {
    if (debugEnabled)
      _log.debug("Waiting for events");
    curState.getEventsIterator().await(50, TimeUnit.MILLISECONDS);
  }
  boolean success = true;
  boolean hasQueuedEvents = false;
  while (success && !_stopDispatch.get()
         && curState.getStateId() != DispatcherState.StateId.STOP_DISPATCH_EVENTS
         && null != curState.getEventsIterator() && curState.getEventsIterator().hasNext()
         && !checkForShutdownRequest()
         // exit the event processing loop if there are other queued notifications
         && !hasMessages()) {
    DbusEventInternalReadable nextEvent = curState.getEventsIterator().next();
    _currentWindowSizeInBytes += nextEvent.size();
    if (traceEnabled)
      _log.trace("Got event:" + nextEvent);
    Long eventSrcId = (long) nextEvent.srcId();
    if (curState.isSCNRegress()) {
      SingleSourceSCN scn = new SingleSourceSCN(nextEvent.physicalPartitionId(), nextEvent.sequence());
      _log.info("We are regressing to SCN: " + scn);
      curState.switchToRollback();
      doRollback(curState, scn, false, false);
      curState.setSCNRegress(false);
      curState.switchToExpectEventWindow();
    }
    if (null != getAsyncCallback().getStats())
      getAsyncCallback().getStats().registerWindowSeen(nextEvent.timestampInNanos(), nextEvent.sequence());
    if (nextEvent.isControlMessage()) {
      // control event
      if (nextEvent.isEndOfPeriodMarker()) {
        if (curState.isEventsSeen()) {
          if (null != curState.getCurrentSource()) {
            curState.switchToEndStreamSource();
            success = doEndStreamSource(curState);
          }
          SCN endWinScn = null;
          if (success) {
            _lastWindowScn = nextEvent.sequence();
            _lastEowTsNsecs = nextEvent.timestampInNanos();
            endWinScn = new SingleSourceSCN(nextEvent.physicalPartitionId(), _lastWindowScn);
            curState.switchToEndStreamEventWindow(endWinScn);
            success = doEndStreamEventWindow(curState);
          }
          if (success) {
            try {
              // end of period event
              Checkpoint cp = createCheckpoint(curState, nextEvent);
              success = doStoreCheckpoint(curState, nextEvent, cp, endWinScn);
            } catch (SharedCheckpointException e) {
              // shutdown
              return;
            }
          }
        } else {
          // empty window
          success = true;
          if (_log.isDebugEnabled()) {
            _log.debug("skipping empty window: " + nextEvent.sequence());
          }
          // write a checkpoint; takes care of slow sources, but skip storing the first control eop with 0 scn
          if (nextEvent.sequence() > 0) {
            _lastWindowScn = nextEvent.sequence();
            // update the EOW timestamp: the eop's timestamp is the max timestamp of all data events seen so far
            if (nextEvent.timestampInNanos() > 0) {
              _lastEowTsNsecs = nextEvent.timestampInNanos();
            }
            Checkpoint ckpt = createCheckpoint(curState, nextEvent);
            try {
              success = doStoreCheckpoint(curState, nextEvent, ckpt,
                                          new SingleSourceSCN(nextEvent.physicalPartitionId(), nextEvent.sequence()));
            } catch (SharedCheckpointException e) {
              // shutdown
              return;
            }
          } else {
            _log.warn("EOP with scn=" + nextEvent.sequence());
          }
        }
        if (success) {
          curState.switchToExpectEventWindow();
          // we have recovered from the error and it's not the dummy window
          if (nextEvent.sequence() > 0) {
            if (!getStatus().isRunningStatus())
              getStatus().resume();
          }
        }
      } else if (nextEvent.isErrorEvent()) {
        _log.info("Error event: " + nextEvent.sequence());
        success = processErrorEvent(curState, nextEvent);
      } else {
        // other system (control) event
        success = processSysEvent(curState, nextEvent);
        if (success) {
          if (nextEvent.isCheckpointMessage()) {
            Checkpoint sysCheckpt = createCheckpoint(curState, nextEvent);
            try {
              long scn = sysCheckpt.getConsumptionMode() == DbusClientMode.ONLINE_CONSUMPTION
                         ? nextEvent.sequence() : sysCheckpt.getBootstrapSinceScn();
              // ensure that a control event with 0 scn doesn't get saved unless it is during snapshot of bootstrap
              if (scn > 0 || sysCheckpt.getConsumptionMode() == DbusClientMode.BOOTSTRAP_SNAPSHOT) {
                success = doStoreCheckpoint(curState, nextEvent, sysCheckpt,
                                            new SingleSourceSCN(nextEvent.physicalPartitionId(), scn));
              }
            } catch (SharedCheckpointException e) {
              // shutdown
              return;
            }
          }
        }
      }
    } else {
      curState.setEventsSeen(true);
      // not a control event
      if (curState.getStateId().equals(StateId.EXPECT_EVENT_WINDOW)
          || curState.getStateId().equals(StateId.REPLAY_DATA_EVENTS)) {
        SCN startScn = new SingleSourceSCN(nextEvent.physicalPartitionId(), nextEvent.sequence());
        curState.switchToStartStreamEventWindow(startScn);
        success = doStartStreamEventWindow(curState);
        if (success && (eventSrcId.longValue() >= 0)) {
          success = doCheckStartSource(curState, eventSrcId, new SchemaId(nextEvent.schemaId()));
        }
      } else {
        if (null != curState.getCurrentSource() && !eventSrcId.equals(curState.getCurrentSource().getId())) {
          curState.switchToEndStreamSource();
          success = doEndStreamSource(curState);
        }
        if (success) {
          // Check if schemas of the source exist.
          // Also check if the exact schema id present in the event exists in the client. This is worthwhile if there's a
          // guarantee that the entire window is written with the same schemaId, which is the case if the relay does not
          // use a new schema mid-window.
          success = doCheckStartSource(curState, eventSrcId, new SchemaId(nextEvent.schemaId()));
        }
      }
      if (success) {
        // finally: process the data event
        success = processDataEvent(curState, nextEvent);
        if (success) {
          hasQueuedEvents = true;
          if (hasCheckpointThresholdBeenExceeded()) {
            _log.info("Attempting to checkpoint (only if the consumer callback for onCheckpoint returns SUCCESS), because "
                      + getCurrentWindowSizeInBytes() + " bytes reached without checkpoint ");
            success = processDataEventsBatch(curState);
            if (success) {
              hasQueuedEvents = false;
              // checkpoint: for bootstrap it's the right checkpoint that has been lazily created by a checkpoint event
              // checkpoint: for relay, create a checkpoint that has the prevScn
              Checkpoint cp = createCheckpoint(curState, nextEvent);
              // DDSDBUS-1889: scn for bootstrap is bootstrapSinceSCN; scn for online consumption is currentWindow
              SCN lastScn = cp.getConsumptionMode() == DbusClientMode.ONLINE_CONSUMPTION
                            ? curState.getStartWinScn()
                            : new SingleSourceSCN(nextEvent.physicalPartitionId(), cp.getBootstrapSinceScn());
              try {
                // Even if storeCheckpoint fails, we should continue (hoping for the best)
                success = doStoreCheckpoint(curState, nextEvent, cp, lastScn);
              } catch (SharedCheckpointException e) {
                // shutdown
                return;
              }
              curState.switchToExpectStreamDataEvents();
              if (!getStatus().isRunningStatus())
                getStatus().resume();
            }
          }
        }
      }
    }
    if (success) {
      // before next successful checkpoint
      if (hasCheckpointThresholdBeenExceeded()) {
        // drain events just in case they haven't been drained before; mainly control events that are not checkpoint events
        success = processDataEventsBatch(curState);
        if (success) {
          _log.warn("Checkpoint not stored, but removing older events from buffer to guarantee progress (checkpoint threshold has"
                    + " exceeded), consider checkpointing more frequently. Triggered on control-event=" + nextEvent.isControlMessage());
          // guarantee progress: risk being unable to rollback by removing events, but hope for the best
          removeEvents(curState);
        }
      }
    }
  }
  if (!_stopDispatch.get() && !checkForShutdownRequest()) {
    if (success) {
      if (hasQueuedEvents) {
        success = processDataEventsBatch(curState);
        if (!success) {
          _log.error("Unable to flush partial window");
        }
      }
      if (debugEnabled)
        _log.debug("doDispatchEvents to " + curState.toString());
    }
    if (!success) {
      curState.switchToRollback();
      doRollback(curState);
    }
    // loop around -- let any other messages be processed
    enqueueMessage(curState);
  }
}
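For orientation, the order in which doDispatchEvents interrogates each event can be condensed into a small classification helper. This is an illustrative sketch only (the class and method names are made up), using just the accessor methods that appear in the loop above:

import com.linkedin.databus.core.DbusEventInternalReadable;

final class EventClassifier {
  // Mirrors the branching order of doDispatchEvents(): control events first, then data events.
  static String classify(DbusEventInternalReadable ev) {
    if (ev.isControlMessage()) {
      if (ev.isEndOfPeriodMarker()) {
        return "end-of-period marker (scn=" + ev.sequence() + ")";
      }
      if (ev.isErrorEvent()) {
        return "error event";
      }
      if (ev.isCheckpointMessage()) {
        return "checkpoint message";
      }
      return "other system event";
    }
    return "data event (srcId=" + ev.srcId() + ", scn=" + ev.sequence() + ")";
  }
}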
Use of com.linkedin.databus.core.DbusEventInternalReadable in project databus by linkedin.
The class BootstrapTableReader, method execute:
public void execute() throws Exception {
  String query = getQuery();
  Connection conn = null;
  Statement stmt = null;
  ResultSet rs = null;
  try {
    conn = getConnection();
    stmt = conn.createStatement();
    LOG.info("Executing query : " + query);
    rs = stmt.executeQuery(query);
    byte[] b1 = new byte[1024 * 1024];
    ByteBuffer buffer = ByteBuffer.wrap(b1);
    DbusEventInternalReadable event = _eventFactory.createReadOnlyDbusEventFromBuffer(buffer, 0);
    int count = 0;
    _eventHandler.onStart(query);
    while (rs.next()) {
      buffer.clear();
      buffer.put(rs.getBytes("val"));
      event = event.reset(buffer, 0);
      GenericRecord record = _decoder.getGenericRecord(event);
      _eventHandler.onRecord(event, record);
      count++;
    }
    _eventHandler.onEnd(count);
  } finally {
    DBHelper.close(rs, stmt, conn);
  }
}
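The buffer-reuse idiom here (one ByteBuffer and one flyweight event re-pointed via reset() for every row) generalizes to any stream of pre-serialized events. A sketch under the assumption that a DbusEventFactory instance is available and that DbusEventFactory sits alongside DbusEventInternalReadable in com.linkedin.databus.core; the class and method names below are invented for illustration:

import java.nio.ByteBuffer;
import java.util.List;
import com.linkedin.databus.core.DbusEventFactory;
import com.linkedin.databus.core.DbusEventInternalReadable;

final class SerializedEventScanner {
  // Walk a list of pre-serialized events, reusing a single buffer and readable event via reset().
  static void scan(DbusEventFactory factory, List<byte[]> serializedEvents) {
    ByteBuffer buffer = ByteBuffer.wrap(new byte[1024 * 1024]).order(factory.getByteOrder());
    DbusEventInternalReadable event = factory.createReadOnlyDbusEventFromBuffer(buffer, 0);
    for (byte[] bytes : serializedEvents) {
      buffer.clear();
      buffer.put(bytes);
      event = event.reset(buffer, 0); // re-point the flyweight at the refreshed buffer
      System.out.println("scn=" + event.sequence() + " srcId=" + event.srcId());
    }
  }
}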
Use of com.linkedin.databus.core.DbusEventInternalReadable in project databus by linkedin.
The class DbusEventAppender, method addEventToBuffer:
public int addEventToBuffer(DbusEvent ev, int dataEventCount) {
  byte[] payload = new byte[((DbusEventInternalReadable) ev).payloadLength()];
  ev.value().get(payload);
  if ((_numDataEventsBeforeSkip < 0) || (dataEventCount < _numDataEventsBeforeSkip)) {
    _buffer.appendEvent(new DbusEventKey(ev.key()), ev.physicalPartitionId(), ev.logicalPartitionId(),
                        ev.timestampInNanos(), ev.srcId(), ev.schemaId(), payload, false, _stats);
    if (!_bufferReflector.validateBuffer()) {
      throw new RuntimeException("Buffer validation 3 failed");
    }
    ++dataEventCount;
  }
  return dataEventCount;
}
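The first two lines of addEventToBuffer show how to copy an event's payload out of its backing buffer before re-appending it; that step can be isolated roughly as below (a hypothetical helper, not part of databus):

import com.linkedin.databus.core.DbusEvent;
import com.linkedin.databus.core.DbusEventInternalReadable;

final class EventPayloadUtil {
  // Copy the event's value bytes into a standalone array, as addEventToBuffer() does before appending.
  static byte[] copyPayload(DbusEvent ev) {
    byte[] payload = new byte[((DbusEventInternalReadable) ev).payloadLength()];
    ev.value().get(payload); // value() exposes the payload as a ByteBuffer
    return payload;
  }
}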