Use of io.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
The class PersistedIncrementalExecutor, method dispatchEvent.
private void dispatchEvent(long startTimeOfNewAggregates, long emittedTime, String timeZone) {
    ZonedDateTime startTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(startTimeOfNewAggregates),
            ZoneId.of(timeZone));
    ZonedDateTime endTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(emittedTime), ZoneId.of(timeZone));
    log.info("Aggregation event dispatched for the duration " + duration + " for aggregation " + aggregatorName
            + " to aggregate data from " + startTime + " to " + endTime + " ");
    ComplexEventChunk complexEventChunk = new ComplexEventChunk();
    StreamEvent streamEvent = streamEventFactory.newInstance();
    streamEvent.setType(ComplexEvent.Type.CURRENT);
    streamEvent.setTimestamp(emittedTime);
    List<Object> outputDataList = new ArrayList<>();
    outputDataList.add(startTimeOfNewAggregates);
    outputDataList.add(emittedTime);
    outputDataList.add(null);
    streamEvent.setOutputData(outputDataList.toArray());
    if (isProcessingExecutor) {
        complexEventChunk.add(streamEvent);
        cudStreamProcessorQueue.add(new QueuedCudStreamProcessor(cudStreamProcessor, streamEvent,
                startTimeOfNewAggregates, emittedTime, timeZone, duration));
    }
    if (getNextExecutor() != null) {
        next.execute(complexEventChunk);
    }
}
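The chunk built here is handed to the next executor via next.execute(complexEventChunk). As a rough usage illustration, here is a minimal, self-contained sketch (not taken from the Siddhi code base) of the same build-then-traverse pattern: events are appended with add(), and a consumer rewinds with reset() and walks the chunk with hasNext()/next(). The StreamEvent(0, 0, 3) constructor arguments and the sample payload are assumptions made for the example; they mirror the three-element output data set above.

import io.siddhi.core.event.ComplexEvent;
import io.siddhi.core.event.ComplexEventChunk;
import io.siddhi.core.event.stream.StreamEvent;

public class ChunkTraversalSketch {
    public static void main(String[] args) {
        // Build a chunk with a single CURRENT event carrying three output attributes,
        // mirroring the [startTimeOfNewAggregates, emittedTime, null] payload above.
        ComplexEventChunk<StreamEvent> chunk = new ComplexEventChunk<>();
        StreamEvent event = new StreamEvent(0, 0, 3);   // assumed sizes: no before/after window data, 3 output attributes
        event.setType(ComplexEvent.Type.CURRENT);
        event.setTimestamp(System.currentTimeMillis());
        event.setOutputData(new Object[]{1609459200000L, 1609459260000L, null});
        chunk.add(event);

        // Traverse the chunk the way a downstream executor would.
        chunk.reset();
        while (chunk.hasNext()) {
            StreamEvent next = chunk.next();
            System.out.println(next.getType() + " @ " + next.getTimestamp());
        }
    }
}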
Use of io.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
The class CudStreamProcessorQueueManager, method run.
@Override
public void run() {
    while (run) {
        QueuedCudStreamProcessor queuedCudStreamProcessor = null;
        try {
            queuedCudStreamProcessor = this.cudStreamProcessorQueue.take();
        } catch (InterruptedException e) {
            log.warn("Thread interrupted. Error when trying to retrieve queued values. " + e.getMessage());
        }
        if (null != queuedCudStreamProcessor) {
            if (log.isDebugEnabled()) {
                log.debug("Current queue size is = " + cudStreamProcessorQueue.size());
            }
            int i = 0;
            while (true) {
                i++;
                try {
                    ComplexEventChunk complexEventChunk = new ComplexEventChunk();
                    complexEventChunk.add(queuedCudStreamProcessor.getStreamEvent());
                    if (log.isDebugEnabled()) {
                        log.debug("Starting processing for duration " + queuedCudStreamProcessor.getDuration());
                    }
                    queuedCudStreamProcessor.getCudStreamProcessor().process(complexEventChunk);
                    complexEventChunk.clear();
                    if (log.isDebugEnabled()) {
                        log.debug("End processing for duration " + queuedCudStreamProcessor.getDuration());
                    }
                    break;
                } catch (Exception e) {
                    if (e.getCause() instanceof SQLException) {
                        if (e.getCause().getLocalizedMessage().contains("try restarting transaction") && i < 3) {
                            log.error("Error occurred while executing the aggregation for data between "
                                    + queuedCudStreamProcessor.getStartTimeOfNewAggregates() + " - "
                                    + queuedCudStreamProcessor.getEmittedTime() + " for duration "
                                    + queuedCudStreamProcessor.getDuration()
                                    + ". Retrying the transaction, attempt " + (i - 1), e);
                            try {
                                Thread.sleep(3000);
                            } catch (InterruptedException interruptedException) {
                                log.error("Thread sleep interrupted while waiting to re-execute the "
                                        + "aggregation query for duration "
                                        + queuedCudStreamProcessor.getDuration(), interruptedException);
                            }
                            continue;
                        }
                        log.error("Error occurred while executing the aggregation for data between "
                                + queuedCudStreamProcessor.getStartTimeOfNewAggregates() + " - "
                                + queuedCudStreamProcessor.getEmittedTime() + " for duration "
                                + queuedCudStreamProcessor.getDuration()
                                + ". Attempted re-executing the query for 9 seconds. "
                                + "This should be investigated since it will lead to a data mismatch.", e);
                    } else {
                        log.error("Error occurred while executing the aggregation for data between "
                                + queuedCudStreamProcessor.getStartTimeOfNewAggregates() + " - "
                                + queuedCudStreamProcessor.getEmittedTime() + " for duration "
                                + queuedCudStreamProcessor.getDuration(), e);
                    }
                    break;
                }
            }
        }
    }
}
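The inner while (true) loop above implements a bounded retry: a transient "try restarting transaction" failure is retried with a three-second pause until the attempt counter reaches three; anything else is logged once and abandoned. A stripped-down, framework-free sketch of that pattern follows (BoundedRetrySketch, Work and runWithRetry are illustrative names, not Siddhi APIs):

public class BoundedRetrySketch {

    interface Work {
        void run() throws Exception;
    }

    // Re-run a unit of work up to maxAttempts times on a transient failure,
    // pausing between attempts; give up on anything else or once attempts are exhausted.
    static void runWithRetry(Work work, int maxAttempts, long backOffMillis) throws InterruptedException {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                work.run();
                return;                                   // success: stop retrying
            } catch (Exception e) {
                boolean transientFailure = e.getMessage() != null
                        && e.getMessage().contains("try restarting transaction");
                if (!transientFailure || attempt == maxAttempts) {
                    System.err.println("Giving up after attempt " + attempt + ": " + e.getMessage());
                    return;                               // non-transient or exhausted: abandon, as run() does
                }
                Thread.sleep(backOffMillis);              // pause before the next attempt
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        runWithRetry(() -> {
            throw new Exception("Deadlock found; try restarting transaction");
        }, 3, 3000);
    }
}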
Use of io.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
The class IncrementalExecutorsInitialiser, method initialiseExecutors.
public synchronized void initialiseExecutors() {
    if (this.isInitialised || isReadOnly) {
        // Only cleared when executors change from reading to processing state in one node deployment
        return;
    }
    Event[] events;
    Long lastData = null;
    // Get max(AGG_TIMESTAMP) from table corresponding to max duration
    Table tableForMaxDuration = aggregationTables.get(incrementalDurations.get(incrementalDurations.size() - 1));
    OnDemandQuery onDemandQuery = getOnDemandQuery(tableForMaxDuration, true, endOFLatestEventTimestamp);
    onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
    OnDemandQueryRuntime onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext,
            tableMap, windowMap, aggregationMap);
    // Get latest event timestamp in tableForMaxDuration and get the end time of the aggregation record
    events = onDemandQueryRuntime.execute();
    if (events != null) {
        lastData = (Long) events[events.length - 1].getData(0);
        endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(lastData,
                incrementalDurations.get(incrementalDurations.size() - 1), timeZone);
    }
    if (isPersistedAggregation) {
        for (int i = incrementalDurations.size() - 1; i > 0; i--) {
            if (lastData != null && !IncrementalTimeConverterUtil.isAggregationDataComplete(lastData,
                    incrementalDurations.get(i), timeZone)) {
                recreateState(lastData, incrementalDurations.get(i),
                        aggregationTables.get(incrementalDurations.get(i - 1)), i == 1);
            } else if (lastData == null) {
                recreateState(null, incrementalDurations.get(i),
                        aggregationTables.get(incrementalDurations.get(i - 1)), i == 1);
            }
            if (i > 1) {
                onDemandQuery = getOnDemandQuery(aggregationTables.get(incrementalDurations.get(i - 1)), true,
                        endOFLatestEventTimestamp);
                onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
                onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext, tableMap,
                        windowMap, aggregationMap);
                events = onDemandQueryRuntime.execute();
                if (events != null) {
                    lastData = (Long) events[events.length - 1].getData(0);
                } else {
                    lastData = null;
                }
            }
        }
    } else {
        for (int i = incrementalDurations.size() - 1; i > 0; i--) {
            TimePeriod.Duration recreateForDuration = incrementalDurations.get(i);
            Executor incrementalExecutor = incrementalExecutorMap.get(recreateForDuration);
            // Get the table previous to the duration for which we need to recreate (e.g. if we want to recreate
            // for minute duration, take the second table [provided that aggregation is done for seconds])
            // This lookup is filtered by endOFLatestEventTimestamp
            Table recreateFromTable = aggregationTables.get(incrementalDurations.get(i - 1));
            onDemandQuery = getOnDemandQuery(recreateFromTable, false, endOFLatestEventTimestamp);
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
            onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext, tableMap,
                    windowMap, aggregationMap);
            events = onDemandQueryRuntime.execute();
            if (events != null) {
                long referenceToNextLatestEvent = (Long) events[events.length - 1].getData(0);
                endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(referenceToNextLatestEvent,
                        incrementalDurations.get(i - 1), timeZone);
                ComplexEventChunk<StreamEvent> complexEventChunk = new ComplexEventChunk<>();
                for (Event event : events) {
                    StreamEvent streamEvent = streamEventFactory.newInstance();
                    streamEvent.setOutputData(event.getData());
                    complexEventChunk.add(streamEvent);
                }
                incrementalExecutor.execute(complexEventChunk);
                if (i == 1) {
                    TimePeriod.Duration rootDuration = incrementalDurations.get(0);
                    Executor rootIncrementalExecutor = incrementalExecutorMap.get(rootDuration);
                    long emitTimeOfLatestEventInTable = IncrementalTimeConverterUtil.getNextEmitTime(
                            referenceToNextLatestEvent, rootDuration, timeZone);
                    rootIncrementalExecutor.setEmitTime(emitTimeOfLatestEventInTable);
                }
            }
        }
    }
    this.isInitialised = true;
}
Use of io.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
The class IncrementalAggregateCompileCondition, method createAggregateSelectionEventChunk.
private ComplexEventChunk<StreamEvent> createAggregateSelectionEventChunk(
        ComplexEventChunk<StreamEvent> complexEventChunkToHoldMatches,
        List<ExpressionExecutor> outputExpressionExecutors) {
    ComplexEventChunk<StreamEvent> aggregateSelectionComplexEventChunk = new ComplexEventChunk<>();
    StreamEvent resetEvent = streamEventFactoryForTableMeta.newInstance();
    resetEvent.setType(ComplexEvent.Type.RESET);
    while (complexEventChunkToHoldMatches.hasNext()) {
        StreamEvent streamEvent = complexEventChunkToHoldMatches.next();
        StreamEvent newStreamEvent = streamEventFactoryForAggregateMeta.newInstance();
        Object[] outputData = new Object[newStreamEvent.getOutputData().length];
        for (int i = 0; i < outputExpressionExecutors.size(); i++) {
            outputData[i] = outputExpressionExecutors.get(i).execute(streamEvent);
        }
        newStreamEvent.setTimestamp(streamEvent.getTimestamp());
        newStreamEvent.setOutputData(outputData);
        aggregateSelectionComplexEventChunk.add(newStreamEvent);
    }
    for (ExpressionExecutor expressionExecutor : outputExpressionExecutors) {
        expressionExecutor.execute(resetEvent);
    }
    return aggregateSelectionComplexEventChunk;
}
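The RESET event created at the top is pushed through the same output expression executors after all matches have been processed, so that stateful executors (for example attribute aggregators behind sum or count) drop the state they accumulated while evaluating the matched rows. A minimal sketch of that CURRENT/RESET contract with a hypothetical accumulator follows (RunningSum is illustrative and does not implement Siddhi's ExpressionExecutor interface; the StreamEvent constructor arguments are assumptions made for the example):

import io.siddhi.core.event.ComplexEvent;
import io.siddhi.core.event.stream.StreamEvent;

public class ResetEventSketch {

    // Hypothetical stateful accumulator: CURRENT events add to the running total,
    // a RESET event clears it so the next evaluation starts from scratch.
    static class RunningSum {
        private long total;

        Long execute(ComplexEvent event, long value) {
            if (event.getType() == ComplexEvent.Type.RESET) {
                total = 0;             // RESET: drop accumulated state
                return null;
            }
            total += value;            // CURRENT: accumulate
            return total;
        }
    }

    public static void main(String[] args) {
        RunningSum sum = new RunningSum();
        StreamEvent current = new StreamEvent(0, 0, 1);
        current.setType(ComplexEvent.Type.CURRENT);
        System.out.println(sum.execute(current, 10));   // 10
        System.out.println(sum.execute(current, 5));    // 15

        StreamEvent reset = new StreamEvent(0, 0, 1);
        reset.setType(ComplexEvent.Type.RESET);
        sum.execute(reset, 0);                          // state cleared
        System.out.println(sum.execute(current, 3));    // 3
    }
}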
Use of io.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
The class SnapshotableEventQueueOperator, method tryUpdate.
@Override
public ComplexEventChunk<StateEvent> tryUpdate(ComplexEventChunk<StateEvent> updatingOrAddingEventChunk,
                                               Object storeEvents,
                                               InMemoryCompiledUpdateSet compiledUpdateSet,
                                               AddingStreamEventExtractor addingStreamEventExtractor) {
    SnapshotableStreamEventQueue storeEventQueue = (SnapshotableStreamEventQueue) storeEvents;
    updatingOrAddingEventChunk.reset();
    ComplexEventChunk<StateEvent> failedEventChunk = new ComplexEventChunk<>();
    while (updatingOrAddingEventChunk.hasNext()) {
        StateEvent overwritingOrAddingEvent = updatingOrAddingEventChunk.next();
        try {
            boolean updated = false;
            storeEventQueue.reset();
            while (storeEventQueue.hasNext()) {
                StreamEvent storeEvent = storeEventQueue.next();
                overwritingOrAddingEvent.setEvent(storeEventPosition, storeEvent);
                if ((Boolean) expressionExecutor.execute(overwritingOrAddingEvent)) {
                    for (Map.Entry<Integer, ExpressionExecutor> entry
                            : compiledUpdateSet.getExpressionExecutorMap().entrySet()) {
                        storeEvent.setOutputData(entry.getValue().execute(overwritingOrAddingEvent), entry.getKey());
                    }
                    storeEventQueue.overwrite(storeEvent);
                    updated = true;
                }
            }
            if (!updated) {
                updatingOrAddingEventChunk.remove();
                failedEventChunk.add(overwritingOrAddingEvent);
            }
        } finally {
            overwritingOrAddingEvent.setEvent(storeEventPosition, null);
        }
    }
    return failedEventChunk;
}
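tryUpdate updates matching store rows in place and returns the events that matched nothing, after detaching them from the incoming chunk with remove(); a caller can then fall back to inserting those events as new rows. A minimal, self-contained sketch (not Siddhi code) of that remove-and-collect pattern follows, using a stand-in condition in place of the compiled update condition and a hypothetical numeric payload:

import io.siddhi.core.event.ComplexEventChunk;
import io.siddhi.core.event.stream.StreamEvent;

public class RemoveAndCollectSketch {
    public static void main(String[] args) {
        // Build an incoming chunk of three events, each carrying one output attribute.
        ComplexEventChunk<StreamEvent> incoming = new ComplexEventChunk<>();
        for (long value : new long[]{1, -2, 3}) {
            StreamEvent event = new StreamEvent(0, 0, 1);
            event.setOutputData(new Object[]{value});
            incoming.add(event);
        }

        // Events that cannot be applied are removed from the incoming chunk with remove()
        // (which drops the event the cursor last returned) and gathered for the caller.
        ComplexEventChunk<StreamEvent> failed = new ComplexEventChunk<>();
        incoming.reset();
        while (incoming.hasNext()) {
            StreamEvent event = incoming.next();
            boolean applied = ((Long) event.getOutputData()[0]) > 0;   // stand-in for the update condition
            if (!applied) {
                incoming.remove();      // detach it from the original chunk ...
                failed.add(event);      // ... and hand it back to the caller
            }
        }

        failed.reset();
        while (failed.hasNext()) {
            System.out.println("failed: " + failed.next().getOutputData()[0]);
        }
    }
}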