use of org.wso2.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
the class OverwriteTableIndexOperator method tryUpdate.
@Override
public ComplexEventChunk<StreamEvent> tryUpdate(ComplexEventChunk<StateEvent> updatingOrAddingEventChunk,
                                                Object storeEvents,
                                                InMemoryCompiledUpdateSet compiledUpdateSet,
                                                AddingStreamEventExtractor addingStreamEventExtractor) {
    updatingOrAddingEventChunk.reset();
    while (updatingOrAddingEventChunk.hasNext()) {
        StateEvent overwritingOrAddingEvent = updatingOrAddingEventChunk.next();
        // Overwrite the corresponding event in the indexed store
        ((IndexedEventHolder) storeEvents).overwrite(
                addingStreamEventExtractor.getAddingStreamEvent(overwritingOrAddingEvent));
    }
    // Every event is handled by overwrite(), so no events are left over to return for insertion
    return null;
}
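For reference, a minimal sketch of the ComplexEventChunk life cycle that the operator above relies on; the two events and the handle method are hypothetical, but the constructor flag and the add/reset/hasNext/next calls are the same ones used throughout these snippets:

ComplexEventChunk<StreamEvent> chunk = new ComplexEventChunk<>(false); // false = not a batch
chunk.add(firstStreamEvent);   // hypothetical pooled events
chunk.add(secondStreamEvent);
chunk.reset();                 // rewind the cursor before iterating
while (chunk.hasNext()) {
    StreamEvent event = chunk.next();
    handle(event);             // hypothetical per-event processing
}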
use of org.wso2.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
the class Window method add.
/**
* Add the given ComplexEventChunk to the Window.
*
* @param complexEventChunk the event chunk to be added
*/
public void add(ComplexEventChunk complexEventChunk) {
    try {
        this.lockWrapper.lock();
        complexEventChunk.reset();
        // Convert every event to a StreamEvent, because StateEvents can arrive here
        // when the chunk is received directly from a join
        ComplexEvent complexEvents = complexEventChunk.getFirst();
        StreamEvent firstEvent = streamEventPool.borrowEvent();
        eventConverter.convertComplexEvent(complexEvents, firstEvent);
        StreamEvent currentEvent = firstEvent;
        complexEvents = complexEvents.getNext();
        int numberOfEvents = 0;
        while (complexEvents != null) {
            numberOfEvents++;
            StreamEvent nextEvent = streamEventPool.borrowEvent();
            eventConverter.convertComplexEvent(complexEvents, nextEvent);
            currentEvent.setNext(nextEvent);
            currentEvent = nextEvent;
            complexEvents = complexEvents.getNext();
        }
        try {
            if (throughputTrackerInsert != null && siddhiAppContext.isStatsEnabled()) {
                throughputTrackerInsert.eventsIn(numberOfEvents);
                latencyTrackerInsert.markIn();
            }
            // Send the converted chain to the window processor
            windowProcessor.process(new ComplexEventChunk<StreamEvent>(firstEvent, currentEvent,
                    complexEventChunk.isBatch()));
        } finally {
            if (throughputTrackerInsert != null && siddhiAppContext.isStatsEnabled()) {
                latencyTrackerInsert.markOut();
            }
        }
    } finally {
        this.lockWrapper.unlock();
    }
}
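A minimal usage sketch, assuming window is an initialized Window instance and streamEvent is a pooled StreamEvent (both names hypothetical):

ComplexEventChunk<StreamEvent> chunk = new ComplexEventChunk<>(false);
chunk.add(streamEvent);
window.add(chunk); // add() takes the window's lock internally, so the caller needs no extra synchronization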
use of org.wso2.siddhi.core.event.ComplexEventChunk in project carbon-apimgt by wso2.
the class EmitOnStateChange method process.
@Override
protected void process(ComplexEventChunk<StreamEvent> streamEventChunk, Processor processor,
                       StreamEventCloner streamEventCloner, ComplexEventPopulater complexEventPopulater) {
    while (streamEventChunk.hasNext()) {
        StreamEvent event = streamEventChunk.next();
        Boolean currentThrottleState = (Boolean) isThrottledExpressionExecutor.execute(event);
        String key = (String) keyExpressionExecutor.execute(event);
        Boolean lastThrottleState = (Boolean) throttleStateMap.get(key);
        if (currentThrottleState.equals(lastThrottleState) && !currentThrottleState) {
            // The state is unchanged and still un-throttled, so suppress the event
            streamEventChunk.remove();
        } else {
            // The state changed, the key is throttled, or the key is new: record the state and emit
            throttleStateMap.put(key, currentThrottleState);
        }
    }
    nextProcessor.process(streamEventChunk);
}
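The in-place filtering above relies on ComplexEventChunk.remove(), which unlinks the event most recently returned by next(). A condensed sketch of that pattern with a hypothetical predicate:

streamEventChunk.reset();
while (streamEventChunk.hasNext()) {
    StreamEvent event = streamEventChunk.next();
    if (!shouldForward(event)) {    // hypothetical filter condition
        streamEventChunk.remove();  // drop the current event from the chunk's linked list
    }
}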
use of org.wso2.siddhi.core.event.ComplexEventChunk in project carbon-apimgt by wso2.
the class ThrottleStreamProcessor method process.
@Override
protected void process(ComplexEventChunk<StreamEvent> streamEventChunk, Processor nextProcessor,
                       StreamEventCloner streamEventCloner, ComplexEventPopulater complexEventPopulater) {
    synchronized (this) {
        // Lazily initialize the first window-expiry timestamp and schedule the first trigger
        if (expireEventTime == -1) {
            long currentTime = siddhiAppContext.getTimestampGenerator().currentTime();
            if (startTime != -1) {
                expireEventTime = addTimeShift(currentTime);
            } else {
                expireEventTime = siddhiAppContext.getTimestampGenerator().currentTime() + timeInMilliSeconds;
            }
            if (scheduler != null) {
                scheduler.notifyAt(expireEventTime);
            } else {
                log.error("scheduler is not initiated");
            }
        }
        long currentTime = siddhiAppContext.getTimestampGenerator().currentTime();
        boolean sendEvents;
        if (currentTime >= expireEventTime) {
            // The window has turned over: advance to the next expiry time and flush the buffered events
            expireEventTime += timeInMilliSeconds;
            if (scheduler != null) {
                scheduler.notifyAt(expireEventTime);
            } else {
                log.error("scheduler is not initiated");
            }
            sendEvents = true;
        } else {
            sendEvents = false;
        }
        while (streamEventChunk.hasNext()) {
            StreamEvent streamEvent = streamEventChunk.next();
            if (streamEvent.getType() != ComplexEvent.Type.CURRENT) {
                continue;
            }
            complexEventPopulater.populateComplexEvent(streamEvent, new Object[]{expireEventTime});
            // Buffer an EXPIRED copy of the event, stamped with the window boundary
            StreamEvent clonedStreamEvent = streamEventCloner.copyStreamEvent(streamEvent);
            clonedStreamEvent.setType(StreamEvent.Type.EXPIRED);
            clonedStreamEvent.setTimestamp(expireEventTime);
            expiredEventChunk.add(clonedStreamEvent);
        }
        if (sendEvents) {
            // Append the buffered EXPIRED events to the outgoing chunk and start a new buffer
            expiredEventChunk.reset();
            if (expiredEventChunk.getFirst() != null) {
                streamEventChunk.add(expiredEventChunk.getFirst());
            }
            expiredEventChunk.clear();
        }
    }
    if (streamEventChunk.getFirst() != null) {
        // Forward current and expired events together as one batch
        streamEventChunk.setBatch(true);
        nextProcessor.process(streamEventChunk);
        streamEventChunk.setBatch(false);
    }
}
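A worked illustration of the expiry bookkeeping above, assuming a hypothetical one-minute window and a first event arriving at t = 1000 (with startTime == -1):

long timeInMilliSeconds = 60000;                   // hypothetical 1-minute window
long expireEventTime = 1000 + timeInMilliSeconds;  // first boundary: 61000
// Events with timestamps 1000..60999 only get EXPIRED copies buffered.
// The first event at t >= 61000 advances the boundary and sets sendEvents = true,
// which flushes the buffered EXPIRED events downstream:
expireEventTime += timeInMilliSeconds;             // next boundary: 121000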
use of org.wso2.siddhi.core.event.ComplexEventChunk in project siddhi by wso2.
the class AggregationRuntime method compileExpression.
public CompiledCondition compileExpression(Expression expression, Within within, Expression per,
                                           MatchingMetaInfoHolder matchingMetaInfoHolder,
                                           List<VariableExpressionExecutor> variableExpressionExecutors,
                                           Map<String, Table> tableMap, String queryName,
                                           SiddhiAppContext siddhiAppContext) {
    Map<TimePeriod.Duration, CompiledCondition> withinTableCompiledConditions = new HashMap<>();
    CompiledCondition withinInMemoryCompileCondition;
    CompiledCondition onCompiledCondition;
    List<Attribute> additionalAttributes = new ArrayList<>();
    // Define the additional attribute list
    additionalAttributes.add(new Attribute("_START", Attribute.Type.LONG));
    additionalAttributes.add(new Attribute("_END", Attribute.Type.LONG));
    // Get the table definition. The definitions of all the tables used to persist aggregates
    // are similar, so it's enough to get the definition from one table.
    AbstractDefinition tableDefinition = ((Table) aggregationTables.values().toArray()[0]).getTableDefinition();
    // Alter the existing meta stream event, or create a new one if none exists.
    // After this call, the original MatchingMetaInfoHolder's meta stream event is altered.
    MetaStreamEvent newMetaStreamEventWithStartEnd = createNewMetaStreamEventWithStartEnd(matchingMetaInfoHolder,
            additionalAttributes);
    MatchingMetaInfoHolder alteredMatchingMetaInfoHolder = null;
    // Alter the meta info holder to contain both the stream event and the aggregate when this is a store query
    if (matchingMetaInfoHolder.getMetaStateEvent().getMetaStreamEvents().length == 1) {
        matchingMetaInfoHolder = alterMetaInfoHolderForStoreQuery(newMetaStreamEventWithStartEnd,
                matchingMetaInfoHolder);
        alteredMatchingMetaInfoHolder = matchingMetaInfoHolder;
    }
    // Create a new MatchingMetaInfoHolder containing newMetaStreamEventWithStartEnd and the table meta event
    MatchingMetaInfoHolder streamTableMetaInfoHolderWithStartEnd = createNewStreamTableMetaInfoHolder(
            newMetaStreamEventWithStartEnd, tableDefinition);
    // Create the per-expression executor
    ExpressionExecutor perExpressionExecutor = ExpressionParser.parseExpression(per,
            matchingMetaInfoHolder.getMetaStateEvent(), matchingMetaInfoHolder.getCurrentState(), tableMap,
            variableExpressionExecutors, siddhiAppContext, false, 0, queryName);
    if (perExpressionExecutor.getReturnType() != Attribute.Type.STRING) {
        throw new SiddhiAppCreationException("Query " + queryName + "'s per value expected a string but found "
                + perExpressionExecutor.getReturnType(), per.getQueryContextStartIndex(),
                per.getQueryContextEndIndex());
    }
    // Create the within expression: _START <= AGG_TIMESTAMP < _END
    Expression withinExpression;
    Expression start = Expression.variable(additionalAttributes.get(0).getName());
    Expression end = Expression.variable(additionalAttributes.get(1).getName());
    Expression compareWithStartTime = Compare.compare(start, Compare.Operator.LESS_THAN_EQUAL,
            Expression.variable("AGG_TIMESTAMP"));
    Expression compareWithEndTime = Compare.compare(Expression.variable("AGG_TIMESTAMP"),
            Compare.Operator.LESS_THAN, end);
    withinExpression = Expression.and(compareWithStartTime, compareWithEndTime);
    // Create the start and end time expression
    Expression startEndTimeExpression;
    if (within.getTimeRange().size() == 1) {
        startEndTimeExpression = new AttributeFunction("incrementalAggregator", "startTimeEndTime",
                within.getTimeRange().get(0));
    } else {
        // within.getTimeRange().size() == 2
        startEndTimeExpression = new AttributeFunction("incrementalAggregator", "startTimeEndTime",
                within.getTimeRange().get(0), within.getTimeRange().get(1));
    }
    ExpressionExecutor startTimeEndTimeExpressionExecutor = ExpressionParser.parseExpression(startEndTimeExpression,
            matchingMetaInfoHolder.getMetaStateEvent(), matchingMetaInfoHolder.getCurrentState(), tableMap,
            variableExpressionExecutors, siddhiAppContext, false, 0, queryName);
    // These compiled conditions check whether the aggregates in the tables are within the given duration.
    for (Map.Entry<TimePeriod.Duration, Table> entry : aggregationTables.entrySet()) {
        CompiledCondition withinTableCompileCondition = entry.getValue().compileCondition(withinExpression,
                streamTableMetaInfoHolderWithStartEnd, siddhiAppContext, variableExpressionExecutors, tableMap,
                queryName);
        withinTableCompiledConditions.put(entry.getKey(), withinTableCompileCondition);
    }
    // Create the compiled condition for in-memory data. It checks whether the running
    // aggregates (in-memory data) are within the given duration.
    withinInMemoryCompileCondition = OperatorParser.constructOperator(new ComplexEventChunk<>(true),
            withinExpression, streamTableMetaInfoHolderWithStartEnd, siddhiAppContext, variableExpressionExecutors,
            tableMap, queryName);
    // On compiled condition.
    // After finding all the aggregates belonging to the within duration, the final on condition (given as
    // "on stream1.name == aggregator.nickName ..." in the join query) must be executed on that data.
    // This condition is compiled for that purpose.
    onCompiledCondition = OperatorParser.constructOperator(new ComplexEventChunk<>(true), expression,
            matchingMetaInfoHolder, siddhiAppContext, variableExpressionExecutors, tableMap, queryName);
    return new IncrementalAggregateCompileCondition(withinTableCompiledConditions, withinInMemoryCompileCondition,
            onCompiledCondition, tableMetaStreamEvent, aggregateMetaSteamEvent, additionalAttributes,
            alteredMatchingMetaInfoHolder, perExpressionExecutor, startTimeEndTimeExpressionExecutor);
}
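The within clause built above is just the range check _START <= AGG_TIMESTAMP < _END expressed through Siddhi's query API. Isolated as a standalone sketch (all names taken from the method above):

Expression start = Expression.variable("_START");
Expression end = Expression.variable("_END");
Expression lowerBound = Compare.compare(start, Compare.Operator.LESS_THAN_EQUAL,
        Expression.variable("AGG_TIMESTAMP"));
Expression upperBound = Compare.compare(Expression.variable("AGG_TIMESTAMP"),
        Compare.Operator.LESS_THAN, end);
Expression withinExpression = Expression.and(lowerBound, upperBound);
// withinExpression now represents: _START <= AGG_TIMESTAMP AND AGG_TIMESTAMP < _END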