Use of org.wso2.siddhi.core.event.state.StateEvent in project siddhi by wso2.
Class EventChunkOperator, method find.
@Override
public StreamEvent find(StateEvent matchingEvent, Object storeEvents, StreamEventCloner storeEventCloner) {
    ComplexEventChunk<StreamEvent> storeEventChunk = (ComplexEventChunk<StreamEvent>) storeEvents;
    ComplexEventChunk<StreamEvent> returnEventChunk = new ComplexEventChunk<StreamEvent>(false);
    storeEventChunk.reset();
    while (storeEventChunk.hasNext()) {
        StreamEvent storeEvent = storeEventChunk.next();
        // Attach the store event to the matching event so the compiled condition can evaluate it.
        matchingEvent.setEvent(storeEventPosition, storeEvent);
        if ((Boolean) expressionExecutor.execute(matchingEvent)) {
            // Collect a copy so callers cannot mutate the stored event.
            returnEventChunk.add(storeEventCloner.copyStreamEvent(storeEvent));
        }
        matchingEvent.setEvent(storeEventPosition, null);
    }
    return returnEventChunk.getFirst();
}
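The matches come back as a linked chain of StreamEvents, so getFirst() returns the head of the chain (or null when nothing matched). Below is a small self-contained sketch, not taken from the Siddhi code base, of the same iterate-filter-copy-collect pattern that find() applies, built on a hand-made chunk; the even-number test stands in for the compiled condition.

import org.wso2.siddhi.core.event.ComplexEventChunk;
import org.wso2.siddhi.core.event.stream.StreamEvent;

// Illustrative sketch only: mirrors find()'s pattern on a hand-built chunk.
public class FindPatternSketch {

    public static void main(String[] args) {
        ComplexEventChunk<StreamEvent> store = new ComplexEventChunk<>(false);
        for (int i = 0; i < 5; i++) {
            StreamEvent event = new StreamEvent(0, 0, 1);   // no window data, one output attribute
            event.setOutputData(i, 0);
            store.add(event);
        }

        ComplexEventChunk<StreamEvent> matches = new ComplexEventChunk<>(false);
        store.reset();
        while (store.hasNext()) {
            StreamEvent event = store.next();
            if ((Integer) event.getOutputData()[0] % 2 == 0) {   // stand-in for the compiled condition
                // Copy before collecting, as find() does via the StreamEventCloner;
                // adding the stored event directly would re-link its next pointer.
                StreamEvent copy = new StreamEvent(0, 0, 1);
                copy.setOutputData(event.getOutputData()[0], 0);
                matches.add(copy);
            }
        }

        // Like find(), hand back the head of the matched chain (null when nothing matched).
        StreamEvent first = matches.getFirst();
        System.out.println(first == null ? "no matches" : "first match: " + first.getOutputData()[0]);
    }
}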
Use of org.wso2.siddhi.core.event.state.StateEvent in project siddhi by wso2.
Class EventChunkOperator, method delete.
@Override
public void delete(ComplexEventChunk<StateEvent> deletingEventChunk, Object storeEvents) {
    ComplexEventChunk<StreamEvent> storeEventChunk = (ComplexEventChunk<StreamEvent>) storeEvents;
    deletingEventChunk.reset();
    while (deletingEventChunk.hasNext()) {
        StateEvent deletingEvent = deletingEventChunk.next();
        try {
            storeEventChunk.reset();
            while (storeEventChunk.hasNext()) {
                StreamEvent storeEvent = storeEventChunk.next();
                deletingEvent.setEvent(storeEventPosition, storeEvent);
                if ((Boolean) expressionExecutor.execute(deletingEvent)) {
                    // Unlink the current store event from the chunk.
                    storeEventChunk.remove();
                }
            }
        } finally {
            // Always detach the store event, even if the executor throws.
            deletingEvent.setEvent(storeEventPosition, null);
        }
    }
}
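ComplexEventChunk.remove() unlinks the event returned by the most recent next() call, which is what lets delete() prune the store in place while scanning it; the try/finally ensures the matching StateEvent never keeps a dangling reference to a store event. A small self-contained illustration of the remove-while-iterating pattern (a sketch, not project code; the even-number test stands in for the delete condition):

import org.wso2.siddhi.core.event.ComplexEventChunk;
import org.wso2.siddhi.core.event.stream.StreamEvent;

// Illustrative sketch only: the remove-while-iterating pattern that delete() relies on.
public class DeletePatternSketch {

    public static void main(String[] args) {
        ComplexEventChunk<StreamEvent> store = new ComplexEventChunk<>(false);
        for (int i = 0; i < 6; i++) {
            StreamEvent event = new StreamEvent(0, 0, 1);
            event.setOutputData(i, 0);
            store.add(event);
        }

        store.reset();
        while (store.hasNext()) {
            StreamEvent event = store.next();
            if ((Integer) event.getOutputData()[0] % 2 == 0) {   // stand-in for the delete condition
                store.remove();   // unlinks the event returned by the last next() call
            }
        }

        store.reset();
        while (store.hasNext()) {
            System.out.println(store.next().getOutputData()[0]);   // prints 1, 3, 5
        }
    }
}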
Use of org.wso2.siddhi.core.event.state.StateEvent in project siddhi by wso2.
Class IncrementalAggregateCompileCondition, method find.
public StreamEvent find(StateEvent matchingEvent, AggregationDefinition aggregationDefinition,
                        Map<TimePeriod.Duration, IncrementalExecutor> incrementalExecutorMap,
                        Map<TimePeriod.Duration, Table> aggregationTables,
                        List<TimePeriod.Duration> incrementalDurations,
                        List<ExpressionExecutor> baseExecutors, ExpressionExecutor timestampExecutor,
                        List<ExpressionExecutor> outputExpressionExecutors,
                        SiddhiAppContext siddhiAppContext) {
    ComplexEventChunk<StreamEvent> complexEventChunkToHoldWithinMatches = new ComplexEventChunk<>(true);
    // Retrieve the "per" value (the granularity requested by the caller).
    String perValueAsString = perExpressionExecutor.execute(matchingEvent).toString();
    TimePeriod.Duration perValue = TimePeriod.Duration.valueOf(perValueAsString.toUpperCase());
    if (!incrementalExecutorMap.keySet().contains(perValue)) {
        throw new SiddhiAppRuntimeException("The aggregate values for " + perValue.toString()
                + " granularity cannot be provided since aggregation definition "
                + aggregationDefinition.getId() + " does not contain " + perValue.toString() + " duration");
    }
    Table tableForPerDuration = aggregationTables.get(perValue);
    Long[] startTimeEndTime = (Long[]) startTimeEndTimeExpressionExecutor.execute(matchingEvent);
    if (startTimeEndTime == null) {
        throw new SiddhiAppRuntimeException("Start and end times for within duration cannot be retrieved");
    }
    complexEventPopulater.populateComplexEvent(matchingEvent.getStreamEvent(0), startTimeEndTime);
    // Get all the aggregates within the given duration, from the table corresponding to the "per" duration.
    StreamEvent withinMatchFromPersistedEvents =
            tableForPerDuration.find(matchingEvent, withinTableCompiledConditions.get(perValue));
    complexEventChunkToHoldWithinMatches.add(withinMatchFromPersistedEvents);
    // Optimization step.
    // Get the newest and oldest event timestamps from in-memory data, and check whether at least
    // one of those timestamps falls outside the given time range. If so, there is no need to
    // iterate through the in-memory data.
    long oldestInMemoryEventTimestamp =
            getOldestInMemoryEventTimestamp(incrementalExecutorMap, incrementalDurations, perValue);
    long newestInMemoryEventTimestamp =
            getNewestInMemoryEventTimestamp(incrementalExecutorMap, incrementalDurations, perValue);
    if (requiresAggregatingInMemoryData(newestInMemoryEventTimestamp, oldestInMemoryEventTimestamp,
            startTimeEndTime)) {
        IncrementalDataAggregator incrementalDataAggregator = new IncrementalDataAggregator(
                incrementalDurations, perValue, baseExecutors, timestampExecutor,
                tableMetaStreamEvent, siddhiAppContext);
        // Aggregate the in-memory data and create an event chunk out of it.
        ComplexEventChunk<StreamEvent> aggregatedInMemoryEventChunk =
                incrementalDataAggregator.aggregateInMemoryData(incrementalExecutorMap);
        // Get the in-memory aggregate data that falls within the given duration.
        StreamEvent withinMatchFromInMemory = ((Operator) inMemoryStoreCompileCondition)
                .find(matchingEvent, aggregatedInMemoryEventChunk, tableEventCloner);
        complexEventChunkToHoldWithinMatches.add(withinMatchFromInMemory);
    }
    // Build the final event chunk from the data within the given duration. This event chunk contains
    // the values in the select clause of the aggregation definition.
    ComplexEventChunk<StreamEvent> aggregateSelectionComplexEventChunk = createAggregateSelectionEventChunk(
            complexEventChunkToHoldWithinMatches, outputExpressionExecutors);
    // Execute the "on" compiled condition.
    return ((Operator) onCompiledCondition).find(matchingEvent, aggregateSelectionComplexEventChunk,
            aggregateEventCloner);
}
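The in-memory pass is gated by a range-overlap test: in-memory aggregates only matter if their timestamp range intersects the requested window. The body of requiresAggregatingInMemoryData is not shown above; the following is only a plausible sketch of such a check, under the assumption (not confirmed by the source) that a timestamp of -1 signals the absence of in-memory data and that the window is half-open.

// Hypothetical sketch; the real Siddhi helper may differ in details.
private static boolean requiresAggregatingInMemoryData(long newestInMemoryEventTimestamp,
                                                       long oldestInMemoryEventTimestamp,
                                                       Long[] startTimeEndTime) {
    if (newestInMemoryEventTimestamp == -1 || oldestInMemoryEventTimestamp == -1) {
        return false;   // assumption: -1 means no in-memory data exists for the relevant durations
    }
    long startTime = startTimeEndTime[0];   // inclusive start of the "within" window
    long endTime = startTimeEndTime[1];     // exclusive end of the "within" window
    // Aggregate in-memory data only if its timestamp range overlaps the requested window,
    // i.e. unless all in-memory events fall entirely before or entirely after it.
    return newestInMemoryEventTimestamp >= startTime && oldestInMemoryEventTimestamp < endTime;
}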
Use of org.wso2.siddhi.core.event.state.StateEvent in project siddhi by wso2.
Class IndexOperator, method tryUpdate.
@Override
public ComplexEventChunk<StreamEvent> tryUpdate(ComplexEventChunk<StateEvent> updatingOrAddingEventChunk,
                                                Object storeEvents,
                                                InMemoryCompiledUpdateSet compiledUpdateSet,
                                                AddingStreamEventExtractor addingStreamEventExtractor) {
    ComplexEventChunk<StreamEvent> failedEventChunk =
            new ComplexEventChunk<StreamEvent>(updatingOrAddingEventChunk.isBatch());
    updatingOrAddingEventChunk.reset();
    while (updatingOrAddingEventChunk.hasNext()) {
        StateEvent overwritingOrAddingEvent = updatingOrAddingEventChunk.next();
        StreamEvent streamEvents = collectionExecutor.find(overwritingOrAddingEvent,
                (IndexedEventHolder) storeEvents, null);
        ComplexEventChunk<StreamEvent> foundEventChunk = new ComplexEventChunk<>(false);
        foundEventChunk.add(streamEvents);
        if (foundEventChunk.getFirst() != null) {
            // Handles the case where an indexed attribute is part of the update but its value
            // has not changed, reducing the number of passes needed to update the events.
            update((IndexedEventHolder) storeEvents, compiledUpdateSet, overwritingOrAddingEvent,
                    foundEventChunk);
        } else {
            // No stored event matched; hand the event back so the caller can add it instead.
            failedEventChunk.add(addingStreamEventExtractor.getAddingStreamEvent(overwritingOrAddingEvent));
        }
    }
    return failedEventChunk;
}
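Returning the unmatched events is what enables update-or-insert semantics one level up. A hypothetical caller-side sketch of that flow follows; `operator`, `updatingChunk`, `eventHolder`, `compiledUpdateSet`, and `extractor` are illustrative names assumed to be set up elsewhere, and only the control flow is shown.

// Hypothetical update-or-insert flow built on tryUpdate(); names are illustrative.
ComplexEventChunk<StreamEvent> notUpdated =
        operator.tryUpdate(updatingChunk, eventHolder, compiledUpdateSet, extractor);
if (notUpdated.getFirst() != null) {
    // Events that matched no stored event come back; add them to the store instead.
    eventHolder.add(notUpdated);
}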
Use of org.wso2.siddhi.core.event.state.StateEvent in project siddhi by wso2.
Class IndexOperator, method update.
private void update(IndexedEventHolder storeEvents, InMemoryCompiledUpdateSet compiledUpdateSet,
                    StateEvent overwritingOrAddingEvent, ComplexEventChunk<StreamEvent> foundEventChunk) {
    // Handles the case where an indexed attribute is part of the update but its value has not
    // changed, reducing the number of passes needed to update the events.
    boolean doDeleteUpdate = false;
    boolean fail = false;
    for (Map.Entry<Integer, ExpressionExecutor> entry : compiledUpdateSet.getExpressionExecutorMap().entrySet()) {
        if (doDeleteUpdate || fail) {
            break;
        }
        if (storeEvents.isAttributeIndexed(entry.getKey())) {
            // TODO: decide how much checking is needed before falling back to delete-then-update.
            foundEventChunk.reset();
            Set<Object> keys = null;
            PrimaryKeyReferenceHolder[] primaryKeyReferenceHolders = storeEvents.getPrimaryKeyReferenceHolders();
            if (primaryKeyReferenceHolders != null && primaryKeyReferenceHolders.length == 1
                    && entry.getKey() == primaryKeyReferenceHolders[0].getPrimaryKeyPosition()) {
                keys = new HashSet<>(storeEvents.getAllPrimaryKeyValues());
            }
            while (foundEventChunk.hasNext()) {
                StreamEvent streamEvent = foundEventChunk.next();
                Object updatingData = entry.getValue().execute(overwritingOrAddingEvent);
                Object storeEventData = streamEvent.getOutputData()[entry.getKey()];
                if (updatingData != null && storeEventData != null && !updatingData.equals(storeEventData)) {
                    doDeleteUpdate = true;
                    if (keys == null || keys.size() == 0) {
                        break;
                    } else {
                        keys.remove(storeEventData);
                        if (!keys.add(updatingData)) {
                            log.error("Update failed for event :" + overwritingOrAddingEvent + ", as there is "
                                    + "already an event stored with primary key '" + updatingData
                                    + "' at '" + queryName + "'");
                            fail = true;
                            break;
                        }
                    }
                }
            }
        }
    }
    foundEventChunk.reset();
    if (!fail) {
        if (doDeleteUpdate) {
            collectionExecutor.delete(overwritingOrAddingEvent, storeEvents);
            ComplexEventChunk<StreamEvent> toUpdateEventChunk = new ComplexEventChunk<StreamEvent>(false);
            while (foundEventChunk.hasNext()) {
                StreamEvent streamEvent = foundEventChunk.next();
                foundEventChunk.remove();
                // Detach the event to restore the chained state to normal.
                streamEvent.setNext(null);
                for (Map.Entry<Integer, ExpressionExecutor> entry
                        : compiledUpdateSet.getExpressionExecutorMap().entrySet()) {
                    streamEvent.setOutputData(entry.getValue().execute(overwritingOrAddingEvent), entry.getKey());
                }
                toUpdateEventChunk.add(streamEvent);
            }
            storeEvents.add(toUpdateEventChunk);
        } else {
            while (foundEventChunk.hasNext()) {
                StreamEvent streamEvent = foundEventChunk.next();
                // Detach the event to restore the chained state to normal.
                streamEvent.setNext(null);
                for (Map.Entry<Integer, ExpressionExecutor> entry
                        : compiledUpdateSet.getExpressionExecutorMap().entrySet()) {
                    streamEvent.setOutputData(entry.getValue().execute(overwritingOrAddingEvent), entry.getKey());
                }
            }
        }
    }
}
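The primary-key guard works because HashSet.add returns false when the element is already present; removing the event's own old key first keeps a re-key to the same value, or an update of a different attribute, from being misread as a collision. A standalone illustration of that idiom:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Standalone illustration of the keys.remove()/keys.add() collision check.
public class PrimaryKeyGuardSketch {

    public static void main(String[] args) {
        // Primary keys currently stored, mirroring storeEvents.getAllPrimaryKeyValues().
        Set<Object> keys = new HashSet<>(Arrays.asList("A", "B", "C"));

        // Re-keying the event "B" to "C": drop its own old key, then try to claim the new one.
        keys.remove("B");
        System.out.println(keys.add("C"));   // false -> "C" belongs to another event, so the update must fail

        // Re-keying "A" to "D" succeeds, since "D" is free.
        keys.remove("A");
        System.out.println(keys.add("D"));   // true
    }
}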