Use of io.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class AggregationParser, method parse.
public static AggregationRuntime parse(AggregationDefinition aggregationDefinition, SiddhiAppContext siddhiAppContext,
                                       Map<String, AbstractDefinition> streamDefinitionMap,
                                       Map<String, AbstractDefinition> tableDefinitionMap,
                                       Map<String, AbstractDefinition> windowDefinitionMap,
                                       Map<String, AbstractDefinition> aggregationDefinitionMap,
                                       Map<String, Table> tableMap, Map<String, Window> windowMap,
                                       Map<String, AggregationRuntime> aggregationMap,
                                       SiddhiAppRuntimeBuilder siddhiAppRuntimeBuilder) {
    // set timeZone for aggregation
    String timeZone = getTimeZone(siddhiAppContext);
    boolean isDebugEnabled = log.isDebugEnabled();
    boolean isPersistedAggregation = false;
    boolean isReadOnly = false;
    if (!validateTimeZone(timeZone)) {
        throw new SiddhiAppCreationException("Given timeZone '" + timeZone
                + "' for aggregations is invalid. Please provide a valid time zone.");
    }
    // get aggregation name
    String aggregatorName = aggregationDefinition.getId();
    if (isDebugEnabled) {
        log.debug("Incremental aggregation initialization process started for aggregation " + aggregatorName);
    }
    Annotation aggregationProperties = AnnotationHelper.getAnnotation(ANNOTATION_PERSISTED_AGGREGATION,
            aggregationDefinition.getAnnotations());
    if (aggregationProperties != null) {
        String persistedAggregationMode = aggregationProperties.getElement(ANNOTATION_ELEMENT_ENABLE);
        isPersistedAggregation = persistedAggregationMode == null || Boolean.parseBoolean(persistedAggregationMode);
        String readOnlyMode = aggregationProperties.getElement(ANNOTATION_ELEMENT_IS_READ_ONLY);
        isReadOnly = Boolean.parseBoolean(readOnlyMode);
    }
    if (isPersistedAggregation) {
        aggregationDefinition.getSelector().getSelectionList().forEach(outputAttribute -> {
            if (outputAttribute.getExpression() instanceof AttributeFunction
                    && ((AttributeFunction) outputAttribute.getExpression()).getName().equals("distinctCount")) {
                throw new SiddhiAppCreationException("Aggregation function 'distinctCount' is not supported "
                        + "with persisted aggregation type. Please use default incremental aggregation.");
            }
        });
    }
    if (isDebugEnabled) {
        log.debug("Aggregation mode is defined as " + (isPersistedAggregation ? "persisted" : "inMemory")
                + " for aggregation " + aggregatorName);
    }
    if (aggregationDefinition.getTimePeriod() == null) {
        throw new SiddhiAppCreationException("Aggregation Definition '" + aggregationDefinition.getId()
                + "'s timePeriod is null. Hence, can't create the siddhi app '" + siddhiAppContext.getName() + "'",
                aggregationDefinition.getQueryContextStartIndex(), aggregationDefinition.getQueryContextEndIndex());
    }
    if (aggregationDefinition.getSelector() == null) {
        throw new SiddhiAppCreationException("Aggregation Definition '" + aggregationDefinition.getId()
                + "'s selection is not defined. Hence, can't create the siddhi app '"
                + siddhiAppContext.getName() + "'",
                aggregationDefinition.getQueryContextStartIndex(), aggregationDefinition.getQueryContextEndIndex());
    }
    if (streamDefinitionMap.get(aggregationDefinition.getBasicSingleInputStream().getStreamId()) == null) {
        throw new SiddhiAppCreationException("Stream "
                + aggregationDefinition.getBasicSingleInputStream().getStreamId() + " has not been defined");
    }
    // Check whether the user has defined primary keys via the @store annotation on the aggregation;
    // if so, an error is thrown.
    Element userDefinedPrimaryKey = AnnotationHelper.getAnnotationElement(SiddhiConstants.ANNOTATION_PRIMARY_KEY,
            null, aggregationDefinition.getAnnotations());
    if (userDefinedPrimaryKey != null) {
        throw new SiddhiAppCreationException("Aggregation Tables have predefined primary key, but found '"
                + userDefinedPrimaryKey.getValue() + "' primary key defined through annotation.");
    }
    try {
        List<VariableExpressionExecutor> incomingVariableExpressionExecutors = new ArrayList<>();
        SiddhiQueryContext siddhiQueryContext = new SiddhiQueryContext(siddhiAppContext, aggregatorName);
        StreamRuntime streamRuntime = InputStreamParser.parse(aggregationDefinition.getBasicSingleInputStream(),
                null, streamDefinitionMap, tableDefinitionMap, windowDefinitionMap, aggregationDefinitionMap,
                tableMap, windowMap, aggregationMap, incomingVariableExpressionExecutors, false,
                siddhiQueryContext);
        // Get the original meta for later use.
        MetaStreamEvent incomingMetaStreamEvent = (MetaStreamEvent) streamRuntime.getMetaComplexEvent();
        // Create a new meta stream event.
        // It must hold the timestamp, the group-by attributes (if given) and the incremental attributes in the
        // onAfterWindowData array.
        // Example format: AGG_TIMESTAMP, groupByAttribute1, groupByAttribute2, AGG_incAttribute1, AGG_incAttribute2
        // AGG_incAttribute1 and AGG_incAttribute2 carry the same attribute names as in
        // finalListOfIncrementalAttributes.
        // Enter data as onAfterWindowData.
        incomingMetaStreamEvent.initializeOnAfterWindowData();
        // Sorted list of all aggregation durations.
        List<TimePeriod.Duration> aggregationDurations =
                getSortedPeriods(aggregationDefinition.getTimePeriod(), isPersistedAggregation);
        // Incoming executors are the expression executors for the timestamp, the external timestamp (if used),
        // the group-by attributes (if given) and the incremental attributes.
        List<ExpressionExecutor> incomingExpressionExecutors = new ArrayList<>();
        List<IncrementalAttributeAggregator> incrementalAttributeAggregators = new ArrayList<>();
        List<Variable> groupByVariableList = aggregationDefinition.getSelector().getGroupByList();
        // Expressions to compute the final aggregate outputs, e.g. for avg the expression is a divide
        // expression: AGG_SUM / AGG_COUNT.
        List<Expression> outputExpressions = new ArrayList<>();
        boolean isProcessingOnExternalTime = aggregationDefinition.getAggregateAttribute() != null;
        boolean isGroupBy = aggregationDefinition.getSelector().getGroupByList().size() != 0;
        final boolean isDistributed;
        ConfigManager configManager = siddhiAppContext.getSiddhiContext().getConfigManager();
        final String shardId = configManager.extractProperty("shardId");
        boolean enablePartitioning = false;
        // Check whether the setup is an active-active (distributed) deployment by checking the availability
        // of the partitionById config.
        Annotation partitionById = AnnotationHelper.getAnnotation(ANNOTATION_PARTITION_BY_ID,
                aggregationDefinition.getAnnotations());
        if (partitionById != null) {
            String enableElement = partitionById.getElement(ANNOTATION_ELEMENT_ENABLE);
            enablePartitioning = enableElement == null || Boolean.parseBoolean(enableElement);
        }
        boolean shouldPartitionById = Boolean.parseBoolean(configManager.extractProperty("partitionById"));
        if (enablePartitioning || shouldPartitionById) {
            if (shardId == null) {
                throw new SiddhiAppCreationException("Configuration 'shardId' not provided for @partitionById"
                        + " annotation");
            }
            isDistributed = true;
        } else {
            isDistributed = false;
        }
        if (isDebugEnabled) {
            log.debug("Distributed aggregation processing is " + (isDistributed ? "enabled" : "disabled")
                    + " in " + aggregatorName + " aggregation");
        }
        populateIncomingAggregatorsAndExecutors(aggregationDefinition, siddhiQueryContext, tableMap,
                incomingVariableExpressionExecutors, incomingMetaStreamEvent, incomingExpressionExecutors,
                incrementalAttributeAggregators, groupByVariableList, outputExpressions,
                isProcessingOnExternalTime, isDistributed, shardId);
        // Check whether populateIncomingAggregatorsAndExecutors added the latest-event timestamp column
        // as the last output attribute.
        boolean isLatestEventColAdded = incomingMetaStreamEvent.getOutputData()
                .get(incomingMetaStreamEvent.getOutputData().size() - 1).getName().equals(AGG_LAST_TIMESTAMP_COL);
        int baseAggregatorBeginIndex = incomingMetaStreamEvent.getOutputData().size();
        List<Expression> finalBaseExpressions = new ArrayList<>();
        boolean isOptimisedLookup = populateFinalBaseAggregators(tableMap, incomingVariableExpressionExecutors,
                incomingMetaStreamEvent, incomingExpressionExecutors, incrementalAttributeAggregators,
                siddhiQueryContext, finalBaseExpressions);
        if (isDebugEnabled) {
            log.debug("Optimised lookup mode is " + (isOptimisedLookup ? "enabled" : "disabled")
                    + " for aggregation " + aggregatorName);
        }
        // Create an intermediate stream with the aggregated stream and the output variables extracted above.
        StreamDefinition incomingOutputStreamDefinition = StreamDefinition.id(aggregatorName + "_intermediate");
        incomingOutputStreamDefinition.setQueryContextStartIndex(aggregationDefinition.getQueryContextStartIndex());
        incomingOutputStreamDefinition.setQueryContextEndIndex(aggregationDefinition.getQueryContextEndIndex());
        MetaStreamEvent processedMetaStreamEvent = new MetaStreamEvent();
        for (Attribute attribute : incomingMetaStreamEvent.getOutputData()) {
            incomingOutputStreamDefinition.attribute(attribute.getName(), attribute.getType());
            processedMetaStreamEvent.addOutputData(attribute);
        }
        incomingMetaStreamEvent.setOutputDefinition(incomingOutputStreamDefinition);
        processedMetaStreamEvent.addInputDefinition(incomingOutputStreamDefinition);
        processedMetaStreamEvent.setOutputDefinition(incomingOutputStreamDefinition);
        // Expression executors for the processing meta stream event.
        List<VariableExpressionExecutor> processVariableExpressionExecutors = new ArrayList<>();
        Map<TimePeriod.Duration, List<ExpressionExecutor>> processExpressionExecutorsMap = new HashMap<>();
        Map<TimePeriod.Duration, List<ExpressionExecutor>> processExpressionExecutorsMapForFind = new HashMap<>();
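        // A second, independent set of per-duration executors is built for find (on-demand query)
        // operations, so that lookups do not reuse the stateful executor instances of the ingestion path.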
        aggregationDurations.forEach(incrementalDuration -> {
            processExpressionExecutorsMap.put(incrementalDuration,
                    constructProcessExpressionExecutors(siddhiQueryContext, tableMap, baseAggregatorBeginIndex,
                            finalBaseExpressions, incomingOutputStreamDefinition, processedMetaStreamEvent,
                            processVariableExpressionExecutors, isProcessingOnExternalTime, incrementalDuration,
                            isDistributed, shardId, isLatestEventColAdded));
            processExpressionExecutorsMapForFind.put(incrementalDuration,
                    constructProcessExpressionExecutors(siddhiQueryContext, tableMap, baseAggregatorBeginIndex,
                            finalBaseExpressions, incomingOutputStreamDefinition, processedMetaStreamEvent,
                            processVariableExpressionExecutors, isProcessingOnExternalTime, incrementalDuration,
                            isDistributed, shardId, isLatestEventColAdded));
        });
        ExpressionExecutor shouldUpdateTimestamp = null;
        if (isLatestEventColAdded) {
            Expression shouldUpdateTimestampExp = new Variable(AGG_LAST_TIMESTAMP_COL);
            shouldUpdateTimestamp = ExpressionParser.parseExpression(shouldUpdateTimestampExp,
                    processedMetaStreamEvent, 0, tableMap, processVariableExpressionExecutors, false, 0,
                    ProcessingMode.BATCH, false, siddhiQueryContext);
        }
        List<ExpressionExecutor> outputExpressionExecutors = outputExpressions.stream()
                .map(expression -> ExpressionParser.parseExpression(expression, processedMetaStreamEvent, 0,
                        tableMap, processVariableExpressionExecutors, isGroupBy, 0, ProcessingMode.BATCH,
                        false, siddhiQueryContext))
                .collect(Collectors.toList());
        // Create the group-by key generators, one per duration.
        Map<TimePeriod.Duration, GroupByKeyGenerator> groupByKeyGeneratorMap = new HashMap<>();
        aggregationDurations.forEach(incrementalDuration -> {
            GroupByKeyGenerator groupByKeyGenerator = null;
            if (isProcessingOnExternalTime || isGroupBy) {
                List<Expression> groupByExpressionList = new ArrayList<>();
                if (isProcessingOnExternalTime) {
                    Expression externalTimestampExpression = AttributeFunction.function(
                            "incrementalAggregator", "getAggregationStartTime",
                            new Variable(AGG_EXTERNAL_TIMESTAMP_COL),
                            new StringConstant(incrementalDuration.name()));
                    groupByExpressionList.add(externalTimestampExpression);
                }
                groupByExpressionList.addAll(groupByVariableList.stream()
                        .map(groupByVariable -> (Expression) groupByVariable).collect(Collectors.toList()));
                groupByKeyGenerator = new GroupByKeyGenerator(groupByExpressionList, processedMetaStreamEvent,
                        SiddhiConstants.UNKNOWN_STATE, tableMap, processVariableExpressionExecutors,
                        siddhiQueryContext);
            }
            groupByKeyGeneratorMap.put(incrementalDuration, groupByKeyGenerator);
        });
        // Group-by key generators used when reading (find queries).
        Map<TimePeriod.Duration, GroupByKeyGenerator> groupByKeyGeneratorMapForReading = new HashMap<>();
        if (isDistributed && !isProcessingOnExternalTime) {
            aggregationDurations.forEach(incrementalDuration -> {
                List<Expression> groupByExpressionList = new ArrayList<>();
                Expression timestampExpression = AttributeFunction.function(
                        "incrementalAggregator", "getAggregationStartTime",
                        new Variable(AGG_START_TIMESTAMP_COL), new StringConstant(incrementalDuration.name()));
                groupByExpressionList.add(timestampExpression);
                if (isGroupBy) {
                    groupByExpressionList.addAll(groupByVariableList.stream()
                            .map(groupByVariable -> (Expression) groupByVariable).collect(Collectors.toList()));
                }
                GroupByKeyGenerator groupByKeyGenerator = new GroupByKeyGenerator(groupByExpressionList,
                        processedMetaStreamEvent, SiddhiConstants.UNKNOWN_STATE, tableMap,
                        processVariableExpressionExecutors, siddhiQueryContext);
                groupByKeyGeneratorMapForReading.put(incrementalDuration, groupByKeyGenerator);
            });
        } else {
            groupByKeyGeneratorMapForReading.putAll(groupByKeyGeneratorMap);
        }
        // Create a new scheduler.
        EntryValveExecutor entryValveExecutor = new EntryValveExecutor(siddhiAppContext);
        LockWrapper lockWrapper = new LockWrapper(aggregatorName);
        lockWrapper.setLock(new ReentrantLock());
        Scheduler scheduler = SchedulerParser.parse(entryValveExecutor, siddhiQueryContext);
        scheduler.init(lockWrapper, aggregatorName);
        scheduler.setStreamEventFactory(new StreamEventFactory(processedMetaStreamEvent));
        QueryParserHelper.reduceMetaComplexEvent(incomingMetaStreamEvent);
        QueryParserHelper.reduceMetaComplexEvent(processedMetaStreamEvent);
        QueryParserHelper.updateVariablePosition(incomingMetaStreamEvent, incomingVariableExpressionExecutors);
        QueryParserHelper.updateVariablePosition(processedMetaStreamEvent, processVariableExpressionExecutors);
        Map<TimePeriod.Duration, Table> aggregationTables = initDefaultTables(aggregatorName,
                aggregationDurations, processedMetaStreamEvent.getOutputStreamDefinition(),
                siddhiAppRuntimeBuilder, aggregationDefinition.getAnnotations(), groupByVariableList,
                isProcessingOnExternalTime, isDistributed);
        Map<TimePeriod.Duration, Executor> incrementalExecutorMap = buildIncrementalExecutors(
                processedMetaStreamEvent, processExpressionExecutorsMap, groupByKeyGeneratorMap,
                aggregationDurations, aggregationTables, siddhiQueryContext, aggregatorName,
                shouldUpdateTimestamp, timeZone, isPersistedAggregation, incomingOutputStreamDefinition,
                isDistributed, shardId, isProcessingOnExternalTime, aggregationDefinition, configManager,
                groupByVariableList, isReadOnly);
        isOptimisedLookup = isOptimisedLookup
                && aggregationTables.get(aggregationDurations.get(0)) instanceof QueryableProcessor;
        List<String> groupByVariablesList = groupByVariableList.stream()
                .map(Variable::getAttributeName).collect(Collectors.toList());
        List<OutputAttribute> defaultSelectorList = new ArrayList<>();
        if (isOptimisedLookup) {
            defaultSelectorList = incomingOutputStreamDefinition.getAttributeList().stream()
                    .map(attribute -> new OutputAttribute(new Variable(attribute.getName())))
                    .collect(Collectors.toList());
        }
        IncrementalDataPurger incrementalDataPurger = new IncrementalDataPurger();
        incrementalDataPurger.init(aggregationDefinition, new StreamEventFactory(processedMetaStreamEvent),
                aggregationTables, isProcessingOnExternalTime, siddhiQueryContext, aggregationDurations,
                timeZone, windowMap, aggregationMap);
        // Recreate the in-memory data from the tables.
        IncrementalExecutorsInitialiser incrementalExecutorsInitialiser = new IncrementalExecutorsInitialiser(
                aggregationDurations, aggregationTables, incrementalExecutorMap, isDistributed, shardId,
                siddhiAppContext, processedMetaStreamEvent, tableMap, windowMap, aggregationMap, timeZone,
                isReadOnly, isPersistedAggregation);
        IncrementalExecutor rootIncrementalExecutor =
                (IncrementalExecutor) incrementalExecutorMap.get(aggregationDurations.get(0));
        rootIncrementalExecutor.setScheduler(scheduler);
        // Connect the entry valve to the root incremental executor.
        entryValveExecutor.setNextExecutor(rootIncrementalExecutor);
        QueryParserHelper.initStreamRuntime(streamRuntime, incomingMetaStreamEvent, lockWrapper, aggregatorName);
        LatencyTracker latencyTrackerFind = null;
        LatencyTracker latencyTrackerInsert = null;
        ThroughputTracker throughputTrackerFind = null;
        ThroughputTracker throughputTrackerInsert = null;
        if (siddhiAppContext.getStatisticsManager() != null) {
            latencyTrackerFind = QueryParserHelper.createLatencyTracker(siddhiAppContext,
                    aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_FIND);
            latencyTrackerInsert = QueryParserHelper.createLatencyTracker(siddhiAppContext,
                    aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_INSERT);
            throughputTrackerFind = QueryParserHelper.createThroughputTracker(siddhiAppContext,
                    aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_FIND);
            throughputTrackerInsert = QueryParserHelper.createThroughputTracker(siddhiAppContext,
                    aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_INSERT);
        }
        AggregationRuntime aggregationRuntime = new AggregationRuntime(aggregationDefinition,
                isProcessingOnExternalTime, isDistributed, aggregationDurations, incrementalExecutorMap,
                aggregationTables, outputExpressionExecutors, processExpressionExecutorsMapForFind,
                shouldUpdateTimestamp, groupByKeyGeneratorMapForReading, isOptimisedLookup, defaultSelectorList,
                groupByVariablesList, isLatestEventColAdded, baseAggregatorBeginIndex, finalBaseExpressions,
                incrementalDataPurger, incrementalExecutorsInitialiser, (SingleStreamRuntime) streamRuntime,
                processedMetaStreamEvent, latencyTrackerFind, throughputTrackerFind, timeZone);
        streamRuntime.setCommonProcessor(new IncrementalAggregationProcessor(aggregationRuntime,
                incomingExpressionExecutors, processedMetaStreamEvent, latencyTrackerInsert,
                throughputTrackerInsert, siddhiAppContext));
        return aggregationRuntime;
    } catch (Throwable t) {
        ExceptionUtil.populateQueryContext(t, aggregationDefinition, siddhiAppContext);
        throw t;
    }
}
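For orientation, here is a minimal sketch (the stream and aggregation names are illustrative, not from the Siddhi code base) of a Siddhi app whose define aggregation statement is handled by the parse method above; creating the runtime triggers the parsing:

import io.siddhi.core.SiddhiAppRuntime;
import io.siddhi.core.SiddhiManager;

public class AggregationParseExample {
    public static void main(String[] args) {
        // Illustrative stream and aggregation; 'aggregate by timestamp' makes
        // isProcessingOnExternalTime true in the parser above.
        String app = "define stream TradeStream (symbol string, price double, volume long, timestamp long); "
                + "define aggregation TradeAggregation "
                + "from TradeStream "
                + "select symbol, avg(price) as avgPrice, sum(volume) as totalVolume "
                + "group by symbol "
                + "aggregate by timestamp every sec ... year;";
        SiddhiManager siddhiManager = new SiddhiManager();
        // createSiddhiAppRuntime() parses the app, which invokes AggregationParser.parse()
        // for the aggregation definition.
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(app);
        runtime.start();
        // ... send events, run on-demand queries ...
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}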
Use of io.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class EventHolderPasser, method parse.
public static EventHolder parse(AbstractDefinition tableDefinition, StreamEventFactory tableStreamEventFactory,
                                SiddhiAppContext siddhiAppContext, boolean isCacheTable) {
    ZeroStreamEventConverter eventConverter = new ZeroStreamEventConverter();
    PrimaryKeyReferenceHolder[] primaryKeyReferenceHolders = null;
    Map<String, Integer> indexMetaData = new HashMap<String, Integer>();
    // primary key
    Annotation primaryKeyAnnotation = AnnotationHelper.getAnnotation(SiddhiConstants.ANNOTATION_PRIMARY_KEY,
            tableDefinition.getAnnotations());
    if (primaryKeyAnnotation != null) {
        if (primaryKeyAnnotation.getElements().size() == 0) {
            throw new SiddhiAppValidationException(SiddhiConstants.ANNOTATION_PRIMARY_KEY
                    + " annotation contains no elements, at '" + tableDefinition.getId() + "'");
        }
        primaryKeyReferenceHolders = primaryKeyAnnotation.getElements().stream()
                .map(element -> element.getValue().trim())
                .map(key -> new PrimaryKeyReferenceHolder(key, tableDefinition.getAttributePosition(key)))
                .toArray(PrimaryKeyReferenceHolder[]::new);
    }
    for (Annotation indexAnnotation : AnnotationHelper.getAnnotations(SiddhiConstants.ANNOTATION_INDEX,
            tableDefinition.getAnnotations())) {
        if (indexAnnotation.getElements().size() == 0) {
            throw new SiddhiAppValidationException(SiddhiConstants.ANNOTATION_INDEX
                    + " annotation of an in-memory table should contain one index element, but found "
                    + indexAnnotation.getElements().size() + " elements",
                    indexAnnotation.getQueryContextStartIndex(), indexAnnotation.getQueryContextEndIndex());
        } else if (indexAnnotation.getElements().size() > 1) {
            throw new SiddhiAppValidationException(SiddhiConstants.ANNOTATION_INDEX
                    + " annotation of an in-memory table should only contain one index element, but found "
                    + indexAnnotation.getElements().size() + " elements. To use multiple indexes, define"
                    + " multiple '@index(<index key>)' annotations with one index element per index key",
                    indexAnnotation.getQueryContextStartIndex(), indexAnnotation.getQueryContextEndIndex());
        }
        for (Element element : indexAnnotation.getElements()) {
            Integer previousValue = indexMetaData.put(element.getValue().trim(),
                    tableDefinition.getAttributePosition(element.getValue().trim()));
            if (previousValue != null) {
                throw new SiddhiAppValidationException("Multiple " + SiddhiConstants.ANNOTATION_INDEX
                        + " annotations defined with the same attribute '" + element.getValue().trim()
                        + "', at '" + tableDefinition.getId() + "'",
                        indexAnnotation.getQueryContextStartIndex(), indexAnnotation.getQueryContextEndIndex());
            }
        }
    }
    // indexBy is not supported.
    Annotation indexByAnnotation = AnnotationHelper.getAnnotation(SiddhiConstants.ANNOTATION_INDEX_BY,
            tableDefinition.getAnnotations());
    if (indexByAnnotation != null) {
        throw new OperationNotSupportedException(SiddhiConstants.ANNOTATION_INDEX_BY
                + " annotation is no longer supported; please use @PrimaryKey or @Index annotations instead,"
                + " at '" + tableDefinition.getId() + "'");
    }
    if (primaryKeyReferenceHolders != null || indexMetaData.size() > 0) {
        boolean isNumeric = false;
        if (primaryKeyReferenceHolders != null) {
            if (primaryKeyReferenceHolders.length == 1) {
                Attribute.Type type = tableDefinition.getAttributeType(
                        primaryKeyReferenceHolders[0].getPrimaryKeyAttribute());
                if (type == Attribute.Type.DOUBLE || type == Attribute.Type.FLOAT
                        || type == Attribute.Type.INT || type == Attribute.Type.LONG) {
                    isNumeric = true;
                }
            }
        }
        if (isCacheTable) {
            return new IndexEventHolderForCache(tableStreamEventFactory, eventConverter,
                    primaryKeyReferenceHolders, isNumeric, indexMetaData, tableDefinition, siddhiAppContext);
        } else {
            return new IndexEventHolder(tableStreamEventFactory, eventConverter,
                    primaryKeyReferenceHolders, isNumeric, indexMetaData, tableDefinition, siddhiAppContext);
        }
    } else {
        MetaStreamEvent metaStreamEvent = new MetaStreamEvent();
        for (Attribute attribute : tableDefinition.getAttributeList()) {
            metaStreamEvent.addOutputData(attribute);
        }
        StreamEventCloner streamEventCloner = new StreamEventCloner(metaStreamEvent, tableStreamEventFactory);
        return new ListEventHolder(tableStreamEventFactory, eventConverter,
                new StreamEventClonerHolder(streamEventCloner));
    }
}
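As a quick illustration (the table names below are invented for this sketch), the annotations on a table definition determine which EventHolder this method returns:

// A single numeric @PrimaryKey plus an @Index: parse() returns an IndexEventHolder
// (or IndexEventHolderForCache for cache tables), with isNumeric == true.
String indexedTable = "@PrimaryKey('id') "
        + "@Index('symbol') "
        + "define table StockTable (id long, symbol string, price double);";

// No @PrimaryKey or @Index annotations: parse() falls through to a ListEventHolder.
String plainTable = "define table EventLog (message string, eventTime long);";

// Any @IndexBy annotation is rejected with an OperationNotSupportedException.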
Use of io.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class SnapshotService, method restoreIncrementalSnapshot.
private void restoreIncrementalSnapshot(PartitionIdStateHolder partitionIdStateHolder,
        Map<String, Map<Long, Map<IncrementalSnapshotInfo, byte[]>>> incrementalStateByTime) {
    if (incrementalStateByTime != null) {
        String id = null;
        State state = null;
        StateHolder stateHolder = null;
        Map<String, Object> deserializedStateMap = null;
        try {
            for (Iterator<Map.Entry<String, Map<Long, Map<IncrementalSnapshotInfo, byte[]>>>> iterator =
                    incrementalStateByTime.entrySet().iterator(); iterator.hasNext(); ) {
                Map.Entry<String, Map<Long, Map<IncrementalSnapshotInfo, byte[]>>> incrementalStateByTimeEntry =
                        iterator.next();
                iterator.remove();
                for (Iterator<Map.Entry<Long, Map<IncrementalSnapshotInfo, byte[]>>> partitionGroupByKeyIterator =
                        incrementalStateByTimeEntry.getValue().entrySet().iterator();
                        partitionGroupByKeyIterator.hasNext(); ) {
                    Map.Entry<Long, Map<IncrementalSnapshotInfo, byte[]>> partitionGroupByKeyStateByTimeEntry =
                            partitionGroupByKeyIterator.next();
                    partitionGroupByKeyIterator.remove();
                    for (Iterator<Map.Entry<IncrementalSnapshotInfo, byte[]>> iterator1 =
                            partitionGroupByKeyStateByTimeEntry.getValue().entrySet().iterator();
                            iterator1.hasNext(); ) {
                        Map.Entry<IncrementalSnapshotInfo, byte[]> incrementalStateByInfoEntry = iterator1.next();
                        iterator1.remove();
                        IncrementalSnapshotInfo incrementalSnapshotInfo = incrementalStateByInfoEntry.getKey();
                        Map<String, Object> singleIncrementSnapshot = (Map<String, Object>)
                                ByteSerializer.byteToObject(incrementalStateByInfoEntry.getValue(),
                                        siddhiAppContext);
                        if (singleIncrementSnapshot != null) {
                            if (!incrementalSnapshotInfo.getId().equals(id)) {
                                if (id != null) {
                                    state.restore(deserializedStateMap);
                                    SiddhiAppContext.startPartitionFlow(id);
                                    try {
                                        stateHolder.returnState(state);
                                    } finally {
                                        SiddhiAppContext.stopPartitionFlow();
                                    }
                                    id = null;
                                    state = null;
                                    stateHolder = null;
                                    deserializedStateMap = null;
                                }
                                ElementStateHolder elementStateHolder =
                                        partitionIdStateHolder.queryStateHolderMap.get(
                                                incrementalSnapshotInfo.getQueryName());
                                if (elementStateHolder == null) {
                                    continue;
                                }
                                stateHolder = elementStateHolder.elementHolderMap.get(
                                        incrementalSnapshotInfo.getElementId());
                                if (stateHolder == null) {
                                    continue;
                                }
                                String partitionKey = null;
                                String groupByKey = null;
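                                // partitionGroupByKey is encoded as "<partitionKey>--<groupByKey>";
                                // the literal string "null" marks an absent key.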
                                String[] keys = incrementalSnapshotInfo.getPartitionGroupByKey().split("--");
                                if (keys.length == 2) {
                                    if (!keys[0].equals("null")) {
                                        partitionKey = keys[0];
                                    }
                                    if (!keys[1].equals("null")) {
                                        groupByKey = keys[1];
                                    }
                                }
                                SiddhiAppContext.startPartitionFlow(partitionKey);
                                SiddhiAppContext.startGroupByFlow(groupByKey);
                                try {
                                    state = stateHolder.getState();
                                } finally {
                                    SiddhiAppContext.stopGroupByFlow();
                                    SiddhiAppContext.stopPartitionFlow();
                                }
                                if (state != null) {
                                    id = incrementalSnapshotInfo.getId();
                                    deserializedStateMap = new HashMap<>();
                                }
                            }
                            if (state != null) {
                                for (Map.Entry<String, Object> singleIncrementSnapshotEntry :
                                        singleIncrementSnapshot.entrySet()) {
                                    if (singleIncrementSnapshotEntry.getValue() instanceof Snapshot) {
                                        Snapshot snapshot = (Snapshot) singleIncrementSnapshotEntry.getValue();
                                        SnapshotStateList snapshotStateList = (SnapshotStateList)
                                                deserializedStateMap.computeIfAbsent(
                                                        singleIncrementSnapshotEntry.getKey(),
                                                        k -> new SnapshotStateList());
                                        if (snapshot.isIncrementalSnapshot()) {
                                            snapshotStateList.putSnapshotState(
                                                    partitionGroupByKeyStateByTimeEntry.getKey(), snapshot);
                                        } else {
                                            snapshotStateList.getSnapshotStates().clear();
                                            snapshotStateList.putSnapshotState(
                                                    partitionGroupByKeyStateByTimeEntry.getKey(), snapshot);
                                        }
                                    } else {
                                        deserializedStateMap.put(singleIncrementSnapshotEntry.getKey(),
                                                singleIncrementSnapshotEntry.getValue());
                                    }
                                }
                            }
                        }
                    }
                }
                if (id != null) {
                    state.restore(deserializedStateMap);
                    SiddhiAppContext.startPartitionFlow(id);
                    try {
                        stateHolder.returnState(state);
                    } finally {
                        SiddhiAppContext.stopPartitionFlow();
                    }
                    id = null;
                    state = null;
                    stateHolder = null;
                    deserializedStateMap = null;
                }
            }
        } finally {
            if (id != null && stateHolder != null && state != null) {
                SiddhiAppContext.startPartitionFlow(id);
                try {
                    stateHolder.returnState(state);
                } finally {
                    SiddhiAppContext.stopPartitionFlow();
                }
                id = null;
                state = null;
                stateHolder = null;
            }
        }
    }
}
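For context, a minimal sketch of the public persistence API that leads to this restoration path (the persistence store choice and app content are illustrative assumptions); restoring a revision deserializes the stored snapshots and routes incremental checkpoints through restoreIncrementalSnapshot():

import io.siddhi.core.SiddhiAppRuntime;
import io.siddhi.core.SiddhiManager;
import io.siddhi.core.util.persistence.InMemoryPersistenceStore;

public class SnapshotRestoreExample {
    public static void main(String[] args) throws Exception {
        SiddhiManager siddhiManager = new SiddhiManager();
        // A persistence store must be registered before persist()/restore can be used.
        siddhiManager.setPersistenceStore(new InMemoryPersistenceStore());
        String app = "define stream InStream (symbol string, price double); "
                + "@info(name = 'query1') "
                + "from InStream#window.length(5) select symbol, avg(price) as avgPrice "
                + "insert into OutStream;";
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(app);
        runtime.start();
        // Take a snapshot of the current state; depending on the configured persistence
        // strategy, incremental checkpointing may be used internally.
        String revision = runtime.persist().getRevision();
        // ... after a failure or restart, restore the saved state ...
        runtime.restoreRevision(revision);
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}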