Use of io.siddhi.core.util.statistics.LatencyTracker in project siddhi by wso2.
The class QueryParser, method parse:
/**
 * Parse a query and return the corresponding QueryRuntime.
 *
 * @param query query to be parsed.
 * @param siddhiAppContext associated Siddhi app context.
 * @param streamDefinitionMap map containing user-given stream definitions.
 * @param tableDefinitionMap map containing table definitions.
 * @param windowDefinitionMap map containing window definitions.
 * @param aggregationDefinitionMap map containing aggregation definitions.
 * @param tableMap map containing event tables.
 * @param aggregationMap map containing aggregation runtimes.
 * @param windowMap map containing event windows.
 * @param lockSynchronizer lock synchronizer to sync the lock across queries.
 * @param queryIndex query index to identify an unnamed query by number.
 * @param partitioned whether the query is partitioned.
 * @param partitionId the ID of the partition.
 * @return queryRuntime
 */
public static QueryRuntimeImpl parse(Query query, SiddhiAppContext siddhiAppContext,
                                     Map<String, AbstractDefinition> streamDefinitionMap,
                                     Map<String, AbstractDefinition> tableDefinitionMap,
                                     Map<String, AbstractDefinition> windowDefinitionMap,
                                     Map<String, AbstractDefinition> aggregationDefinitionMap,
                                     Map<String, Table> tableMap,
                                     Map<String, AggregationRuntime> aggregationMap,
                                     Map<String, Window> windowMap,
                                     LockSynchronizer lockSynchronizer, String queryIndex,
                                     boolean partitioned, String partitionId) {
    List<VariableExpressionExecutor> executors = new ArrayList<>();
    QueryRuntimeImpl queryRuntime;
    Element nameElement = null;
    LatencyTracker latencyTracker = null;
    LockWrapper lockWrapper = null;
    try {
        nameElement = AnnotationHelper.getAnnotationElement("info", "name", query.getAnnotations());
        String queryName;
        if (nameElement != null) {
            queryName = nameElement.getValue();
        } else {
            queryName = "query_" + queryIndex;
        }
        SiddhiQueryContext siddhiQueryContext = new SiddhiQueryContext(siddhiAppContext, queryName, partitionId);
        siddhiQueryContext.setPartitioned(partitioned);
        latencyTracker = QueryParserHelper.createLatencyTracker(siddhiAppContext, siddhiQueryContext.getName(), SiddhiConstants.METRIC_INFIX_QUERIES, null);
        siddhiQueryContext.setLatencyTracker(latencyTracker);
        OutputStream.OutputEventType outputEventType = query.getOutputStream().getOutputEventType();
        if (query.getOutputRate() != null && query.getOutputRate() instanceof SnapshotOutputRate) {
            if (outputEventType != OutputStream.OutputEventType.ALL_EVENTS) {
                throw new SiddhiAppCreationException("As query '" + siddhiQueryContext.getName() + "' is performing snapshot rate limiting, it can only insert '" + OutputStream.OutputEventType.ALL_EVENTS + "' but it is inserting '" + outputEventType + "'!", query.getOutputStream().getQueryContextStartIndex(), query.getOutputStream().getQueryContextEndIndex());
            }
        }
        siddhiQueryContext.setOutputEventType(outputEventType);
        boolean outputExpectsExpiredEvents = false;
        if (outputEventType != OutputStream.OutputEventType.CURRENT_EVENTS) {
            outputExpectsExpiredEvents = true;
        }
        StreamRuntime streamRuntime = InputStreamParser.parse(query.getInputStream(), query, streamDefinitionMap, tableDefinitionMap, windowDefinitionMap, aggregationDefinitionMap, tableMap, windowMap, aggregationMap, executors, outputExpectsExpiredEvents, siddhiQueryContext);
        QuerySelector selector;
        if (streamRuntime.getQuerySelector() != null) {
            selector = streamRuntime.getQuerySelector();
        } else {
            selector = SelectorParser.parse(query.getSelector(), query.getOutputStream(), streamRuntime.getMetaComplexEvent(), tableMap, executors, SiddhiConstants.UNKNOWN_STATE, streamRuntime.getProcessingMode(), outputExpectsExpiredEvents, siddhiQueryContext);
        }
        boolean isWindow = query.getInputStream() instanceof JoinInputStream;
        if (!isWindow && query.getInputStream() instanceof SingleInputStream) {
            for (StreamHandler streamHandler : ((SingleInputStream) query.getInputStream()).getStreamHandlers()) {
                if (streamHandler instanceof io.siddhi.query.api.execution.query.input.handler.Window) {
                    isWindow = true;
                    break;
                }
            }
        }
        Element synchronizedElement = AnnotationHelper.getAnnotationElement("synchronized", null, query.getAnnotations());
        if (synchronizedElement != null) {
            if (!("false".equalsIgnoreCase(synchronizedElement.getValue()))) {
                // The query's LockWrapper does not need a unique id since it will
                // not be passed to the LockSynchronizer.
                lockWrapper = new LockWrapper("");
                // LockWrapper does not have a default lock.
                lockWrapper.setLock(new ReentrantLock());
            }
        } else {
            if (isWindow || !(streamRuntime instanceof SingleStreamRuntime)) {
                if (streamRuntime instanceof JoinStreamRuntime) {
                    // If at least one Window is involved in the join, use the LockWrapper of that window
                    // for the query as well.
                    // If the join is between two event Windows, sync the locks of the LockWrappers of
                    // those windows and use either of them for the query.
                    MetaStateEvent metaStateEvent = (MetaStateEvent) streamRuntime.getMetaComplexEvent();
                    MetaStreamEvent[] metaStreamEvents = metaStateEvent.getMetaStreamEvents();
                    if (metaStreamEvents[0].getEventType() == EventType.WINDOW && metaStreamEvents[1].getEventType() == EventType.WINDOW) {
                        LockWrapper leftLockWrapper = windowMap.get(metaStreamEvents[0].getLastInputDefinition().getId()).getLock();
                        LockWrapper rightLockWrapper = windowMap.get(metaStreamEvents[1].getLastInputDefinition().getId()).getLock();
                        if (!leftLockWrapper.equals(rightLockWrapper)) {
                            // Sync the lock across both wrappers
                            lockSynchronizer.sync(leftLockWrapper, rightLockWrapper);
                        }
                        // Either leftLockWrapper or rightLockWrapper can be used, since both hold the
                        // same lock internally. If either lock is updated later, the other lock is also
                        // updated by the LockSynchronizer.
                        lockWrapper = leftLockWrapper;
                    } else if (metaStreamEvents[0].getEventType() == EventType.WINDOW) {
                        // Share that window's wrapper as the query lock wrapper
                        lockWrapper = windowMap.get(metaStreamEvents[0].getLastInputDefinition().getId()).getLock();
                    } else if (metaStreamEvents[1].getEventType() == EventType.WINDOW) {
                        // Share that window's wrapper as the query lock wrapper
                        lockWrapper = windowMap.get(metaStreamEvents[1].getLastInputDefinition().getId()).getLock();
                    } else {
                        // The join does not contain any Window. The query's LockWrapper does not need a
                        // unique id since it will not be passed to the LockSynchronizer.
                        lockWrapper = new LockWrapper("");
                        // LockWrapper does not have a default lock.
                        lockWrapper.setLock(new ReentrantLock());
                    }
                } else {
                    lockWrapper = new LockWrapper("");
                    lockWrapper.setLock(new ReentrantLock());
                }
            }
        }
        OutputRateLimiter outputRateLimiter = OutputParser.constructOutputRateLimiter(query.getOutputStream().getId(), query.getOutputRate(), query.getSelector().getGroupByList().size() != 0, isWindow, siddhiQueryContext);
        if (outputRateLimiter instanceof WrappedSnapshotOutputRateLimiter) {
            selector.setBatchingEnabled(false);
        }
        boolean groupBy = !query.getSelector().getGroupByList().isEmpty();
        OutputCallback outputCallback = OutputParser.constructOutputCallback(query.getOutputStream(), streamRuntime.getMetaComplexEvent().getOutputStreamDefinition(), tableMap, windowMap, !(streamRuntime instanceof SingleStreamRuntime) || groupBy, siddhiQueryContext);
        QueryParserHelper.reduceMetaComplexEvent(streamRuntime.getMetaComplexEvent());
        QueryParserHelper.updateVariablePosition(streamRuntime.getMetaComplexEvent(), executors);
        QueryParserHelper.initStreamRuntime(streamRuntime, streamRuntime.getMetaComplexEvent(), lockWrapper, siddhiQueryContext.getName());
        // Update the variable positions of cached compiled-selection expression executors
        if (streamRuntime instanceof JoinStreamRuntime) {
            streamRuntime.getSingleStreamRuntimes().forEach((singleStreamRuntime -> {
                Processor processorChain = singleStreamRuntime.getProcessorChain();
                if (processorChain instanceof JoinProcessor) {
                    CompiledSelection compiledSelection = ((JoinProcessor) processorChain).getCompiledSelection();
                    if (compiledSelection instanceof AbstractQueryableRecordTable.CompiledSelectionWithCache) {
                        List<VariableExpressionExecutor> variableExpressionExecutors = ((AbstractQueryableRecordTable.CompiledSelectionWithCache) compiledSelection).getVariableExpressionExecutorsForQuerySelector();
                        QueryParserHelper.updateVariablePosition(streamRuntime.getMetaComplexEvent(), variableExpressionExecutors);
                    }
                }
            }));
        }
        selector.setEventPopulator(StateEventPopulatorFactory.constructEventPopulator(streamRuntime.getMetaComplexEvent()));
        queryRuntime = new QueryRuntimeImpl(query, streamRuntime, selector, outputRateLimiter, outputCallback, streamRuntime.getMetaComplexEvent(), siddhiQueryContext);
        if (outputRateLimiter instanceof WrappedSnapshotOutputRateLimiter) {
            selector.setBatchingEnabled(false);
            ((WrappedSnapshotOutputRateLimiter) outputRateLimiter).init(streamRuntime.getMetaComplexEvent().getOutputStreamDefinition().getAttributeList().size(), selector.getAttributeProcessorList(), streamRuntime.getMetaComplexEvent());
        }
        outputRateLimiter.init(lockWrapper, groupBy, siddhiQueryContext);
    } catch (DuplicateDefinitionException e) {
        if (nameElement != null) {
            throw new DuplicateDefinitionException(e.getMessageWithOutContext() + ", when creating query " + nameElement.getValue(), e, e.getQueryContextStartIndex(), e.getQueryContextEndIndex(), siddhiAppContext.getName(), siddhiAppContext.getSiddhiAppString());
        } else {
            throw new DuplicateDefinitionException(e.getMessage(), e, e.getQueryContextStartIndex(), e.getQueryContextEndIndex(), siddhiAppContext.getName(), siddhiAppContext.getSiddhiAppString());
        }
    } catch (Throwable t) {
        ExceptionUtil.populateQueryContext(t, query, siddhiAppContext);
        throw t;
    }
    return queryRuntime;
}
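The tracker created above is only registered when statistics are enabled for the Siddhi app, and the query name taken from the @info annotation becomes part of the metric name. A minimal sketch of how this might be exercised through the public API, assuming the standard @app:statistics and @info annotations; the app, stream, and query names are illustrative:

import io.siddhi.core.SiddhiAppRuntime;
import io.siddhi.core.SiddhiManager;
import io.siddhi.core.stream.input.InputHandler;

public class NamedQueryLatencyExample {
    public static void main(String[] args) throws InterruptedException {
        // The @info(name = ...) annotation is what parse() reads above; without it,
        // the query falls back to the generated name "query_<index>".
        String app = "@app:statistics(reporter = 'console')\n"
                + "define stream StockStream (symbol string, price float);\n"
                + "@info(name = 'priceFilter')\n"
                + "from StockStream[price > 100]\n"
                + "select symbol, price\n"
                + "insert into HighPriceStream;";
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(app);
        runtime.start();
        InputHandler input = runtime.getInputHandler("StockStream");
        input.send(new Object[]{"WSO2", 120.5f});
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}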
Use of io.siddhi.core.util.statistics.LatencyTracker in project siddhi by wso2.
The class ProcessStreamReceiver, method process:
private void process(ComplexEventChunk<StreamEvent> streamEventChunk) {
    if (lockWrapper != null) {
        lockWrapper.lock();
    }
    try {
        LatencyTracker latencyTracker = siddhiQueryContext.getLatencyTracker();
        if (Level.BASIC.compareTo(siddhiQueryContext.getSiddhiAppContext().getRootMetricsLevel()) <= 0 && latencyTracker != null) {
            try {
                latencyTracker.markIn();
                processAndClear(streamEventChunk);
            } finally {
                latencyTracker.markOut();
            }
        } else {
            processAndClear(streamEventChunk);
        }
    } finally {
        if (lockWrapper != null) {
            lockWrapper.unlock();
        }
    }
}
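The markIn()/markOut() pair wrapped in try/finally is the idiom that keeps the latency measurement balanced even when processing throws. A minimal sketch of the same guard, using only the calls visible above; trackedProcess and doWork are hypothetical names:

// Minimal sketch of the markIn()/markOut() guard idiom; imports assumed as above.
static void trackedProcess(LatencyTracker tracker, Runnable doWork) {
    if (tracker == null) {
        // Metrics disabled: no tracking overhead
        doWork.run();
        return;
    }
    tracker.markIn();           // start the latency measurement
    try {
        doWork.run();
    } finally {
        tracker.markOut();      // always closed, even if doWork throws
    }
}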
Use of io.siddhi.core.util.statistics.LatencyTracker in project siddhi by wso2.
The class MultiProcessStreamReceiver, method process:
private void process(int eventSequence, StreamEvent newEvent) {
if (lockWrapper != null) {
lockWrapper.lock();
}
try {
LatencyTracker latencyTracker = siddhiQueryContext.getLatencyTracker();
if (latencyTracker != null) {
try {
latencyTracker.markIn();
processAndClear(eventSequence, newEvent);
} finally {
latencyTracker.markOut();
}
} else {
processAndClear(eventSequence, newEvent);
}
} finally {
if (lockWrapper != null) {
lockWrapper.unlock();
}
}
}
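Unlike ProcessStreamReceiver.process, this variant only null-checks the tracker and does not consult the app's root metrics level. A sketch of the extra gate the previous method applies, assuming Level is the io.siddhi.core.util.statistics.metrics.Level enum referenced above, ordered so that BASIC compares at or below an enabled level:

// Hypothetical helper mirroring the gate in ProcessStreamReceiver.process:
// track only when a tracker exists and metrics are at BASIC level or above.
static boolean shouldTrackLatency(SiddhiQueryContext siddhiQueryContext) {
    LatencyTracker tracker = siddhiQueryContext.getLatencyTracker();
    return tracker != null
            && Level.BASIC.compareTo(siddhiQueryContext.getSiddhiAppContext().getRootMetricsLevel()) <= 0;
}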
Use of io.siddhi.core.util.statistics.LatencyTracker in project siddhi by wso2.
The class AggregationParser, method parse:
public static AggregationRuntime parse(AggregationDefinition aggregationDefinition, SiddhiAppContext siddhiAppContext,
                                       Map<String, AbstractDefinition> streamDefinitionMap,
                                       Map<String, AbstractDefinition> tableDefinitionMap,
                                       Map<String, AbstractDefinition> windowDefinitionMap,
                                       Map<String, AbstractDefinition> aggregationDefinitionMap,
                                       Map<String, Table> tableMap,
                                       Map<String, Window> windowMap,
                                       Map<String, AggregationRuntime> aggregationMap,
                                       SiddhiAppRuntimeBuilder siddhiAppRuntimeBuilder) {
    // Set the time zone for the aggregation
    String timeZone = getTimeZone(siddhiAppContext);
    boolean isDebugEnabled = log.isDebugEnabled();
    boolean isPersistedAggregation = false;
    boolean isReadOnly = false;
    if (!validateTimeZone(timeZone)) {
        throw new SiddhiAppCreationException("Given timeZone '" + timeZone + "' for aggregations is invalid. Please provide a valid time zone.");
    }
    // Get the aggregation name
    String aggregatorName = aggregationDefinition.getId();
    if (isDebugEnabled) {
        log.debug("Incremental aggregation initialization process started for aggregation " + aggregatorName);
    }
    Annotation aggregationProperties = AnnotationHelper.getAnnotation(ANNOTATION_PERSISTED_AGGREGATION, aggregationDefinition.getAnnotations());
    if (aggregationProperties != null) {
        String persistedAggregationMode = aggregationProperties.getElement(ANNOTATION_ELEMENT_ENABLE);
        isPersistedAggregation = persistedAggregationMode == null || Boolean.parseBoolean(persistedAggregationMode);
        String readOnlyMode = aggregationProperties.getElement(ANNOTATION_ELEMENT_IS_READ_ONLY);
        isReadOnly = Boolean.parseBoolean(readOnlyMode);
    }
    if (isPersistedAggregation) {
        aggregationDefinition.getSelector().getSelectionList().stream().forEach(outputAttribute -> {
            if (outputAttribute.getExpression() instanceof AttributeFunction && ((AttributeFunction) outputAttribute.getExpression()).getName().equals("distinctCount")) {
                throw new SiddhiAppCreationException("Aggregation function 'distinctCount' is not supported " + "with the persisted aggregation type; please use the default incremental aggregation");
            }
        });
    }
    if (isDebugEnabled) {
        log.debug("Aggregation mode is defined as " + (isPersistedAggregation ? "persisted" : "inMemory") + " for aggregation " + aggregatorName);
    }
    if (aggregationDefinition.getTimePeriod() == null) {
        throw new SiddhiAppCreationException("Aggregation Definition '" + aggregationDefinition.getId() + "'s timePeriod is null. " + "Hence, can't create the siddhi app '" + siddhiAppContext.getName() + "'", aggregationDefinition.getQueryContextStartIndex(), aggregationDefinition.getQueryContextEndIndex());
    }
    if (aggregationDefinition.getSelector() == null) {
        throw new SiddhiAppCreationException("Aggregation Definition '" + aggregationDefinition.getId() + "'s selection is not defined. " + "Hence, can't create the siddhi app '" + siddhiAppContext.getName() + "'", aggregationDefinition.getQueryContextStartIndex(), aggregationDefinition.getQueryContextEndIndex());
    }
    if (streamDefinitionMap.get(aggregationDefinition.getBasicSingleInputStream().getStreamId()) == null) {
        throw new SiddhiAppCreationException("Stream " + aggregationDefinition.getBasicSingleInputStream().getStreamId() + " has not been defined");
    }
    // Check whether user-defined primary keys exist in the @store annotation on the aggregation;
    // if so, an error is thrown
    Element userDefinedPrimaryKey = AnnotationHelper.getAnnotationElement(SiddhiConstants.ANNOTATION_PRIMARY_KEY, null, aggregationDefinition.getAnnotations());
    if (userDefinedPrimaryKey != null) {
        throw new SiddhiAppCreationException("Aggregation Tables have a predefined primary key, but found '" + userDefinedPrimaryKey.getValue() + "' primary key defined through annotation.");
    }
    try {
        List<VariableExpressionExecutor> incomingVariableExpressionExecutors = new ArrayList<>();
        SiddhiQueryContext siddhiQueryContext = new SiddhiQueryContext(siddhiAppContext, aggregatorName);
        StreamRuntime streamRuntime = InputStreamParser.parse(aggregationDefinition.getBasicSingleInputStream(), null, streamDefinitionMap, tableDefinitionMap, windowDefinitionMap, aggregationDefinitionMap, tableMap, windowMap, aggregationMap, incomingVariableExpressionExecutors, false, siddhiQueryContext);
        // Get the original meta for later use.
        MetaStreamEvent incomingMetaStreamEvent = (MetaStreamEvent) streamRuntime.getMetaComplexEvent();
        // Create a new meta stream event.
        // This must hold the timestamp, group-by attributes (if given) and the incremental attributes
        // in the onAfterWindowData array.
        // Example format: AGG_TIMESTAMP, groupByAttribute1, groupByAttribute2, AGG_incAttribute1, AGG_incAttribute2.
        // AGG_incAttribute1 and AGG_incAttribute2 have the same attribute names as in
        // finalListOfIncrementalAttributes.
        // To enter data as onAfterWindowData
        incomingMetaStreamEvent.initializeOnAfterWindowData();
        // List of all aggregation durations
        List<TimePeriod.Duration> aggregationDurations = getSortedPeriods(aggregationDefinition.getTimePeriod(), isPersistedAggregation);
        // Incoming executors will be the executors for the timestamp, externalTimestamp (if used),
        // group-by attributes (if given) and the incremental attribute expression executors
        List<ExpressionExecutor> incomingExpressionExecutors = new ArrayList<>();
        List<IncrementalAttributeAggregator> incrementalAttributeAggregators = new ArrayList<>();
        List<Variable> groupByVariableList = aggregationDefinition.getSelector().getGroupByList();
        // Expressions to get the final aggregate outputs, e.g. for avg the expression is a Divide
        // expression with AGG_SUM / AGG_COUNT
        List<Expression> outputExpressions = new ArrayList<>();
        boolean isProcessingOnExternalTime = aggregationDefinition.getAggregateAttribute() != null;
        boolean isGroupBy = aggregationDefinition.getSelector().getGroupByList().size() != 0;
        final boolean isDistributed;
        ConfigManager configManager = siddhiAppContext.getSiddhiContext().getConfigManager();
        final String shardId = configManager.extractProperty("shardId");
        boolean enablePartitioning = false;
        // Check whether the setup is active-active (distributed deployment) by checking the
        // availability of the partitionById config
        Annotation partitionById = AnnotationHelper.getAnnotation(ANNOTATION_PARTITION_BY_ID, aggregationDefinition.getAnnotations());
        if (partitionById != null) {
            String enableElement = partitionById.getElement(ANNOTATION_ELEMENT_ENABLE);
            enablePartitioning = enableElement == null || Boolean.parseBoolean(enableElement);
        }
        boolean shouldPartitionById = Boolean.parseBoolean(configManager.extractProperty("partitionById"));
        if (enablePartitioning || shouldPartitionById) {
            if (shardId == null) {
                throw new SiddhiAppCreationException("Configuration 'shardId' not provided for @partitionById " + "annotation");
            }
            isDistributed = true;
        } else {
            isDistributed = false;
        }
        if (isDebugEnabled) {
            log.debug("Distributed aggregation processing is " + (isDistributed ? "enabled" : "disabled") + " in " + aggregatorName + " aggregation");
        }
        populateIncomingAggregatorsAndExecutors(aggregationDefinition, siddhiQueryContext, tableMap, incomingVariableExpressionExecutors, incomingMetaStreamEvent, incomingExpressionExecutors, incrementalAttributeAggregators, groupByVariableList, outputExpressions, isProcessingOnExternalTime, isDistributed, shardId);
        // Check whether populateIncomingAggregatorsAndExecutors added the latest-event timestamp
        // column as the last output attribute
        boolean isLatestEventColAdded = incomingMetaStreamEvent.getOutputData().get(incomingMetaStreamEvent.getOutputData().size() - 1).getName().equals(AGG_LAST_TIMESTAMP_COL);
        int baseAggregatorBeginIndex = incomingMetaStreamEvent.getOutputData().size();
        List<Expression> finalBaseExpressions = new ArrayList<>();
        boolean isOptimisedLookup = populateFinalBaseAggregators(tableMap, incomingVariableExpressionExecutors, incomingMetaStreamEvent, incomingExpressionExecutors, incrementalAttributeAggregators, siddhiQueryContext, finalBaseExpressions);
        if (isDebugEnabled) {
            log.debug("Optimised lookup mode is " + (isOptimisedLookup ? "enabled" : "disabled") + " for aggregation " + aggregatorName);
        }
        // Create an intermediate stream with the aggregated stream and the output variables extracted above
        StreamDefinition incomingOutputStreamDefinition = StreamDefinition.id(aggregatorName + "_intermediate");
        incomingOutputStreamDefinition.setQueryContextStartIndex(aggregationDefinition.getQueryContextStartIndex());
        incomingOutputStreamDefinition.setQueryContextEndIndex(aggregationDefinition.getQueryContextEndIndex());
        MetaStreamEvent processedMetaStreamEvent = new MetaStreamEvent();
        for (Attribute attribute : incomingMetaStreamEvent.getOutputData()) {
            incomingOutputStreamDefinition.attribute(attribute.getName(), attribute.getType());
            processedMetaStreamEvent.addOutputData(attribute);
        }
        incomingMetaStreamEvent.setOutputDefinition(incomingOutputStreamDefinition);
        processedMetaStreamEvent.addInputDefinition(incomingOutputStreamDefinition);
        processedMetaStreamEvent.setOutputDefinition(incomingOutputStreamDefinition);
        // Executors of the processing meta
        List<VariableExpressionExecutor> processVariableExpressionExecutors = new ArrayList<>();
        Map<TimePeriod.Duration, List<ExpressionExecutor>> processExpressionExecutorsMap = new HashMap<>();
        Map<TimePeriod.Duration, List<ExpressionExecutor>> processExpressionExecutorsMapForFind = new HashMap<>();
        aggregationDurations.forEach(incrementalDuration -> {
            processExpressionExecutorsMap.put(incrementalDuration, constructProcessExpressionExecutors(siddhiQueryContext, tableMap, baseAggregatorBeginIndex, finalBaseExpressions, incomingOutputStreamDefinition, processedMetaStreamEvent, processVariableExpressionExecutors, isProcessingOnExternalTime, incrementalDuration, isDistributed, shardId, isLatestEventColAdded));
            processExpressionExecutorsMapForFind.put(incrementalDuration, constructProcessExpressionExecutors(siddhiQueryContext, tableMap, baseAggregatorBeginIndex, finalBaseExpressions, incomingOutputStreamDefinition, processedMetaStreamEvent, processVariableExpressionExecutors, isProcessingOnExternalTime, incrementalDuration, isDistributed, shardId, isLatestEventColAdded));
        });
        ExpressionExecutor shouldUpdateTimestamp = null;
        if (isLatestEventColAdded) {
            Expression shouldUpdateTimestampExp = new Variable(AGG_LAST_TIMESTAMP_COL);
            shouldUpdateTimestamp = ExpressionParser.parseExpression(shouldUpdateTimestampExp, processedMetaStreamEvent, 0, tableMap, processVariableExpressionExecutors, false, 0, ProcessingMode.BATCH, false, siddhiQueryContext);
        }
        List<ExpressionExecutor> outputExpressionExecutors = outputExpressions.stream().map(expression -> ExpressionParser.parseExpression(expression, processedMetaStreamEvent, 0, tableMap, processVariableExpressionExecutors, isGroupBy, 0, ProcessingMode.BATCH, false, siddhiQueryContext)).collect(Collectors.toList());
        // Create group-by key generators
        Map<TimePeriod.Duration, GroupByKeyGenerator> groupByKeyGeneratorMap = new HashMap<>();
        aggregationDurations.forEach(incrementalDuration -> {
            GroupByKeyGenerator groupByKeyGenerator = null;
            if (isProcessingOnExternalTime || isGroupBy) {
                List<Expression> groupByExpressionList = new ArrayList<>();
                if (isProcessingOnExternalTime) {
                    Expression externalTimestampExpression = AttributeFunction.function("incrementalAggregator", "getAggregationStartTime", new Variable(AGG_EXTERNAL_TIMESTAMP_COL), new StringConstant(incrementalDuration.name()));
                    groupByExpressionList.add(externalTimestampExpression);
                }
                groupByExpressionList.addAll(groupByVariableList.stream().map(groupByVariable -> (Expression) groupByVariable).collect(Collectors.toList()));
                groupByKeyGenerator = new GroupByKeyGenerator(groupByExpressionList, processedMetaStreamEvent, SiddhiConstants.UNKNOWN_STATE, tableMap, processVariableExpressionExecutors, siddhiQueryContext);
            }
            groupByKeyGeneratorMap.put(incrementalDuration, groupByKeyGenerator);
        });
        // Group-by key generators for reading
        Map<TimePeriod.Duration, GroupByKeyGenerator> groupByKeyGeneratorMapForReading = new HashMap<>();
        if (isDistributed && !isProcessingOnExternalTime) {
            aggregationDurations.forEach(incrementalDuration -> {
                List<Expression> groupByExpressionList = new ArrayList<>();
                Expression timestampExpression = AttributeFunction.function("incrementalAggregator", "getAggregationStartTime", new Variable(AGG_START_TIMESTAMP_COL), new StringConstant(incrementalDuration.name()));
                groupByExpressionList.add(timestampExpression);
                if (isGroupBy) {
                    groupByExpressionList.addAll(groupByVariableList.stream().map(groupByVariable -> (Expression) groupByVariable).collect(Collectors.toList()));
                }
                GroupByKeyGenerator groupByKeyGenerator = new GroupByKeyGenerator(groupByExpressionList, processedMetaStreamEvent, SiddhiConstants.UNKNOWN_STATE, tableMap, processVariableExpressionExecutors, siddhiQueryContext);
                groupByKeyGeneratorMapForReading.put(incrementalDuration, groupByKeyGenerator);
            });
        } else {
            groupByKeyGeneratorMapForReading.putAll(groupByKeyGeneratorMap);
        }
        // Create a new scheduler
        EntryValveExecutor entryValveExecutor = new EntryValveExecutor(siddhiAppContext);
        LockWrapper lockWrapper = new LockWrapper(aggregatorName);
        lockWrapper.setLock(new ReentrantLock());
        Scheduler scheduler = SchedulerParser.parse(entryValveExecutor, siddhiQueryContext);
        scheduler.init(lockWrapper, aggregatorName);
        scheduler.setStreamEventFactory(new StreamEventFactory(processedMetaStreamEvent));
        QueryParserHelper.reduceMetaComplexEvent(incomingMetaStreamEvent);
        QueryParserHelper.reduceMetaComplexEvent(processedMetaStreamEvent);
        QueryParserHelper.updateVariablePosition(incomingMetaStreamEvent, incomingVariableExpressionExecutors);
        QueryParserHelper.updateVariablePosition(processedMetaStreamEvent, processVariableExpressionExecutors);
        Map<TimePeriod.Duration, Table> aggregationTables = initDefaultTables(aggregatorName, aggregationDurations, processedMetaStreamEvent.getOutputStreamDefinition(), siddhiAppRuntimeBuilder, aggregationDefinition.getAnnotations(), groupByVariableList, isProcessingOnExternalTime, isDistributed);
        Map<TimePeriod.Duration, Executor> incrementalExecutorMap = buildIncrementalExecutors(processedMetaStreamEvent, processExpressionExecutorsMap, groupByKeyGeneratorMap, aggregationDurations, aggregationTables, siddhiQueryContext, aggregatorName, shouldUpdateTimestamp, timeZone, isPersistedAggregation, incomingOutputStreamDefinition, isDistributed, shardId, isProcessingOnExternalTime, aggregationDefinition, configManager, groupByVariableList, isReadOnly);
        isOptimisedLookup = isOptimisedLookup && aggregationTables.get(aggregationDurations.get(0)) instanceof QueryableProcessor;
        List<String> groupByVariablesList = groupByVariableList.stream().map(Variable::getAttributeName).collect(Collectors.toList());
        List<OutputAttribute> defaultSelectorList = new ArrayList<>();
        if (isOptimisedLookup) {
            defaultSelectorList = incomingOutputStreamDefinition.getAttributeList().stream().map((attribute) -> new OutputAttribute(new Variable(attribute.getName()))).collect(Collectors.toList());
        }
        IncrementalDataPurger incrementalDataPurger = new IncrementalDataPurger();
        incrementalDataPurger.init(aggregationDefinition, new StreamEventFactory(processedMetaStreamEvent), aggregationTables, isProcessingOnExternalTime, siddhiQueryContext, aggregationDurations, timeZone, windowMap, aggregationMap);
        // Recreate in-memory data from the tables
        IncrementalExecutorsInitialiser incrementalExecutorsInitialiser = new IncrementalExecutorsInitialiser(aggregationDurations, aggregationTables, incrementalExecutorMap, isDistributed, shardId, siddhiAppContext, processedMetaStreamEvent, tableMap, windowMap, aggregationMap, timeZone, isReadOnly, isPersistedAggregation);
        IncrementalExecutor rootIncrementalExecutor = (IncrementalExecutor) incrementalExecutorMap.get(aggregationDurations.get(0));
        rootIncrementalExecutor.setScheduler(scheduler);
        // Connect the entry valve to the root incremental executor
        entryValveExecutor.setNextExecutor(rootIncrementalExecutor);
        QueryParserHelper.initStreamRuntime(streamRuntime, incomingMetaStreamEvent, lockWrapper, aggregatorName);
        LatencyTracker latencyTrackerFind = null;
        LatencyTracker latencyTrackerInsert = null;
        ThroughputTracker throughputTrackerFind = null;
        ThroughputTracker throughputTrackerInsert = null;
        if (siddhiAppContext.getStatisticsManager() != null) {
            latencyTrackerFind = QueryParserHelper.createLatencyTracker(siddhiAppContext, aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_FIND);
            latencyTrackerInsert = QueryParserHelper.createLatencyTracker(siddhiAppContext, aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_INSERT);
            throughputTrackerFind = QueryParserHelper.createThroughputTracker(siddhiAppContext, aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_FIND);
            throughputTrackerInsert = QueryParserHelper.createThroughputTracker(siddhiAppContext, aggregationDefinition.getId(), METRIC_INFIX_AGGREGATIONS, METRIC_TYPE_INSERT);
        }
        AggregationRuntime aggregationRuntime = new AggregationRuntime(aggregationDefinition, isProcessingOnExternalTime, isDistributed, aggregationDurations, incrementalExecutorMap, aggregationTables, outputExpressionExecutors, processExpressionExecutorsMapForFind, shouldUpdateTimestamp, groupByKeyGeneratorMapForReading, isOptimisedLookup, defaultSelectorList, groupByVariablesList, isLatestEventColAdded, baseAggregatorBeginIndex, finalBaseExpressions, incrementalDataPurger, incrementalExecutorsInitialiser, ((SingleStreamRuntime) streamRuntime), processedMetaStreamEvent, latencyTrackerFind, throughputTrackerFind, timeZone);
        streamRuntime.setCommonProcessor(new IncrementalAggregationProcessor(aggregationRuntime, incomingExpressionExecutors, processedMetaStreamEvent, latencyTrackerInsert, throughputTrackerInsert, siddhiAppContext));
        return aggregationRuntime;
    } catch (Throwable t) {
        ExceptionUtil.populateQueryContext(t, aggregationDefinition, siddhiAppContext);
        throw t;
    }
}
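Note how parse() wires the FIND trackers into the AggregationRuntime (used for lookups and joins against the aggregation) and the INSERT trackers into the IncrementalAggregationProcessor (used for incoming events). A sketch of an aggregation definition this parser would receive, assuming standard Siddhi incremental-aggregation syntax; all names are illustrative:

import io.siddhi.core.SiddhiAppRuntime;
import io.siddhi.core.SiddhiManager;

public class AggregationExample {
    public static void main(String[] args) {
        String app = "define stream TradeStream (symbol string, price double, volume long);\n"
                + "define aggregation TradeAggregation\n"
                + "from TradeStream\n"
                + "select symbol, avg(price) as avgPrice, sum(volume) as totalVolume\n"
                + "group by symbol\n"
                + "aggregate every sec ... year;";
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(app);
        runtime.start();
        // With a StatisticsManager configured, inserts into the aggregation report under
        // the INSERT trackers and lookups under the FIND trackers created in parse() above.
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}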
Use of io.siddhi.core.util.statistics.LatencyTracker in project siddhi by wso2.
The class QueryParserHelper, method createLatencyTracker:
public static LatencyTracker createLatencyTracker(SiddhiAppContext siddhiAppContext, String name, String type, String function) {
    LatencyTracker latencyTracker = null;
    if (siddhiAppContext.getStatisticsManager() != null) {
        // Assembled metric name:
        // <metricPrefix><delim><METRIC_INFIX_SIDDHI_APPS><delim><appName><delim><METRIC_INFIX_SIDDHI>
        //     <delim><type><delim><name>[<delim><function>]<delim>latency
        String metricName = siddhiAppContext.getSiddhiContext().getStatisticsConfiguration().getMetricPrefix()
                + SiddhiConstants.METRIC_DELIMITER + SiddhiConstants.METRIC_INFIX_SIDDHI_APPS
                + SiddhiConstants.METRIC_DELIMITER + siddhiAppContext.getName()
                + SiddhiConstants.METRIC_DELIMITER + SiddhiConstants.METRIC_INFIX_SIDDHI
                + SiddhiConstants.METRIC_DELIMITER + type
                + SiddhiConstants.METRIC_DELIMITER + name;
        if (function != null) {
            metricName += SiddhiConstants.METRIC_DELIMITER + function;
        }
        metricName += SiddhiConstants.METRIC_DELIMITER + "latency";
        boolean matchExist = false;
        for (String regex : siddhiAppContext.getIncludedMetrics()) {
            if (metricName.matches(regex)) {
                matchExist = true;
                break;
            }
        }
        if (matchExist) {
            latencyTracker = siddhiAppContext.getSiddhiContext().getStatisticsConfiguration().getFactory().createLatencyTracker(metricName, siddhiAppContext.getStatisticsManager());
        }
    }
    return latencyTracker;
}
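For concreteness, a hedged illustration of the names this method assembles, assuming a '.' delimiter and 'SiddhiApps'/'Siddhi' infix values; the actual prefix and constant values come from the configured StatisticsConfiguration and SiddhiConstants and may differ:

// Illustrative only; the concrete constant values here are assumptions.
// For a query named 'priceFilter' in app 'TradeApp' (type = queries infix, function = null):
//   <metricPrefix>.SiddhiApps.TradeApp.Siddhi.Queries.priceFilter.latency
// For aggregation 'TradeAggregation' with the find function (METRIC_TYPE_FIND):
//   <metricPrefix>.SiddhiApps.TradeApp.Siddhi.Aggregations.TradeAggregation.<find>.latency
// A tracker is created only if the assembled name matches one of the regex patterns
// returned by siddhiAppContext.getIncludedMetrics(), as checked in the loop above.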