Use of org.wso2.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class SiddhiAppRuntime, method query().
private Event[] query(StoreQuery storeQuery, String storeQueryString) {
    try {
        if (siddhiAppContext.isStatsEnabled() && storeQueryLatencyTracker != null) {
            storeQueryLatencyTracker.markIn();
        }
        // Reuse the parsed runtime for a previously seen store query; otherwise parse and cache it.
        StoreQueryRuntime storeQueryRuntime = storeQueryRuntimeMap.get(storeQuery);
        if (storeQueryRuntime == null) {
            storeQueryRuntime = StoreQueryParser.parse(storeQuery, siddhiAppContext, tableMap, windowMap,
                    aggregationMap);
            storeQueryRuntimeMap.put(storeQuery, storeQueryRuntime);
        } else {
            storeQueryRuntime.reset();
        }
        return storeQueryRuntime.execute();
    } catch (RuntimeException e) {
        // Surface parse and runtime failures with query context information when available.
        if (e instanceof SiddhiAppContextException) {
            throw new StoreQueryCreationException(((SiddhiAppContextException) e).getMessageWithOutContext(), e,
                    ((SiddhiAppContextException) e).getQueryContextStartIndex(),
                    ((SiddhiAppContextException) e).getQueryContextEndIndex(), null, storeQueryString);
        }
        throw new StoreQueryCreationException(e.getMessage(), e);
    } finally {
        if (siddhiAppContext.isStatsEnabled() && storeQueryLatencyTracker != null) {
            storeQueryLatencyTracker.markOut();
        }
    }
}
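For context, this private method backs the public query(String) API of SiddhiAppRuntime: the query string is compiled into a StoreQuery and passed through here, with parsed StoreQueryRuntimes cached per query. A minimal usage sketch, assuming an illustrative app whose table name (StockTable) and class name are not from this source:

import org.wso2.siddhi.core.SiddhiAppRuntime;
import org.wso2.siddhi.core.SiddhiManager;
import org.wso2.siddhi.core.event.Event;

public class StoreQueryExample {
    public static void main(String[] args) {
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(
                "define table StockTable (symbol string, price float);");
        runtime.start();
        // Runs an on-demand store query; StoreQueryCreationException is thrown if the
        // query cannot be compiled against the app's tables, windows, or aggregations.
        Event[] events = runtime.query("from StockTable select symbol, price");
        System.out.println(events == null ? 0 : events.length);
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}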
Use of org.wso2.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class SiddhiAppRuntime, method addCallback().
public void addCallback(String streamId, StreamCallback streamCallback) {
    streamCallback.setStreamId(streamId);
    StreamJunction streamJunction = streamJunctionMap.get(streamId);
    if (streamJunction == null) {
        throw new DefinitionNotExistException("No stream found with name: " + streamId);
    }
    streamCallback.setStreamDefinition(streamDefinitionMap.get(streamId));
    streamCallback.setContext(siddhiAppContext);
    streamJunction.subscribe(streamCallback);
}
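Callers use addCallback to consume events emitted into a stream. A minimal sketch, assuming an illustrative passthrough app (the stream names, query, and class name are not from this source):

import org.wso2.siddhi.core.SiddhiAppRuntime;
import org.wso2.siddhi.core.SiddhiManager;
import org.wso2.siddhi.core.event.Event;
import org.wso2.siddhi.core.stream.input.InputHandler;
import org.wso2.siddhi.core.stream.output.StreamCallback;
import org.wso2.siddhi.core.util.EventPrinter;

public class StreamCallbackExample {
    public static void main(String[] args) throws InterruptedException {
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(
                "define stream StockStream (symbol string, price float); "
                        + "from StockStream select * insert into OutputStream;");
        // Subscribes to OutputStream's junction; an unknown stream id would raise
        // DefinitionNotExistException, as in the method above.
        runtime.addCallback("OutputStream", new StreamCallback() {
            @Override
            public void receive(Event[] events) {
                EventPrinter.print(events);
            }
        });
        runtime.start();
        InputHandler inputHandler = runtime.getInputHandler("StockStream");
        inputHandler.send(new Object[]{"IBM", 75.6f});
        Thread.sleep(100);
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}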
Use of org.wso2.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class SiddhiAppRuntime, method shutdown().
public synchronized void shutdown() {
    // Stop sources first so that no new events enter the Siddhi app.
    SourceHandlerManager sourceHandlerManager = siddhiAppContext.getSiddhiContext().getSourceHandlerManager();
    for (List<Source> sources : sourceMap.values()) {
        for (Source source : sources) {
            try {
                if (sourceHandlerManager != null) {
                    sourceHandlerManager.unregisterSourceHandler(
                            source.getMapper().getHandler().getElementId());
                }
                source.shutdown();
            } catch (Throwable t) {
                log.error(StringUtil.removeCRLFCharacters(ExceptionUtil.getMessageWithContext(t, siddhiAppContext))
                        + " Error in shutting down source '" + StringUtil.removeCRLFCharacters(source.getType())
                        + "' at '" + StringUtil.removeCRLFCharacters(source.getStreamDefinition().getId())
                        + "' on Siddhi App '" + siddhiAppContext.getName() + "'.", t);
            }
        }
    }
    // Shut down tables.
    for (Table table : tableMap.values()) {
        try {
            table.shutdown();
        } catch (Throwable t) {
            log.error(StringUtil.removeCRLFCharacters(ExceptionUtil.getMessageWithContext(t, siddhiAppContext))
                    + " Error in shutting down table '"
                    + StringUtil.removeCRLFCharacters(table.getTableDefinition().getId()) + "' on Siddhi App '"
                    + StringUtil.removeCRLFCharacters(siddhiAppContext.getName()) + "'.", t);
        }
    }
    // Shut down sinks.
    SinkHandlerManager sinkHandlerManager = siddhiAppContext.getSiddhiContext().getSinkHandlerManager();
    for (List<Sink> sinks : sinkMap.values()) {
        for (Sink sink : sinks) {
            try {
                if (sinkHandlerManager != null) {
                    sinkHandlerManager.unregisterSinkHandler(sink.getHandler().getElementId());
                }
                sink.shutdown();
            } catch (Throwable t) {
                log.error(StringUtil.removeCRLFCharacters(ExceptionUtil.getMessageWithContext(t, siddhiAppContext))
                        + " Error in shutting down sink '" + StringUtil.removeCRLFCharacters(sink.getType())
                        + "' at '" + StringUtil.removeCRLFCharacters(sink.getStreamDefinition().getId())
                        + "' on Siddhi App '" + StringUtil.removeCRLFCharacters(siddhiAppContext.getName())
                        + "'.", t);
            }
        }
    }
    // Unregister record-table handlers before shutting the tables down.
    for (Table table : tableMap.values()) {
        RecordTableHandlerManager recordTableHandlerManager =
                siddhiAppContext.getSiddhiContext().getRecordTableHandlerManager();
        if (recordTableHandlerManager != null) {
            String elementId = null;
            RecordTableHandler recordTableHandler = table.getHandler();
            if (recordTableHandler != null) {
                elementId = recordTableHandler.getElementId();
            }
            if (elementId != null) {
                recordTableHandlerManager.unregisterRecordTableHandler(elementId);
            }
        }
        table.shutdown();
    }
    // Stop eternally referenced holders such as schedulers and triggers.
    for (EternalReferencedHolder eternalReferencedHolder : siddhiAppContext.getEternalReferencedHolders()) {
        try {
            eternalReferencedHolder.stop();
        } catch (Throwable t) {
            log.error(StringUtil.removeCRLFCharacters(ExceptionUtil.getMessageWithContext(t, siddhiAppContext))
                    + " Error while stopping EternalReferencedHolder '"
                    + StringUtil.removeCRLFCharacters(eternalReferencedHolder.toString())
                    + "' on Siddhi App '" + StringUtil.removeCRLFCharacters(siddhiAppContext.getName())
                    + "'.", t);
        }
    }
    inputManager.disconnect();
    // Stop stream junctions and executor services on a background thread,
    // giving in-flight events a short grace period.
    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // ignored: proceed with cleanup
            }
            for (StreamJunction streamJunction : streamJunctionMap.values()) {
                streamJunction.stopProcessing();
            }
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // ignored: proceed with cleanup
            }
            siddhiAppContext.getScheduledExecutorService().shutdownNow();
            siddhiAppContext.getExecutorService().shutdownNow();
        }
    }, "Siddhi-SiddhiApp-" + siddhiAppContext.getName() + "-Shutdown-Cleaner");
    thread.start();
    if (siddhiAppRuntimeMap != null) {
        siddhiAppRuntimeMap.remove(siddhiAppContext.getName());
    }
    if (siddhiAppContext.getStatisticsManager() != null) {
        if (siddhiAppContext.isStatsEnabled()) {
            siddhiAppContext.getStatisticsManager().stopReporting();
        }
        siddhiAppContext.getStatisticsManager().cleanup();
    }
    running = false;
}
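shutdown() completes the runtime lifecycle: sources, tables, and sinks are stopped synchronously, while stream junctions and executor services are stopped by the background cleaner thread roughly two seconds later. A minimal sketch of the typical call sequence (the app string and class name are illustrative, not from this source):

import org.wso2.siddhi.core.SiddhiAppRuntime;
import org.wso2.siddhi.core.SiddhiManager;

public class ShutdownExample {
    public static void main(String[] args) {
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(
                "define stream StockStream (symbol string, price float);");
        runtime.start();
        // Synchronously stops sources, tables, and sinks; junctions and the
        // executor services are stopped asynchronously by the cleaner thread.
        runtime.shutdown();
        // Shutting down the manager also shuts down any remaining runtimes.
        siddhiManager.shutdown();
    }
}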
Use of org.wso2.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class AggregationRuntime, method compileExpression().
public CompiledCondition compileExpression(Expression expression, Within within, Expression per,
        MatchingMetaInfoHolder matchingMetaInfoHolder,
        List<VariableExpressionExecutor> variableExpressionExecutors, Map<String, Table> tableMap,
        String queryName, SiddhiAppContext siddhiAppContext) {
    Map<TimePeriod.Duration, CompiledCondition> withinTableCompiledConditions = new HashMap<>();
    CompiledCondition withinInMemoryCompileCondition;
    CompiledCondition onCompiledCondition;
    List<Attribute> additionalAttributes = new ArrayList<>();
    // Define the additional attribute list.
    additionalAttributes.add(new Attribute("_START", Attribute.Type.LONG));
    additionalAttributes.add(new Attribute("_END", Attribute.Type.LONG));
    // Get the table definition. Table definitions for all the tables used to persist aggregates are
    // similar, so it's enough to get the definition from one table.
    AbstractDefinition tableDefinition = ((Table) aggregationTables.values().toArray()[0]).getTableDefinition();
    // Alter the existing meta stream event, or create a new one if no meta stream event exists.
    // After calling this method, the original MatchingMetaInfoHolder's meta stream event is altered.
    MetaStreamEvent newMetaStreamEventWithStartEnd = createNewMetaStreamEventWithStartEnd(matchingMetaInfoHolder,
            additionalAttributes);
    MatchingMetaInfoHolder alteredMatchingMetaInfoHolder = null;
    // Alter the meta info holder to contain both the stream event and the aggregate when it's a store query.
    if (matchingMetaInfoHolder.getMetaStateEvent().getMetaStreamEvents().length == 1) {
        matchingMetaInfoHolder = alterMetaInfoHolderForStoreQuery(newMetaStreamEventWithStartEnd,
                matchingMetaInfoHolder);
        alteredMatchingMetaInfoHolder = matchingMetaInfoHolder;
    }
    // Create a new MatchingMetaInfoHolder containing newMetaStreamEventWithStartEnd and the table meta event.
    MatchingMetaInfoHolder streamTableMetaInfoHolderWithStartEnd =
            createNewStreamTableMetaInfoHolder(newMetaStreamEventWithStartEnd, tableDefinition);
    // Create the per expression executor; per must evaluate to a string duration such as "days".
    ExpressionExecutor perExpressionExecutor = ExpressionParser.parseExpression(per,
            matchingMetaInfoHolder.getMetaStateEvent(), matchingMetaInfoHolder.getCurrentState(), tableMap,
            variableExpressionExecutors, siddhiAppContext, false, 0, queryName);
    if (perExpressionExecutor.getReturnType() != Attribute.Type.STRING) {
        throw new SiddhiAppCreationException("Query " + queryName + "'s per value expected a string but found "
                + perExpressionExecutor.getReturnType(), per.getQueryContextStartIndex(),
                per.getQueryContextEndIndex());
    }
    // Create the within expression: _START <= AGG_TIMESTAMP < _END.
    Expression withinExpression;
    Expression start = Expression.variable(additionalAttributes.get(0).getName());
    Expression end = Expression.variable(additionalAttributes.get(1).getName());
    Expression compareWithStartTime = Compare.compare(start, Compare.Operator.LESS_THAN_EQUAL,
            Expression.variable("AGG_TIMESTAMP"));
    Expression compareWithEndTime = Compare.compare(Expression.variable("AGG_TIMESTAMP"),
            Compare.Operator.LESS_THAN, end);
    withinExpression = Expression.and(compareWithStartTime, compareWithEndTime);
    // Create the start and end time expression from the within clause (one or two time-range values).
    Expression startEndTimeExpression;
    if (within.getTimeRange().size() == 1) {
        startEndTimeExpression = new AttributeFunction("incrementalAggregator", "startTimeEndTime",
                within.getTimeRange().get(0));
    } else {
        // within.getTimeRange().size() == 2
        startEndTimeExpression = new AttributeFunction("incrementalAggregator", "startTimeEndTime",
                within.getTimeRange().get(0), within.getTimeRange().get(1));
    }
    ExpressionExecutor startTimeEndTimeExpressionExecutor = ExpressionParser.parseExpression(
            startEndTimeExpression, matchingMetaInfoHolder.getMetaStateEvent(),
            matchingMetaInfoHolder.getCurrentState(), tableMap, variableExpressionExecutors, siddhiAppContext,
            false, 0, queryName);
    // These compiled conditions check whether the aggregates in tables are within the given duration.
    for (Map.Entry<TimePeriod.Duration, Table> entry : aggregationTables.entrySet()) {
        CompiledCondition withinTableCompileCondition = entry.getValue().compileCondition(withinExpression,
                streamTableMetaInfoHolderWithStartEnd, siddhiAppContext, variableExpressionExecutors, tableMap,
                queryName);
        withinTableCompiledConditions.put(entry.getKey(), withinTableCompileCondition);
    }
    // Create the compiled condition for in-memory data. It checks whether the running aggregates
    // (in-memory data) are within the given duration.
    withinInMemoryCompileCondition = OperatorParser.constructOperator(new ComplexEventChunk<>(true),
            withinExpression, streamTableMetaInfoHolderWithStartEnd, siddhiAppContext,
            variableExpressionExecutors, tableMap, queryName);
    // On compiled condition. After finding all the aggregates belonging to the within duration, the final
    // on condition (given as "on stream1.name == aggregator.nickName ..." in the join query) must be
    // executed on that data. This condition serves that purpose.
    onCompiledCondition = OperatorParser.constructOperator(new ComplexEventChunk<>(true), expression,
            matchingMetaInfoHolder, siddhiAppContext, variableExpressionExecutors, tableMap, queryName);
    return new IncrementalAggregateCompileCondition(withinTableCompiledConditions,
            withinInMemoryCompileCondition, onCompiledCondition, tableMetaStreamEvent, aggregateMetaSteamEvent,
            additionalAttributes, alteredMatchingMetaInfoHolder, perExpressionExecutor,
            startTimeEndTimeExpressionExecutor);
}
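This method is reached when a query joins a stream against an incremental aggregation: the join's on condition arrives as the expression parameter, and the within and per clauses arrive as within and per. A sketch of such a query, with illustrative stream, aggregation, and attribute names:

String joinQuery = "from StockStream as S join TradeAggregation as T "
        + "on S.symbol == T.symbol "
        + "within \"2014-02-15 00:00:00 +05:30\", \"2014-03-16 00:00:00 +05:30\" "
        + "per \"days\" "
        + "select S.symbol, T.avgPrice "
        + "insert into OutputStream;";

Here the on condition becomes expression, the two within values feed the incrementalAggregator:startTimeEndTime function, and "days" is what the per expression executor above validates as a string.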
Use of org.wso2.siddhi.core.config.SiddhiAppContext in project siddhi by wso2.
The class RecreateInMemoryData, method recreateInMemoryData().
public void recreateInMemoryData() {
    if (incrementalExecutorMap.get(incrementalDurations.get(0)).getNextEmitTime() != -1) {
        // A next emit time other than -1 implies that in-memory data has already been
        // created. Hence this method does not have to be executed.
        return;
    }
    Event[] events;
    Long latestEventTimestamp = null;
    // Get all events from the table corresponding to the max duration.
    Table tableForMaxDuration = aggregationTables.get(incrementalDurations.get(incrementalDurations.size() - 1));
    StoreQuery storeQuery = StoreQuery.query()
            .from(InputStore.store(tableForMaxDuration.getTableDefinition().getId()))
            .select(Selector.selector().orderBy(Expression.variable("AGG_TIMESTAMP")));
    StoreQueryRuntime storeQueryRuntime = StoreQueryParser.parse(storeQuery, siddhiAppContext, tableMap,
            windowMap, aggregationMap);
    // Get the latest event timestamp in tableForMaxDuration.
    events = storeQueryRuntime.execute();
    if (events != null) {
        latestEventTimestamp = (Long) events[events.length - 1].getData(0);
    }
    for (int i = incrementalDurations.size() - 1; i > 0; i--) {
        TimePeriod.Duration recreateForDuration = incrementalDurations.get(i);
        IncrementalExecutor incrementalExecutor = incrementalExecutorMap.get(recreateForDuration);
        // Get the table previous to the duration for which we need to recreate (e.g. if we want to recreate
        // for minute duration, take the second table [provided that aggregation is done for seconds]).
        Table recreateFromTable = aggregationTables.get(incrementalDurations.get(i - 1));
        storeQuery = StoreQuery.query()
                .from(InputStore.store(recreateFromTable.getTableDefinition().getId()))
                .select(Selector.selector().orderBy(Expression.variable("AGG_TIMESTAMP")));
        storeQueryRuntime = StoreQueryParser.parse(storeQuery, siddhiAppContext, tableMap, windowMap,
                aggregationMap);
        events = storeQueryRuntime.execute();
        if (events != null) {
            long referenceToNextLatestEvent = (Long) events[events.length - 1].getData(0);
            String timeZoneOfNextLatestEvent = events[events.length - 1].getData(1).toString();
            if (latestEventTimestamp != null) {
                List<Event> eventsNewerThanLatestEventOfRecreateForDuration = new ArrayList<>();
                for (Event event : events) {
                    // For each event read from recreateFromTable, compute the time bucket it would
                    // fall into at recreateForDuration. This identifies the events which must be
                    // processed in-memory for recreateForDuration.
                    long timeBucketForNextDuration = IncrementalTimeConverterUtil.getStartTimeOfAggregates(
                            (Long) event.getData(0), recreateForDuration, event.getData(1).toString());
                    if (timeBucketForNextDuration > latestEventTimestamp) {
                        eventsNewerThanLatestEventOfRecreateForDuration.add(event);
                    }
                }
                events = eventsNewerThanLatestEventOfRecreateForDuration.toArray(
                        new Event[eventsNewerThanLatestEventOfRecreateForDuration.size()]);
            }
            latestEventTimestamp = referenceToNextLatestEvent;
            ComplexEventChunk<StreamEvent> complexEventChunk = new ComplexEventChunk<>(false);
            for (Event event : events) {
                StreamEvent streamEvent = streamEventPool.borrowEvent();
                streamEvent.setOutputData(event.getData());
                complexEventChunk.add(streamEvent);
            }
            incrementalExecutor.execute(complexEventChunk);
            if (i == 1) {
                TimePeriod.Duration rootDuration = incrementalDurations.get(0);
                IncrementalExecutor rootIncrementalExecutor = incrementalExecutorMap.get(rootDuration);
                long emitTimeOfLatestEventInTable = IncrementalTimeConverterUtil.getNextEmitTime(
                        latestEventTimestamp, rootDuration, timeZoneOfNextLatestEvent);
                rootIncrementalExecutor.setValuesForInMemoryRecreateFromTable(true, emitTimeOfLatestEventInTable);
            }
        }
    }
}
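The filtering step above hinges on IncrementalTimeConverterUtil.getStartTimeOfAggregates: an event read from the finer-grained table is replayed in memory only when its bucket start in the coarser duration is newer than the newest aggregate already persisted for that duration. A fragment sketching that check; the timestamp, duration, and time-zone values are illustrative, and only the getStartTimeOfAggregates call is taken from the method above:

// Illustrative values, not from this source.
long eventTimestamp = 1518688800000L;        // AGG_TIMESTAMP from the finer-grained table
String timeZone = "+05:30";                  // time zone stored with the aggregate row
long latestMinuteAggregate = 1518688740000L; // newest AGG_TIMESTAMP in the minute table

long bucketStart = IncrementalTimeConverterUtil.getStartTimeOfAggregates(
        eventTimestamp, TimePeriod.Duration.MINUTES, timeZone);
boolean replayInMemory = bucketStart > latestMinuteAggregate;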