Use of io.siddhi.query.api.execution.query.OnDemandQuery in project siddhi by wso2.
The class AbstractQueryableRecordTable, method initCache:
@Override
public void initCache(TableDefinition tableDefinition, SiddhiAppContext siddhiAppContext,
                      StreamEventCloner storeEventCloner, ConfigReader configReader) {
    String[] annotationNames = {ANNOTATION_STORE, ANNOTATION_CACHE};
    Annotation cacheTableAnnotation = getAnnotation(annotationNames, tableDefinition.getAnnotations());
    if (cacheTableAnnotation != null) {
        cacheEnabled = true;
        maxCacheSize = Integer.parseInt(cacheTableAnnotation.getElement(CACHE_TABLE_SIZE));
        // Clone the table definition (minus the @Store annotation) for the in-memory cache table
        TableDefinition cacheTableDefinition = TableDefinition.id(tableDefinition.getId());
        for (Attribute attribute : tableDefinition.getAttributeList()) {
            cacheTableDefinition.attribute(attribute.getName(), attribute.getType());
        }
        for (Annotation annotation : tableDefinition.getAnnotations()) {
            if (!annotation.getName().equalsIgnoreCase("Store")) {
                cacheTableDefinition.annotation(annotation);
            }
        }
        // Select the cache eviction policy; FIFO is the default
        String cachePolicy = cacheTableAnnotation.getElement(ANNOTATION_CACHE_POLICY);
        if (cachePolicy == null || cachePolicy.equalsIgnoreCase("FIFO")) {
            cachePolicy = "FIFO";
            cacheTable = new CacheTableFIFO();
        } else if (cachePolicy.equalsIgnoreCase("LRU")) {
            cacheTable = new CacheTableLRU();
        } else if (cachePolicy.equalsIgnoreCase("LFU")) {
            cacheTable = new CacheTableLFU();
        } else {
            throw new SiddhiAppCreationException(siddhiAppContext.getName()
                    + " : Cache policy can only be one of FIFO, LRU, and LFU but given as " + cachePolicy);
        }
        // Check whether cache expiry is enabled and initialize the relevant parameters
        if (cacheTableAnnotation.getElement(ANNOTATION_CACHE_RETENTION_PERIOD) != null) {
            cacheExpiryEnabled = true;
            retentionPeriod = Expression.Time.timeToLong(
                    cacheTableAnnotation.getElement(ANNOTATION_CACHE_RETENTION_PERIOD));
            if (cacheTableAnnotation.getElement(ANNOTATION_CACHE_PURGE_INTERVAL) == null) {
                purgeInterval = retentionPeriod;
            } else {
                purgeInterval = Expression.Time.timeToLong(
                        cacheTableAnnotation.getElement(ANNOTATION_CACHE_PURGE_INTERVAL));
            }
            storeSizeCheckInterval = purgeInterval * 5;
        } else {
            storeSizeCheckInterval = 10000;
        }
        ((CacheTable) cacheTable).initCacheTable(cacheTableDefinition, configReader, siddhiAppContext,
                recordTableHandler, cacheExpiryEnabled, maxCacheSize, cachePolicy);
        // Create the objects needed to load the cache from the backing store
        SiddhiQueryContext siddhiQueryContext = new SiddhiQueryContext(siddhiAppContext,
                CACHE_QUERY_NAME + tableDefinition.getId());
        MatchingMetaInfoHolder matchingMetaInfoHolder =
                generateMatchingMetaInfoHolderForCacheTable(tableDefinition);
        OnDemandQuery onDemandQuery = OnDemandQuery.query()
                .from(InputStore.store(tableDefinition.getId()))
                .select(Selector.selector().limit(Expression.value(maxCacheSize + 1)));
        List<VariableExpressionExecutor> variableExpressionExecutors = new ArrayList<>();
        compiledConditionForCaching = compileCondition(Expression.value(true), matchingMetaInfoHolder,
                variableExpressionExecutors, tableMap, siddhiQueryContext);
        List<Attribute> expectedOutputAttributes = buildExpectedOutputAttributes(onDemandQuery, tableMap,
                SiddhiConstants.UNKNOWN_STATE, matchingMetaInfoHolder, siddhiQueryContext);
        compiledSelectionForCaching = compileSelection(onDemandQuery.getSelector(), expectedOutputAttributes,
                matchingMetaInfoHolder, variableExpressionExecutors, tableMap, siddhiQueryContext);
        outputAttributesForCaching = expectedOutputAttributes.toArray(new Attribute[0]);
        QueryParserHelper.reduceMetaComplexEvent(matchingMetaInfoHolder.getMetaStateEvent());
        QueryParserHelper.updateVariablePosition(matchingMetaInfoHolder.getMetaStateEvent(),
                variableExpressionExecutors);
        compiledSelectionForSelectAll = generateCSForSelectAll();
    }
}
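For context, a minimal sketch of a SiddhiQL table definition that would activate this path. The nested @Cache annotation form and its element keys (size, cache.policy, retention.period, purge.interval) are assumed from the constants referenced above; the store type and all values are hypothetical:

@Store(type = 'rdbms', jdbc.url = '...',
       @Cache(size = '100', cache.policy = 'LRU',
              retention.period = '5 min', purge.interval = '1 min'))
define table StockTable (symbol string, price float, volume long);

Note that the loading query above selects maxCacheSize + 1 rows: fetching one row more than the cache can hold lets the table detect whether the backing store exceeds the cache capacity. When retention.period is present, expiry is enabled, and purge.interval defaults to the retention period if omitted.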
Use of io.siddhi.query.api.execution.query.OnDemandQuery in project siddhi by wso2.
The class IncrementalDataPurger, method dataToDelete:
Event[] dataToDelete(long purgingTime, Table table) {
    // Build a FIND on-demand query for the records that have passed the purging time
    OnDemandQuery onDemandQuery = getOnDemandQuery(table, purgingTime, 0);
    onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
    OnDemandQueryRuntime onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null,
            siddhiQueryContext.getSiddhiAppContext(), tableMap, windowMap, aggregationMap);
    return onDemandQueryRuntime.execute();
}
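The private helper getOnDemandQuery is not shown here. As a rough sketch of what such a FIND query could look like when built with the fluent query API, assuming the purge condition filters on an AGG_TIMESTAMP column (the table and attribute names are hypothetical):

OnDemandQuery purgeLookup = OnDemandQuery.query()
        .from(InputStore.store("TradeAggregation_SECONDS")
                .on(Expression.compare(
                        Expression.variable("AGG_TIMESTAMP"),
                        Compare.Operator.LESS_THAN,
                        Expression.value(purgingTime))))
        .select(Selector.selector()
                .select(Expression.variable("AGG_TIMESTAMP")));
purgeLookup.setType(OnDemandQuery.OnDemandQueryType.FIND);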
Use of io.siddhi.query.api.execution.query.OnDemandQuery in project siddhi by wso2.
The class IncrementalExecutorsInitialiser, method initialiseExecutors:
public synchronized void initialiseExecutors() {
    if (this.isInitialised || isReadOnly) {
        // Only cleared when executors change from the reading to the processing state in a one-node deployment
        return;
    }
    Event[] events;
    Long lastData = null;
    // Get max(AGG_TIMESTAMP) from the table corresponding to the max duration
    Table tableForMaxDuration = aggregationTables.get(
            incrementalDurations.get(incrementalDurations.size() - 1));
    OnDemandQuery onDemandQuery = getOnDemandQuery(tableForMaxDuration, true, endOFLatestEventTimestamp);
    onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
    OnDemandQueryRuntime onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null,
            siddhiAppContext, tableMap, windowMap, aggregationMap);
    // Get the latest event timestamp in tableForMaxDuration and the end time of that aggregation record
    events = onDemandQueryRuntime.execute();
    if (events != null) {
        lastData = (Long) events[events.length - 1].getData(0);
        endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(lastData,
                incrementalDurations.get(incrementalDurations.size() - 1), timeZone);
    }
    if (isPersistedAggregation) {
        for (int i = incrementalDurations.size() - 1; i > 0; i--) {
            if (lastData != null && !IncrementalTimeConverterUtil.isAggregationDataComplete(lastData,
                    incrementalDurations.get(i), timeZone)) {
                recreateState(lastData, incrementalDurations.get(i),
                        aggregationTables.get(incrementalDurations.get(i - 1)), i == 1);
            } else if (lastData == null) {
                recreateState(null, incrementalDurations.get(i),
                        aggregationTables.get(incrementalDurations.get(i - 1)), i == 1);
            }
            if (i > 1) {
                onDemandQuery = getOnDemandQuery(aggregationTables.get(incrementalDurations.get(i - 1)),
                        true, endOFLatestEventTimestamp);
                onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
                onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext,
                        tableMap, windowMap, aggregationMap);
                events = onDemandQueryRuntime.execute();
                if (events != null) {
                    lastData = (Long) events[events.length - 1].getData(0);
                } else {
                    lastData = null;
                }
            }
        }
    } else {
        for (int i = incrementalDurations.size() - 1; i > 0; i--) {
            TimePeriod.Duration recreateForDuration = incrementalDurations.get(i);
            Executor incrementalExecutor = incrementalExecutorMap.get(recreateForDuration);
            // Get the table previous to the duration for which we need to recreate (e.g. to recreate
            // for the minute duration, take the seconds table, provided that aggregation is done for
            // seconds). This lookup is filtered by endOFLatestEventTimestamp.
            Table recreateFromTable = aggregationTables.get(incrementalDurations.get(i - 1));
            onDemandQuery = getOnDemandQuery(recreateFromTable, false, endOFLatestEventTimestamp);
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
            onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext,
                    tableMap, windowMap, aggregationMap);
            events = onDemandQueryRuntime.execute();
            if (events != null) {
                long referenceToNextLatestEvent = (Long) events[events.length - 1].getData(0);
                endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(
                        referenceToNextLatestEvent, incrementalDurations.get(i - 1), timeZone);
                // Replay the fetched records through the executor as stream events
                ComplexEventChunk<StreamEvent> complexEventChunk = new ComplexEventChunk<>();
                for (Event event : events) {
                    StreamEvent streamEvent = streamEventFactory.newInstance();
                    streamEvent.setOutputData(event.getData());
                    complexEventChunk.add(streamEvent);
                }
                incrementalExecutor.execute(complexEventChunk);
                if (i == 1) {
                    // Align the root executor's emit time with the latest record in the root table
                    TimePeriod.Duration rootDuration = incrementalDurations.get(0);
                    Executor rootIncrementalExecutor = incrementalExecutorMap.get(rootDuration);
                    long emitTimeOfLatestEventInTable = IncrementalTimeConverterUtil.getNextEmitTime(
                            referenceToNextLatestEvent, rootDuration, timeZone);
                    rootIncrementalExecutor.setEmitTime(emitTimeOfLatestEventInTable);
                }
            }
        }
    }
    this.isInitialised = true;
}
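The replay bookkeeping above relies on IncrementalTimeConverterUtil.getNextEmitTime to map the timestamp of the last persisted record to the start of the following bucket, which becomes the lower bound of the window that must be re-processed. A small illustration with hypothetical values, assuming epoch-millisecond timestamps and round-up-to-the-next-boundary semantics:

// Hypothetical: 2020-01-01T00:00:30Z sits inside the 00:00 minute bucket,
// so the next emit time for MINUTES should be 2020-01-01T00:01:00Z.
long lastData = 1577836830000L;
long nextEmit = IncrementalTimeConverterUtil.getNextEmitTime(
        lastData, TimePeriod.Duration.MINUTES, "+00:00"); // timeZone format assumed
// Expected: nextEmit == 1577836860000L; records at or after this boundary
// were never rolled up into the MINUTES table and must be replayed.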
Use of io.siddhi.query.api.execution.query.OnDemandQuery in project siddhi by wso2.
The class SiddhiQLBaseVisitorImpl, method visitStore_query:
@Override
public Object visitStore_query(SiddhiQLParser.Store_queryContext ctx) {
    OutputStream outputStream;
    OnDemandQuery onDemandQuery = OnDemandQuery.query();
    if (ctx.FROM() != null) {
        // FIND: from <store> [on <condition>] [select ...]
        onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
        onDemandQuery.from((InputStore) visit(ctx.store_input()));
        if (ctx.query_section() != null) {
            onDemandQuery = onDemandQuery.select((Selector) visit(ctx.query_section()));
        }
    } else if (ctx.query_section() != null) {
        onDemandQuery.select((Selector) visit(ctx.query_section()));
        if (ctx.UPDATE() != null && ctx.OR() != null) {
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.UPDATE_OR_INSERT);
            Source source = (Source) visit(ctx.target());
            if (source.isInnerStream || source.isFaultStream) {
                throw newSiddhiParserException(ctx, "UPDATE OR INSERT INTO can only be used with tables!");
            }
            if (ctx.set_clause() != null) {
                outputStream = new UpdateOrInsertStream(source.streamId,
                        (UpdateSet) visit(ctx.set_clause()), (Expression) visit(ctx.expression()));
                populateQueryContext(outputStream, ctx);
            } else {
                outputStream = new UpdateOrInsertStream(source.streamId,
                        (Expression) visit(ctx.expression()));
                populateQueryContext(outputStream, ctx);
            }
            onDemandQuery.outStream(outputStream);
        } else if (ctx.INSERT() != null) {
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.INSERT);
            Source source = (Source) visit(ctx.target());
            if (source.isInnerStream || source.isFaultStream) {
                throw newSiddhiParserException(ctx, "INSERT can only be used with tables!");
            }
            outputStream = new InsertIntoStream(source.streamId);
            populateQueryContext(outputStream, ctx);
            onDemandQuery.outStream(outputStream);
        } else if (ctx.store_query_output() != null) {
            outputStream = (OutputStream) visit(ctx.store_query_output());
            if (outputStream instanceof DeleteStream) {
                onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.DELETE);
            } else if (outputStream instanceof UpdateStream) {
                onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.UPDATE);
            }
            onDemandQuery.outStream(outputStream);
        }
    } else if (ctx.store_query_output() != null) {
        outputStream = (OutputStream) visit(ctx.store_query_output());
        if (outputStream instanceof DeleteStream) {
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.DELETE);
        } else if (outputStream instanceof UpdateStream) {
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.UPDATE);
        }
        onDemandQuery.outStream(outputStream);
    }
    populateQueryContext(onDemandQuery, ctx);
    return onDemandQuery;
}
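Each branch above maps to one surface form of a SiddhiQL on-demand (store) query. Illustrative examples, hedged as sketches: the table, attributes, and values are hypothetical, and the SiddhiQL grammar remains the authoritative reference for the exact forms.

-- FIND
from StockTable on price > 40.0 select symbol, price;

-- INSERT
select 'IBM' as symbol, 80.0f as price insert into StockTable;

-- UPDATE_OR_INSERT
select 75.0f as price
update or insert into StockTable
set StockTable.price = price
on StockTable.symbol == 'IBM';

-- UPDATE
update StockTable
set StockTable.price = 70.0f
on StockTable.symbol == 'IBM';

-- DELETE
delete StockTable on StockTable.symbol == 'IBM';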
Use of io.siddhi.query.api.execution.query.OnDemandQuery in project siddhi by wso2.
The class SiddhiCompiler, method parseOnDemandQuery:
public static OnDemandQuery parseOnDemandQuery(String onDemandQueryString) throws SiddhiParserException {
    CharStream input = CharStreams.fromString(onDemandQueryString);
    SiddhiQLLexer lexer = new SiddhiQLLexer(input);
    lexer.removeErrorListeners();
    lexer.addErrorListener(SiddhiErrorListener.INSTANCE);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    SiddhiQLParser parser = new SiddhiQLParser(tokens);
    parser.removeErrorListeners();
    parser.addErrorListener(SiddhiErrorListener.INSTANCE);
    ParseTree tree = parser.store_query_final();
    SiddhiQLVisitor eval = new SiddhiQLBaseVisitorImpl();
    return (OnDemandQuery) eval.visit(tree);
}
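A short usage sketch of the method above; the query string, table name, and attributes are hypothetical:

OnDemandQuery query = SiddhiCompiler.parseOnDemandQuery(
        "from StockTable on price > 40.0 select symbol, price");
// Because the string starts with FROM, visitStore_query (shown earlier)
// has already marked the parsed query as a FIND.
assert query.getType() == OnDemandQuery.OnDemandQueryType.FIND;

At the application level, such a string is usually executed through SiddhiAppRuntime's query(...) method, which performs this parse step internally and returns the matching events.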