Use of io.siddhi.core.query.OnDemandQueryRuntime in project siddhi by wso2.
The class IncrementalExecutorsInitialiser, method recreateState.
private void recreateState(Long lastData, TimePeriod.Duration recreateForDuration, Table recreateFromTable,
                           boolean isBeforeRoot) {
    Executor incrementalExecutor = incrementalExecutorMap.get(recreateForDuration);
    if (lastData != null) {
        endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(lastData, recreateForDuration,
                timeZone);
    }
    OnDemandQuery onDemandQuery = getOnDemandQuery(recreateFromTable, false, endOFLatestEventTimestamp);
    onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
    OnDemandQueryRuntime onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext,
            tableMap, windowMap, aggregationMap);
    Event[] events = onDemandQueryRuntime.execute();
    if (events != null) {
        long referenceToNextLatestEvent = (Long) events[events.length - 1].getData(0);
        ComplexEventChunk<StreamEvent> complexEventChunk = new ComplexEventChunk<>();
        for (Event event : events) {
            StreamEvent streamEvent = streamEventFactory.newInstance();
            streamEvent.setOutputData(event.getData());
            complexEventChunk.add(streamEvent);
        }
        incrementalExecutor.execute(complexEventChunk);
        if (isBeforeRoot) {
            TimePeriod.Duration rootDuration = incrementalDurations.get(0);
            Executor rootIncrementalExecutor = incrementalExecutorMap.get(rootDuration);
            long emitTimeOfLatestEventInTable = IncrementalTimeConverterUtil.getNextEmitTime(
                    referenceToNextLatestEvent, rootDuration, timeZone);
            rootIncrementalExecutor.setEmitTime(emitTimeOfLatestEventInTable);
        }
    }
}
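The snippet above relies on fields wired up elsewhere in IncrementalExecutorsInitialiser (siddhiAppContext, tableMap, streamEventFactory, and so on). For experimenting with on-demand FIND queries outside these internals, the public SiddhiAppRuntime API goes through the same parse-and-execute path; the sketch below is a minimal, self-contained illustration in which the stream, table, and attribute names are assumptions, not code from this class.

import io.siddhi.core.SiddhiAppRuntime;
import io.siddhi.core.SiddhiManager;
import io.siddhi.core.event.Event;

public class OnDemandFindExample {
    public static void main(String[] args) throws InterruptedException {
        // Hypothetical app: a stream that feeds a table; all names are assumptions for illustration.
        String app = "define stream StockStream (symbol string, price double, volume long); " +
                "define table StockTable (symbol string, price double, volume long); " +
                "from StockStream insert into StockTable;";
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(app);
        runtime.start();
        runtime.getInputHandler("StockStream").send(new Object[]{"WSO2", 55.6, 100L});
        // SiddhiAppRuntime.query(...) parses the on-demand query and executes the resulting
        // OnDemandQueryRuntime internally, returning the matched events (possibly null when empty).
        Event[] events = runtime.query("from StockTable select symbol, price, volume");
        if (events != null) {
            for (Event event : events) {
                System.out.println(event);
            }
        }
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}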
Use of io.siddhi.core.query.OnDemandQueryRuntime in project siddhi by wso2.
The class IncrementalDataPurger, method isSafeToPurgeTheDuration.
private Map<String, Boolean> isSafeToPurgeTheDuration(long purgeTime, Table parentTable, Table currentTable,
                                                      TimePeriod.Duration duration, String timeZone) {
    Event[] dataToDelete;
    Event[] dataInParentTable = null;
    Map<String, Boolean> purgingCheckState = new HashMap<>();
    try {
        dataToDelete = dataToDelete(purgeTime, currentTable);
        if (dataToDelete != null && dataToDelete.length != 0) {
            Map<String, Long> purgingValidationTimeDurations = getPurgingValidationTimeDurations(duration,
                    (Long) dataToDelete[0].getData()[0], timeZone);
            OnDemandQuery onDemandQuery = getOnDemandQuery(parentTable,
                    purgingValidationTimeDurations.get(AGGREGATION_START_TIME),
                    purgingValidationTimeDurations.get(AGGREGATION_NEXT_EMIT_TIME));
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
            OnDemandQueryRuntime onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null,
                    siddhiQueryContext.getSiddhiAppContext(), tableMap, windowMap, aggregationMap);
            dataInParentTable = onDemandQueryRuntime.execute();
        }
        purgingCheckState.put(IS_DATA_AVAILABLE_TO_PURGE, dataToDelete != null && dataToDelete.length > 0);
        purgingCheckState.put(IS_PARENT_TABLE_HAS_AGGREGATED_DATA,
                dataInParentTable != null && dataInParentTable.length > 0);
    } catch (Exception e) {
        if (e.getMessage().contains("deadlocked")) {
            errorMessage = "Deadlock observed while checking whether the data is safe to purge from aggregation "
                    + "tables for the aggregation " + aggregationDefinition.getId()
                    + ". If this occurred in an Active Active deployment, this error can be ignored if other node "
                    + "doesn't have this error";
        } else {
            errorMessage = "Error occurred while checking whether the data is safe to purge from aggregation "
                    + "tables for the aggregation " + aggregationDefinition.getId();
        }
        LOG.error(errorMessage, e);
        purgingCheckState.put(IS_DATA_AVAILABLE_TO_PURGE, false);
        purgingCheckState.put(IS_PARENT_TABLE_HAS_AGGREGATED_DATA, false);
        errorMessage = "Error occurred while checking whether the data is safe to purge from aggregation tables"
                + " for the aggregation " + aggregationDefinition.getId();
        purgingHalted = true;
    }
    return purgingCheckState;
}
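getOnDemandQuery(...) is a private helper of IncrementalDataPurger that is not shown here; judging by how it is called, it builds a FIND query on the parent table bounded by the aggregation start time and the next emit time. The sketch below shows one way such a time-bounded OnDemandQuery could be assembled with the Siddhi query API; the AGG_TIMESTAMP attribute matches the aggregation-table schema, but the method name, parameters, and select list are illustrative assumptions, not the helper's actual implementation.

import io.siddhi.query.api.execution.query.OnDemandQuery;
import io.siddhi.query.api.execution.query.input.store.InputStore;
import io.siddhi.query.api.execution.query.selection.Selector;
import io.siddhi.query.api.expression.Expression;
import io.siddhi.query.api.expression.condition.Compare;

public class PurgeValidationQuerySketch {

    // Builds a FIND query roughly equivalent to:
    //   from <parentTable> on AGG_TIMESTAMP >= startTime and AGG_TIMESTAMP < endTime
    //   select AGG_TIMESTAMP
    public static OnDemandQuery buildValidationQuery(String parentTableName, long startTime, long endTime) {
        OnDemandQuery onDemandQuery = OnDemandQuery.query()
                .from(InputStore.store(parentTableName)
                        .on(Expression.and(
                                Expression.compare(Expression.variable("AGG_TIMESTAMP"),
                                        Compare.Operator.GREATER_THAN_EQUAL, Expression.value(startTime)),
                                Expression.compare(Expression.variable("AGG_TIMESTAMP"),
                                        Compare.Operator.LESS_THAN, Expression.value(endTime)))))
                .select(Selector.selector().select(Expression.variable("AGG_TIMESTAMP")));
        onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
        return onDemandQuery;
    }
}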
Use of io.siddhi.core.query.OnDemandQueryRuntime in project siddhi by siddhi-io.
The class IncrementalExecutorsInitialiser, method recreateState.
private void recreateState(Long lastData, TimePeriod.Duration recreateForDuration, Table recreateFromTable,
                           boolean isBeforeRoot) {
    if (lastData != null) {
        endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(lastData, recreateForDuration,
                timeZone);
    }
    OnDemandQuery onDemandQuery = getOnDemandQuery(recreateFromTable, false, endOFLatestEventTimestamp);
    onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
    OnDemandQueryRuntime onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext,
            tableMap, windowMap, aggregationMap);
    Event[] events = onDemandQueryRuntime.execute();
    if (events != null) {
        long referenceToNextLatestEvent = (Long) events[events.length - 1].getData(0);
        if (!isStatePresentForAggregationDuration(recreateForDuration)) {
            ComplexEventChunk<StreamEvent> complexEventChunk = new ComplexEventChunk<>();
            for (Event event : events) {
                StreamEvent streamEvent = streamEventFactory.newInstance();
                streamEvent.setOutputData(event.getData());
                complexEventChunk.add(streamEvent);
            }
            Executor incrementalExecutor = incrementalExecutorMap.get(recreateForDuration);
            incrementalExecutor.execute(complexEventChunk);
        }
        if (isBeforeRoot) {
            TimePeriod.Duration rootDuration = incrementalDurations.get(0);
            Executor rootIncrementalExecutor = incrementalExecutorMap.get(rootDuration);
            long emitTimeOfLatestEventInTable = IncrementalTimeConverterUtil.getNextEmitTime(
                    referenceToNextLatestEvent, rootDuration, timeZone);
            rootIncrementalExecutor.setEmitTime(emitTimeOfLatestEventInTable);
        }
    }
}
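Recreating executor state matters because it is what lets on-demand queries over the aggregation return complete results after a restart. As a rough usage illustration (not part of IncrementalExecutorsInitialiser), the sketch below defines a hypothetical TradeAggregation and reads it back with a within/per on-demand query; SiddhiAppRuntime.query(...) parses the string into an OnDemandQueryRuntime and executes it. All stream, aggregation, and attribute names are assumptions.

import io.siddhi.core.SiddhiAppRuntime;
import io.siddhi.core.SiddhiManager;
import io.siddhi.core.event.Event;

public class AggregationQueryExample {
    public static void main(String[] args) throws InterruptedException {
        // Hypothetical app with an incremental aggregation from seconds to years.
        String app = "define stream TradeStream (symbol string, price double, volume long); " +
                "define aggregation TradeAggregation " +
                "from TradeStream " +
                "select symbol, avg(price) as avgPrice, sum(volume) as totalVolume " +
                "group by symbol " +
                "aggregate every sec ... year;";
        SiddhiManager siddhiManager = new SiddhiManager();
        SiddhiAppRuntime runtime = siddhiManager.createSiddhiAppRuntime(app);
        runtime.start();
        runtime.getInputHandler("TradeStream").send(new Object[]{"WSO2", 70.0, 100L});
        Thread.sleep(1000);
        // On-demand query over the aggregation, bounded by a within clause and a per granularity.
        Event[] events = runtime.query(
                "from TradeAggregation " +
                "within '2020-01-01 00:00:00 +00:00', '2030-01-01 00:00:00 +00:00' " +
                "per 'seconds' select symbol, avgPrice, totalVolume");
        if (events != null) {
            for (Event event : events) {
                System.out.println(event);
            }
        }
        runtime.shutdown();
        siddhiManager.shutdown();
    }
}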
Use of io.siddhi.core.query.OnDemandQueryRuntime in project siddhi by siddhi-io.
The class IncrementalExecutorsInitialiser, method initialiseExecutors.
public synchronized void initialiseExecutors() {
    if (this.isInitialised || isReadOnly) {
        // Only cleared when executors change from reading to processing state in one node deployment
        return;
    }
    Event[] events;
    Long lastData = null;
    // Get max(AGG_TIMESTAMP) from table corresponding to max duration
    Table tableForMaxDuration = aggregationTables.get(incrementalDurations.get(incrementalDurations.size() - 1));
    OnDemandQuery onDemandQuery = getOnDemandQuery(tableForMaxDuration, true, endOFLatestEventTimestamp);
    onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
    OnDemandQueryRuntime onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext,
            tableMap, windowMap, aggregationMap);
    // Get latest event timestamp in tableForMaxDuration and get the end time of the aggregation record
    events = onDemandQueryRuntime.execute();
    if (events != null) {
        lastData = (Long) events[events.length - 1].getData(0);
        endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(lastData,
                incrementalDurations.get(incrementalDurations.size() - 1), timeZone);
    }
    if (isPersistedAggregation) {
        for (int i = incrementalDurations.size() - 1; i > 0; i--) {
            if (lastData != null && !IncrementalTimeConverterUtil.isAggregationDataComplete(lastData,
                    incrementalDurations.get(i), timeZone)) {
                recreateState(lastData, incrementalDurations.get(i),
                        aggregationTables.get(incrementalDurations.get(i - 1)), i == 1);
            } else if (lastData == null) {
                recreateState(null, incrementalDurations.get(i),
                        aggregationTables.get(incrementalDurations.get(i - 1)), i == 1);
            }
            if (i > 1) {
                onDemandQuery = getOnDemandQuery(aggregationTables.get(incrementalDurations.get(i - 1)), true,
                        endOFLatestEventTimestamp);
                onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
                onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext, tableMap,
                        windowMap, aggregationMap);
                events = onDemandQueryRuntime.execute();
                if (events != null) {
                    lastData = (Long) events[events.length - 1].getData(0);
                } else {
                    lastData = null;
                }
            }
        }
    } else {
        for (int i = incrementalDurations.size() - 1; i > 0; i--) {
            // Get the table previous to the duration for which we need to recreate (e.g. if we want to recreate
            // for minute duration, take the second table [provided that aggregation is done for seconds])
            // This lookup is filtered by endOFLatestEventTimestamp
            Table recreateFromTable = aggregationTables.get(incrementalDurations.get(i - 1));
            onDemandQuery = getOnDemandQuery(recreateFromTable, false, endOFLatestEventTimestamp);
            onDemandQuery.setType(OnDemandQuery.OnDemandQueryType.FIND);
            onDemandQueryRuntime = OnDemandQueryParser.parse(onDemandQuery, null, siddhiAppContext, tableMap,
                    windowMap, aggregationMap);
            events = onDemandQueryRuntime.execute();
            if (events != null) {
                long referenceToNextLatestEvent = (Long) events[events.length - 1].getData(0);
                endOFLatestEventTimestamp = IncrementalTimeConverterUtil.getNextEmitTime(referenceToNextLatestEvent,
                        incrementalDurations.get(i - 1), timeZone);
                TimePeriod.Duration recreateForDuration = incrementalDurations.get(i);
                if (!isStatePresentForAggregationDuration(recreateForDuration)) {
                    ComplexEventChunk<StreamEvent> complexEventChunk = new ComplexEventChunk<>();
                    for (Event event : events) {
                        StreamEvent streamEvent = streamEventFactory.newInstance();
                        streamEvent.setOutputData(event.getData());
                        complexEventChunk.add(streamEvent);
                    }
                    Executor incrementalExecutor = incrementalExecutorMap.get(recreateForDuration);
                    incrementalExecutor.execute(complexEventChunk);
                }
                if (i == 1) {
                    TimePeriod.Duration rootDuration = incrementalDurations.get(0);
                    Executor rootIncrementalExecutor = incrementalExecutorMap.get(rootDuration);
                    long emitTimeOfLatestEventInTable = IncrementalTimeConverterUtil.getNextEmitTime(
                            referenceToNextLatestEvent, rootDuration, timeZone);
                    rootIncrementalExecutor.setEmitTime(emitTimeOfLatestEventInTable);
                }
            }
        }
    }
    this.isInitialised = true;
}
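The two loops above walk the duration list from the largest granularity down to the one just above the root: each in-memory executor is rebuilt from the table of the next-finer duration, and when i == 1 the root executor only has its emit time set. The small sketch below prints that recreation order for an assumed "sec ... year" duration list; it illustrates the loop's ordering and is not code from the class.

import io.siddhi.query.api.aggregation.TimePeriod;

import java.util.Arrays;
import java.util.List;

public class RecreationOrderSketch {
    public static void main(String[] args) {
        // Assumed duration list for an aggregation defined "aggregate every sec ... year".
        List<TimePeriod.Duration> incrementalDurations = Arrays.asList(
                TimePeriod.Duration.SECONDS, TimePeriod.Duration.MINUTES, TimePeriod.Duration.HOURS,
                TimePeriod.Duration.DAYS, TimePeriod.Duration.MONTHS, TimePeriod.Duration.YEARS);
        // Mirrors the loop in initialiseExecutors: each executor is rebuilt from the table of the
        // next-finer duration; the root (SECONDS) executor only gets its emit time set at i == 1.
        for (int i = incrementalDurations.size() - 1; i > 0; i--) {
            System.out.println("recreate " + incrementalDurations.get(i)
                    + " executor from the " + incrementalDurations.get(i - 1) + " table"
                    + (i == 1 ? " (and set the root executor's emit time)" : ""));
        }
    }
}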