Use of org.apache.nifi.components.state.StateMap in project nifi by apache.
The class MockStateManager, method assertStateNotEquals.
/**
* Ensures that the state is not equal to the given values
*
* @param stateValues the unexpected values
* @param scope the scope to compare the stateValues against
*/
public void assertStateNotEquals(final Map<String, String> stateValues, final Scope scope) {
    final StateMap stateMap = retrieveState(scope);
    // assertNotEquals compares map contents; the original assertNotSame compared only
    // references and passed trivially, because toMap() returns a fresh map instance
    Assert.assertNotEquals(stateValues, stateMap.toMap());
}
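In a test, this assertion is usually reached through the nifi-mock TestRunner, whose getStateManager() returns a MockStateManager. A minimal sketch, assuming a hypothetical processor class and state key:

// Sketch only: SomeStatefulProcessor and the "last.processed.id" key are placeholders.
final TestRunner runner = TestRunners.newTestRunner(SomeStatefulProcessor.class);
runner.run();

final Map<String, String> staleValues = new HashMap<>();
staleValues.put("last.processed.id", "0");

// Fails the test if the processor's LOCAL state still equals the stale values
runner.getStateManager().assertStateNotEquals(staleValues, Scope.LOCAL);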
Use of org.apache.nifi.components.state.StateMap in project nifi by apache.
The class UpdateAttribute, method onScheduled.
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    criteriaCache.set(CriteriaSerDe.deserialize(context.getAnnotationData()));
    propertyValues.clear();

    if (stateful) {
        StateManager stateManager = context.getStateManager();
        StateMap state = stateManager.getState(Scope.LOCAL);

        // Start from the current state so existing values are preserved
        HashMap<String, String> tempMap = new HashMap<>(state.toMap());
        String initValue = context.getProperty(STATEFUL_VARIABLES_INIT_VALUE).getValue();

        // Initialize the stateful default actions
        for (PropertyDescriptor entry : context.getProperties().keySet()) {
            if (entry.isDynamic() && !tempMap.containsKey(entry.getName())) {
                tempMap.put(entry.getName(), initValue);
            }
        }

        // Initialize the stateful actions if the criteria exist
        final Criteria criteria = criteriaCache.get();
        if (criteria != null) {
            for (Rule rule : criteria.getRules()) {
                for (Action action : rule.getActions()) {
                    if (!tempMap.containsKey(action.getAttribute())) {
                        tempMap.put(action.getAttribute(), initValue);
                    }
                }
            }
        }

        stateManager.setState(tempMap, Scope.LOCAL);
    }

    defaultActions = getDefaultActions(context.getProperties());
    debugEnabled = getLogger().isDebugEnabled();
}
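The shape of this method — read the current StateMap, add defaults only for keys that are missing, write the whole map back — is the usual way to seed state without overwriting values that survived a restart, since setState replaces the map for a scope wholesale. A condensed sketch of the same pattern, with illustrative key names:

// Read-merge-write against LOCAL state; "counter" and "last.run" are illustrative keys.
void seedDefaults(final StateManager stateManager) throws IOException {
    final Map<String, String> merged = new HashMap<>(stateManager.getState(Scope.LOCAL).toMap());
    merged.putIfAbsent("counter", "0");   // keeps any value already present in state
    merged.putIfAbsent("last.run", "");
    stateManager.setState(merged, Scope.LOCAL); // replaces the entire LOCAL map
}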
Use of org.apache.nifi.components.state.StateMap in project nifi by apache.
The class StandardNiFiServiceFacade, method getReportingTaskState.
@Override
public ComponentStateDTO getReportingTaskState(final String reportingTaskId) {
    final StateMap clusterState = isClustered() ? reportingTaskDAO.getState(reportingTaskId, Scope.CLUSTER) : null;
    final StateMap localState = reportingTaskDAO.getState(reportingTaskId, Scope.LOCAL);

    // the reporting task will be non-null, as it was already found when getting the state
    final ReportingTaskNode reportingTask = reportingTaskDAO.getReportingTask(reportingTaskId);
    return dtoFactory.createComponentStateDTO(reportingTaskId, reportingTask.getReportingTask().getClass(), localState, clusterState);
}
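Note the asymmetry: LOCAL state is always queried, while CLUSTER state is only meaningful (and only fetched) when the node is clustered. getState never returns null — a scope that has never been written yields an empty StateMap with version -1 — so a combined view of both scopes can be built as in this sketch (snapshotState is an illustrative helper, not NiFi API):

// Illustrative helper: merge both scopes into one view, cluster values winning.
Map<String, String> snapshotState(final StateManager stateManager, final boolean clustered) throws IOException {
    final Map<String, String> snapshot = new HashMap<>(stateManager.getState(Scope.LOCAL).toMap());
    if (clustered) {
        snapshot.putAll(stateManager.getState(Scope.CLUSTER).toMap()); // only meaningful in a cluster
    }
    return snapshot;
}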
Use of org.apache.nifi.components.state.StateMap in project nifi by apache.
The class GetHDFSEvents, method onTrigger.
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final StateManager stateManager = context.getStateManager();
    try {
        StateMap state = stateManager.getState(Scope.CLUSTER);
        String txIdAsString = state.get(LAST_TX_ID);
        if (txIdAsString != null && !"".equals(txIdAsString)) {
            lastTxId = Long.parseLong(txIdAsString);
        }
    } catch (IOException e) {
        getLogger().error("Unable to retrieve last transaction ID. Must retrieve last processed transaction ID before processing can occur.", e);
        context.yield();
        return;
    }

    try {
        final int retries = context.getProperty(NUMBER_OF_RETRIES_FOR_POLL).asInteger();
        final TimeUnit pollDurationTimeUnit = TimeUnit.MICROSECONDS;
        final long pollDuration = context.getProperty(POLL_DURATION).asTimePeriod(pollDurationTimeUnit);
        final DFSInotifyEventInputStream eventStream = lastTxId == -1L
                ? getHdfsAdmin().getInotifyEventStream()
                : getHdfsAdmin().getInotifyEventStream(lastTxId);
        final EventBatch eventBatch = getEventBatch(eventStream, pollDuration, pollDurationTimeUnit, retries);

        if (eventBatch != null && eventBatch.getEvents() != null) {
            if (eventBatch.getEvents().length > 0) {
                List<FlowFile> flowFiles = new ArrayList<>(eventBatch.getEvents().length);
                for (Event e : eventBatch.getEvents()) {
                    if (toProcessEvent(context, e)) {
                        getLogger().debug("Creating flow file for event: {}.", new Object[] { e });
                        final String path = getPath(e);

                        FlowFile flowFile = session.create();
                        flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), "application/json");
                        flowFile = session.putAttribute(flowFile, EventAttributes.EVENT_TYPE, e.getEventType().name());
                        flowFile = session.putAttribute(flowFile, EventAttributes.EVENT_PATH, path);
                        flowFile = session.write(flowFile, new OutputStreamCallback() {
                            @Override
                            public void process(OutputStream out) throws IOException {
                                out.write(OBJECT_MAPPER.writeValueAsBytes(e));
                            }
                        });
                        flowFiles.add(flowFile);
                    }
                }

                for (FlowFile flowFile : flowFiles) {
                    final String path = flowFile.getAttribute(EventAttributes.EVENT_PATH);
                    final String transitUri = path.startsWith("/") ? "hdfs:/" + path : "hdfs://" + path;
                    getLogger().debug("Transferring flow file {} and creating provenance event with URI {}.", new Object[] { flowFile, transitUri });
                    session.transfer(flowFile, REL_SUCCESS);
                    session.getProvenanceReporter().receive(flowFile, transitUri);
                }
            }
            lastTxId = eventBatch.getTxid();
        }
    } catch (IOException | InterruptedException e) {
        getLogger().error("Unable to get notification information: {}", new Object[] { e });
        context.yield();
        return;
    } catch (MissingEventsException e) {
        // Set lastTxId to -1 and update state. This may cause events not to be processed. The reason this
        // exception is thrown is described in the org.apache.hadoop.hdfs.client.HdfsAdmin#getInotifyEventStream
        // API, which suggests tuning a couple of parameters if this API is used.
        lastTxId = -1L;
        getLogger().error("Unable to get notification information. Setting transaction id to -1. This may cause some events to get missed. "
                + "Please see javadoc for org.apache.hadoop.hdfs.client.HdfsAdmin#getInotifyEventStream: {}", new Object[] { e });
    }
    updateClusterStateForTxId(stateManager);
}
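onTrigger ends by persisting lastTxId back to cluster state through updateClusterStateForTxId, which the excerpt does not show. A plausible reconstruction, assuming LAST_TX_ID is the same state key read at the top of the method (the real helper in GetHDFSEvents may differ):

// Hypothetical reconstruction of the helper; not verbatim from GetHDFSEvents.
private void updateClusterStateForTxId(final StateManager stateManager) {
    try {
        final Map<String, String> newState = new HashMap<>(stateManager.getState(Scope.CLUSTER).toMap());
        newState.put(LAST_TX_ID, String.valueOf(lastTxId));
        stateManager.setState(newState, Scope.CLUSTER);
    } catch (IOException e) {
        // A failed write means the next trigger re-reads an older transaction ID and
        // may reprocess events, so log a warning instead of failing the whole trigger.
        getLogger().warn("Failed to update cluster state for the last transaction ID", e);
    }
}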
Use of org.apache.nifi.components.state.StateMap in project nifi by apache.
The class ListGCSBucketTest, method testPersistState.
@Test
public void testPersistState() throws Exception {
    reset(storage);
    final ListGCSBucket processor = getProcessor();
    final TestRunner runner = buildNewRunner(processor);
    addRequiredPropertiesToRunner(runner);
    runner.assertValid();

    assertEquals("Cluster StateMap should be fresh (version -1L)", -1L, runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).getVersion());

    processor.currentKeys = ImmutableSet.of("test-key-0", "test-key-1");
    processor.currentTimestamp = 4L;
    processor.persistState(runner.getProcessContext());

    final StateMap stateMap = runner.getStateManager().getState(Scope.CLUSTER);
    assertEquals("Cluster StateMap should have been written to", 1L, stateMap.getVersion());
    assertEquals(ImmutableMap.of(
            ListGCSBucket.CURRENT_TIMESTAMP, String.valueOf(4L),
            ListGCSBucket.CURRENT_KEY_PREFIX + "0", "test-key-0",
            ListGCSBucket.CURRENT_KEY_PREFIX + "1", "test-key-1"), stateMap.toMap());
}
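The version assertions rely on StateMap's versioning contract: state that has never been written reports version -1, and each successful setState advances the version. A short sketch of that behavior against the mock state manager, inside a test method that declares throws Exception (MyProcessor is a placeholder):

// Placeholder processor class; the point is the version transition around setState.
final TestRunner runner = TestRunners.newTestRunner(MyProcessor.class);
final StateManager stateManager = runner.getStateManager();

assertEquals(-1L, stateManager.getState(Scope.CLUSTER).getVersion()); // never written

stateManager.setState(Collections.singletonMap("k", "v"), Scope.CLUSTER);
assertTrue(stateManager.getState(Scope.CLUSTER).getVersion() > -1L); // advanced by the write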