Use of org.apache.nifi.processor.ProcessContext in project nifi by apache.
The class ListDatabaseTables, method onTrigger:
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final String catalog = context.getProperty(CATALOG).getValue();
    final String schemaPattern = context.getProperty(SCHEMA_PATTERN).getValue();
    final String tableNamePattern = context.getProperty(TABLE_NAME_PATTERN).getValue();
    final String[] tableTypes = context.getProperty(TABLE_TYPES).isSet() ? context.getProperty(TABLE_TYPES).getValue().split("\\s*,\\s*") : null;
    final boolean includeCount = context.getProperty(INCLUDE_COUNT).asBoolean();
    final long refreshInterval = context.getProperty(REFRESH_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;
    final Map<String, String> stateMapProperties;
    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
        stateMapProperties = new HashMap<>(stateMap.toMap());
    } catch (IOException ioe) {
        throw new ProcessException(ioe);
    }
    try (final Connection con = dbcpService.getConnection()) {
        DatabaseMetaData dbMetaData = con.getMetaData();
        ResultSet rs = dbMetaData.getTables(catalog, schemaPattern, tableNamePattern, tableTypes);
        while (rs.next()) {
            final String tableCatalog = rs.getString(1);
            final String tableSchema = rs.getString(2);
            final String tableName = rs.getString(3);
            final String tableType = rs.getString(4);
            final String tableRemarks = rs.getString(5);
            // Build fully-qualified name
            String fqn = Stream.of(tableCatalog, tableSchema, tableName).filter(segment -> !StringUtils.isEmpty(segment)).collect(Collectors.joining("."));
            String lastTimestampForTable = stateMapProperties.get(fqn);
            boolean refreshTable = true;
            try {
                // Refresh state if the interval has elapsed
                long lastRefreshed = -1;
                final long currentTime = System.currentTimeMillis();
                if (!StringUtils.isEmpty(lastTimestampForTable)) {
                    lastRefreshed = Long.parseLong(lastTimestampForTable);
                }
                if (lastRefreshed == -1 || (refreshInterval > 0 && currentTime >= (lastRefreshed + refreshInterval))) {
                    // Drop the stale entry for this table; the state map is keyed by the fully-qualified name
                    stateMapProperties.remove(fqn);
                } else {
                    refreshTable = false;
                }
            } catch (final NumberFormatException nfe) {
                getLogger().error("Failed to retrieve observed last table fetches from the State Manager. Will not perform query until this is accomplished.", nfe);
                context.yield();
                return;
            }
            if (refreshTable) {
                FlowFile flowFile = session.create();
                logger.info("Found {}: {}", new Object[] { tableType, fqn });
                if (includeCount) {
                    try (Statement st = con.createStatement()) {
                        final String countQuery = "SELECT COUNT(1) FROM " + fqn;
                        logger.debug("Executing query: {}", new Object[] { countQuery });
                        try (ResultSet countResult = st.executeQuery(countQuery)) {
                            if (countResult.next()) {
                                flowFile = session.putAttribute(flowFile, DB_TABLE_COUNT, Long.toString(countResult.getLong(1)));
                            }
                        }
                    } catch (SQLException se) {
                        logger.error("Couldn't get row count for {}", new Object[] { fqn });
                        session.remove(flowFile);
                        continue;
                    }
                }
                if (tableCatalog != null) {
                    flowFile = session.putAttribute(flowFile, DB_TABLE_CATALOG, tableCatalog);
                }
                if (tableSchema != null) {
                    flowFile = session.putAttribute(flowFile, DB_TABLE_SCHEMA, tableSchema);
                }
                flowFile = session.putAttribute(flowFile, DB_TABLE_NAME, tableName);
                flowFile = session.putAttribute(flowFile, DB_TABLE_FULLNAME, fqn);
                flowFile = session.putAttribute(flowFile, DB_TABLE_TYPE, tableType);
                if (tableRemarks != null) {
                    flowFile = session.putAttribute(flowFile, DB_TABLE_REMARKS, tableRemarks);
                }
                String transitUri;
                try {
                    transitUri = dbMetaData.getURL();
                } catch (SQLException sqle) {
                    transitUri = "<unknown>";
                }
                session.getProvenanceReporter().receive(flowFile, transitUri);
                session.transfer(flowFile, REL_SUCCESS);
                stateMapProperties.put(fqn, Long.toString(System.currentTimeMillis()));
            }
        }
        // Update the timestamps for listed tables
        if (stateMap.getVersion() == -1) {
            stateManager.setState(stateMapProperties, Scope.CLUSTER);
        } else {
            stateManager.replace(stateMap, stateMapProperties, Scope.CLUSTER);
        }
    } catch (final SQLException | IOException e) {
        throw new ProcessException(e);
    }
}
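The interesting part of this snippet is the per-table refresh decision made against the cluster-scoped state map. A minimal, hypothetical helper capturing just that decision (the standalone class and method name are illustrative, not part of ListDatabaseTables):

public class RefreshCheck {

    // Returns true when a table should be (re-)listed: either it has never been
    // listed (no stored timestamp) or the configured refresh interval has elapsed.
    static boolean shouldRefresh(final String lastTimestamp, final long refreshIntervalMillis, final long nowMillis) {
        if (lastTimestamp == null || lastTimestamp.isEmpty()) {
            return true; // never listed before
        }
        // may throw NumberFormatException, handled by the caller as in onTrigger above
        final long lastRefreshed = Long.parseLong(lastTimestamp);
        return refreshIntervalMillis > 0 && nowMillis >= lastRefreshed + refreshIntervalMillis;
    }
}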
Use of org.apache.nifi.processor.ProcessContext in project nifi by apache.
The class MergeRecord, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    RecordBinManager manager = binManager.get();
    while (manager == null) {
        manager = new RecordBinManager(context, sessionFactory, getLogger());
        manager.setMaxBinAge(context.getProperty(MAX_BIN_AGE).asTimePeriod(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
        final boolean updated = binManager.compareAndSet(null, manager);
        if (!updated) {
            manager = binManager.get();
        }
    }
    final ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> flowFiles = session.get(FlowFileFilters.newSizeBasedFilter(250, DataUnit.KB, 250));
    if (getLogger().isDebugEnabled()) {
        final List<String> ids = flowFiles.stream().map(ff -> "id=" + ff.getId()).collect(Collectors.toList());
        getLogger().debug("Pulled {} FlowFiles from queue: {}", new Object[] { ids.size(), ids });
    }
    final String mergeStrategy = context.getProperty(MERGE_STRATEGY).getValue();
    final boolean block;
    if (MERGE_STRATEGY_DEFRAGMENT.equals(mergeStrategy)) {
        block = true;
    } else if (context.getProperty(CORRELATION_ATTRIBUTE_NAME).isSet()) {
        block = true;
    } else {
        block = false;
    }
    try {
        for (final FlowFile flowFile : flowFiles) {
            try {
                binFlowFile(context, flowFile, session, manager, block);
            } catch (final Exception e) {
                getLogger().error("Failed to bin {} due to {}", new Object[] { flowFile, e });
                session.transfer(flowFile, REL_FAILURE);
            }
        }
    } finally {
        session.commit();
    }
    try {
        manager.completeExpiredBins();
    } catch (final Exception e) {
        getLogger().error("Failed to merge FlowFiles to create new bin due to " + e, e);
    }
    if (flowFiles.isEmpty()) {
        getLogger().debug("No FlowFiles to bin; will yield");
        context.yield();
    }
}
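The while loop at the top is a lock-free lazy initialization of the shared RecordBinManager: each thread builds a candidate and tries to publish it with compareAndSet, and losers discard their candidate in favor of the winner's. A self-contained sketch of the same pattern (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

final class LazyRef<T> {

    private final AtomicReference<T> ref = new AtomicReference<>();

    // Returns the shared instance, creating it "effectively once": several
    // threads may build a candidate, but only one candidate is ever published.
    T getOrCreate(final Supplier<T> factory) {
        T value = ref.get();
        while (value == null) {
            final T candidate = factory.get();
            if (ref.compareAndSet(null, candidate)) {
                value = candidate; // this thread won the race
            } else {
                value = ref.get(); // another thread won; use its instance
            }
        }
        return value;
    }
}

One subtlety: as in onTrigger above, a losing thread may have constructed a manager that is simply discarded, so the factory must be safe to invoke more than once.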
Use of org.apache.nifi.processor.ProcessContext in project nifi by apache.
The class Notify, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    final PropertyValue signalIdProperty = context.getProperty(RELEASE_SIGNAL_IDENTIFIER);
    final PropertyValue counterNameProperty = context.getProperty(SIGNAL_COUNTER_NAME);
    final PropertyValue deltaProperty = context.getProperty(SIGNAL_COUNTER_DELTA);
    final String attributeCacheRegex = context.getProperty(ATTRIBUTE_CACHE_REGEX).getValue();
    final Integer bufferCount = context.getProperty(SIGNAL_BUFFER_COUNT).asInteger();
    // the cache client used to interact with the distributed cache.
    final AtomicDistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(AtomicDistributedMapCacheClient.class);
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);
    final Map<String, SignalBuffer> signalBuffers = new HashMap<>();
    for (int i = 0; i < bufferCount; i++) {
        final FlowFile flowFile = session.get();
        if (flowFile == null) {
            break;
        }
        // The signal id is computed from the 'Release Signal Identifier' property, with Expression Language support
        final String signalId = signalIdProperty.evaluateAttributeExpressions(flowFile).getValue();
        // if the computed value is null or empty, transfer the FlowFile to the failure relationship
        if (StringUtils.isBlank(signalId)) {
            logger.error("FlowFile {} has no attribute for given Release Signal Identifier", new Object[] { flowFile });
            // set 'notified' attribute
            session.transfer(session.putAttribute(flowFile, NOTIFIED_ATTRIBUTE_NAME, String.valueOf(false)), REL_FAILURE);
            continue;
        }
        String counterName = counterNameProperty.evaluateAttributeExpressions(flowFile).getValue();
        if (StringUtils.isEmpty(counterName)) {
            counterName = WaitNotifyProtocol.DEFAULT_COUNT_NAME;
        }
        int delta = 1;
        if (deltaProperty.isSet()) {
            final String deltaStr = deltaProperty.evaluateAttributeExpressions(flowFile).getValue();
            try {
                delta = Integer.parseInt(deltaStr);
            } catch (final NumberFormatException e) {
                logger.error("Failed to calculate delta for FlowFile {} due to {}", new Object[] { flowFile, e }, e);
                session.transfer(session.putAttribute(flowFile, NOTIFIED_ATTRIBUTE_NAME, String.valueOf(false)), REL_FAILURE);
                continue;
            }
        }
        if (!signalBuffers.containsKey(signalId)) {
            signalBuffers.put(signalId, new SignalBuffer());
        }
        final SignalBuffer signalBuffer = signalBuffers.get(signalId);
        if (StringUtils.isNotEmpty(attributeCacheRegex)) {
            flowFile.getAttributes().entrySet().stream().filter(e -> (!e.getKey().equals("uuid") && e.getKey().matches(attributeCacheRegex))).forEach(e -> signalBuffer.attributesToCache.put(e.getKey(), e.getValue()));
        }
        signalBuffer.incrementDelta(counterName, delta);
        signalBuffer.flowFiles.add(flowFile);
        if (logger.isDebugEnabled()) {
            logger.debug("Cached release signal identifier {} counterName {} from FlowFile {}", new Object[] { signalId, counterName, flowFile });
        }
    }
    signalBuffers.forEach((signalId, signalBuffer) -> {
        // If notifying the cache fails, rethrow so that the processor can retry after yielding for a while.
        try {
            protocol.notify(signalId, signalBuffer.deltas, signalBuffer.attributesToCache);
            signalBuffer.flowFiles.forEach(flowFile -> session.transfer(session.putAttribute(flowFile, NOTIFIED_ATTRIBUTE_NAME, String.valueOf(true)), REL_SUCCESS));
        } catch (IOException e) {
            throw new RuntimeException(String.format("Unable to communicate with cache when processing %s due to %s", signalId, e), e);
        }
    });
}
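SignalBuffer itself is not shown above; from the way it is used, it is a small per-signal accumulator. A hedged reconstruction (field and method names inferred from the usage here; the real inner class in Notify may differ in detail):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.nifi.flowfile.FlowFile;

class SignalBuffer {

    final Map<String, Integer> deltas = new HashMap<>();
    final Map<String, String> attributesToCache = new HashMap<>();
    final List<FlowFile> flowFiles = new ArrayList<>();

    void incrementDelta(final String counterName, final int delta) {
        // accumulate the per-counter delta for this signal id
        deltas.merge(counterName, delta, Integer::sum);
    }
}

Buffering per signal id this way means the processor makes one WaitNotifyProtocol.notify call per distinct signal, rather than one cache round-trip per FlowFile.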
Use of org.apache.nifi.processor.ProcessContext in project nifi by apache.
The class ListenHTTPServlet, method doPost:
@Override
protected void doPost(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException {
    final ProcessContext context = processContext;
    ProcessSessionFactory sessionFactory;
    do {
        sessionFactory = sessionFactoryHolder.get();
        if (sessionFactory == null) {
            try {
                Thread.sleep(10);
            } catch (final InterruptedException e) {
                // ignore; the loop re-checks for a session factory
            }
        }
    } while (sessionFactory == null);
    final ProcessSession session = sessionFactory.createSession();
    FlowFile flowFile = null;
    String holdUuid = null;
    String foundSubject = null;
    try {
        final long n = filesReceived.getAndIncrement() % FILES_BEFORE_CHECKING_DESTINATION_SPACE;
        if (n == 0 || !spaceAvailable.get()) {
            if (context.getAvailableRelationships().isEmpty()) {
                spaceAvailable.set(false);
                if (logger.isDebugEnabled()) {
                    logger.debug("Received request from " + request.getRemoteHost() + " but no space available; Indicating Service Unavailable");
                }
                response.sendError(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
                return;
            } else {
                spaceAvailable.set(true);
            }
        }
        response.setHeader("Content-Type", MediaType.TEXT_PLAIN);
        final boolean contentGzipped = Boolean.parseBoolean(request.getHeader(GZIPPED_HEADER));
        final X509Certificate[] certs = (X509Certificate[]) request.getAttribute("javax.servlet.request.X509Certificate");
        foundSubject = DEFAULT_FOUND_SUBJECT;
        if (certs != null && certs.length > 0) {
            for (final X509Certificate cert : certs) {
                foundSubject = cert.getSubjectDN().getName();
                if (authorizedPattern.matcher(foundSubject).matches()) {
                    break;
                } else {
                    logger.warn("Rejecting transfer attempt from " + foundSubject + " because the DN is not authorized, host=" + request.getRemoteHost());
                    response.sendError(HttpServletResponse.SC_FORBIDDEN, "not allowed based on dn");
                    return;
                }
            }
        }
        final String destinationVersion = request.getHeader(PROTOCOL_VERSION_HEADER);
        Integer protocolVersion = null;
        if (destinationVersion != null) {
            try {
                protocolVersion = Integer.valueOf(destinationVersion);
            } catch (final NumberFormatException e) {
                // Value was invalid. Treat as if the header were missing.
            }
        }
        final boolean destinationIsLegacyNiFi = (protocolVersion == null);
        final boolean createHold = Boolean.parseBoolean(request.getHeader(FLOWFILE_CONFIRMATION_HEADER));
        final String contentType = request.getContentType();
        final InputStream unthrottled = contentGzipped ? new GZIPInputStream(request.getInputStream()) : request.getInputStream();
        final InputStream in = (streamThrottler == null) ? unthrottled : streamThrottler.newThrottledInputStream(unthrottled);
        if (logger.isDebugEnabled()) {
            logger.debug("Received request from " + request.getRemoteHost() + ", createHold=" + createHold + ", content-type=" + contentType + ", gzip=" + contentGzipped);
        }
        final AtomicBoolean hasMoreData = new AtomicBoolean(false);
        final FlowFileUnpackager unpackager;
        if (APPLICATION_FLOW_FILE_V3.equals(contentType)) {
            unpackager = new FlowFileUnpackagerV3();
        } else if (APPLICATION_FLOW_FILE_V2.equals(contentType)) {
            unpackager = new FlowFileUnpackagerV2();
        } else if (APPLICATION_FLOW_FILE_V1.equals(contentType)) {
            unpackager = new FlowFileUnpackagerV1();
        } else {
            unpackager = null;
        }
        final Set<FlowFile> flowFileSet = new HashSet<>();
        do {
            final long startNanos = System.nanoTime();
            final Map<String, String> attributes = new HashMap<>();
            flowFile = session.create();
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(final OutputStream rawOut) throws IOException {
                    try (final BufferedOutputStream bos = new BufferedOutputStream(rawOut, 65536)) {
                        if (unpackager == null) {
                            IOUtils.copy(in, bos);
                            hasMoreData.set(false);
                        } else {
                            attributes.putAll(unpackager.unpackageFlowFile(in, bos));
                            if (destinationIsLegacyNiFi) {
                                if (attributes.containsKey("nf.file.name")) {
                                    // for backward compatibility with old nifi...
                                    attributes.put(CoreAttributes.FILENAME.key(), attributes.remove("nf.file.name"));
                                }
                                if (attributes.containsKey("nf.file.path")) {
                                    attributes.put(CoreAttributes.PATH.key(), attributes.remove("nf.file.path"));
                                }
                            }
                            hasMoreData.set(unpackager.hasMoreData());
                        }
                    }
                }
            });
            final long transferNanos = System.nanoTime() - startNanos;
            final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS);
            // put metadata on flowfile
            final String nameVal = request.getHeader(CoreAttributes.FILENAME.key());
            if (StringUtils.isNotBlank(nameVal)) {
                attributes.put(CoreAttributes.FILENAME.key(), nameVal);
            }
            // put arbitrary headers on flow file
            for (Enumeration<String> headerEnum = request.getHeaderNames(); headerEnum.hasMoreElements(); ) {
                String headerName = headerEnum.nextElement();
                if (headerPattern != null && headerPattern.matcher(headerName).matches()) {
                    String headerValue = request.getHeader(headerName);
                    attributes.put(headerName, headerValue);
                }
            }
            String sourceSystemFlowFileIdentifier = attributes.get(CoreAttributes.UUID.key());
            if (sourceSystemFlowFileIdentifier != null) {
                sourceSystemFlowFileIdentifier = "urn:nifi:" + sourceSystemFlowFileIdentifier;
                // If we received a UUID, we want to give the FlowFile a new UUID and register the sending system's
                // identifier as the SourceSystemFlowFileIdentifier field in the Provenance RECEIVE event
                attributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString());
            }
            flowFile = session.putAllAttributes(flowFile, attributes);
            session.getProvenanceReporter().receive(flowFile, request.getRequestURL().toString(), sourceSystemFlowFileIdentifier, "Remote DN=" + foundSubject, transferMillis);
            flowFile = session.putAttribute(flowFile, "restlistener.remote.source.host", request.getRemoteHost());
            flowFile = session.putAttribute(flowFile, "restlistener.request.uri", request.getRequestURI());
            flowFile = session.putAttribute(flowFile, "restlistener.remote.user.dn", foundSubject);
            flowFileSet.add(flowFile);
            if (holdUuid == null) {
                holdUuid = flowFile.getAttribute(CoreAttributes.UUID.key());
            }
        } while (hasMoreData.get());
        if (createHold) {
            String uuid = (holdUuid == null) ? UUID.randomUUID().toString() : holdUuid;
            if (flowFileMap.containsKey(uuid)) {
                uuid = UUID.randomUUID().toString();
            }
            final FlowFileEntryTimeWrapper wrapper = new FlowFileEntryTimeWrapper(session, flowFileSet, System.currentTimeMillis(), request.getRemoteHost());
            FlowFileEntryTimeWrapper previousWrapper;
            do {
                previousWrapper = flowFileMap.putIfAbsent(uuid, wrapper);
                if (previousWrapper != null) {
                    uuid = UUID.randomUUID().toString();
                }
            } while (previousWrapper != null);
            response.setStatus(HttpServletResponse.SC_SEE_OTHER);
            final String ackUri = "/" + basePath + "/holds/" + uuid;
            response.addHeader(LOCATION_HEADER_NAME, ackUri);
            response.addHeader(LOCATION_URI_INTENT_NAME, LOCATION_URI_INTENT_VALUE);
            response.getOutputStream().write(ackUri.getBytes("UTF-8"));
            if (logger.isDebugEnabled()) {
                logger.debug("Ingested {} from Remote Host: [{}] Port [{}] SubjectDN [{}]; placed hold on these {} files with ID {}", new Object[] { flowFileSet, request.getRemoteHost(), request.getRemotePort(), foundSubject, flowFileSet.size(), uuid });
            }
        } else {
            response.setStatus(this.returnCode);
            logger.info("Received from Remote Host: [{}] Port [{}] SubjectDN [{}]; transferring to 'success' {}", new Object[] { request.getRemoteHost(), request.getRemotePort(), foundSubject, flowFile });
            session.transfer(flowFileSet, ListenHTTP.RELATIONSHIP_SUCCESS);
            session.commit();
        }
    } catch (final Throwable t) {
        session.rollback();
        if (flowFile == null) {
            logger.error("Unable to receive file from Remote Host: [{}] SubjectDN [{}] due to {}", new Object[] { request.getRemoteHost(), foundSubject, t });
        } else {
            logger.error("Unable to receive file {} from Remote Host: [{}] SubjectDN [{}] due to {}", new Object[] { flowFile, request.getRemoteHost(), foundSubject, t });
        }
        response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, t.toString());
    }
}
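From the client side, the simplest interaction with this servlet is a plain POST with no FlowFile packaging and no hold. A minimal sketch using java.net.http; the host, port, and base path are assumptions and must match the actual ListenHTTP configuration:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ListenHttpClientExample {

    public static void main(String[] args) throws Exception {
        final HttpClient client = HttpClient.newHttpClient();
        final HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/contentListener")) // assumed port and base path
                .header("Content-Type", "text/plain")
                .POST(HttpRequest.BodyPublishers.ofString("hello nifi"))
                .build();
        final HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println("status=" + response.statusCode());
    }
}

Because the request carries none of the flowfile content types, unpackager is null in doPost above and the body is copied verbatim into a single FlowFile.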
Use of org.apache.nifi.processor.ProcessContext in project nifi by apache.
The class ITListenAndPutSyslog, method run:
/**
 * Sends numMessages from PutSyslog to ListenSyslog.
 */
private void run(String protocol, int numMessages, int expectedMessages) throws IOException, InterruptedException {
    // set the same protocol on both processors
    putSyslogRunner.setProperty(PutSyslog.PROTOCOL, protocol);
    listenSyslogRunner.setProperty(ListenSyslog.PROTOCOL, protocol);
    // set a listening port of 0 to get a random available port
    listenSyslogRunner.setProperty(ListenSyslog.PORT, "0");
    // call onScheduled to start ListenSyslog listening
    final ProcessSessionFactory processSessionFactory = listenSyslogRunner.getProcessSessionFactory();
    final ProcessContext context = listenSyslogRunner.getProcessContext();
    listenSyslog.onScheduled(context);
    // get the real port it is listening on and set that in PutSyslog
    final int listeningPort = listenSyslog.getPort();
    putSyslogRunner.setProperty(PutSyslog.PORT, String.valueOf(listeningPort));
    // configure the message properties on PutSyslog
    final String pri = "34";
    final String version = "1";
    final String stamp = "2016-02-05T22:14:15.003Z";
    final String host = "localhost";
    final String body = "some message";
    final String expectedMessage = "<" + pri + ">" + version + " " + stamp + " " + host + " " + body;
    putSyslogRunner.setProperty(PutSyslog.MSG_PRIORITY, pri);
    putSyslogRunner.setProperty(PutSyslog.MSG_VERSION, version);
    putSyslogRunner.setProperty(PutSyslog.MSG_TIMESTAMP, stamp);
    putSyslogRunner.setProperty(PutSyslog.MSG_HOSTNAME, host);
    putSyslogRunner.setProperty(PutSyslog.MSG_BODY, body);
    // send the messages
    for (int i = 0; i < numMessages; i++) {
        putSyslogRunner.enqueue("incoming data".getBytes(Charset.forName("UTF-8")));
    }
    putSyslogRunner.run(numMessages, false);
    // trigger ListenSyslog until we've seen all the messages
    int numTransferred = 0;
    long timeout = System.currentTimeMillis() + 30000;
    while (numTransferred < expectedMessages && System.currentTimeMillis() < timeout) {
        Thread.sleep(10);
        listenSyslog.onTrigger(context, processSessionFactory);
        numTransferred = listenSyslogRunner.getFlowFilesForRelationship(ListenSyslog.REL_SUCCESS).size();
    }
    Assert.assertEquals("Did not process all the messages", expectedMessages, numTransferred);
    if (expectedMessages > 0) {
        // check that one of the flow files has the expected content
        MockFlowFile mockFlowFile = listenSyslogRunner.getFlowFilesForRelationship(ListenSyslog.REL_SUCCESS).get(0);
        mockFlowFile.assertContentEquals(expectedMessage);
    }
}
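Hypothetical JUnit tests invoking this helper (the test method names are illustrative; run(protocol, numMessages, expectedMessages) is the method above):

@Test
public void testUdpDeliversAllMessages() throws Exception {
    // UDP is lossy in principle, but over loopback we expect all 5 messages
    run("UDP", 5, 5);
}

@Test
public void testTcpDeliversAllMessages() throws Exception {
    run("TCP", 5, 5);
}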