Use of org.apache.nifi.processor.io.InputStreamCallback in project nifi by apache.
The class ExtractTNEFAttachments, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
final ComponentLog logger = getLogger();
final FlowFile originalFlowFile = session.get();
if (originalFlowFile == null) {
return;
}
final List<FlowFile> attachmentsList = new ArrayList<>();
final List<FlowFile> invalidFlowFilesList = new ArrayList<>();
final List<FlowFile> originalFlowFilesList = new ArrayList<>();
session.read(originalFlowFile, new InputStreamCallback() {
@Override
public void process(final InputStream rawIn) throws IOException {
try (final InputStream in = new BufferedInputStream(rawIn)) {
// This will throw an exception if the content is not TNEF.
final HMEFMessage hmefMessage = new HMEFMessage(in);
// Add the original FlowFile (may revert later on in case of errors).
originalFlowFilesList.add(originalFlowFile);
// If the attachments list is not empty, proceed.
if (!hmefMessage.getAttachments().isEmpty()) {
final String originalFlowFileName = originalFlowFile.getAttribute(CoreAttributes.FILENAME.key());
try {
for (final Attachment attachment : hmefMessage.getAttachments()) {
FlowFile split = session.create(originalFlowFile);
final Map<String, String> attributes = new HashMap<>();
if (StringUtils.isNotBlank(attachment.getLongFilename())) {
attributes.put(CoreAttributes.FILENAME.key(), attachment.getLongFilename());
}
String parentUuid = originalFlowFile.getAttribute(CoreAttributes.UUID.key());
attributes.put(ATTACHMENT_ORIGINAL_UUID, parentUuid);
attributes.put(ATTACHMENT_ORIGINAL_FILENAME, originalFlowFileName);
// TODO: Extract MIME type (HMEF doesn't seem to expose this info).
split = session.append(split, new OutputStreamCallback() {
@Override
public void process(OutputStream out) throws IOException {
out.write(attachment.getContents());
}
});
split = session.putAllAttributes(split, attributes);
attachmentsList.add(split);
}
} catch (FlowFileHandlingException e) {
// Something went wrong: remove any splits that may have been created
session.remove(attachmentsList);
// Remove the original FlowFile from its list
originalFlowFilesList.remove(originalFlowFile);
logger.error("FlowFile {} triggered error {} while processing message; removing generated FlowFiles from session", new Object[] { originalFlowFile, e });
invalidFlowFilesList.add(originalFlowFile);
}
}
} catch (Exception e) {
// Another error was hit...
// Remove the original FlowFile from its list
originalFlowFilesList.remove(originalFlowFile);
logger.error("Could not parse FlowFile {} as an email; treating as failure", new Object[] { originalFlowFile, e });
// Message is invalid or triggered an error during parsing
invalidFlowFilesList.add(originalFlowFile);
}
}
});
session.transfer(attachmentsList, REL_ATTACHMENTS);
// As per the code above, the original FlowFile may be routed to failure or
// original depending on whether the content parsed as valid TNEF.
session.transfer(invalidFlowFilesList, REL_FAILURE);
session.transfer(originalFlowFilesList, REL_ORIGINAL);
// check if attachments have been extracted
if (!attachmentsList.isEmpty()) {
if (attachmentsList.size() > 10) {
// If more than 10, summarise log
logger.info("Split {} into {} files", new Object[] { originalFlowFile, attachmentsList.size() });
} else {
// Otherwise be more verbose and list each individual split
logger.info("Split {} into {} files: {}", new Object[] { originalFlowFile, attachmentsList.size(), attachmentsList });
}
}
}
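For context, the same HMEF parsing can be exercised outside NiFi with just Apache POI. The sketch below is a hypothetical standalone driver (the TnefDump class name and the winmail.dat path are illustrative, not part of the processor); it uses only the POI calls that appear in onTrigger() above.

    import java.io.BufferedInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.poi.hmef.Attachment;
    import org.apache.poi.hmef.HMEFMessage;

    // Hypothetical standalone driver; mirrors the parsing done in onTrigger().
    public class TnefDump {
        public static void main(String[] args) throws IOException {
            try (InputStream in = new BufferedInputStream(new FileInputStream("winmail.dat"))) {
                // The constructor throws if the content is not TNEF, as in the processor.
                final HMEFMessage message = new HMEFMessage(in);
                for (final Attachment attachment : message.getAttachments()) {
                    // Prefer the long filename when present, as the processor does.
                    final String longName = attachment.getLongFilename();
                    final String name = (longName != null && !longName.trim().isEmpty())
                            ? longName
                            : attachment.getFilename();
                    Files.write(Paths.get(name), attachment.getContents());
                }
            }
        }
    }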
Use of org.apache.nifi.processor.io.InputStreamCallback in project nifi by apache.
The class FlowFileEvent, method getBody.
@Override
public byte[] getBody() {
synchronized (bodyLock) {
if (!bodyLoaded) {
if (flowFile.getSize() > Integer.MAX_VALUE) {
throw new RuntimeException("Can't get body of Event because the backing FlowFile is too large (" + flowFile.getSize() + " bytes)");
}
final ByteArrayOutputStream baos = new ByteArrayOutputStream((int) flowFile.getSize());
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(InputStream in) throws IOException {
try (BufferedInputStream input = new BufferedInputStream(in)) {
StreamUtils.copy(input, baos);
}
}
});
body = baos.toByteArray();
bodyLoaded = true;
}
}
return body;
}
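The pattern above (guard on size, pre-size a ByteArrayOutputStream, copy the stream fully) can be captured as a plain-Java helper. This is a minimal sketch, assuming no NiFi classes; StreamBodies and readFully are hypothetical names, not part of the NiFi API.

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public final class StreamBodies {
        // Guard against oversized content, pre-size the buffer, copy the stream fully.
        public static byte[] readFully(final InputStream in, final long declaredSize) throws IOException {
            if (declaredSize > Integer.MAX_VALUE) {
                throw new IOException("Content too large to buffer (" + declaredSize + " bytes)");
            }
            final ByteArrayOutputStream baos = new ByteArrayOutputStream((int) declaredSize);
            final byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) != -1) {
                baos.write(buffer, 0, read);
            }
            return baos.toByteArray();
        }
    }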
Use of org.apache.nifi.processor.io.InputStreamCallback in project nifi by apache.
The class HashContent, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
final ComponentLog logger = getLogger();
final String algorithm = context.getProperty(HASH_ALGORITHM).getValue();
final MessageDigest digest;
try {
digest = MessageDigest.getInstance(algorithm);
} catch (NoSuchAlgorithmException e) {
logger.error("Failed to process {} due to {}; routing to failure", new Object[] { flowFile, e });
session.transfer(flowFile, REL_FAILURE);
return;
}
final AtomicReference<String> hashValueHolder = new AtomicReference<>(null);
try {
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream in) throws IOException {
try (final DigestOutputStream digestOut = new DigestOutputStream(new NullOutputStream(), digest)) {
StreamUtils.copy(in, digestOut);
final byte[] hash = digest.digest();
final StringBuilder strb = new StringBuilder(hash.length * 2);
for (int i = 0; i < hash.length; i++) {
strb.append(Integer.toHexString((hash[i] & 0xFF) | 0x100).substring(1, 3));
}
hashValueHolder.set(strb.toString());
}
}
});
final String attributeName = context.getProperty(ATTRIBUTE_NAME).getValue();
flowFile = session.putAttribute(flowFile, attributeName, hashValueHolder.get());
logger.info("Successfully added attribute '{}' to {} with a value of {}; routing to success", new Object[] { attributeName, flowFile, hashValueHolder.get() });
session.getProvenanceReporter().modifyAttributes(flowFile);
session.transfer(flowFile, REL_SUCCESS);
} catch (final ProcessException e) {
logger.error("Failed to process {} due to {}; routing to failure", new Object[] { flowFile, e });
session.transfer(flowFile, REL_FAILURE);
}
}
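The digest-then-hex-encode logic lends itself to a standalone sketch. Note the (b & 0xFF) | 0x100 trick: OR-ing in 0x100 forces a three-digit hex string, so the two-character substring always keeps a leading zero. HashSketch and hashOf below are hypothetical names; this variant uses the JDK's DigestInputStream rather than the DigestOutputStream-over-NullOutputStream used above, but it computes the same value.

    import java.io.IOException;
    import java.io.InputStream;
    import java.security.DigestInputStream;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    public final class HashSketch {
        public static String hashOf(final InputStream in, final String algorithm)
                throws IOException, NoSuchAlgorithmException {
            final MessageDigest digest = MessageDigest.getInstance(algorithm);
            try (DigestInputStream digestIn = new DigestInputStream(in, digest)) {
                final byte[] buffer = new byte[8192];
                while (digestIn.read(buffer) != -1) {
                    // reading updates the digest as a side effect
                }
            }
            final byte[] hash = digest.digest();
            final StringBuilder hex = new StringBuilder(hash.length * 2);
            for (final byte b : hash) {
                // OR-ing with 0x100 yields a three-digit hex string, so the
                // two-character substring keeps any leading zero.
                hex.append(Integer.toHexString((b & 0xFF) | 0x100).substring(1, 3));
            }
            return hex.toString();
        }
    }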
Use of org.apache.nifi.processor.io.InputStreamCallback in project nifi by apache.
The class ParseSyslog, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
final String charsetName = context.getProperty(CHARSET).getValue();
// If the parser already exists and uses the same charset, it does not need to be re-initialized
if (parser == null || !parser.getCharsetName().equals(charsetName)) {
parser = new SyslogParser(Charset.forName(charsetName));
}
final byte[] buffer = new byte[(int) flowFile.getSize()];
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream in) throws IOException {
StreamUtils.fillBuffer(in, buffer);
}
});
final SyslogEvent event;
try {
event = parser.parseEvent(buffer, null);
} catch (final ProcessException pe) {
getLogger().error("Failed to parse {} as a Syslog message due to {}; routing to failure", new Object[] { flowFile, pe });
session.transfer(flowFile, REL_FAILURE);
return;
}
if (event == null || !event.isValid()) {
getLogger().error("Failed to parse {} as a Syslog message: it does not conform to any of the RFC formats supported; routing to failure", new Object[] { flowFile });
session.transfer(flowFile, REL_FAILURE);
return;
}
final Map<String, String> attributes = new HashMap<>(8);
attributes.put(SyslogAttributes.PRIORITY.key(), event.getPriority());
attributes.put(SyslogAttributes.SEVERITY.key(), event.getSeverity());
attributes.put(SyslogAttributes.FACILITY.key(), event.getFacility());
attributes.put(SyslogAttributes.VERSION.key(), event.getVersion());
attributes.put(SyslogAttributes.TIMESTAMP.key(), event.getTimeStamp());
attributes.put(SyslogAttributes.HOSTNAME.key(), event.getHostName());
attributes.put(SyslogAttributes.BODY.key(), event.getMsgBody());
flowFile = session.putAllAttributes(flowFile, attributes);
session.transfer(flowFile, REL_SUCCESS);
}
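StreamUtils.fillBuffer(in, buffer) reads until the buffer, sized to flowFile.getSize() (cast to int, so this assumes content under 2 GB), is completely full. A minimal re-implementation of that loop as a plain-Java sketch; BufferFill and its fillBuffer are local names, not the NiFi StreamUtils method itself.

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    public final class BufferFill {
        // Read until the buffer is full, failing fast if the stream ends early.
        public static void fillBuffer(final InputStream in, final byte[] buffer) throws IOException {
            int offset = 0;
            while (offset < buffer.length) {
                final int read = in.read(buffer, offset, buffer.length - offset);
                if (read == -1) {
                    throw new EOFException("Stream ended after " + offset + " of " + buffer.length + " bytes");
                }
                offset += read;
            }
        }
    }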
Use of org.apache.nifi.processor.io.InputStreamCallback in project nifi by apache.
The class PostHTTP, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
FlowFile firstFlowFile = session.get();
if (firstFlowFile == null) {
return;
}
final ComponentLog logger = getLogger();
final String url = context.getProperty(URL).evaluateAttributeExpressions(firstFlowFile).getValue();
try {
new java.net.URL(url);
} catch (final MalformedURLException e) {
logger.error("After substituting attribute values for {}, URL is {}; this is not a valid URL, so routing to failure", new Object[] { firstFlowFile, url });
firstFlowFile = session.penalize(firstFlowFile);
session.transfer(firstFlowFile, REL_FAILURE);
return;
}
final List<FlowFile> toSend = new ArrayList<>();
toSend.add(firstFlowFile);
final boolean sendAsFlowFile = context.getProperty(SEND_AS_FLOWFILE).asBoolean();
final int compressionLevel = context.getProperty(COMPRESSION_LEVEL).asInteger();
final String userAgent = context.getProperty(USER_AGENT).getValue();
final RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
requestConfigBuilder.setConnectionRequestTimeout(context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
requestConfigBuilder.setConnectTimeout(context.getProperty(CONNECTION_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
requestConfigBuilder.setRedirectsEnabled(false);
requestConfigBuilder.setSocketTimeout(context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
final RequestConfig requestConfig = requestConfigBuilder.build();
final StreamThrottler throttler = throttlerRef.get();
final Double maxBatchBytes = context.getProperty(MAX_BATCH_SIZE).asDataSize(DataUnit.B);
final AtomicLong bytesToSend = new AtomicLong(firstFlowFile.getSize());
DestinationAccepts destinationAccepts = null;
CloseableHttpClient client = null;
final String transactionId = UUID.randomUUID().toString();
final AtomicReference<String> dnHolder = new AtomicReference<>("none");
final Config config = getConfig(url, context);
final HttpClientConnectionManager conMan = config.getConnectionManager();
final HttpClientBuilder clientBuilder = HttpClientBuilder.create();
clientBuilder.setConnectionManager(conMan);
clientBuilder.setUserAgent(userAgent);
clientBuilder.addInterceptorFirst(new HttpResponseInterceptor() {
@Override
public void process(final HttpResponse response, final HttpContext httpContext) throws HttpException, IOException {
final HttpCoreContext coreContext = HttpCoreContext.adapt(httpContext);
final ManagedHttpClientConnection conn = coreContext.getConnection(ManagedHttpClientConnection.class);
if (!conn.isOpen()) {
return;
}
final SSLSession sslSession = conn.getSSLSession();
if (sslSession != null) {
final Certificate[] certChain = sslSession.getPeerCertificates();
if (certChain == null || certChain.length == 0) {
throw new SSLPeerUnverifiedException("No certificates found");
}
try {
final X509Certificate cert = CertificateUtils.convertAbstractX509Certificate(certChain[0]);
dnHolder.set(cert.getSubjectDN().getName().trim());
} catch (CertificateException e) {
final String msg = "Could not extract subject DN from SSL session peer certificate";
logger.warn(msg);
throw new SSLPeerUnverifiedException(msg);
}
}
}
});
clientBuilder.disableAutomaticRetries();
clientBuilder.disableContentCompression();
final String username = context.getProperty(USERNAME).getValue();
final String password = context.getProperty(PASSWORD).getValue();
// set the credentials if appropriate
if (username != null) {
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
if (password == null) {
credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username));
} else {
credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username, password));
}
clientBuilder.setDefaultCredentialsProvider(credentialsProvider);
}
// Set the proxy if specified
if (context.getProperty(PROXY_HOST).isSet() && context.getProperty(PROXY_PORT).isSet()) {
final String host = context.getProperty(PROXY_HOST).getValue();
final int port = context.getProperty(PROXY_PORT).asInteger();
clientBuilder.setProxy(new HttpHost(host, port));
}
client = clientBuilder.build();
// determine whether or not destination accepts flowfile/gzip
destinationAccepts = config.getDestinationAccepts();
if (destinationAccepts == null) {
try {
destinationAccepts = getDestinationAcceptance(sendAsFlowFile, client, url, getLogger(), transactionId);
config.setDestinationAccepts(destinationAccepts);
} catch (final IOException e) {
firstFlowFile = session.penalize(firstFlowFile);
session.transfer(firstFlowFile, REL_FAILURE);
logger.error("Unable to communicate with destination {} to determine whether or not it can accept " + "flowfiles/gzip; routing {} to failure due to {}", new Object[] { url, firstFlowFile, e });
context.yield();
return;
}
}
// If the destination accepts packaged FlowFiles, we can get more FlowFiles from the session, up to MAX_BATCH_SIZE, for the same URL
if (sendAsFlowFile && (destinationAccepts.isFlowFileV3Accepted() || destinationAccepts.isFlowFileV2Accepted())) {
toSend.addAll(session.get(new FlowFileFilter() {
@Override
public FlowFileFilterResult filter(FlowFile flowFile) {
// if over MAX_BATCH_SIZE, then stop adding files
if (bytesToSend.get() + flowFile.getSize() > maxBatchBytes) {
return FlowFileFilterResult.REJECT_AND_TERMINATE;
}
// check URL to see if this flowfile can be included in the batch
final String urlToCheck = context.getProperty(URL).evaluateAttributeExpressions(flowFile).getValue();
if (url.equals(urlToCheck)) {
bytesToSend.addAndGet(flowFile.getSize());
return FlowFileFilterResult.ACCEPT_AND_CONTINUE;
} else {
return FlowFileFilterResult.REJECT_AND_CONTINUE;
}
}
}));
}
final HttpPost post = new HttpPost(url);
final DestinationAccepts accepts = destinationAccepts;
final boolean isDestinationLegacyNiFi = accepts.getProtocolVersion() == null;
final EntityTemplate entity = new EntityTemplate(new ContentProducer() {
@Override
public void writeTo(final OutputStream rawOut) throws IOException {
final OutputStream throttled = throttler == null ? rawOut : throttler.newThrottledOutputStream(rawOut);
OutputStream wrappedOut = new BufferedOutputStream(throttled);
if (compressionLevel > 0 && accepts.isGzipAccepted()) {
wrappedOut = new GZIPOutputStream(wrappedOut, compressionLevel);
}
try (final OutputStream out = wrappedOut) {
for (final FlowFile flowFile : toSend) {
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream rawIn) throws IOException {
try (final InputStream in = new BufferedInputStream(rawIn)) {
FlowFilePackager packager = null;
if (sendAsFlowFile) {
if (accepts.isFlowFileV3Accepted()) {
packager = new FlowFilePackagerV3();
} else if (accepts.isFlowFileV2Accepted()) {
packager = new FlowFilePackagerV2();
} else if (accepts.isFlowFileV1Accepted()) {
packager = new FlowFilePackagerV1();
}
}
// packager stays null when sending raw content or when no FlowFile format is acceptable.
if (packager == null) {
StreamUtils.copy(in, out);
} else {
final Map<String, String> flowFileAttributes;
if (isDestinationLegacyNiFi) {
// Old versions of NiFi expect nf.file.name and nf.file.path to indicate filename & path;
// in order to maintain backward compatibility, we copy the filename & path to those attribute keys.
flowFileAttributes = new HashMap<>(flowFile.getAttributes());
flowFileAttributes.put("nf.file.name", flowFile.getAttribute(CoreAttributes.FILENAME.key()));
flowFileAttributes.put("nf.file.path", flowFile.getAttribute(CoreAttributes.PATH.key()));
} else {
flowFileAttributes = flowFile.getAttributes();
}
packager.packageFlowFile(in, out, flowFileAttributes, flowFile.getSize());
}
}
}
});
}
out.flush();
}
}
}) {
@Override
public long getContentLength() {
if (compressionLevel == 0 && !sendAsFlowFile && !context.getProperty(CHUNKED_ENCODING).asBoolean()) {
return toSend.get(0).getSize();
} else {
return -1;
}
}
};
if (context.getProperty(CHUNKED_ENCODING).isSet()) {
entity.setChunked(context.getProperty(CHUNKED_ENCODING).asBoolean());
}
post.setEntity(entity);
post.setConfig(requestConfig);
final String contentType;
if (sendAsFlowFile) {
if (accepts.isFlowFileV3Accepted()) {
contentType = APPLICATION_FLOW_FILE_V3;
} else if (accepts.isFlowFileV2Accepted()) {
contentType = APPLICATION_FLOW_FILE_V2;
} else if (accepts.isFlowFileV1Accepted()) {
contentType = APPLICATION_FLOW_FILE_V1;
} else {
logger.error("Cannot send data to {} because the destination does not accept FlowFiles and this processor is " + "configured to deliver FlowFiles; rolling back session", new Object[] { url });
session.rollback();
context.yield();
IOUtils.closeQuietly(client);
return;
}
} else {
final String contentTypeValue = context.getProperty(CONTENT_TYPE).evaluateAttributeExpressions(toSend.get(0)).getValue();
contentType = StringUtils.isBlank(contentTypeValue) ? DEFAULT_CONTENT_TYPE : contentTypeValue;
}
final String attributeHeaderRegex = context.getProperty(ATTRIBUTES_AS_HEADERS_REGEX).getValue();
if (attributeHeaderRegex != null && !sendAsFlowFile && toSend.size() == 1) {
final Pattern pattern = Pattern.compile(attributeHeaderRegex);
final Map<String, String> attributes = toSend.get(0).getAttributes();
for (final Map.Entry<String, String> entry : attributes.entrySet()) {
final String key = entry.getKey();
if (pattern.matcher(key).matches()) {
post.setHeader(entry.getKey(), entry.getValue());
}
}
}
post.setHeader(CONTENT_TYPE_HEADER, contentType);
post.setHeader(FLOWFILE_CONFIRMATION_HEADER, "true");
post.setHeader(PROTOCOL_VERSION_HEADER, PROTOCOL_VERSION);
post.setHeader(TRANSACTION_ID_HEADER, transactionId);
if (compressionLevel > 0 && accepts.isGzipAccepted()) {
if (sendAsFlowFile) {
post.setHeader(GZIPPED_HEADER, "true");
} else {
post.setHeader(CONTENT_ENCODING_HEADER, CONTENT_ENCODING_GZIP_VALUE);
}
}
// Do the actual POST
final String flowFileDescription = toSend.size() <= 10 ? toSend.toString() : toSend.size() + " FlowFiles";
final String uploadDataRate;
final long uploadMillis;
CloseableHttpResponse response = null;
try {
final StopWatch stopWatch = new StopWatch(true);
response = client.execute(post);
// consume input stream entirely, ignoring its contents. If we
// don't do this, the Connection will not be returned to the pool
EntityUtils.consume(response.getEntity());
stopWatch.stop();
uploadDataRate = stopWatch.calculateDataRate(bytesToSend.get());
uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
} catch (final IOException e) {
logger.error("Failed to Post {} due to {}; transferring to failure", new Object[] { flowFileDescription, e });
context.yield();
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
}
return;
} finally {
if (response != null) {
try {
response.close();
} catch (final IOException e) {
getLogger().warn("Failed to close HTTP Response due to {}", new Object[] { e });
}
}
}
// If we get a 'SEE OTHER' status code and an HTTP header that indicates that the intent
// of the Location URI is a flowfile hold, we will store this holdUri. This prevents us
// from posting to some other webservice and then attempting to delete some resource to which
// we are redirected
final int responseCode = response.getStatusLine().getStatusCode();
final String responseReason = response.getStatusLine().getReasonPhrase();
String holdUri = null;
if (responseCode == HttpServletResponse.SC_SEE_OTHER) {
final Header locationUriHeader = response.getFirstHeader(LOCATION_URI_INTENT_NAME);
if (locationUriHeader != null) {
if (LOCATION_URI_INTENT_VALUE.equals(locationUriHeader.getValue())) {
final Header holdUriHeader = response.getFirstHeader(LOCATION_HEADER_NAME);
if (holdUriHeader != null) {
holdUri = holdUriHeader.getValue();
}
}
}
if (holdUri == null) {
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
logger.error("Failed to Post {} to {}: sent content and received status code {}:{} but no Hold URI", new Object[] { flowFile, url, responseCode, responseReason });
session.transfer(flowFile, REL_FAILURE);
}
return;
}
}
if (holdUri == null) {
if (responseCode == HttpServletResponse.SC_SERVICE_UNAVAILABLE) {
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
logger.error("Failed to Post {} to {}: response code was {}:{}; will yield processing, " + "since the destination is temporarily unavailable", new Object[] { flowFile, url, responseCode, responseReason });
session.transfer(flowFile, REL_FAILURE);
}
context.yield();
return;
}
if (responseCode >= 300) {
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
logger.error("Failed to Post {} to {}: response code was {}:{}", new Object[] { flowFile, url, responseCode, responseReason });
session.transfer(flowFile, REL_FAILURE);
}
return;
}
logger.info("Successfully Posted {} to {} in {} at a rate of {}", new Object[] { flowFileDescription, url, FormatUtils.formatMinutesSeconds(uploadMillis, TimeUnit.MILLISECONDS), uploadDataRate });
for (final FlowFile flowFile : toSend) {
session.getProvenanceReporter().send(flowFile, url, "Remote DN=" + dnHolder.get(), uploadMillis, true);
session.transfer(flowFile, REL_SUCCESS);
}
return;
}
//
// the response indicated a Hold URI; delete the Hold.
//
// determine the full URI of the FlowFile's Hold; unfortunately, the responses that are returned have
// changed over time, so we have to take into account a few different possibilities.
String fullHoldUri = holdUri;
if (holdUri.startsWith("/contentListener")) {
// If the Hold URI that we get starts with /contentListener, it may not really be /contentListener,
// as this really indicates that it should be whatever we posted to -- if posting directly to the
// ListenHTTP component, it will be /contentListener, but if posting to a proxy/load balancer, we may
// be posting to some other URL.
fullHoldUri = url + holdUri.substring(16);
} else if (holdUri.startsWith("/")) {
// URL indicates the full path but not hostname or port; use the same hostname & port that we posted
// to but use the full path indicated by the response.
int firstSlash = url.indexOf("/", 8);
if (firstSlash < 0) {
firstSlash = url.length();
}
final String beforeSlash = url.substring(0, firstSlash);
fullHoldUri = beforeSlash + holdUri;
} else if (!holdUri.startsWith("http")) {
// Relative URI; resolve it against the URL that we posted to
fullHoldUri = url + (url.endsWith("/") ? "" : "/") + holdUri;
}
final HttpDelete delete = new HttpDelete(fullHoldUri);
delete.setHeader(TRANSACTION_ID_HEADER, transactionId);
while (true) {
try {
final HttpResponse holdResponse = client.execute(delete);
EntityUtils.consume(holdResponse.getEntity());
final int holdStatusCode = holdResponse.getStatusLine().getStatusCode();
final String holdReason = holdResponse.getStatusLine().getReasonPhrase();
if (holdStatusCode >= 300) {
logger.error("Failed to delete Hold that destination placed on {}: got response code {}:{}; routing to failure", new Object[] { flowFileDescription, holdStatusCode, holdReason });
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
}
return;
}
logger.info("Successfully Posted {} to {} in {} milliseconds at a rate of {}", new Object[] { flowFileDescription, url, uploadMillis, uploadDataRate });
for (final FlowFile flowFile : toSend) {
session.getProvenanceReporter().send(flowFile, url);
session.transfer(flowFile, REL_SUCCESS);
}
return;
} catch (final IOException e) {
logger.warn("Failed to delete Hold that destination placed on {} due to {}", new Object[] { flowFileDescription, e });
}
if (!isScheduled()) {
context.yield();
logger.warn("Failed to delete Hold that destination placed on {}; Processor has been stopped so routing FlowFile(s) to failure", new Object[] { flowFileDescription });
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
}
return;
}
}
}
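The Hold-URI handling near the end condenses to three resolution rules. The sketch below restates them as a pure function (HoldUris and resolveHoldUri are hypothetical names; the logic mirrors the branches above).

    // Hypothetical helper restating the Hold-URI resolution branches above.
    public final class HoldUris {
        public static String resolveHoldUri(final String postedUrl, final String holdUri) {
            if (holdUri.startsWith("/contentListener")) {
                // May actually be a proxy/load-balancer path: replace the
                // "/contentListener" prefix (16 chars) with the URL we posted to.
                return postedUrl + holdUri.substring(16);
            }
            if (holdUri.startsWith("/")) {
                // Full path without host/port: keep scheme://host:port from the posted URL.
                int firstSlash = postedUrl.indexOf('/', 8); // index 8 skips "https://"
                if (firstSlash < 0) {
                    firstSlash = postedUrl.length();
                }
                return postedUrl.substring(0, firstSlash) + holdUri;
            }
            if (!holdUri.startsWith("http")) {
                // Relative URI: append it to the URL we posted to.
                return postedUrl + (postedUrl.endsWith("/") ? "" : "/") + holdUri;
            }
            return holdUri; // already an absolute URL
        }
    }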