Example usage of org.apache.nifi.stream.io.GZIPOutputStream in the Apache NiFi project: the CompressableRecordWriter class, resetWriteStream method.
/**
 * Resets the output streams to prepare for a new block of records. Flushes (and, when
 * compressing, closes) the current GZIP member, records the new block's offset in the
 * Table of Contents, and re-wraps the raw output stream for the next block.
 *
 * @param eventId the first event id that will be written to the new block; may be null,
 *                in which case no TOC block offset is recorded
 * @throws IOException if unable to flush/close the current streams properly; the writer
 *                     is marked dirty before the exception is rethrown
 */
protected void resetWriteStream(final Long eventId) throws IOException {
try {
// Push any buffered data through before we capture byte counts or swap streams.
if (out != null) {
out.flush();
}
// Carry the logical byte offset forward so the new counting stream continues the count.
final long byteOffset = (byteCountingOut == null) ? rawOutStream.getBytesWritten() : byteCountingOut.getBytesWritten();
final TocWriter tocWriter = getTocWriter();
if (compressed) {
// We don't have to check if the writer is dirty because we will have already checked before calling this method.
// Closing 'out' finalizes the current GZIP member so the next block starts a fresh one.
if (out != null) {
out.close();
}
// The TOC records the *raw* (post-compression) file offset at which the new block begins.
if (tocWriter != null && eventId != null) {
tocWriter.addBlockOffset(rawOutStream.getBytesWritten(), eventId);
}
// GZIP level 1 favors speed over ratio; NonCloseableOutputStream keeps the underlying
// raw stream open when this GZIP member is later closed.
final OutputStream writableStream = new BufferedOutputStream(new GZIPOutputStream(new NonCloseableOutputStream(rawOutStream), 1), 65536);
this.byteCountingOut = new ByteCountingOutputStream(writableStream, byteOffset);
} else {
if (tocWriter != null && eventId != null) {
tocWriter.addBlockOffset(rawOutStream.getBytesWritten(), eventId);
}
// Uncompressed: write straight through the raw counting stream.
this.byteCountingOut = rawOutStream;
}
this.out = new DataOutputStream(byteCountingOut);
resetDirtyFlag();
} catch (final IOException ioe) {
// Any failure here leaves the stream state unknown; flag the writer as unusable.
markDirty();
throw ioe;
}
}
Example usage of org.apache.nifi.stream.io.GZIPOutputStream in the Apache NiFi project: the EventFileCompressor class, compress method.
/**
 * Compresses an event file block-by-block, writing each block as a separate GZIP member
 * so that individual blocks remain independently seekable/decompressable. Block boundaries
 * are taken from the given {@link TocReader}, and the compressed offsets of each block are
 * recorded into the given {@link TocWriter}.
 *
 * @param input the uncompressed event file to read
 * @param tocReader the Table of Contents describing block offsets within {@code input}; closed on return
 * @param output the file to write compressed data to
 * @param tocWriter the Table of Contents writer for the compressed output; closed on return
 * @throws IOException if unable to read the input or write the compressed output
 */
public static void compress(final File input, final TocReader tocReader, final File output, final TocWriter tocWriter) throws IOException {
    try (final InputStream fis = new FileInputStream(input);
        final OutputStream fos = new FileOutputStream(output);
        final ByteCountingOutputStream byteCountingOut = new ByteCountingOutputStream(fos)) {
        int blockIndex = 0;
        while (true) {
            // Determine the min and max byte ranges for the current block.
            final long blockStart = tocReader.getBlockOffset(blockIndex);
            if (blockStart == -1) {
                // No more blocks in the TOC.
                break;
            }
            long blockEnd = tocReader.getBlockOffset(blockIndex + 1);
            if (blockEnd < 0) {
                // Last block runs to the end of the input file.
                blockEnd = input.length();
            }
            final long firstEventId = tocReader.getFirstEventIdForBlock(blockIndex);
            final long blockStartOffset = byteCountingOut.getBytesWritten();
            // Each block becomes its own GZIP member (level 1 = fastest); the
            // NonCloseableOutputStream keeps the shared output open when gzipOut closes.
            try (final OutputStream ncos = new NonCloseableOutputStream(byteCountingOut);
                final OutputStream gzipOut = new GZIPOutputStream(ncos, 1)) {
                StreamUtils.copy(fis, gzipOut, blockEnd - blockStart);
            }
            tocWriter.addBlockOffset(blockStartOffset, firstEventId);
            blockIndex++;
        }
    } finally {
        // Close the TOC Reader and TOC Writer even when an IOException aborts compression
        // above; previously they were only closed on the success path, leaking both on error.
        CloseableUtil.closeQuietly(tocReader, tocWriter);
    }
}
Example usage of org.apache.nifi.stream.io.GZIPOutputStream in the Apache NiFi project: the PostHTTP processor class, onTrigger method.
/**
 * Sends one or more FlowFiles to the configured URL via HTTP POST. FlowFiles may be batched
 * (when sending as FlowFile packages) up to MAX_BATCH_SIZE, optionally GZIP-compressed, and a
 * two-phase "Hold"/DELETE handshake is performed when the destination supports it.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
FlowFile firstFlowFile = session.get();
if (firstFlowFile == null) {
return;
}
final ComponentLog logger = getLogger();
// Validate the (possibly attribute-driven) URL before doing any network work.
final String url = context.getProperty(URL).evaluateAttributeExpressions(firstFlowFile).getValue();
try {
new java.net.URL(url);
} catch (final MalformedURLException e) {
logger.error("After substituting attribute values for {}, URL is {}; this is not a valid URL, so routing to failure", new Object[] { firstFlowFile, url });
firstFlowFile = session.penalize(firstFlowFile);
session.transfer(firstFlowFile, REL_FAILURE);
return;
}
final List<FlowFile> toSend = new ArrayList<>();
toSend.add(firstFlowFile);
final boolean sendAsFlowFile = context.getProperty(SEND_AS_FLOWFILE).asBoolean();
final int compressionLevel = context.getProperty(COMPRESSION_LEVEL).asInteger();
final String userAgent = context.getProperty(USER_AGENT).getValue();
// Build the per-request timeouts/redirect configuration.
final RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
requestConfigBuilder.setConnectionRequestTimeout(context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
requestConfigBuilder.setConnectTimeout(context.getProperty(CONNECTION_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
requestConfigBuilder.setRedirectsEnabled(false);
requestConfigBuilder.setSocketTimeout(context.getProperty(DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
final RequestConfig requestConfig = requestConfigBuilder.build();
final StreamThrottler throttler = throttlerRef.get();
final Double maxBatchBytes = context.getProperty(MAX_BATCH_SIZE).asDataSize(DataUnit.B);
final AtomicLong bytesToSend = new AtomicLong(firstFlowFile.getSize());
DestinationAccepts destinationAccepts = null;
CloseableHttpClient client = null;
final String transactionId = UUID.randomUUID().toString();
// Captures the peer's DN (when TLS is used) for provenance reporting.
final AtomicReference<String> dnHolder = new AtomicReference<>("none");
final Config config = getConfig(url, context);
final HttpClientConnectionManager conMan = config.getConnectionManager();
final HttpClientBuilder clientBuilder = HttpClientBuilder.create();
clientBuilder.setConnectionManager(conMan);
clientBuilder.setUserAgent(userAgent);
// Interceptor extracts the TLS peer's subject DN from each response's connection.
clientBuilder.addInterceptorFirst(new HttpResponseInterceptor() {
@Override
public void process(final HttpResponse response, final HttpContext httpContext) throws HttpException, IOException {
final HttpCoreContext coreContext = HttpCoreContext.adapt(httpContext);
final ManagedHttpClientConnection conn = coreContext.getConnection(ManagedHttpClientConnection.class);
if (!conn.isOpen()) {
return;
}
final SSLSession sslSession = conn.getSSLSession();
if (sslSession != null) {
final Certificate[] certChain = sslSession.getPeerCertificates();
if (certChain == null || certChain.length == 0) {
throw new SSLPeerUnverifiedException("No certificates found");
}
try {
final X509Certificate cert = CertificateUtils.convertAbstractX509Certificate(certChain[0]);
dnHolder.set(cert.getSubjectDN().getName().trim());
} catch (CertificateException e) {
final String msg = "Could not extract subject DN from SSL session peer certificate";
logger.warn(msg);
throw new SSLPeerUnverifiedException(msg);
}
}
}
});
clientBuilder.disableAutomaticRetries();
clientBuilder.disableContentCompression();
final String username = context.getProperty(USERNAME).getValue();
final String password = context.getProperty(PASSWORD).getValue();
// set the credentials if appropriate
if (username != null) {
final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
if (password == null) {
credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username));
} else {
credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username, password));
}
clientBuilder.setDefaultCredentialsProvider(credentialsProvider);
}
// Set the proxy if specified
if (context.getProperty(PROXY_HOST).isSet() && context.getProperty(PROXY_PORT).isSet()) {
final String host = context.getProperty(PROXY_HOST).getValue();
final int port = context.getProperty(PROXY_PORT).asInteger();
clientBuilder.setProxy(new HttpHost(host, port));
}
// NOTE(review): 'client' is only closed explicitly on the "destination doesn't accept
// FlowFiles" path below; other return paths rely on the shared connection manager.
// Confirm whether the connection manager ownership makes this safe.
client = clientBuilder.build();
// determine whether or not destination accepts flowfile/gzip
destinationAccepts = config.getDestinationAccepts();
if (destinationAccepts == null) {
try {
destinationAccepts = getDestinationAcceptance(sendAsFlowFile, client, url, getLogger(), transactionId);
config.setDestinationAccepts(destinationAccepts);
} catch (final IOException e) {
firstFlowFile = session.penalize(firstFlowFile);
session.transfer(firstFlowFile, REL_FAILURE);
logger.error("Unable to communicate with destination {} to determine whether or not it can accept " + "flowfiles/gzip; routing {} to failure due to {}", new Object[] { url, firstFlowFile, e });
context.yield();
return;
}
}
// then we can get more flowfiles from the session up to MAX_BATCH_SIZE for the same URL
if (sendAsFlowFile && (destinationAccepts.isFlowFileV3Accepted() || destinationAccepts.isFlowFileV2Accepted())) {
toSend.addAll(session.get(new FlowFileFilter() {
@Override
public FlowFileFilterResult filter(FlowFile flowFile) {
// if over MAX_BATCH_SIZE, then stop adding files
if (bytesToSend.get() + flowFile.getSize() > maxBatchBytes) {
return FlowFileFilterResult.REJECT_AND_TERMINATE;
}
// check URL to see if this flowfile can be included in the batch
final String urlToCheck = context.getProperty(URL).evaluateAttributeExpressions(flowFile).getValue();
if (url.equals(urlToCheck)) {
bytesToSend.addAndGet(flowFile.getSize());
return FlowFileFilterResult.ACCEPT_AND_CONTINUE;
} else {
return FlowFileFilterResult.REJECT_AND_CONTINUE;
}
}
}));
}
final HttpPost post = new HttpPost(url);
final DestinationAccepts accepts = destinationAccepts;
// Legacy NiFi destinations (no protocol version header) need nf.file.* attributes added.
final boolean isDestinationLegacyNiFi = accepts.getProtocolVersion() == null;
// Streams each FlowFile's content (optionally packaged and/or gzipped) into the request body.
final EntityTemplate entity = new EntityTemplate(new ContentProducer() {
@Override
public void writeTo(final OutputStream rawOut) throws IOException {
final OutputStream throttled = throttler == null ? rawOut : throttler.newThrottledOutputStream(rawOut);
OutputStream wrappedOut = new BufferedOutputStream(throttled);
if (compressionLevel > 0 && accepts.isGzipAccepted()) {
wrappedOut = new GZIPOutputStream(wrappedOut, compressionLevel);
}
try (final OutputStream out = wrappedOut) {
for (final FlowFile flowFile : toSend) {
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream rawIn) throws IOException {
try (final InputStream in = new BufferedInputStream(rawIn)) {
// Pick the newest FlowFile package format the destination accepts.
FlowFilePackager packager = null;
if (!sendAsFlowFile) {
packager = null;
} else if (accepts.isFlowFileV3Accepted()) {
packager = new FlowFilePackagerV3();
} else if (accepts.isFlowFileV2Accepted()) {
packager = new FlowFilePackagerV2();
} else if (accepts.isFlowFileV1Accepted()) {
packager = new FlowFilePackagerV1();
}
// formats is acceptable if sending as FlowFile.
if (packager == null) {
StreamUtils.copy(in, out);
} else {
final Map<String, String> flowFileAttributes;
if (isDestinationLegacyNiFi) {
// Old versions of NiFi expect nf.file.name and nf.file.path to indicate filename & path;
// in order to maintain backward compatibility, we copy the filename & path to those attribute keys.
flowFileAttributes = new HashMap<>(flowFile.getAttributes());
flowFileAttributes.put("nf.file.name", flowFile.getAttribute(CoreAttributes.FILENAME.key()));
flowFileAttributes.put("nf.file.path", flowFile.getAttribute(CoreAttributes.PATH.key()));
} else {
flowFileAttributes = flowFile.getAttributes();
}
packager.packageFlowFile(in, out, flowFileAttributes, flowFile.getSize());
}
}
}
});
}
out.flush();
}
}
}) {
@Override
public long getContentLength() {
// Content length is only known up-front for a single, uncompressed, unchunked raw file.
if (compressionLevel == 0 && !sendAsFlowFile && !context.getProperty(CHUNKED_ENCODING).asBoolean()) {
return toSend.get(0).getSize();
} else {
return -1;
}
}
};
if (context.getProperty(CHUNKED_ENCODING).isSet()) {
entity.setChunked(context.getProperty(CHUNKED_ENCODING).asBoolean());
}
post.setEntity(entity);
post.setConfig(requestConfig);
// Determine the Content-Type header: FlowFile package mime types or the configured type.
final String contentType;
if (sendAsFlowFile) {
if (accepts.isFlowFileV3Accepted()) {
contentType = APPLICATION_FLOW_FILE_V3;
} else if (accepts.isFlowFileV2Accepted()) {
contentType = APPLICATION_FLOW_FILE_V2;
} else if (accepts.isFlowFileV1Accepted()) {
contentType = APPLICATION_FLOW_FILE_V1;
} else {
logger.error("Cannot send data to {} because the destination does not accept FlowFiles and this processor is " + "configured to deliver FlowFiles; rolling back session", new Object[] { url });
session.rollback();
context.yield();
IOUtils.closeQuietly(client);
return;
}
} else {
final String contentTypeValue = context.getProperty(CONTENT_TYPE).evaluateAttributeExpressions(toSend.get(0)).getValue();
contentType = StringUtils.isBlank(contentTypeValue) ? DEFAULT_CONTENT_TYPE : contentTypeValue;
}
// Optionally copy matching FlowFile attributes onto the request as HTTP headers
// (only meaningful for a single raw FlowFile, not a packaged batch).
final String attributeHeaderRegex = context.getProperty(ATTRIBUTES_AS_HEADERS_REGEX).getValue();
if (attributeHeaderRegex != null && !sendAsFlowFile && toSend.size() == 1) {
final Pattern pattern = Pattern.compile(attributeHeaderRegex);
final Map<String, String> attributes = toSend.get(0).getAttributes();
for (final Map.Entry<String, String> entry : attributes.entrySet()) {
final String key = entry.getKey();
if (pattern.matcher(key).matches()) {
post.setHeader(entry.getKey(), entry.getValue());
}
}
}
post.setHeader(CONTENT_TYPE_HEADER, contentType);
post.setHeader(FLOWFILE_CONFIRMATION_HEADER, "true");
post.setHeader(PROTOCOL_VERSION_HEADER, PROTOCOL_VERSION);
post.setHeader(TRANSACTION_ID_HEADER, transactionId);
if (compressionLevel > 0 && accepts.isGzipAccepted()) {
if (sendAsFlowFile) {
post.setHeader(GZIPPED_HEADER, "true");
} else {
post.setHeader(CONTENT_ENCODING_HEADER, CONTENT_ENCODING_GZIP_VALUE);
}
}
// Do the actual POST
final String flowFileDescription = toSend.size() <= 10 ? toSend.toString() : toSend.size() + " FlowFiles";
final String uploadDataRate;
final long uploadMillis;
CloseableHttpResponse response = null;
try {
final StopWatch stopWatch = new StopWatch(true);
response = client.execute(post);
// consume input stream entirely, ignoring its contents. If we
// don't do this, the Connection will not be returned to the pool
EntityUtils.consume(response.getEntity());
stopWatch.stop();
uploadDataRate = stopWatch.calculateDataRate(bytesToSend.get());
uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
} catch (final IOException e) {
logger.error("Failed to Post {} due to {}; transferring to failure", new Object[] { flowFileDescription, e });
context.yield();
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
}
return;
} finally {
if (response != null) {
try {
response.close();
} catch (final IOException e) {
getLogger().warn("Failed to close HTTP Response due to {}", new Object[] { e });
}
}
}
// NOTE(review): the status line below is read AFTER response.close() in the finally
// block above; this works because the status line is buffered by HttpClient, but it
// is fragile — consider capturing status code/reason inside the try block instead.
// If we get a 'SEE OTHER' status code and an HTTP header that indicates that the intent
// of the Location URI is a flowfile hold, we will store this holdUri. This prevents us
// from posting to some other webservice and then attempting to delete some resource to which
// we are redirected
final int responseCode = response.getStatusLine().getStatusCode();
final String responseReason = response.getStatusLine().getReasonPhrase();
String holdUri = null;
if (responseCode == HttpServletResponse.SC_SEE_OTHER) {
final Header locationUriHeader = response.getFirstHeader(LOCATION_URI_INTENT_NAME);
if (locationUriHeader != null) {
if (LOCATION_URI_INTENT_VALUE.equals(locationUriHeader.getValue())) {
final Header holdUriHeader = response.getFirstHeader(LOCATION_HEADER_NAME);
if (holdUriHeader != null) {
holdUri = holdUriHeader.getValue();
}
}
}
// SEE OTHER without a hold URI means the destination did not follow the protocol.
if (holdUri == null) {
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
logger.error("Failed to Post {} to {}: sent content and received status code {}:{} but no Hold URI", new Object[] { flowFile, url, responseCode, responseReason });
session.transfer(flowFile, REL_FAILURE);
}
return;
}
}
// No hold URI: handle plain success/failure responses directly.
if (holdUri == null) {
if (responseCode == HttpServletResponse.SC_SERVICE_UNAVAILABLE) {
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
logger.error("Failed to Post {} to {}: response code was {}:{}; will yield processing, " + "since the destination is temporarily unavailable", new Object[] { flowFile, url, responseCode, responseReason });
session.transfer(flowFile, REL_FAILURE);
}
context.yield();
return;
}
if (responseCode >= 300) {
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
logger.error("Failed to Post {} to {}: response code was {}:{}", new Object[] { flowFile, url, responseCode, responseReason });
session.transfer(flowFile, REL_FAILURE);
}
return;
}
logger.info("Successfully Posted {} to {} in {} at a rate of {}", new Object[] { flowFileDescription, url, FormatUtils.formatMinutesSeconds(uploadMillis, TimeUnit.MILLISECONDS), uploadDataRate });
for (final FlowFile flowFile : toSend) {
session.getProvenanceReporter().send(flowFile, url, "Remote DN=" + dnHolder.get(), uploadMillis, true);
session.transfer(flowFile, REL_SUCCESS);
}
return;
}
//
// the response indicated a Hold URI; delete the Hold.
//
// determine the full URI of the Flow File's Hold; Unfortunately, the responses that are returned have
// changed over the past, so we have to take into account a few different possibilities.
String fullHoldUri = holdUri;
if (holdUri.startsWith("/contentListener")) {
// If the Hold URI that we get starts with /contentListener, it may not really be /contentListener,
// as this really indicates that it should be whatever we posted to -- if posting directly to the
// ListenHTTP component, it will be /contentListener, but if posting to a proxy/load balancer, we may
// be posting to some other URL.
fullHoldUri = url + holdUri.substring(16);
} else if (holdUri.startsWith("/")) {
// URL indicates the full path but not hostname or port; use the same hostname & port that we posted
// to but use the full path indicated by the response.
int firstSlash = url.indexOf("/", 8);
if (firstSlash < 0) {
firstSlash = url.length();
}
final String beforeSlash = url.substring(0, firstSlash);
fullHoldUri = beforeSlash + holdUri;
} else if (!holdUri.startsWith("http")) {
// Absolute URL
fullHoldUri = url + (url.endsWith("/") ? "" : "/") + holdUri;
}
final HttpDelete delete = new HttpDelete(fullHoldUri);
delete.setHeader(TRANSACTION_ID_HEADER, transactionId);
// Retry the Hold deletion until it succeeds, fails with >=300, or the processor is stopped.
while (true) {
try {
final HttpResponse holdResponse = client.execute(delete);
EntityUtils.consume(holdResponse.getEntity());
final int holdStatusCode = holdResponse.getStatusLine().getStatusCode();
final String holdReason = holdResponse.getStatusLine().getReasonPhrase();
if (holdStatusCode >= 300) {
logger.error("Failed to delete Hold that destination placed on {}: got response code {}:{}; routing to failure", new Object[] { flowFileDescription, holdStatusCode, holdReason });
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
}
return;
}
logger.info("Successfully Posted {} to {} in {} milliseconds at a rate of {}", new Object[] { flowFileDescription, url, uploadMillis, uploadDataRate });
for (final FlowFile flowFile : toSend) {
session.getProvenanceReporter().send(flowFile, url);
session.transfer(flowFile, REL_SUCCESS);
}
return;
} catch (final IOException e) {
// Transient failure deleting the hold; loop and retry unless stopped below.
logger.warn("Failed to delete Hold that destination placed on {} due to {}", new Object[] { flowFileDescription, e });
}
if (!isScheduled()) {
context.yield();
logger.warn("Failed to delete Hold that destination placed on {}; Processor has been stopped so routing FlowFile(s) to failure", new Object[] { flowFileDescription });
for (FlowFile flowFile : toSend) {
flowFile = session.penalize(flowFile);
session.transfer(flowFile, REL_FAILURE);
}
return;
}
}
}
Example usage of org.apache.nifi.stream.io.GZIPOutputStream in the Apache NiFi project: the CompressContent processor class, onTrigger method.
/**
 * Compresses or decompresses a FlowFile's content using the configured (or MIME-type-derived)
 * compression format, updating the mime.type attribute and optionally the filename extension.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
final ComponentLog logger = getLogger();
final long sizeBeforeCompression = flowFile.getSize();
final String compressionMode = context.getProperty(MODE).getValue();
String compressionFormatValue = context.getProperty(COMPRESSION_FORMAT).getValue();
// When format is "use mime.type attribute", resolve the actual format from the FlowFile.
if (compressionFormatValue.equals(COMPRESSION_FORMAT_ATTRIBUTE)) {
final String mimeType = flowFile.getAttribute(CoreAttributes.MIME_TYPE.key());
if (mimeType == null) {
logger.error("No {} attribute exists for {}; routing to failure", new Object[] { CoreAttributes.MIME_TYPE.key(), flowFile });
session.transfer(flowFile, REL_FAILURE);
return;
}
compressionFormatValue = compressionFormatMimeTypeMap.get(mimeType);
if (compressionFormatValue == null) {
// Unknown mime type is not an error: pass the FlowFile through untouched.
logger.info("Mime Type of {} is '{}', which does not indicate a supported Compression Format; routing to success without decompressing", new Object[] { flowFile, mimeType });
session.transfer(flowFile, REL_SUCCESS);
return;
}
}
final String compressionFormat = compressionFormatValue;
final AtomicReference<String> mimeTypeRef = new AtomicReference<>(null);
final StopWatch stopWatch = new StopWatch(true);
// File extension to append (compress) or strip (decompress) when UPDATE_FILENAME is set.
final String fileExtension;
switch(compressionFormat.toLowerCase()) {
case COMPRESSION_FORMAT_GZIP:
fileExtension = ".gz";
break;
case COMPRESSION_FORMAT_LZMA:
fileExtension = ".lzma";
break;
case COMPRESSION_FORMAT_XZ_LZMA2:
fileExtension = ".xz";
break;
case COMPRESSION_FORMAT_BZIP2:
fileExtension = ".bz2";
break;
case COMPRESSION_FORMAT_SNAPPY:
fileExtension = ".snappy";
break;
case COMPRESSION_FORMAT_SNAPPY_FRAMED:
fileExtension = ".sz";
break;
default:
fileExtension = "";
break;
}
try {
flowFile = session.write(flowFile, new StreamCallback() {
@Override
public void process(final InputStream rawIn, final OutputStream rawOut) throws IOException {
final OutputStream compressionOut;
final InputStream compressionIn;
final OutputStream bufferedOut = new BufferedOutputStream(rawOut, 65536);
final InputStream bufferedIn = new BufferedInputStream(rawIn, 65536);
try {
if (MODE_COMPRESS.equalsIgnoreCase(compressionMode)) {
// Compressing: wrap the output side with the chosen codec; input passes through.
compressionIn = bufferedIn;
switch(compressionFormat.toLowerCase()) {
case COMPRESSION_FORMAT_GZIP:
final int compressionLevel = context.getProperty(COMPRESSION_LEVEL).asInteger();
compressionOut = new GZIPOutputStream(bufferedOut, compressionLevel);
mimeTypeRef.set("application/gzip");
break;
case COMPRESSION_FORMAT_LZMA:
compressionOut = new LzmaOutputStream.Builder(bufferedOut).build();
mimeTypeRef.set("application/x-lzma");
break;
case COMPRESSION_FORMAT_XZ_LZMA2:
compressionOut = new XZOutputStream(bufferedOut, new LZMA2Options());
mimeTypeRef.set("application/x-xz");
break;
case COMPRESSION_FORMAT_SNAPPY:
compressionOut = new SnappyOutputStream(bufferedOut);
mimeTypeRef.set("application/x-snappy");
break;
case COMPRESSION_FORMAT_SNAPPY_FRAMED:
compressionOut = new SnappyFramedOutputStream(bufferedOut);
mimeTypeRef.set("application/x-snappy-framed");
break;
case COMPRESSION_FORMAT_BZIP2:
default:
mimeTypeRef.set("application/x-bzip2");
compressionOut = new CompressorStreamFactory().createCompressorOutputStream(compressionFormat.toLowerCase(), bufferedOut);
break;
}
} else {
// Decompressing: wrap the input side with the chosen codec; output passes through.
compressionOut = bufferedOut;
switch(compressionFormat.toLowerCase()) {
case COMPRESSION_FORMAT_LZMA:
compressionIn = new LzmaInputStream(bufferedIn, new Decoder());
break;
case COMPRESSION_FORMAT_XZ_LZMA2:
compressionIn = new XZInputStream(bufferedIn);
break;
case COMPRESSION_FORMAT_BZIP2:
// need this two-arg constructor to support concatenated streams
compressionIn = new BZip2CompressorInputStream(bufferedIn, true);
break;
case COMPRESSION_FORMAT_GZIP:
// 'true' enables decompression of concatenated gzip members.
compressionIn = new GzipCompressorInputStream(bufferedIn, true);
break;
case COMPRESSION_FORMAT_SNAPPY:
compressionIn = new SnappyInputStream(bufferedIn);
break;
case COMPRESSION_FORMAT_SNAPPY_FRAMED:
compressionIn = new SnappyFramedInputStream(bufferedIn);
break;
default:
compressionIn = new CompressorStreamFactory().createCompressorInputStream(compressionFormat.toLowerCase(), bufferedIn);
}
}
} catch (final Exception e) {
// Codec construction failed; close what we opened and surface as IOException.
closeQuietly(bufferedOut);
throw new IOException(e);
}
// Copy all content through the codec streams; closing them finalizes the output.
try (final InputStream in = compressionIn;
final OutputStream out = compressionOut) {
final byte[] buffer = new byte[8192];
int len;
while ((len = in.read(buffer)) > 0) {
out.write(buffer, 0, len);
}
out.flush();
}
}
});
stopWatch.stop();
final long sizeAfterCompression = flowFile.getSize();
if (MODE_DECOMPRESS.equalsIgnoreCase(compressionMode)) {
// Decompressed content no longer matches the compressed mime type.
flowFile = session.removeAttribute(flowFile, CoreAttributes.MIME_TYPE.key());
if (context.getProperty(UPDATE_FILENAME).asBoolean()) {
final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key());
if (filename.toLowerCase().endsWith(fileExtension)) {
flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), filename.substring(0, filename.length() - fileExtension.length()));
}
}
} else {
flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), mimeTypeRef.get());
if (context.getProperty(UPDATE_FILENAME).asBoolean()) {
final String filename = flowFile.getAttribute(CoreAttributes.FILENAME.key());
flowFile = session.putAttribute(flowFile, CoreAttributes.FILENAME.key(), filename + fileExtension);
}
}
logger.info("Successfully {}ed {} using {} compression format; size changed from {} to {} bytes", new Object[] { compressionMode.toLowerCase(), flowFile, compressionFormat, sizeBeforeCompression, sizeAfterCompression });
session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getDuration(TimeUnit.MILLISECONDS));
session.transfer(flowFile, REL_SUCCESS);
} catch (final ProcessException e) {
logger.error("Unable to {} {} using {} compression format due to {}; routing to failure", new Object[] { compressionMode.toLowerCase(), flowFile, compressionFormat, e });
session.transfer(flowFile, REL_FAILURE);
}
}
Aggregations