Use of org.apache.nifi.stream.io.ByteArrayInputStream in project nifi by apache.
The class StandardHttpFlowFileServerProtocol, method readTransactionResponse.
@Override
protected Response readTransactionResponse(boolean isTransfer, CommunicationsSession commsSession) throws IOException {
    // Returns Response based on current status.
    HttpServerCommunicationsSession commSession = (HttpServerCommunicationsSession) commsSession;
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    Transaction.TransactionState currentStatus = commSession.getStatus();
    if (isTransfer) {
        switch (currentStatus) {
            case DATA_EXCHANGED:
                String clientChecksum = commSession.getChecksum();
                logger.debug("readTransactionResponse. clientChecksum={}", clientChecksum);
                ResponseCode.CONFIRM_TRANSACTION.writeResponse(new DataOutputStream(bos), clientChecksum);
                break;
            case TRANSACTION_CONFIRMED:
                logger.debug("readTransactionResponse. finishing.");
                ResponseCode.TRANSACTION_FINISHED.writeResponse(new DataOutputStream(bos));
                break;
        }
    } else {
        switch (currentStatus) {
            case TRANSACTION_STARTED:
                logger.debug("readTransactionResponse. returning CONTINUE_TRANSACTION.");
                // We don't know if there's more data to receive, so just continue it.
                ResponseCode.CONTINUE_TRANSACTION.writeResponse(new DataOutputStream(bos));
                break;
            case TRANSACTION_CONFIRMED:
                // Checksum was successfully validated at client side, or BAD_CHECKSUM is returned.
                ResponseCode responseCode = commSession.getResponseCode();
                logger.debug("readTransactionResponse. responseCode={}", responseCode);
                if (responseCode.containsMessage()) {
                    responseCode.writeResponse(new DataOutputStream(bos), "");
                } else {
                    responseCode.writeResponse(new DataOutputStream(bos));
                }
                break;
        }
    }
    ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
    return Response.read(new DataInputStream(bis));
}
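The method builds the wire-format bytes in memory with a ByteArrayOutputStream and then immediately re-parses them through a ByteArrayInputStream, so the caller receives a Response object instead of raw bytes. Below is a minimal sketch of that write-then-reread round trip using only java.io; the framing here (one code byte, a flag byte, an optional UTF-8 message) is a hypothetical stand-in for illustration, not NiFi's actual Response wire format.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class InMemoryResponseRoundTrip {

    // Hypothetical framing, standing in for ResponseCode.writeResponse(...):
    // one code byte, then a presence flag, then an optional UTF-8 message.
    static void writeResponse(final DataOutputStream dos, final int code, final String message) throws IOException {
        dos.writeByte(code);
        dos.writeBoolean(message != null);
        if (message != null) {
            dos.writeUTF(message);
        }
    }

    public static void main(String[] args) throws IOException {
        // Serialize the response into memory, as readTransactionResponse does.
        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
        writeResponse(new DataOutputStream(bos), 12, "checksum-1234");

        // Re-parse the same bytes, standing in for Response.read(new DataInputStream(bis)).
        final DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        final int code = dis.readByte();
        final String message = dis.readBoolean() ? dis.readUTF() : null;
        System.out.println("code=" + code + ", message=" + message);
    }
}

Round-tripping through a byte array lets the HTTP transport reuse the stream-oriented Response.read(DataInputStream) parser unchanged.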
Use of org.apache.nifi.stream.io.ByteArrayInputStream in project nifi by apache.
The class BeatsDecoder, method processPAYLOAD.
/**
 * Process the outer PAYLOAD byte by byte. Once the data has been read, the state is set to COMPLETE so that the
 * data payload can be processed fully using {@link #splitCompressedFrames(byte[])}.
 */
private void processPAYLOAD(final byte b) {
    currBytes.write(b);
    switch (decodedFrameType) {
        case FRAME_WINDOWSIZE: // 'W'
            if (currBytes.size() < WINDOWSIZE_LENGTH) {
                logger.trace("Beats currBytes contents are {}", new Object[] { currBytes.toString() });
                break;
            } else if (currBytes.size() == WINDOWSIZE_LENGTH) {
                frameBuilder.dataSize = ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 2, 6)).getInt() & 0x00000000ffffffffL;
                logger.debug("Data size is {}", new Object[] { frameBuilder.dataSize });
                // Sets payload to empty as the frame contains no data
                frameBuilder.payload(new byte[] {});
                currBytes.reset();
                currState = BeatsState.COMPLETE;
                windowSize = frameBuilder.dataSize;
                break;
            } else {
                // Should never be here to be honest...
                logger.debug("Saw a packet I should not have seen. Packet contents were {}", new Object[] { currBytes.toString() });
                break;
            }
        case FRAME_COMPRESSED: // 'C'
            if (currBytes.size() < COMPRESSED_MIN_LENGTH) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Beats currBytes contents are {}", new Object[] { currBytes.toString() });
                }
                break;
            } else if (currBytes.size() >= COMPRESSED_MIN_LENGTH) {
                // The buffer contains at least the minimum frame size
                frameBuilder.dataSize = ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 2, 6)).getInt() & 0x00000000ffffffffL;
                if (currBytes.size() - 6 == frameBuilder.dataSize) {
                    try {
                        byte[] buf = java.util.Arrays.copyOfRange(currBytes.toByteArray(), 6, currBytes.size());
                        InputStream in = new InflaterInputStream(new ByteArrayInputStream(buf));
                        ByteArrayOutputStream out = new ByteArrayOutputStream();
                        byte[] buffer = new byte[1024];
                        int len;
                        while ((len = in.read(buffer)) > 0) {
                            out.write(buffer, 0, len);
                        }
                        in.close();
                        out.close();
                        unprocessedData = out.toByteArray();
                        // buf is no longer needed
                        buf = null;
                        logger.debug("Finished decompressing data");
                        // Decompression is complete; we should be able to proceed by resetting currBytes and currState
                        // and iterating over the decompressed contents as type 'D' frames
                        frameBuilder.dataSize(unprocessedData.length);
                        currState = BeatsState.COMPLETE;
                    } catch (IOException e) {
                        throw new BeatsFrameException("Error decompressing frame: " + e.getMessage(), e);
                    }
                }
                break;
            } else {
                // Unreachable: currBytes.size() is neither below COMPRESSED_MIN_LENGTH nor at or above it.
                // Should never be here to be honest...
                if (logger.isDebugEnabled()) {
                    logger.debug("Received a compressed frame with partial data or invalid content. The packet contents were {}", new Object[] { currBytes.toString() });
                }
                break;
            }
        case FRAME_JSON: // 'J'
            if (currBytes.size() < JSON_MIN_LENGTH) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Beats currBytes contents are {}", new Object[] { currBytes.toString() });
                }
                break;
            } else if (currBytes.size() == JSON_MIN_LENGTH) {
                // Read the sequence number from bytes
                frameBuilder.seqNumber = (int) (ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 2, 6)).getInt() & 0x00000000ffffffffL);
                // Read the JSON payload length
                frameBuilder.dataSize = ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 6, 10)).getInt() & 0x00000000ffffffffL;
            } else if (currBytes.size() > JSON_MIN_LENGTH) {
                // Wait for the payload to be fully read and then complete processing
                if (currBytes.size() - 10 == frameBuilder.dataSize) {
                    // Transfer the current payload so it can be processed by the {@link #splitCompressedFrames} method.
                    frameBuilder.payload = java.util.Arrays.copyOfRange(currBytes.toByteArray(), 10, currBytes.size());
                    currState = BeatsState.COMPLETE;
                }
                break;
            }
    }
}
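The FRAME_COMPRESSED branch inflates the zlib-compressed frame body by wrapping the bytes in an InflaterInputStream over a ByteArrayInputStream. Here is a self-contained sketch of just that decompression step; the sample payload is invented, and in the real decoder the inflated bytes are themselves a sequence of inner Beats frames rather than plain JSON.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

public class InflateFrameBody {
    public static void main(String[] args) throws IOException {
        // Build a zlib-compressed payload, standing in for the body of a 'C' frame.
        final byte[] original = "{\"message\":\"hello beats\"}".getBytes(StandardCharsets.UTF_8);
        final ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (DeflaterOutputStream deflater = new DeflaterOutputStream(compressed)) {
            deflater.write(original);
        }

        // Inflate it the same way the FRAME_COMPRESSED branch does.
        final InputStream in = new InflaterInputStream(new ByteArrayInputStream(compressed.toByteArray()));
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        final byte[] buffer = new byte[1024];
        int len;
        while ((len = in.read(buffer)) > 0) {
            out.write(buffer, 0, len);
        }
        in.close();
        System.out.println(new String(out.toByteArray(), StandardCharsets.UTF_8));
    }
}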
Use of org.apache.nifi.stream.io.ByteArrayInputStream in project nifi by apache.
The class TestSplitAvro, method testRecordSplitDatafileOutputWithoutMetadata.
@Test
public void testRecordSplitDatafileOutputWithoutMetadata() throws IOException {
    final TestRunner runner = TestRunners.newTestRunner(new SplitAvro());
    runner.setProperty(SplitAvro.TRANSFER_METADATA, "false");
    runner.enqueue(users.toByteArray());
    runner.run();
    runner.assertTransferCount(SplitAvro.REL_SPLIT, 100);
    runner.assertTransferCount(SplitAvro.REL_ORIGINAL, 1);
    runner.assertTransferCount(SplitAvro.REL_FAILURE, 0);
    runner.getFlowFilesForRelationship(SplitAvro.REL_ORIGINAL).get(0).assertAttributeEquals(FRAGMENT_COUNT.key(), "100");
    final List<MockFlowFile> flowFiles = runner.getFlowFilesForRelationship(SplitAvro.REL_SPLIT);
    checkDataFileSplitSize(flowFiles, 1, false);
    for (final MockFlowFile flowFile : flowFiles) {
        try (final ByteArrayInputStream in = new ByteArrayInputStream(flowFile.toByteArray());
             final DataFileStream<GenericRecord> reader = new DataFileStream<>(in, new GenericDatumReader<GenericRecord>())) {
            Assert.assertFalse(reader.getMetaKeys().contains(META_KEY1));
            Assert.assertFalse(reader.getMetaKeys().contains(META_KEY2));
            Assert.assertFalse(reader.getMetaKeys().contains(META_KEY3));
        }
    }
}
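The final assertions rely on Avro container-file metadata: keys written with DataFileWriter.setMeta(...) are stored in the file header and exposed through DataFileStream.getMetaKeys() on read. A standalone sketch of that round trip follows; the single-field schema and the metaKey1 name are invented for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;

public class AvroMetadataRoundTrip {
    public static void main(String[] args) throws IOException {
        final Schema schema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                + "{\"name\":\"name\",\"type\":\"string\"}]}");

        // Write a data file to memory with one custom metadata entry.
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        try (DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<GenericRecord>(schema))) {
            writer.setMeta("metaKey1", "metaValue1"); // must be set before create()
            writer.create(schema, out);
            final GenericRecord user = new GenericData.Record(schema);
            user.put("name", "alice");
            writer.append(user);
        }

        // Read it back and inspect the metadata keys, as the test does.
        try (DataFileStream<GenericRecord> reader = new DataFileStream<>(
                new ByteArrayInputStream(out.toByteArray()), new GenericDatumReader<GenericRecord>())) {
            System.out.println(reader.getMetaKeys().contains("metaKey1")); // true
        }
    }
}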
Use of org.apache.nifi.stream.io.ByteArrayInputStream in project nifi by apache.
The class TestSplitAvro, method checkBareRecordsSplitSize.
private void checkBareRecordsSplitSize(final List<MockFlowFile> flowFiles, final int expectedRecordsPerSplit, final boolean checkMetadata) throws IOException {
    for (final MockFlowFile flowFile : flowFiles) {
        try (final ByteArrayInputStream in = new ByteArrayInputStream(flowFile.toByteArray())) {
            final DatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
            final Decoder decoder = DecoderFactory.get().binaryDecoder(in, null);
            int count = 0;
            GenericRecord record = reader.read(null, decoder);
            try {
                while (record != null) {
                    Assert.assertNotNull(record.get("name"));
                    Assert.assertNotNull(record.get("favorite_number"));
                    count++;
                    record = reader.read(record, decoder);
                }
            } catch (EOFException eof) {
                // expected
            }
            assertEquals(expectedRecordsPerSplit, count);
        }
        if (checkMetadata) {
            Assert.assertTrue(flowFile.getAttributes().containsKey(META_KEY1));
            Assert.assertTrue(flowFile.getAttributes().containsKey(META_KEY2));
            Assert.assertTrue(flowFile.getAttributes().containsKey(META_KEY3));
        }
    }
}
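Unlike the data-file checks, this helper reads bare records: raw Avro binary with no container header, so the schema must be supplied to the reader explicitly and end-of-input surfaces as an EOFException rather than hasNext() returning false. Here is a standalone sketch of that encode/decode loop, using an invented single-field schema.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.EOFException;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;

public class BareRecordRoundTrip {
    public static void main(String[] args) throws IOException {
        final Schema schema = new Schema.Parser().parse(
                "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
                + "{\"name\":\"name\",\"type\":\"string\"}]}");

        // Encode two bare records back to back, with no container header.
        final ByteArrayOutputStream out = new ByteArrayOutputStream();
        final BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
        final GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
        for (final String name : new String[] { "alice", "bob" }) {
            final GenericRecord user = new GenericData.Record(schema);
            user.put("name", name);
            writer.write(user, encoder);
        }
        encoder.flush();

        // Decode until EOF, mirroring checkBareRecordsSplitSize: the reader needs
        // the schema up front because bare records do not embed it.
        final GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
        final Decoder decoder = DecoderFactory.get().binaryDecoder(new ByteArrayInputStream(out.toByteArray()), null);
        int count = 0;
        try {
            while (true) {
                reader.read(null, decoder);
                count++;
            }
        } catch (EOFException eof) {
            // expected: no more records
        }
        System.out.println(count); // 2
    }
}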
Use of org.apache.nifi.stream.io.ByteArrayInputStream in project nifi by apache.
The class TestSplitAvro, method checkDataFileTotalSize.
private void checkDataFileTotalSize(List<MockFlowFile> flowFiles, int expectedTotalRecords) throws IOException {
    int count = 0;
    for (final MockFlowFile flowFile : flowFiles) {
        try (final ByteArrayInputStream in = new ByteArrayInputStream(flowFile.toByteArray());
             final DataFileStream<GenericRecord> reader = new DataFileStream<>(in, new GenericDatumReader<GenericRecord>())) {
            GenericRecord record = null;
            while (reader.hasNext()) {
                record = reader.next(record);
                Assert.assertNotNull(record.get("name"));
                Assert.assertNotNull(record.get("favorite_number"));
                count++;
            }
        }
    }
    assertEquals(expectedTotalRecords, count);
}