Use of org.apache.nifi.stream.io.ByteArrayOutputStream in project nifi by apache.
The class DataTransferResource, method transferFlowFiles.
@GET
@Consumes(MediaType.WILDCARD)
@Produces(MediaType.APPLICATION_OCTET_STREAM)
@Path("output-ports/{portId}/transactions/{transactionId}/flow-files")
@ApiOperation(value = "Transfer flow files from the output port", response = StreamingOutput.class, authorizations = { @Authorization(value = "Write - /data-transfer/output-ports/{uuid}") })
@ApiResponses(value = { @ApiResponse(code = 200, message = "There is no flow file to return."), @ApiResponse(code = 400, message = "NiFi was unable to complete the request because it was invalid. The request should not be retried without modification."), @ApiResponse(code = 401, message = "Client could not be authenticated."), @ApiResponse(code = 403, message = "Client is not authorized to make this request."), @ApiResponse(code = 404, message = "The specified resource could not be found."), @ApiResponse(code = 409, message = "The request was valid but NiFi was not in the appropriate state to process it. Retrying the same request later may be successful."), @ApiResponse(code = 503, message = "NiFi instance is not ready for serving request, or temporarily overloaded. Retrying the same request later may be successful") })
public Response transferFlowFiles(@ApiParam(value = "The output port id.", required = true) @PathParam("portId") String portId, @PathParam("transactionId") String transactionId, @Context HttpServletRequest req, @Context HttpServletResponse res, @Context ServletContext context, InputStream inputStream) {
    // authorize access
    serviceFacade.authorizeAccess(lookup -> {
        authorizeDataTransfer(lookup, ResourceType.OutputPort, portId);
    });
    final ValidateRequestResult validationResult = validateResult(req, portId, transactionId);
    if (validationResult.errResponse != null) {
        return validationResult.errResponse;
    }
    logger.debug("transferFlowFiles request: portId={}", portId);
    // Before opening the real output stream for the HTTP response,
    // use this temporary output stream to buffer the handshake result.
    final ByteArrayOutputStream tempBos = new ByteArrayOutputStream();
    final Peer peer = constructPeer(req, inputStream, tempBos, portId, transactionId);
    final int transportProtocolVersion = validationResult.transportProtocolVersion;
    try {
        final HttpFlowFileServerProtocol serverProtocol = initiateServerProtocol(req, peer, transportProtocolVersion);
        StreamingOutput flowFileContent = new StreamingOutput() {
            @Override
            public void write(OutputStream outputStream) throws IOException, WebApplicationException {
                HttpOutput output = (HttpOutput) peer.getCommunicationsSession().getOutput();
                output.setOutputStream(outputStream);
                try {
                    int numOfFlowFiles = serverProtocol.getPort().transferFlowFiles(peer, serverProtocol);
                    logger.debug("finished transferring flow files, numOfFlowFiles={}", numOfFlowFiles);
                    if (numOfFlowFiles < 1) {
                        // There was no flow file to transfer. Throw this exception to stop responding with SEE OTHER.
                        throw new WebApplicationException(Response.Status.OK);
                    }
                } catch (NotAuthorizedException | BadRequestException | RequestExpiredException e) {
                    // The handshake is done outside of the write() method, so these exceptions wouldn't be thrown here.
                    throw new IOException("Failed to process the request.", e);
                }
            }
        };
        return responseCreator.acceptedResponse(transactionManager, flowFileContent, transportProtocolVersion);
    } catch (HandshakeException e) {
        return responseCreator.handshakeExceptionResponse(e);
    } catch (Exception e) {
        return responseCreator.unexpectedErrorResponse(portId, e);
    }
}
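The key pattern here is buffering protocol output in a ByteArrayOutputStream until the real servlet response stream exists, and then swapping the destination over. A minimal sketch of that buffer-then-replay idea, using a hypothetical BufferedTarget class (not part of the NiFi API) and the JDK ByteArrayOutputStream:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Hypothetical illustration: write early output into a temporary buffer,
// then replay it once the real destination stream becomes available.
class BufferedTarget {
    private final ByteArrayOutputStream tempBos = new ByteArrayOutputStream();
    private OutputStream target = tempBos;

    void write(byte[] data) throws IOException {
        target.write(data);
    }

    // Called once the HTTP response stream is open, mirroring the role of
    // HttpOutput.setOutputStream() in the method above.
    void setOutputStream(OutputStream real) throws IOException {
        tempBos.writeTo(real); // replay anything buffered during the handshake
        target = real;
    }
}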
Use of org.apache.nifi.stream.io.ByteArrayOutputStream in project nifi by apache.
The class StandardHttpFlowFileServerProtocol, method readTransactionResponse.
@Override
protected Response readTransactionResponse(boolean isTransfer, CommunicationsSession commsSession) throws IOException {
    // Returns Response based on current status.
    HttpServerCommunicationsSession commSession = (HttpServerCommunicationsSession) commsSession;
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    Transaction.TransactionState currentStatus = commSession.getStatus();
    if (isTransfer) {
        switch (currentStatus) {
            case DATA_EXCHANGED:
                String clientChecksum = commSession.getChecksum();
                logger.debug("readTransactionResponse. clientChecksum={}", clientChecksum);
                ResponseCode.CONFIRM_TRANSACTION.writeResponse(new DataOutputStream(bos), clientChecksum);
                break;
            case TRANSACTION_CONFIRMED:
                logger.debug("readTransactionResponse. finishing.");
                ResponseCode.TRANSACTION_FINISHED.writeResponse(new DataOutputStream(bos));
                break;
        }
    } else {
        switch (currentStatus) {
            case TRANSACTION_STARTED:
                logger.debug("readTransactionResponse. returning CONTINUE_TRANSACTION.");
                // We don't know if there's more data to receive, so just continue it.
                ResponseCode.CONTINUE_TRANSACTION.writeResponse(new DataOutputStream(bos));
                break;
            case TRANSACTION_CONFIRMED:
                // Checksum was successfully validated at client side, or BAD_CHECKSUM is returned.
                ResponseCode responseCode = commSession.getResponseCode();
                logger.debug("readTransactionResponse. responseCode={}", responseCode);
                if (responseCode.containsMessage()) {
                    responseCode.writeResponse(new DataOutputStream(bos), "");
                } else {
                    responseCode.writeResponse(new DataOutputStream(bos));
                }
                break;
        }
    }
    ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
    return Response.read(new DataInputStream(bis));
}
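The in-memory round trip at the end (write a response into a ByteArrayOutputStream, then parse it back from a ByteArrayInputStream) is a general trick for reusing stream-oriented codecs without a socket. A minimal sketch of the same round trip with a hypothetical one-byte-code-plus-message codec, not the actual site-to-site wire format:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class MiniCodec {
    static void write(DataOutputStream dos, int code, String message) throws IOException {
        dos.writeByte(code);
        dos.writeUTF(message); // length-prefixed UTF-8, as DataOutputStream defines it
    }

    static String read(DataInputStream dis) throws IOException {
        int code = dis.readByte();
        return code + ":" + dis.readUTF();
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        write(new DataOutputStream(bos), 12, "CONFIRM_TRANSACTION");
        // Parse it back from memory, just as readTransactionResponse() does.
        DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(read(dis)); // prints 12:CONFIRM_TRANSACTION
    }
}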
Use of org.apache.nifi.stream.io.ByteArrayOutputStream in project nifi by apache.
The class QueryCassandraTest, method testConvertToAvroStream.
@Test
public void testConvertToAvroStream() throws Exception {
    ResultSet rs = CassandraQueryTestUtil.createMockResultSet();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long numberOfRows = QueryCassandra.convertToAvroStream(rs, baos, 0, null);
    assertEquals(2, numberOfRows);
}
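If you also wanted to assert on the serialized content rather than only the returned row count, the buffered bytes can be re-read with a generic Avro reader. A sketch of such a follow-up check, assuming convertToAvroStream writes a standard Avro data file container (countRecords is a hypothetical helper, called as countRecords(baos.toByteArray())):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;

// Counts the records in an in-memory Avro data file container.
static long countRecords(final byte[] avroContainer) throws IOException {
    long count = 0;
    try (DataFileStream<GenericRecord> reader = new DataFileStream<>(
            new ByteArrayInputStream(avroContainer), new GenericDatumReader<GenericRecord>())) {
        while (reader.hasNext()) {
            reader.next();
            count++;
        }
    }
    return count;
}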
Use of org.apache.nifi.stream.io.ByteArrayOutputStream in project nifi by apache.
The class BeatsDecoder, method processPAYLOAD.
/**
 * Processes the outer PAYLOAD byte by byte. Once the data is read, the state is set to COMPLETE so that the data payload
 * can be processed fully using {@link #splitCompressedFrames(byte[])}.
 */
private void processPAYLOAD(final byte b) {
    currBytes.write(b);
    switch (decodedFrameType) {
        case FRAME_WINDOWSIZE: // 'W'
            if (currBytes.size() < WINDOWSIZE_LENGTH) {
                logger.trace("Beats currBytes contents are {}", new Object[] { currBytes.toString() });
                break;
            } else if (currBytes.size() == WINDOWSIZE_LENGTH) {
                frameBuilder.dataSize = ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 2, 6)).getInt() & 0x00000000ffffffffL;
                logger.debug("Data size is {}", new Object[] { frameBuilder.dataSize });
                // Sets the payload to empty as the frame contains no data
                frameBuilder.payload(new byte[] {});
                currBytes.reset();
                currState = BeatsState.COMPLETE;
                windowSize = frameBuilder.dataSize;
                break;
            } else {
                // Should never be here, to be honest...
                logger.debug("Saw a packet I should not have seen. Packet contents were {}", new Object[] { currBytes.toString() });
                break;
            }
        case FRAME_COMPRESSED: // 'C'
            if (currBytes.size() < COMPRESSED_MIN_LENGTH) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Beats currBytes contents are {}", new Object[] { currBytes.toString() });
                }
                break;
            } else if (currBytes.size() >= COMPRESSED_MIN_LENGTH) {
                // The data contains more than the minimum frame size
                frameBuilder.dataSize = ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 2, 6)).getInt() & 0x00000000ffffffffL;
                if (currBytes.size() - 6 == frameBuilder.dataSize) {
                    try {
                        byte[] buf = java.util.Arrays.copyOfRange(currBytes.toByteArray(), 6, currBytes.size());
                        InputStream in = new InflaterInputStream(new ByteArrayInputStream(buf));
                        ByteArrayOutputStream out = new ByteArrayOutputStream();
                        byte[] buffer = new byte[1024];
                        int len;
                        while ((len = in.read(buffer)) > 0) {
                            out.write(buffer, 0, len);
                        }
                        in.close();
                        out.close();
                        unprocessedData = out.toByteArray();
                        // buf is no longer needed
                        buf = null;
                        logger.debug("Finished decompressing data");
                        // Decompression is complete; we should be able to proceed with resetting currBytes and currState
                        // and iterating them as type 'D' frames
                        frameBuilder.dataSize(unprocessedData.length);
                        currState = BeatsState.COMPLETE;
                    } catch (IOException e) {
                        throw new BeatsFrameException("Error decompressing frame: " + e.getMessage(), e);
                    }
                }
                break;
            } else {
                // If currBytes.size() is neither lower than six nor equal to or greater than six...
                // Should never be here, to be honest...
                if (logger.isDebugEnabled()) {
                    logger.debug("Received a compressed frame with partial data or invalid content. The packet contents were {}", new Object[] { currBytes.toString() });
                }
                break;
            }
        case FRAME_JSON: // 'J'
            if (currBytes.size() < JSON_MIN_LENGTH) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Beats currBytes contents are {}", new Object[] { currBytes.toString() });
                }
                break;
            } else if (currBytes.size() == JSON_MIN_LENGTH) {
                // Read the sequence number from the bytes
                frameBuilder.seqNumber = (int) (ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 2, 6)).getInt() & 0x00000000ffffffffL);
                // Read the JSON payload length
                frameBuilder.dataSize = ByteBuffer.wrap(java.util.Arrays.copyOfRange(currBytes.toByteArray(), 6, 10)).getInt() & 0x00000000ffffffffL;
            } else if (currBytes.size() > JSON_MIN_LENGTH) {
                // Wait for the payload to be fully read and then complete processing
                if (currBytes.size() - 10 == frameBuilder.dataSize) {
                    // Transfer the current payload so it can be processed by the {@link #splitCompressedFrames} method.
                    frameBuilder.payload = java.util.Arrays.copyOfRange(currBytes.toByteArray(), 10, currBytes.size());
                    currState = BeatsState.COMPLETE;
                }
                break;
            }
    }
}
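The core of the 'C' (compressed frame) branch is the standard inflate-to-buffer loop. A self-contained sketch of that pattern, compressing and decompressing a sample payload with java.util.zip (the InflateDemo class is illustrative, and the JDK ByteArrayOutputStream is used in place of the NiFi subclass):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.InflaterInputStream;

public class InflateDemo {
    public static void main(String[] args) throws IOException {
        // Produce a zlib-compressed payload, like the body of a Beats 'C' frame.
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (DeflaterOutputStream def = new DeflaterOutputStream(compressed)) {
            def.write("sample frame payload".getBytes("UTF-8"));
        }
        // Inflate it with the same read loop the decoder uses.
        InputStream in = new InflaterInputStream(new ByteArrayInputStream(compressed.toByteArray()));
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024];
        int len;
        while ((len = in.read(buffer)) > 0) {
            out.write(buffer, 0, len);
        }
        in.close();
        System.out.println(out.toString("UTF-8")); // prints the original payload
    }
}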
Use of org.apache.nifi.stream.io.ByteArrayOutputStream in project nifi by apache.
The class TestConvertAvroToJSON, method testMultipleAvroMessages.
@Test
public void testMultipleAvroMessages() throws IOException {
    final TestRunner runner = TestRunners.newTestRunner(new ConvertAvroToJSON());
    final Schema schema = new Schema.Parser().parse(new File("src/test/resources/user.avsc"));
    runner.setProperty(ConvertAvroToJSON.CONTAINER_OPTIONS, ConvertAvroToJSON.CONTAINER_ARRAY);
    final GenericRecord user1 = new GenericData.Record(schema);
    user1.put("name", "Alyssa");
    user1.put("favorite_number", 256);
    final GenericRecord user2 = new GenericData.Record(schema);
    user2.put("name", "George");
    user2.put("favorite_number", 1024);
    user2.put("favorite_color", "red");
    final DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
    final ByteArrayOutputStream out1 = AvroTestUtil.serializeAvroRecord(schema, datumWriter, user1, user2);
    runner.enqueue(out1.toByteArray());
    runner.run();
    runner.assertAllFlowFilesTransferred(ConvertAvroToJSON.REL_SUCCESS, 1);
    final MockFlowFile out = runner.getFlowFilesForRelationship(ConvertAvroToJSON.REL_SUCCESS).get(0);
    out.assertContentEquals("[{\"name\": \"Alyssa\", \"favorite_number\": 256, \"favorite_color\": null},{\"name\": \"George\", \"favorite_number\": 1024, \"favorite_color\": \"red\"}]");
}
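The AvroTestUtil.serializeAvroRecord helper is not shown here; a plausible sketch of what such a helper does, writing the records into an in-memory Avro data file container and returning the buffer (the actual NiFi utility may differ in detail):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumWriter;

// Hypothetical reconstruction: serialize the given records into an
// in-memory Avro data file and hand back the buffer for enqueueing.
static ByteArrayOutputStream serializeAvroRecord(final Schema schema, final DatumWriter<GenericRecord> datumWriter,
        final GenericRecord... users) throws IOException {
    final ByteArrayOutputStream out = new ByteArrayOutputStream();
    try (DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
        dataFileWriter.create(schema, out);
        for (GenericRecord user : users) {
            dataFileWriter.append(user);
        }
    }
    return out;
}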