Use of org.apache.nifi.remote.exception.ProtocolException in project nifi by apache.
The class StandardFlowFileCodec, method decode.
@Override
public DataPacket decode(final InputStream stream) throws IOException, ProtocolException {
    final DataInputStream in = new DataInputStream(stream);
    final int numAttributes;
    try {
        numAttributes = in.readInt();
    } catch (final EOFException e) {
        // we're out of data.
        return null;
    }
    // Protect against a malformed packet that claims a huge number of attributes,
    // which would generally result in an OutOfMemoryError.
    if (numAttributes > MAX_NUM_ATTRIBUTES) {
        throw new ProtocolException("FlowFile exceeds maximum number of attributes with a total of " + numAttributes);
    }
    final Map<String, String> attributes = new HashMap<>(numAttributes);
    for (int i = 0; i < numAttributes; i++) {
        final String attrName = readString(in);
        final String attrValue = readString(in);
        attributes.put(attrName, attrValue);
    }
    final long numBytes = in.readLong();
    return new StandardDataPacket(attributes, stream, numBytes);
}
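The decode method above reads a simple length-prefixed wire format: an int attribute count, then each attribute name and value, then a long content length followed by the content itself. As a rough illustration only, a matching encode side might look like the sketch below; the int-length-plus-UTF-8-bytes layout for strings is an assumption inferred from readString(), not copied from the NiFi source.

// Sketch of a matching encode side (assumption: readString() expects an int
// length prefix followed by that many UTF-8 bytes).
private void writeString(final DataOutputStream out, final String value) throws IOException {
    final byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
    out.writeInt(bytes.length);
    out.write(bytes);
}

public void encode(final DataPacket packet, final OutputStream stream) throws IOException {
    final DataOutputStream out = new DataOutputStream(stream);
    final Map<String, String> attributes = packet.getAttributes();
    out.writeInt(attributes.size());                 // attribute count
    for (final Map.Entry<String, String> entry : attributes.entrySet()) {
        writeString(out, entry.getKey());            // length-prefixed name
        writeString(out, entry.getValue());          // length-prefixed value
    }
    out.writeLong(packet.getSize());                 // content length in bytes
    final InputStream data = packet.getData();       // then the content itself
    final byte[] buffer = new byte[8192];
    int len;
    while ((len = data.read(buffer)) != -1) {
        out.write(buffer, 0, len);
    }
    out.flush();
}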
Use of org.apache.nifi.remote.exception.ProtocolException in project nifi by apache.
The class SocketClientTransaction, method initialize.
private void initialize() throws IOException {
    try {
        if (direction == TransferDirection.RECEIVE) {
            // Indicate that we would like to have some data
            RequestType.RECEIVE_FLOWFILES.writeRequestType(dos);
            dos.flush();
            final Response dataAvailableCode = Response.read(dis);
            switch (dataAvailableCode.getCode()) {
                case MORE_DATA:
                    logger.debug("{} {} Indicates that data is available", this, peer);
                    this.dataAvailable = true;
                    break;
                case NO_MORE_DATA:
logger.debug("{} No data available from {}", peer);
this.dataAvailable = false;
return;
default:
throw new ProtocolException("Got unexpected response when asking for data: " + dataAvailableCode);
}
} else {
// Indicate that we would like to have some data
            RequestType.SEND_FLOWFILES.writeRequestType(dos);
            dos.flush();
        }
    } catch (final Exception e) {
        error();
        throw e;
    }
}
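initialize() is the client side of the direction handshake: for RECEIVE it asks the remote instance whether data is waiting, for SEND it simply announces the intent to send. Application code normally drives this through the public SiteToSiteClient API rather than calling it directly. A minimal receive-side sketch follows; the URL and port name are placeholder assumptions, not values from the NiFi source.

// Minimal receive-side sketch using the public site-to-site client API.
// The URL and port name below are placeholders.
final SiteToSiteClient client = new SiteToSiteClient.Builder()
        .url("http://localhost:8080/nifi")
        .portName("Data Out")
        .build();

final Transaction transaction = client.createTransaction(TransferDirection.RECEIVE);
if (transaction != null) {
    DataPacket packet;
    while ((packet = transaction.receive()) != null) {
        // consume packet.getAttributes() and packet.getData() here
    }
    transaction.confirm();
    transaction.complete();
}
client.close();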
Use of org.apache.nifi.remote.exception.ProtocolException in project nifi by apache.
The class StandardRemoteGroupPort, method transferFlowFiles.
private int transferFlowFiles(final Transaction transaction, final ProcessContext context, final ProcessSession session, final FlowFile firstFlowFile) throws IOException, ProtocolException {
    FlowFile flowFile = firstFlowFile;
    try {
        final String userDn = transaction.getCommunicant().getDistinguishedName();
        final long startSendingNanos = System.nanoTime();
        final StopWatch stopWatch = new StopWatch(true);
        long bytesSent = 0L;
        final SiteToSiteClientConfig siteToSiteClientConfig = getSiteToSiteClient().getConfig();
        final long maxBatchBytes = siteToSiteClientConfig.getPreferredBatchSize();
        final int maxBatchCount = siteToSiteClientConfig.getPreferredBatchCount();
        final long preferredBatchDuration = siteToSiteClientConfig.getPreferredBatchDuration(TimeUnit.NANOSECONDS);
        final long maxBatchDuration = preferredBatchDuration > 0 ? preferredBatchDuration : BATCH_SEND_NANOS;
        final Set<FlowFile> flowFilesSent = new HashSet<>();
        boolean continueTransaction = true;
        while (continueTransaction) {
            final long startNanos = System.nanoTime();
            // call codec.encode within a session callback so that we have the InputStream to read the FlowFile
            final FlowFile toWrap = flowFile;
            session.read(flowFile, new InputStreamCallback() {
                @Override
                public void process(final InputStream in) throws IOException {
                    final DataPacket dataPacket = new StandardDataPacket(toWrap.getAttributes(), in, toWrap.getSize());
                    transaction.send(dataPacket);
                }
            });
            final long transferNanos = System.nanoTime() - startNanos;
            final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS);
            flowFilesSent.add(flowFile);
            bytesSent += flowFile.getSize();
            logger.debug("{} Sent {} to {}", this, flowFile, transaction.getCommunicant().getUrl());
            final String transitUri = transaction.getCommunicant().createTransitUri(flowFile.getAttribute(CoreAttributes.UUID.key()));
            session.getProvenanceReporter().send(flowFile, transitUri, "Remote DN=" + userDn, transferMillis, false);
            session.remove(flowFile);
            final long sendingNanos = System.nanoTime() - startSendingNanos;
            if (maxBatchCount > 0 && flowFilesSent.size() >= maxBatchCount) {
                flowFile = null;
            } else if (maxBatchBytes > 0 && bytesSent >= maxBatchBytes) {
                flowFile = null;
            } else if (sendingNanos >= maxBatchDuration) {
                flowFile = null;
            } else {
                flowFile = session.get();
            }
            continueTransaction = (flowFile != null);
        }
        transaction.confirm();
        // consume input stream entirely, ignoring its contents. If we
        // don't do this, the Connection will not be returned to the pool
        stopWatch.stop();
        final String uploadDataRate = stopWatch.calculateDataRate(bytesSent);
        final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
        final String dataSize = FormatUtils.formatDataSize(bytesSent);
        transaction.complete();
        session.commit();
        final String flowFileDescription = (flowFilesSent.size() < 20) ? flowFilesSent.toString() : flowFilesSent.size() + " FlowFiles";
        logger.info("{} Successfully sent {} ({}) to {} in {} milliseconds at a rate of {}", new Object[] { this, flowFileDescription, dataSize, transaction.getCommunicant().getUrl(), uploadMillis, uploadDataRate });
        return flowFilesSent.size();
    } catch (final Exception e) {
        session.rollback();
        throw e;
    }
}
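transferFlowFiles keeps pulling FlowFiles from the session until one of the batch limits is hit: the preferred batch count, the preferred byte total, or the maximum batch duration, whichever triggers first, and only then confirms and completes the transaction. The same confirm-then-complete handshake is available to application code through the public Transaction API. A hypothetical send-side sketch, reusing the client built in the earlier sketch, with placeholder attributes and payload:

// Hypothetical send-side counterpart built on the public site-to-site API.
final Transaction transaction = client.createTransaction(TransferDirection.SEND);
final Map<String, String> attributes = Collections.singletonMap("filename", "example.txt");
final byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
final DataPacket packet = new StandardDataPacket(attributes, new ByteArrayInputStream(payload), payload.length);
transaction.send(packet);
transaction.confirm();    // exchanges the checksum with the peer before committing
transaction.complete();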
Use of org.apache.nifi.remote.exception.ProtocolException in project nifi by apache.
The class AbstractFlowFileServerProtocol, method receiveFlowFiles.
@Override
public int receiveFlowFiles(final Peer peer, final ProcessContext context, final ProcessSession session, final FlowFileCodec codec) throws IOException, ProtocolException {
    if (!handshakeCompleted) {
        throw new IllegalStateException("Handshake has not been completed");
    }
    if (shutdown) {
        throw new IllegalStateException("Protocol is shutdown");
    }
    logger.debug("{} receiving FlowFiles from {}", this, peer);
    final CommunicationsSession commsSession = peer.getCommunicationsSession();
    final DataInputStream dis = new DataInputStream(commsSession.getInput().getInputStream());
    String remoteDn = commsSession.getUserDn();
    if (remoteDn == null) {
        remoteDn = "none";
    }
    final StopWatch stopWatch = new StopWatch(true);
    final CRC32 crc = new CRC32();
    // Peer has data. Otherwise, we would not have been called, because they would not have sent
    // a SEND_FLOWFILES request to us. Just decode the bytes into FlowFiles until the peer says it's
    // finished sending data.
    final Set<FlowFile> flowFilesReceived = new HashSet<>();
    long bytesReceived = 0L;
    boolean continueTransaction = true;
    while (continueTransaction) {
        final long startNanos = System.nanoTime();
        final InputStream flowFileInputStream = handshakeProperties.isUseGzip() ? new CompressionInputStream(dis) : dis;
        final CheckedInputStream checkedInputStream = new CheckedInputStream(flowFileInputStream, crc);
        final DataPacket dataPacket = codec.decode(checkedInputStream);
        if (dataPacket == null) {
            logger.debug("{} Received null dataPacket indicating the end of transaction from {}", this, peer);
            break;
        }
        FlowFile flowFile = session.create();
        flowFile = session.importFrom(dataPacket.getData(), flowFile);
        flowFile = session.putAllAttributes(flowFile, dataPacket.getAttributes());
        if (handshakeProperties.isUseGzip()) {
            // Close CompressionInputStream to free acquired memory, without closing underlying stream.
            checkedInputStream.close();
        }
        final long transferNanos = System.nanoTime() - startNanos;
        final long transferMillis = TimeUnit.MILLISECONDS.convert(transferNanos, TimeUnit.NANOSECONDS);
        final String sourceSystemFlowFileUuid = dataPacket.getAttributes().get(CoreAttributes.UUID.key());
        final String host = StringUtils.isEmpty(peer.getHost()) ? "unknown" : peer.getHost();
        final String port = peer.getPort() <= 0 ? "unknown" : String.valueOf(peer.getPort());
        final Map<String, String> attributes = new HashMap<>(4);
        attributes.put(CoreAttributes.UUID.key(), UUID.randomUUID().toString());
        attributes.put(SiteToSiteAttributes.S2S_HOST.key(), host);
        attributes.put(SiteToSiteAttributes.S2S_ADDRESS.key(), host + ":" + port);
        flowFile = session.putAllAttributes(flowFile, attributes);
        final String transitUri = createTransitUri(peer, sourceSystemFlowFileUuid);
        session.getProvenanceReporter().receive(flowFile, transitUri, sourceSystemFlowFileUuid == null ? null : "urn:nifi:" + sourceSystemFlowFileUuid, "Remote Host=" + peer.getHost() + ", Remote DN=" + remoteDn, transferMillis);
        session.transfer(flowFile, Relationship.ANONYMOUS);
        flowFilesReceived.add(flowFile);
        bytesReceived += flowFile.getSize();
        final Response transactionResponse = readTransactionResponse(false, commsSession);
        switch (transactionResponse.getCode()) {
            case CONTINUE_TRANSACTION:
                logger.debug("{} Received ContinueTransaction indicator from {}", this, peer);
                break;
            case FINISH_TRANSACTION:
                logger.debug("{} Received FinishTransaction indicator from {}", this, peer);
                continueTransaction = false;
                break;
            case CANCEL_TRANSACTION:
                logger.info("{} Received CancelTransaction indicator from {} with explanation {}", this, peer, transactionResponse.getMessage());
                session.rollback();
                return 0;
            default:
throw new ProtocolException("Received unexpected response from peer: when expecting Continue Transaction or Finish Transaction, received" + transactionResponse);
        }
    }
    // we received a FINISH_TRANSACTION indicator. Send back a CONFIRM_TRANSACTION message
    // to peer so that we can verify that the connection is still open. This is a two-phase commit,
    // which helps to prevent the chances of data duplication. Without doing this, we may commit the
    // session and then when we send the response back to the peer, the peer may have timed out and may not
    // be listening. As a result, it will re-send the data. By doing this two-phase commit, we narrow the
    // Critical Section involved in this transaction so that rather than the Critical Section being the
    // time window involved in the entire transaction, it is reduced to a simple round-trip conversation.
    logger.debug("{} Sending CONFIRM_TRANSACTION Response Code to {}", this, peer);
    String calculatedCRC = String.valueOf(crc.getValue());
    writeTransactionResponse(false, ResponseCode.CONFIRM_TRANSACTION, commsSession, calculatedCRC);
    FlowFileTransaction transaction = new FlowFileTransaction(session, context, stopWatch, bytesReceived, flowFilesReceived, calculatedCRC);
    return commitReceiveTransaction(peer, transaction);
}
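The CONFIRM_TRANSACTION exchange at the end relies on both sides computing a CRC32 over the exact bytes that crossed the wire (that is what wrapping the input in a CheckedInputStream above achieves) and comparing the values before committing. A minimal standalone sketch of that checksum step, using only the JDK classes java.util.zip.CRC32 and java.util.zip.CheckedInputStream:

// Minimal sketch: compute a CRC32 over a stream by wrapping it in a
// CheckedInputStream and reading it through, then render the value the same
// way as String.valueOf(crc.getValue()) above.
static String crcOf(final InputStream in) throws IOException {
    final CRC32 crc = new CRC32();
    try (CheckedInputStream checked = new CheckedInputStream(in, crc)) {
        final byte[] buffer = new byte[8192];
        while (checked.read(buffer) != -1) {
            // reading advances the checksum; the bytes themselves are not needed here
        }
    }
    return String.valueOf(crc.getValue());
}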
Use of org.apache.nifi.remote.exception.ProtocolException in project nifi by apache.
The class SocketFlowFileServerProtocol, method negotiateCodec.
@Override
public FlowFileCodec negotiateCodec(final Peer peer) throws IOException, ProtocolException {
    if (!handshakeCompleted) {
        throw new IllegalStateException("Handshake has not been completed");
    }
    if (shutdown) {
        throw new IllegalStateException("Protocol is shutdown");
    }
    logger.debug("{} Negotiating Codec with {} using {}", new Object[] { this, peer, peer.getCommunicationsSession() });
    final CommunicationsSession commsSession = peer.getCommunicationsSession();
    final DataInputStream dis = new DataInputStream(commsSession.getInput().getInputStream());
    final DataOutputStream dos = new DataOutputStream(commsSession.getOutput().getOutputStream());
    if (port == null) {
        RemoteResourceFactory.rejectCodecNegotiation(dis, dos, "Cannot transfer FlowFiles because no port was specified");
    }
    // Negotiate the FlowFileCodec to use.
    try {
        negotiatedFlowFileCodec = RemoteResourceFactory.receiveCodecNegotiation(dis, dos);
        logger.debug("{} Negotiated Codec {} with {}", new Object[] { this, negotiatedFlowFileCodec, peer });
        return negotiatedFlowFileCodec;
    } catch (final HandshakeException e) {
        throw new ProtocolException(e.toString());
    }
}
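The actual negotiation logic lives in RemoteResourceFactory.receiveCodecNegotiation and is not shown here. Conceptually, the server reads the codec name and version the client proposes and either accepts it or rejects it with an explanation, throwing HandshakeException on failure. A purely hypothetical sketch of that pattern; the response codes and constants below are placeholders, not NiFi's implementation:

// Hypothetical negotiation sketch; not the actual RemoteResourceFactory logic.
final String proposedCodec = dis.readUTF();
final int proposedVersion = dis.readInt();
if (SUPPORTED_CODEC_NAME.equals(proposedCodec) && proposedVersion == SUPPORTED_CODEC_VERSION) {
    dos.writeByte(ACCEPT_CODEC);    // placeholder response code
    dos.flush();
    return new StandardFlowFileCodec();
} else {
    dos.writeByte(REJECT_CODEC);    // placeholder response code
    dos.writeUTF("Unsupported codec: " + proposedCodec + " version " + proposedVersion);
    dos.flush();
    throw new HandshakeException("Codec negotiation with " + peer + " failed");
}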