Use of org.apache.nifi.processor.ProcessSession in project nifi by apache.
The class TestHttpFlowFileServerProtocol, method testShutdown.
@Test
public void testShutdown() throws Exception {
    final HttpFlowFileServerProtocol serverProtocol = getDefaultHttpFlowFileServerProtocol();
    final Peer peer = getDefaultPeer();

    serverProtocol.handshake(peer);
    assertTrue(serverProtocol.isHandshakeSuccessful());

    final FlowFileCodec negotiatedCodec = serverProtocol.negotiateCodec(peer);
    assertTrue(negotiatedCodec instanceof StandardFlowFileCodec);
    assertEquals(negotiatedCodec, serverProtocol.getPreNegotiatedCodec());
    assertEquals(1234, serverProtocol.getRequestExpiration());

    serverProtocol.shutdown(peer);

    final ProcessContext context = null;
    final ProcessSession processSession = null;
    try {
        serverProtocol.transferFlowFiles(peer, context, processSession, negotiatedCodec);
        fail("transferFlowFiles should fail since the protocol has already been shut down.");
    } catch (final IllegalStateException e) {
        // expected
    }

    try {
        serverProtocol.receiveFlowFiles(peer, context, processSession, negotiatedCodec);
        fail("receiveFlowFiles should fail since the protocol has already been shut down.");
    } catch (final IllegalStateException e) {
        // expected
    }
}
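The try/fail/catch idiom above predates JUnit's assertThrows; on JUnit 4.13 or later (or JUnit 5) the same shutdown checks can be written more compactly. A minimal sketch, assuming the same serverProtocol, peer, context, processSession, and negotiatedCodec setup as in the test above:

// Equivalent assertions using org.junit.Assert.assertThrows (JUnit 4.13+)
assertThrows(IllegalStateException.class,
        () -> serverProtocol.transferFlowFiles(peer, context, processSession, negotiatedCodec));
assertThrows(IllegalStateException.class,
        () -> serverProtocol.receiveFlowFiles(peer, context, processSession, negotiatedCodec));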
Use of org.apache.nifi.processor.ProcessSession in project nifi by apache.
The class TestHttpFlowFileServerProtocol, method testTransferZeroFile.
@Test
public void testTransferZeroFile() throws Exception {
    final HttpFlowFileServerProtocol serverProtocol = getDefaultHttpFlowFileServerProtocol();
    final Peer peer = getDefaultPeer();

    serverProtocol.handshake(peer);
    assertTrue(serverProtocol.isHandshakeSuccessful());

    final FlowFileCodec negotiatedCodec = serverProtocol.negotiateCodec(peer);

    final ProcessContext context = null;
    final ProcessSession processSession = mock(ProcessSession.class);

    // Execute test using mock
    final int flowFileSent = serverProtocol.transferFlowFiles(peer, context, processSession, negotiatedCodec);
    assertEquals(0, flowFileSent);
}
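The zero-transfer result follows from Mockito's defaults: an unstubbed mock returns null for ProcessSession.get() and an empty list for ProcessSession.get(int), so the protocol finds nothing to send. Making that explicit is optional; a sketch, assuming static imports from org.mockito.Mockito:

// Explicit stubbing of the defaults the test above relies on
when(processSession.get()).thenReturn(null);
when(processSession.get(anyInt())).thenReturn(Collections.emptyList());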
Use of org.apache.nifi.processor.ProcessSession in project nifi by apache.
The class PutCassandraQL, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    ComponentLog logger = getLogger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();
    final long statementTimeout = context.getProperty(STATEMENT_TIMEOUT).evaluateAttributeExpressions(flowFile).asTimePeriod(TimeUnit.MILLISECONDS);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());

    // The documentation for the driver recommends the session remain open the entire time the processor is running
    // and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
    final Session connectionSession = cassandraSession.get();

    String cql = getCQL(session, flowFile, charset);
    try {
        PreparedStatement statement = connectionSession.prepare(cql);
        BoundStatement boundStatement = statement.bind();

        Map<String, String> attributes = flowFile.getAttributes();
        for (final Map.Entry<String, String> entry : attributes.entrySet()) {
            final String key = entry.getKey();
            final Matcher matcher = CQL_TYPE_ATTRIBUTE_PATTERN.matcher(key);
            if (matcher.matches()) {
                final int parameterIndex = Integer.parseInt(matcher.group(1));
                String paramType = entry.getValue();
                if (StringUtils.isEmpty(paramType)) {
                    throw new ProcessException("Value of the " + key + " attribute is null or empty, it must contain a valid value");
                }
                paramType = paramType.trim();
                final String valueAttrName = "cql.args." + parameterIndex + ".value";
                final String parameterValue = attributes.get(valueAttrName);
                try {
                    setStatementObject(boundStatement, parameterIndex - 1, valueAttrName, parameterValue, paramType);
                } catch (final InvalidTypeException | IllegalArgumentException e) {
                    throw new ProcessException("The value of the " + valueAttrName + " is '" + parameterValue
                            + "', which cannot be converted into the necessary data type: " + paramType, e);
                }
            }
        }

        try {
            ResultSetFuture future = connectionSession.executeAsync(boundStatement);
            if (statementTimeout > 0) {
                future.getUninterruptibly(statementTimeout, TimeUnit.MILLISECONDS);
            } else {
                future.getUninterruptibly();
            }

            // Emit a Provenance SEND event
            final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);

            // This isn't a real URI, but since Cassandra is distributed we just use the cluster name
            String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName();
            session.getProvenanceReporter().send(flowFile, transitUri, transmissionMillis, true);
            session.transfer(flowFile, REL_SUCCESS);
        } catch (final TimeoutException e) {
            throw new ProcessException(e);
        }
    } catch (final NoHostAvailableException nhae) {
        logger.error("No host in the Cassandra cluster can be contacted successfully to execute this statement", nhae);
        // Log up to 10 error messages. Otherwise, if a 1000-node cluster was specified but there was no connectivity,
        // a thousand error messages would be logged. We would still like information from Cassandra itself, so
        // cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
        // logger message above).
        logger.error(nhae.getCustomMessage(10, true, false));
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryExecutionException qee) {
        logger.error("Cannot execute the statement with the requested consistency level successfully", qee);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryValidationException qve) {
        logger.error("The CQL statement {} is invalid due to syntax error, authorization issue, or another validation problem; routing {} to failure",
                new Object[] { cql, flowFile }, qve);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    } catch (final ProcessException e) {
        logger.error("Unable to execute CQL statement {} for {} due to {}; routing to failure", new Object[] { cql, flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
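The attribute loop above maps pairs of flow file attributes onto bound statement parameters: cql.args.N.type (matched by CQL_TYPE_ATTRIBUTE_PATTERN, with N captured as group 1) names the CQL type of the N-th placeholder, and cql.args.N.value supplies its value. A minimal sketch of exercising that path with nifi-mock's TestRunner; the table, column names, and connection setup are hypothetical and the Cassandra connection configuration is omitted:

// Sketch: feed a parameterized CQL statement to PutCassandraQL via flow file attributes
final TestRunner runner = TestRunners.newTestRunner(PutCassandraQL.class);
// ... contact point / keyspace configuration for the processor is assumed to happen here ...
final Map<String, String> attrs = new HashMap<>();
attrs.put("cql.args.1.type", "text");   // type of the first '?' placeholder
attrs.put("cql.args.1.value", "Joe");
attrs.put("cql.args.2.type", "int");    // type of the second '?' placeholder
attrs.put("cql.args.2.value", "42");
runner.enqueue("INSERT INTO users (name, age) VALUES (?, ?)".getBytes(StandardCharsets.UTF_8), attrs);
runner.run();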
Use of org.apache.nifi.processor.ProcessSession in project nifi by apache.
The class DDLEventWriter, method writeEvent.
@Override
public long writeEvent(ProcessSession session, String transitUri, DDLEventInfo eventInfo, long currentSequenceId, Relationship relationship) {
    FlowFile flowFile = session.create();
    flowFile = session.write(flowFile, outputStream -> {
        super.startJson(outputStream, eventInfo);
        super.writeJson(eventInfo);
        jsonGenerator.writeStringField("query", eventInfo.getQuery());
        super.endJson();
    });
    flowFile = session.putAllAttributes(flowFile, getCommonAttributes(currentSequenceId, eventInfo));
    session.transfer(flowFile, relationship);
    session.getProvenanceReporter().receive(flowFile, transitUri);
    return currentSequenceId + 1;
}
Use of org.apache.nifi.processor.ProcessSession in project nifi by apache.
The class UpdateRowsWriter, method writeEvent.
/**
 * Creates and transfers a new flow file whose contents are the JSON-serialized value of the specified event, with the sequence ID attribute set.
 *
 * @param session           A reference to a ProcessSession from which the flow file(s) will be created and transferred
 * @param transitUri        The URI used when reporting the provenance RECEIVE event
 * @param eventInfo         An event whose value will become the contents of the flow file
 * @param currentSequenceId The current CDC sequence ID
 * @param relationship      The relationship to which the flow file(s) will be transferred
 * @return The next available CDC sequence ID for use by the CDC processor
 */
@Override
public long writeEvent(final ProcessSession session, String transitUri, final UpdateRowsEventInfo eventInfo, final long currentSequenceId, Relationship relationship) {
    final AtomicLong seqId = new AtomicLong(currentSequenceId);
    // One flow file is created per updated row; the sequence ID advances with each one.
    for (Map.Entry<Serializable[], Serializable[]> row : eventInfo.getRows()) {
        FlowFile flowFile = session.create();
        flowFile = session.write(flowFile, outputStream -> {
            super.startJson(outputStream, eventInfo);
            super.writeJson(eventInfo);

            final BitSet bitSet = eventInfo.getIncludedColumns();
            writeRow(eventInfo, row, bitSet);

            super.endJson();
        });
        flowFile = session.putAllAttributes(flowFile, getCommonAttributes(seqId.get(), eventInfo));
        session.transfer(flowFile, relationship);
        session.getProvenanceReporter().receive(flowFile, transitUri);
        seqId.getAndIncrement();
    }
    return seqId.get();
}