Use of org.apache.nifi.processor.io.OutputStreamCallback in project kylo by Teradata.
From class ExecuteHQL, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLog();
    FlowFile flowFile = null;
    try {
        if (context.hasIncomingConnection()) {
            flowFile = session.get();
            if (flowFile == null) {
                return;
            }
        }
    } catch (NoSuchMethodError e) {
        logger.error("Failed to get incoming", e);
    }
    FlowFile outgoing = (flowFile == null ? session.create() : flowFile);
    final ThriftService thriftService = context.getProperty(THRIFT_SERVICE).asControllerService(ThriftService.class);
    final String selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(outgoing).getValue();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final StopWatch stopWatch = new StopWatch(true);
    try (final Connection con = thriftService.getConnection();
         final Statement st = con.createStatement()) {
        setQueryTimeout(st, queryTimeout);
        final AtomicLong nrOfRows = new AtomicLong(0L);
        outgoing = session.write(outgoing, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing query {}", new Object[] { selectQuery });
                    final ResultSet resultSet = new ResultSetAdapter(st.executeQuery(selectQuery));
                    nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out));
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }
            }
        });
        // set attribute for how many rows were selected
        outgoing = session.putAttribute(outgoing, RESULT_ROW_COUNT, Long.toString(nrOfRows.get()));
        logger.info("{} contains {} Avro records", new Object[] { outgoing, nrOfRows.get() });
        logger.info("Transferred {} to 'success'", new Object[] { outgoing });
        session.getProvenanceReporter().modifyContent(outgoing, "Retrieved " + nrOfRows.get() + " rows", stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(outgoing, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure", new Object[] { selectQuery, outgoing, e });
        session.transfer(outgoing, REL_FAILURE);
    }
}
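
The core pattern in this example is ProcessSession.write(FlowFile, OutputStreamCallback): the session hands the callback an OutputStream backed by the content repository, and whatever the callback writes becomes the FlowFile's new content. A minimal sketch of the pattern in isolation (the replaceContent method and payload parameter are illustrative, not part of the Kylo code):

import java.io.IOException;
import java.io.OutputStream;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.io.OutputStreamCallback;

// Minimal sketch: overwrite a FlowFile's content via an OutputStreamCallback.
// `session` and `flowFile` are assumed to come from onTrigger, as above.
FlowFile replaceContent(final ProcessSession session, final FlowFile flowFile, final byte[] payload) {
    return session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            // the framework opens and closes the stream; the callback only writes the new content
            out.write(payload);
        }
    });
}

Since OutputStreamCallback declares a single method, session.write(flowFile, out -> out.write(payload)) is equivalent on Java 8 and later.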
Use of org.apache.nifi.processor.io.OutputStreamCallback in project kylo by Teradata.
From class GetTableData, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = null;
    if (context.hasIncomingConnection()) {
        flowFile = session.get();
        // if connections come from other processors, run only when a FlowFile is available
        if (flowFile == null && context.hasNonLoopConnection()) {
            return;
        }
    }
    final FlowFile incoming = flowFile;
    final ComponentLog logger = getLog();
    final DBCPService dbcpService = context.getProperty(JDBC_SERVICE).asControllerService(DBCPService.class);
    final MetadataProviderService metadataService = context.getProperty(METADATA_SERVICE).asControllerService(MetadataProviderService.class);
    final String loadStrategy = context.getProperty(LOAD_STRATEGY).getValue();
    final String categoryName = context.getProperty(FEED_CATEGORY).evaluateAttributeExpressions(incoming).getValue();
    final String feedName = context.getProperty(FEED_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String fieldSpecs = context.getProperty(TABLE_SPECS).evaluateAttributeExpressions(incoming).getValue();
    final String dateField = context.getProperty(DATE_FIELD).evaluateAttributeExpressions(incoming).getValue();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer overlapTime = context.getProperty(OVERLAP_TIME).evaluateAttributeExpressions(incoming).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer backoffTime = context.getProperty(BACKOFF_PERIOD).asTimePeriod(TimeUnit.SECONDS).intValue();
    final String unitSize = context.getProperty(UNIT_SIZE).getValue();
    final String outputType = context.getProperty(OUTPUT_TYPE).getValue();
    final String outputDelimiter = context.getProperty(OUTPUT_DELIMITER).evaluateAttributeExpressions(incoming).getValue();
    final String delimiter = StringUtils.isBlank(outputDelimiter) ? "," : outputDelimiter;
    final PropertyValue waterMarkPropName = context.getProperty(HIGH_WATER_MARK_PROP).evaluateAttributeExpressions(incoming);
    final String[] selectFields = parseFields(fieldSpecs);
    final LoadStrategy strategy = LoadStrategy.valueOf(loadStrategy);
    final StopWatch stopWatch = new StopWatch(true);
    try (final Connection conn = dbcpService.getConnection()) {
        FlowFile outgoing = (incoming == null ? session.create() : incoming);
        final AtomicLong nrOfRows = new AtomicLong(0L);
        final LastFieldVisitor visitor = new LastFieldVisitor(dateField, null);
        final FlowFile current = outgoing;
        outgoing = session.write(outgoing, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                ResultSet rs = null;
                try {
                    final GetTableDataSupport support = new GetTableDataSupport(conn, queryTimeout);
                    if (strategy == LoadStrategy.FULL_LOAD) {
                        rs = support.selectFullLoad(tableName, selectFields);
                    } else if (strategy == LoadStrategy.INCREMENTAL) {
                        final String waterMarkValue = getIncrementalWaterMarkValue(current, waterMarkPropName);
                        final LocalDateTime waterMarkTime = LocalDateTime.parse(waterMarkValue, DATE_TIME_FORMAT);
                        final Date lastLoadDate = toDate(waterMarkTime);
                        visitor.setLastModifyDate(lastLoadDate);
                        rs = support.selectIncremental(tableName, selectFields, dateField, overlapTime, lastLoadDate, backoffTime, GetTableDataSupport.UnitSizes.valueOf(unitSize));
                    } else {
                        throw new RuntimeException("Unsupported loadStrategy [" + loadStrategy + "]");
                    }
                    if (GetTableDataSupport.OutputType.DELIMITED.equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        nrOfRows.set(JdbcCommon.convertToDelimitedStream(rs, out, (strategy == LoadStrategy.INCREMENTAL ? visitor : null), delimiter));
                    } else if (GetTableDataSupport.OutputType.AVRO.equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        avroSchema = JdbcCommon.createSchema(rs);
                        nrOfRows.set(JdbcCommon.convertToAvroStream(rs, out, (strategy == LoadStrategy.INCREMENTAL ? visitor : null), avroSchema));
                    } else {
                        throw new RuntimeException("Unsupported output format type [" + outputType + "]");
                    }
                } catch (final SQLException e) {
                    throw new IOException("SQL execution failure", e);
                } finally {
                    if (rs != null) {
                        try {
                            if (rs.getStatement() != null) {
                                rs.getStatement().close();
                            }
                            rs.close();
                        } catch (final SQLException e) {
                            getLog().error("Error closing SQL statement and result set", e);
                        }
                    }
                }
            }
        });
        // set attribute for how many rows were selected
        outgoing = session.putAttribute(outgoing, RESULT_ROW_COUNT, Long.toString(nrOfRows.get()));
        // set output format type and avro schema for feed setup, if available
        outgoing = session.putAttribute(outgoing, "db.table.output.format", outputType);
        final String avroSchemaForFeedSetup = (avroSchema != null) ? JdbcCommon.getAvroSchemaForFeedSetup(avroSchema) : EMPTY_STRING;
        outgoing = session.putAttribute(outgoing, "db.table.avro.schema", avroSchemaForFeedSetup);
        session.getProvenanceReporter().modifyContent(outgoing, "Retrieved " + nrOfRows.get() + " rows", stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        // route to 'nodata' if no rows were selected
        final Long rowcount = nrOfRows.get();
        outgoing = session.putAttribute(outgoing, ComponentAttributes.NUM_SOURCE_RECORDS.key(), String.valueOf(rowcount));
        if (nrOfRows.get() == 0L) {
            logger.info("{} contains no data; transferring to 'nodata'", new Object[] { outgoing });
            session.transfer(outgoing, REL_NO_DATA);
        } else {
            logger.info("{} contains {} records; transferring to 'success'", new Object[] { outgoing, nrOfRows.get() });
            if (strategy == LoadStrategy.INCREMENTAL) {
                final String newWaterMarkStr = format(visitor.getLastModifyDate());
                outgoing = setIncrementalWaterMarkValue(session, outgoing, waterMarkPropName, newWaterMarkStr);
                logger.info("Recorded load status for feed {} with date {}", new Object[] { feedName, newWaterMarkStr });
            }
            session.transfer(outgoing, REL_SUCCESS);
        }
    } catch (final Exception e) {
        if (incoming == null) {
            logger.error("Unable to execute SQL select from table due to {}; no incoming flow file to route to failure", new Object[] { e });
        } else {
            logger.error("Unable to execute SQL select from table for {} due to {}; routing to failure", new Object[] { incoming, e });
            session.transfer(incoming, REL_FAILURE);
        }
    }
}
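
getIncrementalWaterMarkValue and setIncrementalWaterMarkValue are Kylo helpers whose bodies are not part of this excerpt. A hypothetical sketch of the shape they could take, assuming the high-water mark travels as a FlowFile attribute named by the configured property (the real Kylo implementation may instead consult its water-mark metadata service):

// Hypothetical sketch only: the real Kylo helpers are not shown in this excerpt.
private String getIncrementalWaterMarkValue(final FlowFile ff, final PropertyValue waterMarkPropName) {
    // read the last recorded high-water mark from the attribute the property names
    return ff.getAttribute(waterMarkPropName.getValue());
}

private FlowFile setIncrementalWaterMarkValue(final ProcessSession session, final FlowFile ff,
                                              final PropertyValue waterMarkPropName, final String value) {
    // record the new high-water mark for the next incremental run
    return session.putAttribute(ff, waterMarkPropName.getValue(), value);
}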
Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
From class TestStandardProcessSession, method testWriteAfterSessionClosesStream:
@Test
public void testWriteAfterSessionClosesStream() throws IOException {
    final ContentClaim claim = contentRepo.create(false);
    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder()
        .contentClaim(claim)
        .addAttribute("uuid", "12345678-1234-1234-1234-123456789012")
        .entryDate(System.currentTimeMillis())
        .build();
    flowFileQueue.put(flowFileRecord);
    FlowFile flowFile = session.get();
    assertNotNull(flowFile);
    final AtomicReference<OutputStream> outputStreamHolder = new AtomicReference<>(null);
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            // capture the stream so it can be checked after write(...) returns
            outputStreamHolder.set(out);
        }
    });
    assertDisabled(outputStreamHolder.get());
}
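
assertDisabled(...) is a private helper in the test class that is not shown in this excerpt. A plausible sketch of what it checks, assuming JUnit 4 as the surrounding tests use: once session.write(...) has returned, the stream captured by the callback must refuse further writes:

// Hypothetical sketch of the assertDisabled(...) helper (not shown in this excerpt).
private void assertDisabled(final OutputStream outputStream) {
    try {
        outputStream.write(0);
        Assert.fail("Expected the stream to be disabled once the callback returned");
    } catch (final Exception expected) {
        // expected: the session disables the stream when write(...) completes
    }
}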
Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
From class TestStandardProcessSession, method testRollbackAfterCheckpoint:
@Test
public void testRollbackAfterCheckpoint() {
    final FlowFileRecord flowFileRecord = new StandardFlowFileRecord.Builder()
        .addAttribute("uuid", "12345678-1234-1234-1234-123456789012")
        .entryDate(System.currentTimeMillis())
        .contentClaim(new StandardContentClaim(resourceClaimManager.newResourceClaim("x", "x", "0", true, false), 0L))
        .contentClaimOffset(0L)
        .size(0L)
        .build();
    flowFileQueue.put(flowFileRecord);
    final FlowFile originalFlowFile = session.get();
    assertTrue(flowFileQueue.isActiveQueueEmpty());
    assertEquals(1, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount());
    final FlowFile modified = session.write(originalFlowFile, new OutputStreamCallback() {
        @Override
        public void process(OutputStream out) throws IOException {
            out.write("Hello".getBytes());
        }
    });
    session.transfer(modified);
    session.checkpoint();
    assertTrue(flowFileQueue.isActiveQueueEmpty());
    session.rollback();
    assertTrue(flowFileQueue.isActiveQueueEmpty());
    assertEquals(0, flowFileQueue.size().getObjectCount());
    assertEquals(0, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount());
    session.rollback();
    flowFileQueue.put(flowFileRecord);
    assertFalse(flowFileQueue.isActiveQueueEmpty());
    final FlowFile originalRound2 = session.get();
    assertTrue(flowFileQueue.isActiveQueueEmpty());
    assertEquals(1, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount());
    final FlowFile modifiedRound2 = session.write(originalRound2, new OutputStreamCallback() {
        @Override
        public void process(OutputStream out) throws IOException {
            out.write("Hello".getBytes());
        }
    });
    session.transfer(modifiedRound2);
    session.checkpoint();
    assertTrue(flowFileQueue.isActiveQueueEmpty());
    assertEquals(1, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount());
    session.commit();
    // FlowFile transferred back to queue
    assertEquals(1, flowFileQueue.size().getObjectCount());
    assertEquals(0, flowFileQueue.getUnacknowledgedQueueSize().getObjectCount());
    assertFalse(flowFileQueue.isActiveQueueEmpty());
}
Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
From class TestStandardProcessSession, method testAppendDoesNotDecrementContentClaimIfNotNeeded:
@Test
public void testAppendDoesNotDecrementContentClaimIfNotNeeded() {
    FlowFile flowFile = session.create();
    session.append(flowFile, new OutputStreamCallback() {
        @Override
        public void process(OutputStream out) throws IOException {
            out.write("hello".getBytes());
        }
    });
    final Set<ContentClaim> existingClaims = contentRepo.getExistingClaims();
    assertEquals(1, existingClaims.size());
    final ContentClaim claim = existingClaims.iterator().next();
    final int countAfterAppend = contentRepo.getClaimantCount(claim);
    assertEquals(1, countAfterAppend);
}
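
The assertions above pin down the append accounting: after create() followed by one append(...), exactly one content claim exists and its claimant count is still 1. A short usage sketch of ProcessSession.append(FlowFile, OutputStreamCallback) in lambda form (note that processor code should keep the FlowFile returned by append(...), which this test skips because it only inspects the repository):

// Sketch: successive appends within a session are expected to extend the
// FlowFile's existing content claim rather than claim new content each time,
// which is the accounting the claimant-count assertion above relies on.
FlowFile ff = session.create();
ff = session.append(ff, out -> out.write("hello".getBytes()));
ff = session.append(ff, out -> out.write(" world".getBytes()));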