Use of com.thinkbiganalytics.nifi.core.api.metadata.MetadataProviderService in project kylo by Teradata.
From the class UpdateFeedHistoryReindexTest, method setFeedStatus_ValidStateDirty:
@Test
public void setFeedStatus_ValidStateDirty() throws Exception {
    final String METADATA_SERVICE_IDENTIFIER = "MockMetadataProviderService_Minimal";
    final TestRunner runner = TestRunners.newTestRunner(UpdateFeedHistoryReindex.class);
    final MetadataProviderService metadataService = new MockMetadataProviderService_Minimal();
    runner.addControllerService(METADATA_SERVICE_IDENTIFIER, metadataService);
    runner.enableControllerService(metadataService);
    runner.setProperty(UpdateFeedHistoryReindex.METADATA_SERVICE, METADATA_SERVICE_IDENTIFIER);
    runner.setProperty(UpdateFeedHistoryReindex.FEED_ID, "feed-0-id");
    runner.setProperty(UpdateFeedHistoryReindex.FEED_REINDEX_STATUS, "DIRTY");
    runner.assertValid();
}
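Both validity tests (this one and the NEVER_RUN variant below) only need a controller service that can be registered and enabled; it is never asked for real metadata. A minimal sketch of what MockMetadataProviderService_Minimal could look like, assuming MetadataProviderService exposes getProvider() and getRecorder() accessors (an assumption about the kylo interface, not a confirmed signature):

public class MockMetadataProviderService_Minimal extends AbstractControllerService implements MetadataProviderService {

    @Override
    public MetadataProvider getProvider() {
        // Never consulted by assertValid(); validation only checks property wiring.
        return null;
    }

    @Override
    public MetadataRecorder getRecorder() {
        return null;
    }
}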
Use of com.thinkbiganalytics.nifi.core.api.metadata.MetadataProviderService in project kylo by Teradata.
From the class UpdateFeedHistoryReindexTest, method setFeedStatus_ValidStateNeverRun:
@Test
public void setFeedStatus_ValidStateNeverRun() throws Exception {
    final String METADATA_SERVICE_IDENTIFIER = "MockMetadataProviderService_Minimal";
    final TestRunner runner = TestRunners.newTestRunner(UpdateFeedHistoryReindex.class);
    final MetadataProviderService metadataService = new MockMetadataProviderService_Minimal();
    runner.addControllerService(METADATA_SERVICE_IDENTIFIER, metadataService);
    runner.enableControllerService(metadataService);
    runner.setProperty(UpdateFeedHistoryReindex.METADATA_SERVICE, METADATA_SERVICE_IDENTIFIER);
    runner.setProperty(UpdateFeedHistoryReindex.FEED_ID, "feed-0-id");
    runner.setProperty(UpdateFeedHistoryReindex.FEED_REINDEX_STATUS, "NEVER_RUN");
    runner.assertValid();
}
Use of com.thinkbiganalytics.nifi.core.api.metadata.MetadataProviderService in project kylo by Teradata.
From the class UpdateFeedHistoryReindexTest, method setFeedStatus_ToDirty:
@Test
public void setFeedStatus_ToDirty() throws Exception {
    final String METADATA_SERVICE_IDENTIFIER = "MockMetadataProviderService_SetFeedToDirty";
    final TestRunner runner = TestRunners.newTestRunner(UpdateFeedHistoryReindex.class);
    final MetadataProviderService metadataService = new MockMetadataProviderService_SetFeedToDirty();
    runner.addControllerService(METADATA_SERVICE_IDENTIFIER, metadataService);
    runner.enableControllerService(metadataService);
    runner.setProperty(UpdateFeedHistoryReindex.METADATA_SERVICE, METADATA_SERVICE_IDENTIFIER);
    runner.setProperty(UpdateFeedHistoryReindex.FEED_ID, "feed-0-id");
    runner.setProperty(UpdateFeedHistoryReindex.FEED_REINDEX_STATUS, "DIRTY");
    runner.run(1);
    runner.assertQueueEmpty();
    runner.assertTransferCount(UpdateFeedHistoryReindex.REL_SUCCESS, 1);

    List<MockFlowFile> results = runner.getFlowFilesForRelationship(UpdateFeedHistoryReindex.REL_SUCCESS);
    MockFlowFile resultFlowFile = results.get(0);
    resultFlowFile.assertAttributeExists(UpdateFeedHistoryReindex.UPDATED_FEED_INFO_FOR_HISTORY_REINDEX_KEY);
    resultFlowFile.assertAttributeExists(UpdateFeedHistoryReindex.UPDATED_FEED_STATUS_FOR_HISTORY_REINDEX_KEY);
    resultFlowFile.assertAttributeExists(UpdateFeedHistoryReindex.UPDATED_TIME_UTC_FOR_HISTORY_REINDEX_KEY);
    resultFlowFile.assertAttributeExists(UpdateFeedHistoryReindex.UPDATED_INDEX_COLUMNS_STRING_FOR_HISTORY_REINDEX_KEY);
    resultFlowFile.assertAttributeEquals(UpdateFeedHistoryReindex.UPDATED_FEED_INFO_FOR_HISTORY_REINDEX_KEY, "feed id: feed-0-id, feed name: cat-0-system-name.feed-0-system-name");
    resultFlowFile.assertAttributeEquals(UpdateFeedHistoryReindex.UPDATED_FEED_STATUS_FOR_HISTORY_REINDEX_KEY, "DIRTY");
    String updatedTimeUtcForHistoryReindexActualValue = resultFlowFile.getAttribute(UpdateFeedHistoryReindex.UPDATED_TIME_UTC_FOR_HISTORY_REINDEX_KEY);
    String updatedTimeUtcForHistoryReindexExpectedValueWithoutTimeZone = "2017-12-21T11:43:23.345";
    assertTrue(updatedTimeUtcForHistoryReindexActualValue.contains(updatedTimeUtcForHistoryReindexExpectedValueWithoutTimeZone));
    resultFlowFile.assertAttributeEquals(UpdateFeedHistoryReindex.UPDATED_INDEX_COLUMNS_STRING_FOR_HISTORY_REINDEX_KEY, "col1,col2,col3");
}
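The canned values asserted above ("DIRTY", "col1,col2,col3", the fixed UTC timestamp) must come from the mock itself. A rough sketch of MockMetadataProviderService_SetFeedToDirty; note that updateFeedHistoryReindex and FeedDataHistoryReindexParams are assumptions about the kylo provider API, shown only to illustrate the wiring:

public class MockMetadataProviderService_SetFeedToDirty extends AbstractControllerService implements MetadataProviderService {

    @Override
    public MetadataProvider getProvider() {
        final MetadataProvider provider = Mockito.mock(MetadataProvider.class);
        // Hypothetical: return a canned response whose getters supply the feed name,
        // status, update time, and index columns that the test asserts on.
        final FeedDataHistoryReindexParams params = Mockito.mock(FeedDataHistoryReindexParams.class, Mockito.RETURNS_DEEP_STUBS);
        Mockito.when(provider.updateFeedHistoryReindex(Mockito.anyString(), Mockito.any())).thenReturn(params);
        return provider;
    }

    @Override
    public MetadataRecorder getRecorder() {
        return null;
    }
}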
Use of com.thinkbiganalytics.nifi.core.api.metadata.MetadataProviderService in project kylo by Teradata.
From the class UpdateFeedHistoryReindexTest, method setFeedStatus_ThrowsGeneralException:
@Test
public void setFeedStatus_ThrowsGeneralException() throws Exception {
    final String METADATA_SERVICE_IDENTIFIER = "MockMetadataProviderService_ThrowsGeneralException";
    final TestRunner runner = TestRunners.newTestRunner(UpdateFeedHistoryReindex.class);
    final MetadataProviderService metadataService = new MockMetadataProviderService_ThrowsGeneralException();
    runner.addControllerService(METADATA_SERVICE_IDENTIFIER, metadataService);
    runner.enableControllerService(metadataService);
    runner.setProperty(UpdateFeedHistoryReindex.METADATA_SERVICE, METADATA_SERVICE_IDENTIFIER);
    runner.setProperty(UpdateFeedHistoryReindex.FEED_ID, "feed-0-id");
    runner.setProperty(UpdateFeedHistoryReindex.FEED_REINDEX_STATUS, "DIRTY");
    runner.run(1);
    runner.assertQueueEmpty();
    runner.assertTransferCount(UpdateFeedHistoryReindex.REL_FAILURE, 1);
}
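The failure-path mock just needs every provider interaction to blow up so the processor routes to REL_FAILURE. One plausible way to write it, using Mockito's default-answer hook (again assuming the getProvider()/getRecorder() surface):

public class MockMetadataProviderService_ThrowsGeneralException extends AbstractControllerService implements MetadataProviderService {

    @Override
    public MetadataProvider getProvider() {
        // Any method call on the returned provider throws a RuntimeException.
        return Mockito.mock(MetadataProvider.class, invocation -> {
            throw new RuntimeException("general exception for testing");
        });
    }

    @Override
    public MetadataRecorder getRecorder() {
        return null;
    }
}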
Use of com.thinkbiganalytics.nifi.core.api.metadata.MetadataProviderService in project kylo by Teradata.
From the class GetTableData, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = null;
    if (context.hasIncomingConnection()) {
        flowFile = session.get();
        // We should only run if we have a FlowFile, unless the only incoming connections are loops.
        if (flowFile == null && context.hasNonLoopConnection()) {
            return;
        }
    }
    final FlowFile incoming = flowFile;
    final ComponentLog logger = getLog();
    final DBCPService dbcpService = context.getProperty(JDBC_SERVICE).asControllerService(DBCPService.class);
    final MetadataProviderService metadataService = context.getProperty(METADATA_SERVICE).asControllerService(MetadataProviderService.class);
    final String loadStrategy = context.getProperty(LOAD_STRATEGY).getValue();
    final String categoryName = context.getProperty(FEED_CATEGORY).evaluateAttributeExpressions(incoming).getValue();
    final String feedName = context.getProperty(FEED_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(incoming).getValue();
    final String fieldSpecs = context.getProperty(TABLE_SPECS).evaluateAttributeExpressions(incoming).getValue();
    final String dateField = context.getProperty(DATE_FIELD).evaluateAttributeExpressions(incoming).getValue();
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer overlapTime = context.getProperty(OVERLAP_TIME).evaluateAttributeExpressions(incoming).asTimePeriod(TimeUnit.SECONDS).intValue();
    final Integer backoffTime = context.getProperty(BACKOFF_PERIOD).asTimePeriod(TimeUnit.SECONDS).intValue();
    final String unitSize = context.getProperty(UNIT_SIZE).getValue();
    final String outputType = context.getProperty(OUTPUT_TYPE).getValue();
    String outputDelimiter = context.getProperty(OUTPUT_DELIMITER).evaluateAttributeExpressions(incoming).getValue();
    final String delimiter = StringUtils.isBlank(outputDelimiter) ? "," : outputDelimiter;
    final PropertyValue waterMarkPropName = context.getProperty(HIGH_WATER_MARK_PROP).evaluateAttributeExpressions(incoming);
    final String[] selectFields = parseFields(fieldSpecs);
    final LoadStrategy strategy = LoadStrategy.valueOf(loadStrategy);
    final StopWatch stopWatch = new StopWatch(true);

    try (final Connection conn = dbcpService.getConnection()) {
        FlowFile outgoing = (incoming == null ? session.create() : incoming);
        final AtomicLong nrOfRows = new AtomicLong(0L);
        final LastFieldVisitor visitor = new LastFieldVisitor(dateField, null);
        final FlowFile current = outgoing;
        outgoing = session.write(outgoing, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                ResultSet rs = null;
                try {
                    GetTableDataSupport support = new GetTableDataSupport(conn, queryTimeout);
                    if (strategy == LoadStrategy.FULL_LOAD) {
                        rs = support.selectFullLoad(tableName, selectFields);
                    } else if (strategy == LoadStrategy.INCREMENTAL) {
                        // Resume from the last recorded high-water mark, overlapping slightly to avoid gaps.
                        String waterMarkValue = getIncrementalWaterMarkValue(current, waterMarkPropName);
                        LocalDateTime waterMarkTime = LocalDateTime.parse(waterMarkValue, DATE_TIME_FORMAT);
                        Date lastLoadDate = toDate(waterMarkTime);
                        visitor.setLastModifyDate(lastLoadDate);
                        rs = support.selectIncremental(tableName, selectFields, dateField, overlapTime, lastLoadDate, backoffTime, GetTableDataSupport.UnitSizes.valueOf(unitSize));
                    } else {
                        throw new RuntimeException("Unsupported load strategy [" + loadStrategy + "]");
                    }
                    if (GetTableDataSupport.OutputType.DELIMITED.equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        nrOfRows.set(JdbcCommon.convertToDelimitedStream(rs, out, (strategy == LoadStrategy.INCREMENTAL ? visitor : null), delimiter));
                    } else if (GetTableDataSupport.OutputType.AVRO.equals(GetTableDataSupport.OutputType.valueOf(outputType))) {
                        // avroSchema is an instance field; it is reused below when setting the schema attribute.
                        avroSchema = JdbcCommon.createSchema(rs);
                        nrOfRows.set(JdbcCommon.convertToAvroStream(rs, out, (strategy == LoadStrategy.INCREMENTAL ? visitor : null), avroSchema));
                    } else {
                        throw new RuntimeException("Unsupported output format type [" + outputType + "]");
                    }
                } catch (final SQLException e) {
                    throw new IOException("SQL execution failure", e);
                } finally {
                    if (rs != null) {
                        try {
                            if (rs.getStatement() != null) {
                                rs.getStatement().close();
                            }
                            rs.close();
                        } catch (SQLException e) {
                            getLog().error("Error closing SQL statement and result set", e);
                        }
                    }
                }
            }
        });

        // Record how many rows were selected.
        outgoing = session.putAttribute(outgoing, RESULT_ROW_COUNT, Long.toString(nrOfRows.get()));

        // Record the output format type and the Avro schema for feed setup, if available.
        outgoing = session.putAttribute(outgoing, "db.table.output.format", outputType);
        String avroSchemaForFeedSetup = (avroSchema != null) ? JdbcCommon.getAvroSchemaForFeedSetup(avroSchema) : EMPTY_STRING;
        outgoing = session.putAttribute(outgoing, "db.table.avro.schema", avroSchemaForFeedSetup);

        session.getProvenanceReporter().modifyContent(outgoing, "Retrieved " + nrOfRows.get() + " rows", stopWatch.getElapsed(TimeUnit.MILLISECONDS));

        // Route to 'nodata' if there was no work; otherwise update the high-water mark (incremental only) and route to 'success'.
        Long rowCount = nrOfRows.get();
        outgoing = session.putAttribute(outgoing, ComponentAttributes.NUM_SOURCE_RECORDS.key(), String.valueOf(rowCount));
        if (nrOfRows.get() == 0L) {
            logger.info("{} contains no data; transferring to 'nodata'", new Object[] { outgoing });
            session.transfer(outgoing, REL_NO_DATA);
        } else {
            logger.info("{} contains {} records; transferring to 'success'", new Object[] { outgoing, nrOfRows.get() });
            if (strategy == LoadStrategy.INCREMENTAL) {
                String newWaterMarkStr = format(visitor.getLastModifyDate());
                outgoing = setIncrementalWaterMarkValue(session, outgoing, waterMarkPropName, newWaterMarkStr);
                logger.info("Recorded load status for feed {}, date {}", new Object[] { feedName, newWaterMarkStr });
            }
            session.transfer(outgoing, REL_SUCCESS);
        }
    } catch (final Exception e) {
        if (incoming == null) {
            logger.error("Unable to execute SQL select from table due to {}; no incoming flow file to route to failure", new Object[] { e });
        } else {
            logger.error("Unable to execute SQL select from table for {} due to {}; routing to failure", new Object[] { incoming, e });
            session.transfer(incoming, REL_FAILURE);
        }
    }
}
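Exercising GetTableData follows the same TestRunner pattern as the reindex tests above. A minimal full-load sketch; the DBCPService stub, the service identifiers, and the exact set of required properties are assumptions here, since only the property names visible in onTrigger() are confirmed by the code:

final TestRunner runner = TestRunners.newTestRunner(GetTableData.class);
runner.addControllerService("jdbc-service", dbcpService);         // a DBCPService stub, e.g. backed by an in-memory database
runner.addControllerService("metadata-service", metadataService); // any MetadataProviderService mock, as in the tests above
runner.enableControllerService(dbcpService);
runner.enableControllerService(metadataService);
runner.setProperty(GetTableData.JDBC_SERVICE, "jdbc-service");
runner.setProperty(GetTableData.METADATA_SERVICE, "metadata-service");
runner.setProperty(GetTableData.FEED_CATEGORY, "cat-0-system-name");
runner.setProperty(GetTableData.FEED_NAME, "feed-0-system-name");
runner.setProperty(GetTableData.TABLE_NAME, "example_table");
runner.setProperty(GetTableData.LOAD_STRATEGY, "FULL_LOAD");
runner.enqueue(new byte[0]);
runner.run();
runner.assertTransferCount(GetTableData.REL_SUCCESS, 1); // or REL_NO_DATA when the table returns no rows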