Use of org.apache.nifi.serialization.RecordReaderFactory in project nifi by apache.
The class PutDruidRecord, method processFlowFile.
/**
* Parses the record(s), converts each to a Map, and sends via Tranquility to the Druid Indexing Service
*
* @param context The process context
* @param session The process session
*/
@SuppressWarnings("unchecked")
private void processFlowFile(ProcessContext context, final ProcessSession session) {
final ComponentLog log = getLogger();
// Get handle on Druid Tranquility session
DruidTranquilityService tranquilityController = context.getProperty(DRUID_TRANQUILITY_SERVICE).asControllerService(DruidTranquilityService.class);
Tranquilizer<Map<String, Object>> tranquilizer = tranquilityController.getTranquilizer();
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
// Create the outgoing flow files and output streams
FlowFile droppedFlowFile = session.create(flowFile);
final AtomicInteger droppedFlowFileCount = new AtomicInteger(0);
FlowFile failedFlowFile = session.create(flowFile);
final AtomicInteger failedFlowFileCount = new AtomicInteger(0);
FlowFile successfulFlowFile = session.create(flowFile);
final AtomicInteger successfulFlowFileCount = new AtomicInteger(0);
final AtomicInteger recordWriteErrors = new AtomicInteger(0);
int recordCount = 0;
final OutputStream droppedOutputStream = session.write(droppedFlowFile);
final RecordSetWriter droppedRecordWriter;
final OutputStream failedOutputStream = session.write(failedFlowFile);
final RecordSetWriter failedRecordWriter;
final OutputStream successfulOutputStream = session.write(successfulFlowFile);
final RecordSetWriter successfulRecordWriter;
try (final InputStream in = session.read(flowFile)) {
final RecordReaderFactory recordParserFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class);
final Map<String, String> attributes = flowFile.getAttributes();
final RecordReader reader = recordParserFactory.createRecordReader(flowFile, in, getLogger());
final RecordSchema outSchema = writerFactory.getSchema(attributes, reader.getSchema());
droppedRecordWriter = writerFactory.createWriter(log, outSchema, droppedOutputStream);
droppedRecordWriter.beginRecordSet();
failedRecordWriter = writerFactory.createWriter(log, outSchema, failedOutputStream);
failedRecordWriter.beginRecordSet();
successfulRecordWriter = writerFactory.createWriter(log, outSchema, successfulOutputStream);
successfulRecordWriter.beginRecordSet();
Record r;
while ((r = reader.nextRecord()) != null) {
final Record record = r;
recordCount++;
// Convert each Record to HashMap and send to Druid
Map<String, Object> contentMap = (Map<String, Object>) DataTypeUtils.convertRecordFieldtoObject(r, RecordFieldType.RECORD.getRecordDataType(r.getSchema()));
log.debug("Tranquilizer Status: {}", new Object[] { tranquilizer.status().toString() });
// Send data element to Druid asynchronously
Future<BoxedUnit> future = tranquilizer.send(contentMap);
log.debug("Sent Payload to Druid: {}", new Object[] { contentMap });
// Wait for Druid to call back with status
future.addEventListener(new FutureEventListener<Object>() {
@Override
public void onFailure(Throwable cause) {
if (cause instanceof MessageDroppedException) {
// This happens when event timestamp targets a Druid Indexing task that has closed (Late Arriving Data)
log.debug("Record Dropped due to MessageDroppedException: {}, transferring record to dropped.", new Object[] { cause.getMessage() }, cause);
try {
synchronized (droppedRecordWriter) {
droppedRecordWriter.write(record);
droppedRecordWriter.flush();
droppedFlowFileCount.incrementAndGet();
}
} catch (final IOException ioe) {
log.error("Error transferring record to dropped, this may result in data loss.", new Object[] { ioe.getMessage() }, ioe);
recordWriteErrors.incrementAndGet();
}
} else {
log.error("FlowFile Processing Failed due to: {}", new Object[] { cause.getMessage() }, cause);
try {
synchronized (failedRecordWriter) {
failedRecordWriter.write(record);
failedRecordWriter.flush();
failedFlowFileCount.incrementAndGet();
}
} catch (final IOException ioe) {
log.error("Error transferring record to failure, this may result in data loss.", new Object[] { ioe.getMessage() }, ioe);
recordWriteErrors.incrementAndGet();
}
}
}
@Override
public void onSuccess(Object value) {
log.debug(" FlowFile Processing Success: {}", new Object[] { value.toString() });
try {
synchronized (successfulRecordWriter) {
successfulRecordWriter.write(record);
successfulRecordWriter.flush();
successfulFlowFileCount.incrementAndGet();
}
} catch (final IOException ioe) {
log.error("Error transferring record to success, this may result in data loss. " + "However the record was successfully processed by Druid", new Object[] { ioe.getMessage() }, ioe);
recordWriteErrors.incrementAndGet();
}
}
});
}
} catch (IOException | SchemaNotFoundException | MalformedRecordException e) {
log.error("FlowFile Processing Failed due to: {}", new Object[] { e.getMessage() }, e);
// Route the original FlowFile to failure and discard the (now unusable) outgoing FlowFiles
flowFile = session.putAttribute(flowFile, RECORD_COUNT, Integer.toString(recordCount));
session.transfer(flowFile, REL_FAILURE);
try {
droppedOutputStream.close();
session.remove(droppedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with dropped records.", ioe);
}
try {
failedOutputStream.close();
session.remove(failedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with failed records.", ioe);
}
try {
successfulOutputStream.close();
session.remove(successfulFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with successful records.", ioe);
}
session.commit();
return;
}
if (recordCount == 0) {
// Send original (empty) flow file to success, remove the rest
flowFile = session.putAttribute(flowFile, RECORD_COUNT, "0");
session.transfer(flowFile, REL_SUCCESS);
try {
droppedOutputStream.close();
session.remove(droppedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with dropped records.", ioe);
}
try {
failedOutputStream.close();
session.remove(failedFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with failed records.", ioe);
}
try {
successfulOutputStream.close();
session.remove(successfulFlowFile);
} catch (IOException ioe) {
log.error("Error closing output stream for FlowFile with successful records.", ioe);
}
} else {
// Wait for all the records to finish processing
while (recordCount != (droppedFlowFileCount.get() + failedFlowFileCount.get() + successfulFlowFileCount.get() + recordWriteErrors.get())) {
Thread.yield();
}
try {
droppedRecordWriter.finishRecordSet();
droppedRecordWriter.close();
} catch (IOException ioe) {
log.error("Error closing FlowFile with dropped records: {}", new Object[] { ioe.getMessage() }, ioe);
session.rollback();
throw new ProcessException(ioe);
}
if (droppedFlowFileCount.get() > 0) {
droppedFlowFile = session.putAttribute(droppedFlowFile, RECORD_COUNT, Integer.toString(droppedFlowFileCount.get()));
session.transfer(droppedFlowFile, REL_DROPPED);
} else {
session.remove(droppedFlowFile);
}
try {
failedRecordWriter.finishRecordSet();
failedRecordWriter.close();
} catch (IOException ioe) {
log.error("Error closing FlowFile with failed records: {}", new Object[] { ioe.getMessage() }, ioe);
session.rollback();
throw new ProcessException(ioe);
}
if (failedFlowFileCount.get() > 0) {
failedFlowFile = session.putAttribute(failedFlowFile, RECORD_COUNT, Integer.toString(failedFlowFileCount.get()));
session.transfer(failedFlowFile, REL_FAILURE);
} else {
session.remove(failedFlowFile);
}
try {
successfulRecordWriter.finishRecordSet();
successfulRecordWriter.close();
} catch (IOException ioe) {
log.error("Error closing FlowFile with successful records: {}", new Object[] { ioe.getMessage() }, ioe);
session.rollback();
throw new ProcessException(ioe);
}
if (successfulFlowFileCount.get() > 0) {
successfulFlowFile = session.putAttribute(successfulFlowFile, RECORD_COUNT, Integer.toString(successfulFlowFileCount.get()));
session.transfer(successfulFlowFile, REL_SUCCESS);
session.getProvenanceReporter().send(successfulFlowFile, tranquilityController.getTransitUri());
} else {
session.remove(successfulFlowFile);
}
session.remove(flowFile);
}
session.commit();
}
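Stripped of the Druid-specific routing and the asynchronous callbacks, the reader/writer pairing above reduces to a small, reusable shape. The following is a minimal sketch, not the processor's actual code; it assumes it lives in an AbstractProcessor subclass that declares the RECORD_READER_FACTORY and RECORD_WRITER_FACTORY properties and the REL_SUCCESS/REL_FAILURE relationships seen above.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.schema.access.SchemaNotFoundException;
import org.apache.nifi.serialization.MalformedRecordException;
import org.apache.nifi.serialization.RecordReader;
import org.apache.nifi.serialization.RecordReaderFactory;
import org.apache.nifi.serialization.RecordSetWriter;
import org.apache.nifi.serialization.RecordSetWriterFactory;
import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordSchema;

// Sketch only: copy every record from the incoming FlowFile to a new outgoing FlowFile.
// RECORD_READER_FACTORY, RECORD_WRITER_FACTORY, REL_SUCCESS and REL_FAILURE are assumed
// to be declared on the enclosing processor, as in PutDruidRecord above.
private void copyRecords(final ProcessContext context, final ProcessSession session, final FlowFile flowFile) {
    final RecordReaderFactory readerFactory =
            context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
    final RecordSetWriterFactory writerFactory =
            context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class);
    FlowFile output = session.create(flowFile);
    try (final InputStream in = session.read(flowFile);
         final RecordReader reader = readerFactory.createRecordReader(flowFile, in, getLogger());
         final OutputStream out = session.write(output)) {
        final RecordSchema outSchema = writerFactory.getSchema(flowFile.getAttributes(), reader.getSchema());
        try (final RecordSetWriter writer = writerFactory.createWriter(getLogger(), outSchema, out)) {
            writer.beginRecordSet();
            Record record;
            while ((record = reader.nextRecord()) != null) {
                // transform or inspect each record here before writing it out
                writer.write(record);
            }
            writer.finishRecordSet();
        }
    } catch (final IOException | SchemaNotFoundException | MalformedRecordException e) {
        getLogger().error("Failed to process {}", new Object[] { flowFile }, e);
        session.remove(output);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    session.remove(flowFile);
    session.transfer(output, REL_SUCCESS);
}

Because the reader, the writer, and both session streams are opened in try-with-resources blocks, they are guaranteed to be closed before the outgoing FlowFile is transferred, which is a precondition for committing the session.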
Use of org.apache.nifi.serialization.RecordReaderFactory in project nifi by apache.
The class PublishKafkaRecord_1_0, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
final List<FlowFile> flowFiles = session.get(FlowFileFilters.newSizeBasedFilter(1, DataUnit.MB, 500));
if (flowFiles.isEmpty()) {
return;
}
final PublisherPool pool = getPublisherPool(context);
if (pool == null) {
context.yield();
return;
}
final String securityProtocol = context.getProperty(KafkaProcessorUtils.SECURITY_PROTOCOL).getValue();
final String bootstrapServers = context.getProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS).evaluateAttributeExpressions().getValue();
final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
final boolean useTransactions = context.getProperty(USE_TRANSACTIONS).asBoolean();
final long startTime = System.nanoTime();
try (final PublisherLease lease = pool.obtainPublisher()) {
if (useTransactions) {
lease.beginTransaction();
}
// Send each FlowFile to Kafka asynchronously.
final Iterator<FlowFile> itr = flowFiles.iterator();
while (itr.hasNext()) {
final FlowFile flowFile = itr.next();
if (!isScheduled()) {
// If stopped, re-queue FlowFile instead of sending it
if (useTransactions) {
session.rollback();
lease.rollback();
return;
}
session.transfer(flowFile);
itr.remove();
continue;
}
final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions(flowFile).getValue();
final String messageKeyField = context.getProperty(MESSAGE_KEY_FIELD).evaluateAttributeExpressions(flowFile).getValue();
try {
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream rawIn) throws IOException {
try (final InputStream in = new BufferedInputStream(rawIn)) {
final RecordReader reader = readerFactory.createRecordReader(flowFile, in, getLogger());
final RecordSet recordSet = reader.createRecordSet();
final RecordSchema schema = writerFactory.getSchema(flowFile.getAttributes(), recordSet.getSchema());
lease.publish(flowFile, recordSet, writerFactory, schema, messageKeyField, topic);
} catch (final SchemaNotFoundException | MalformedRecordException e) {
throw new ProcessException(e);
}
}
});
} catch (final Exception e) {
// The failure will be reported below, once the lease is completed and the publish result is examined
lease.fail(flowFile, e);
continue;
}
}
// Complete the send
final PublishResult publishResult = lease.complete();
if (publishResult.isFailure()) {
getLogger().info("Failed to send FlowFile to kafka; transferring to failure");
session.transfer(flowFiles, REL_FAILURE);
return;
}
// Transfer any successful FlowFiles.
final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
for (FlowFile success : flowFiles) {
final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions(success).getValue();
final int msgCount = publishResult.getSuccessfulMessageCount(success);
success = session.putAttribute(success, MSG_COUNT, String.valueOf(msgCount));
session.adjustCounter("Messages Sent", msgCount, true);
final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, topic);
session.getProvenanceReporter().send(success, transitUri, "Sent " + msgCount + " messages", transmissionMillis);
session.transfer(success, REL_SUCCESS);
}
}
}
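The Kafka publisher above never buffers the whole FlowFile: it builds a RecordSet inside a session.read callback and streams it to the lease. A minimal sketch of that streaming shape follows; the Consumer is a hypothetical stand-in for whatever is done with each record (here, handing the set to PublisherLease.publish), and the helper is not part of the actual processor.

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.function.Consumer;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.exception.ProcessException;
import org.apache.nifi.processor.io.InputStreamCallback;
import org.apache.nifi.schema.access.SchemaNotFoundException;
import org.apache.nifi.serialization.MalformedRecordException;
import org.apache.nifi.serialization.RecordReader;
import org.apache.nifi.serialization.RecordReaderFactory;
import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordSet;

// Sketch: stream the records of a FlowFile to a sink without loading the whole
// content into memory. The Consumer is a hypothetical per-record callback.
static void forEachRecord(final ProcessSession session, final FlowFile flowFile,
                          final RecordReaderFactory readerFactory, final ComponentLog logger,
                          final Consumer<Record> sink) {
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream rawIn) throws IOException {
            try (final InputStream in = new BufferedInputStream(rawIn);
                 final RecordReader reader = readerFactory.createRecordReader(flowFile, in, logger)) {
                final RecordSet recordSet = reader.createRecordSet();
                Record record;
                while ((record = recordSet.next()) != null) {
                    sink.accept(record);
                }
            } catch (final SchemaNotFoundException | MalformedRecordException e) {
                throw new ProcessException(e);
            }
        }
    });
}

createRecordSet() simply wraps the reader, so next() pulls records lazily rather than materializing the whole set, which is what lets the publisher serialize one record at a time.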
Use of org.apache.nifi.serialization.RecordReaderFactory in project nifi by apache.
The class ConsumeKafkaRecord_1_0, method createConsumerPool.
protected ConsumerPool createConsumerPool(final ProcessContext context, final ComponentLog log) {
final int maxLeases = context.getMaxConcurrentTasks();
final long maxUncommittedTime = context.getProperty(MAX_UNCOMMITTED_TIME).asTimePeriod(TimeUnit.MILLISECONDS);
final Map<String, Object> props = new HashMap<>();
KafkaProcessorUtils.buildCommonKafkaProperties(context, ConsumerConfig.class, props);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
final String topicListing = context.getProperty(ConsumeKafkaRecord_1_0.TOPICS).evaluateAttributeExpressions().getValue();
final String topicType = context.getProperty(ConsumeKafkaRecord_1_0.TOPIC_TYPE).evaluateAttributeExpressions().getValue();
final List<String> topics = new ArrayList<>();
final String securityProtocol = context.getProperty(KafkaProcessorUtils.SECURITY_PROTOCOL).getValue();
final String bootstrapServers = context.getProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS).evaluateAttributeExpressions().getValue();
final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
final boolean honorTransactions = context.getProperty(HONOR_TRANSACTIONS).asBoolean();
final String charsetName = context.getProperty(MESSAGE_HEADER_ENCODING).evaluateAttributeExpressions().getValue();
final Charset charset = Charset.forName(charsetName);
final String headerNameRegex = context.getProperty(HEADER_NAME_REGEX).getValue();
final Pattern headerNamePattern = headerNameRegex == null ? null : Pattern.compile(headerNameRegex);
if (topicType.equals(TOPIC_NAME.getValue())) {
for (final String topic : topicListing.split(",", 100)) {
final String trimmedName = topic.trim();
if (!trimmedName.isEmpty()) {
topics.add(trimmedName);
}
}
return new ConsumerPool(maxLeases, readerFactory, writerFactory, props, topics, maxUncommittedTime, securityProtocol, bootstrapServers, log, honorTransactions, charset, headerNamePattern);
} else if (topicType.equals(TOPIC_PATTERN.getValue())) {
final Pattern topicPattern = Pattern.compile(topicListing.trim());
return new ConsumerPool(maxLeases, readerFactory, writerFactory, props, topicPattern, maxUncommittedTime, securityProtocol, bootstrapServers, log, honorTransactions, charset, headerNamePattern);
} else {
getLogger().error("Subscription type has an unknown value {}", new Object[] { topicType });
return null;
}
}
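createConsumerPool only looks the factories up; for asControllerService(RecordReaderFactory.class) to resolve, the processor must declare RECORD_READER and RECORD_WRITER as controller-service properties. A sketch of what such declarations look like is below; the property names, display names, and descriptions are illustrative rather than copied from the actual processor.

import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.serialization.RecordReaderFactory;
import org.apache.nifi.serialization.RecordSetWriterFactory;

// Illustrative property descriptors; the real processors define their own names and descriptions.
static final PropertyDescriptor RECORD_READER = new PropertyDescriptor.Builder()
        .name("record-reader")
        .displayName("Record Reader")
        .description("The Record Reader to use for parsing incoming messages into records")
        .identifiesControllerService(RecordReaderFactory.class)
        .required(true)
        .build();

static final PropertyDescriptor RECORD_WRITER = new PropertyDescriptor.Builder()
        .name("record-writer")
        .displayName("Record Writer")
        .description("The Record Writer to use for serializing records into outgoing FlowFiles")
        .identifiesControllerService(RecordSetWriterFactory.class)
        .required(true)
        .build();

Because the properties identify controller services, the framework handles their configuration and lifecycle; the processor only ever sees the RecordReaderFactory and RecordSetWriterFactory interfaces.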
Use of org.apache.nifi.serialization.RecordReaderFactory in project nifi by apache.
The class SplitRecord, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
FlowFile original = session.get();
if (original == null) {
return;
}
final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
final int maxRecords = context.getProperty(RECORDS_PER_SPLIT).evaluateAttributeExpressions(original).asInteger();
final List<FlowFile> splits = new ArrayList<>();
final Map<String, String> originalAttributes = original.getAttributes();
try {
session.read(original, new InputStreamCallback() {
@Override
public void process(final InputStream in) throws IOException {
try (final RecordReader reader = readerFactory.createRecordReader(originalAttributes, in, getLogger())) {
final RecordSchema schema = writerFactory.getSchema(originalAttributes, reader.getSchema());
final RecordSet recordSet = reader.createRecordSet();
final PushBackRecordSet pushbackSet = new PushBackRecordSet(recordSet);
while (pushbackSet.isAnotherRecord()) {
FlowFile split = session.create(original);
try {
final Map<String, String> attributes = new HashMap<>();
final WriteResult writeResult;
try (final OutputStream out = session.write(split);
final RecordSetWriter writer = writerFactory.createWriter(getLogger(), schema, out)) {
if (maxRecords == 1) {
final Record record = pushbackSet.next();
writeResult = writer.write(record);
} else {
final RecordSet limitedSet = pushbackSet.limit(maxRecords);
writeResult = writer.write(limitedSet);
}
attributes.put("record.count", String.valueOf(writeResult.getRecordCount()));
attributes.put(CoreAttributes.MIME_TYPE.key(), writer.getMimeType());
attributes.putAll(writeResult.getAttributes());
session.adjustCounter("Records Split", writeResult.getRecordCount(), false);
}
split = session.putAllAttributes(split, attributes);
} finally {
splits.add(split);
}
}
} catch (final SchemaNotFoundException | MalformedRecordException e) {
throw new ProcessException("Failed to parse incoming data", e);
}
}
});
} catch (final ProcessException pe) {
getLogger().error("Failed to split {}", new Object[] { original, pe });
session.remove(splits);
session.transfer(original, REL_FAILURE);
return;
}
session.transfer(original, REL_ORIGINAL);
session.transfer(splits, REL_SPLITS);
getLogger().info("Successfully split {} into {} FlowFiles, each containing up to {} records", new Object[] { original, splits.size(), maxRecords });
}
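The split loop above leans on PushBackRecordSet: isAnotherRecord() peeks ahead so that an empty split is never created, and limit(maxRecords) exposes a view over just the next batch. A stripped-down sketch of that chunking pattern follows; the Consumer is a hypothetical stand-in for the RecordSetWriter that the processor writes each split to.

import java.io.IOException;
import java.util.function.Consumer;

import org.apache.nifi.serialization.record.PushBackRecordSet;
import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordSet;

// Sketch: break a RecordSet into chunks of at most maxRecords records each.
// The Consumer is a hypothetical stand-in for writing to the current split FlowFile.
static void splitIntoChunks(final RecordSet recordSet, final int maxRecords, final Consumer<Record> sink) throws IOException {
    final PushBackRecordSet pushbackSet = new PushBackRecordSet(recordSet);
    while (pushbackSet.isAnotherRecord()) {
        // limit() returns a view over at most the next maxRecords records
        final RecordSet chunk = pushbackSet.limit(maxRecords);
        Record record;
        while ((record = chunk.next()) != null) {
            sink.accept(record);
        }
    }
}

In the processor itself each chunk is handed to RecordSetWriter.write(RecordSet) in a single call, and the resulting WriteResult supplies the record.count attribute placed on every split.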
Use of org.apache.nifi.serialization.RecordReaderFactory in project nifi by apache.
The class MergeRecord, method binFlowFile.
private void binFlowFile(final ProcessContext context, final FlowFile flowFile, final ProcessSession session, final RecordBinManager binManager, final boolean block) {
final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
try (final InputStream in = session.read(flowFile);
final RecordReader reader = readerFactory.createRecordReader(flowFile, in, getLogger())) {
final RecordSchema schema = reader.getSchema();
final String groupId = getGroupId(context, flowFile, schema, session);
getLogger().debug("Got Group ID {} for {}", new Object[] { groupId, flowFile });
binManager.add(groupId, flowFile, reader, session, block);
} catch (MalformedRecordException | IOException | SchemaNotFoundException e) {
throw new ProcessException(e);
}
}