
Example 1 with RecordSet

Use of org.apache.nifi.serialization.record.RecordSet in project nifi by apache.

The class PublishKafkaRecord_1_0, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final List<FlowFile> flowFiles = session.get(FlowFileFilters.newSizeBasedFilter(1, DataUnit.MB, 500));
    if (flowFiles.isEmpty()) {
        return;
    }
    final PublisherPool pool = getPublisherPool(context);
    if (pool == null) {
        context.yield();
        return;
    }
    final String securityProtocol = context.getProperty(KafkaProcessorUtils.SECURITY_PROTOCOL).getValue();
    final String bootstrapServers = context.getProperty(KafkaProcessorUtils.BOOTSTRAP_SERVERS).evaluateAttributeExpressions().getValue();
    final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
    final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
    final boolean useTransactions = context.getProperty(USE_TRANSACTIONS).asBoolean();
    final long startTime = System.nanoTime();
    try (final PublisherLease lease = pool.obtainPublisher()) {
        if (useTransactions) {
            lease.beginTransaction();
        }
        // Send each FlowFile to Kafka asynchronously.
        final Iterator<FlowFile> itr = flowFiles.iterator();
        while (itr.hasNext()) {
            final FlowFile flowFile = itr.next();
            if (!isScheduled()) {
                // If stopped, re-queue FlowFile instead of sending it
                if (useTransactions) {
                    session.rollback();
                    lease.rollback();
                    return;
                }
                session.transfer(flowFile);
                itr.remove();
                continue;
            }
            final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions(flowFile).getValue();
            final String messageKeyField = context.getProperty(MESSAGE_KEY_FIELD).evaluateAttributeExpressions(flowFile).getValue();
            try {
                session.read(flowFile, new InputStreamCallback() {

                    @Override
                    public void process(final InputStream rawIn) throws IOException {
                        try (final InputStream in = new BufferedInputStream(rawIn)) {
                            final RecordReader reader = readerFactory.createRecordReader(flowFile, in, getLogger());
                            final RecordSet recordSet = reader.createRecordSet();
                            final RecordSchema schema = writerFactory.getSchema(flowFile.getAttributes(), recordSet.getSchema());
                            lease.publish(flowFile, recordSet, writerFactory, schema, messageKeyField, topic);
                        } catch (final SchemaNotFoundException | MalformedRecordException e) {
                            throw new ProcessException(e);
                        }
                    }
                });
            } catch (final Exception e) {
                // Record the failure on the lease; once lease.complete() reports a failure below, all FlowFiles are routed to REL_FAILURE.
                lease.fail(flowFile, e);
                continue;
            }
        }
        // Complete the send
        final PublishResult publishResult = lease.complete();
        if (publishResult.isFailure()) {
            getLogger().info("Failed to send FlowFile to kafka; transferring to failure");
            session.transfer(flowFiles, REL_FAILURE);
            return;
        }
        // Transfer any successful FlowFiles.
        final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
        for (FlowFile success : flowFiles) {
            final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions(success).getValue();
            final int msgCount = publishResult.getSuccessfulMessageCount(success);
            success = session.putAttribute(success, MSG_COUNT, String.valueOf(msgCount));
            session.adjustCounter("Messages Sent", msgCount, true);
            final String transitUri = KafkaProcessorUtils.buildTransitURI(securityProtocol, bootstrapServers, topic);
            session.getProvenanceReporter().send(success, transitUri, "Sent " + msgCount + " messages", transmissionMillis);
            session.transfer(success, REL_SUCCESS);
        }
    }
}
Also used: FlowFile (org.apache.nifi.flowfile.FlowFile), BufferedInputStream (java.io.BufferedInputStream), InputStream (java.io.InputStream), RecordReader (org.apache.nifi.serialization.RecordReader), IOException (java.io.IOException), SchemaNotFoundException (org.apache.nifi.schema.access.SchemaNotFoundException), ProcessException (org.apache.nifi.processor.exception.ProcessException), MalformedRecordException (org.apache.nifi.serialization.MalformedRecordException), RecordReaderFactory (org.apache.nifi.serialization.RecordReaderFactory), RecordSetWriterFactory (org.apache.nifi.serialization.RecordSetWriterFactory), InputStreamCallback (org.apache.nifi.processor.io.InputStreamCallback), RecordSet (org.apache.nifi.serialization.record.RecordSet), RecordSchema (org.apache.nifi.serialization.record.RecordSchema)
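
The RecordSet handling in this processor reduces to a small reader-side pattern once the Kafka plumbing is stripped away: obtain a RecordReader from the configured RecordReaderFactory, call createRecordSet(), and hand the lazy, forward-only set to the lease. Below is a minimal sketch of just that reading step, using only the reader API shown above; the helper name and its parameters are illustrative, not part of the processor.

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.schema.access.SchemaNotFoundException;
import org.apache.nifi.serialization.MalformedRecordException;
import org.apache.nifi.serialization.RecordReader;
import org.apache.nifi.serialization.RecordReaderFactory;
import org.apache.nifi.serialization.record.RecordSet;

final class RecordSetReadSketch {

    // Hypothetical helper: reads every record from the stream and returns the count.
    // Mirrors the InputStreamCallback body in Example 1, minus the Kafka publish.
    static long countRecords(final Map<String, String> attributes, final InputStream rawIn,
            final RecordReaderFactory readerFactory, final ComponentLog logger)
            throws IOException, SchemaNotFoundException, MalformedRecordException {

        try (final InputStream in = new BufferedInputStream(rawIn);
             final RecordReader reader = readerFactory.createRecordReader(attributes, in, logger)) {

            // createRecordSet() does not buffer anything; records are pulled from
            // the reader one at a time as next() is called.
            final RecordSet recordSet = reader.createRecordSet();

            long count = 0;
            while (recordSet.next() != null) {
                count++;
            }
            return count;
        }
    }
}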

Example 2 with RecordSet

Use of org.apache.nifi.serialization.record.RecordSet in project nifi by apache.

The class TestPublisherLease, method testRecordsSentToRecordWriterAndThenToProducer.

@Test
public void testRecordsSentToRecordWriterAndThenToProducer() throws IOException, SchemaNotFoundException, MalformedRecordException {
    final PublisherLease lease = new PublisherLease(producer, 1024 * 1024, 10L, logger, true, null, StandardCharsets.UTF_8);
    final FlowFile flowFile = new MockFlowFile(1L);
    final byte[] exampleInput = "101, John Doe, 48\n102, Jane Doe, 47".getBytes(StandardCharsets.UTF_8);
    final MockRecordParser readerService = new MockRecordParser();
    readerService.addSchemaField("person_id", RecordFieldType.LONG);
    readerService.addSchemaField("name", RecordFieldType.STRING);
    readerService.addSchemaField("age", RecordFieldType.INT);
    final RecordReader reader = readerService.createRecordReader(Collections.emptyMap(), new ByteArrayInputStream(exampleInput), logger);
    final RecordSet recordSet = reader.createRecordSet();
    final RecordSchema schema = reader.getSchema();
    final String topic = "unit-test";
    final String keyField = "person_id";
    final RecordSetWriterFactory writerFactory = Mockito.mock(RecordSetWriterFactory.class);
    final RecordSetWriter writer = Mockito.mock(RecordSetWriter.class);
    Mockito.when(writer.write(Mockito.any(Record.class))).thenReturn(WriteResult.of(1, Collections.emptyMap()));
    Mockito.when(writerFactory.createWriter(eq(logger), eq(schema), any())).thenReturn(writer);
    lease.publish(flowFile, recordSet, writerFactory, schema, keyField, topic);
    verify(writerFactory, times(2)).createWriter(eq(logger), eq(schema), any());
    verify(writer, times(2)).write(any(Record.class));
    verify(producer, times(2)).send(any(), any());
}
Also used: FlowFile (org.apache.nifi.flowfile.FlowFile), MockFlowFile (org.apache.nifi.util.MockFlowFile), RecordReader (org.apache.nifi.serialization.RecordReader), RecordSetWriter (org.apache.nifi.serialization.RecordSetWriter), RecordSetWriterFactory (org.apache.nifi.serialization.RecordSetWriterFactory), ByteArrayInputStream (java.io.ByteArrayInputStream), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Record (org.apache.nifi.serialization.record.Record), RecordSet (org.apache.nifi.serialization.record.RecordSet), RecordSchema (org.apache.nifi.serialization.record.RecordSchema), MockRecordParser (org.apache.nifi.processors.kafka.pubsub.util.MockRecordParser), Test (org.junit.Test)
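
The verification above (createWriter, write, and send each invoked twice for two records) reflects the per-record serialization strategy of the lease: each Record pulled from the RecordSet is written through its own short-lived RecordSetWriter into a byte buffer that becomes one Kafka message. The loop below is a rough sketch of that idea, not the actual PublisherLease implementation; the MessageSender interface is a made-up stand-in for the KafkaProducer interaction.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.schema.access.SchemaNotFoundException;
import org.apache.nifi.serialization.RecordSetWriter;
import org.apache.nifi.serialization.RecordSetWriterFactory;
import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.serialization.record.RecordSet;

final class PerRecordSerializationSketch {

    // Made-up stand-in for the KafkaProducer call made by the real lease.
    interface MessageSender {
        void send(String topic, byte[] key, byte[] value);
    }

    // Serializes each record individually, matching the expectations verified
    // above: one createWriter(), one write(), and one send() per record.
    static int publishEachRecord(final RecordSet recordSet, final RecordSetWriterFactory writerFactory,
            final RecordSchema schema, final String messageKeyField, final String topic,
            final MessageSender sender, final ComponentLog logger)
            throws IOException, SchemaNotFoundException {

        final ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
        int recordCount = 0;

        Record record;
        while ((record = recordSet.next()) != null) {
            recordCount++;
            baos.reset();

            // A fresh writer per record keeps each Kafka message self-contained.
            try (final RecordSetWriter writer = writerFactory.createWriter(logger, schema, baos)) {
                writer.write(record);
                writer.flush();
            }

            final String key = messageKeyField == null ? null : record.getAsString(messageKeyField);
            final byte[] messageKey = key == null ? null : key.getBytes(StandardCharsets.UTF_8);
            sender.send(topic, messageKey, baos.toByteArray());
        }
        return recordCount;
    }
}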

Example 3 with RecordSet

Use of org.apache.nifi.serialization.record.RecordSet in project nifi by apache.

The class MockRecordWriter, method createWriter.

@Override
public RecordSetWriter createWriter(final ComponentLog logger, final RecordSchema schema, final OutputStream out) {
    return new RecordSetWriter() {

        @Override
        public void flush() throws IOException {
            out.flush();
        }

        @Override
        public WriteResult write(final RecordSet rs) throws IOException {
            out.write(header.getBytes());
            out.write("\n".getBytes());
            int recordCount = 0;
            final int numCols = rs.getSchema().getFieldCount();
            Record record = null;
            while ((record = rs.next()) != null) {
                if (++recordCount > failAfterN && failAfterN > -1) {
                    throw new IOException("Unit Test intentionally throwing IOException after " + failAfterN + " records were written");
                }
                int i = 0;
                for (final String fieldName : record.getSchema().getFieldNames()) {
                    final String val = record.getAsString(fieldName);
                    if (quoteValues) {
                        out.write("\"".getBytes());
                        if (val != null) {
                            out.write(val.getBytes());
                        }
                        out.write("\"".getBytes());
                    } else if (val != null) {
                        out.write(val.getBytes());
                    }
                    if (i++ < numCols - 1) {
                        out.write(",".getBytes());
                    }
                }
                out.write("\n".getBytes());
            }
            return WriteResult.of(recordCount, Collections.emptyMap());
        }

        @Override
        public String getMimeType() {
            return "text/plain";
        }

        @Override
        public WriteResult write(Record record) throws IOException {
            return WriteResult.of(1, Collections.emptyMap());
        }

        @Override
        public void close() throws IOException {
        }

        @Override
        public void beginRecordSet() throws IOException {
        }

        @Override
        public WriteResult finishRecordSet() throws IOException {
            return null;
        }
    };
}
Also used: Record (org.apache.nifi.serialization.record.Record), RecordSet (org.apache.nifi.serialization.record.RecordSet), IOException (java.io.IOException), RecordSetWriter (org.apache.nifi.serialization.RecordSetWriter)
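
The write(RecordSet) method above shows the basic RecordSet contract: next() yields records until it returns null, and each Record exposes its fields through its schema. The following is a standalone sketch of that contract using an in-memory set, assuming the RecordSet.of(...) convenience factory and the MapRecord/SimpleRecordSchema classes from the record API; the field names mirror the test data in Example 2.

import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

import org.apache.nifi.serialization.SimpleRecordSchema;
import org.apache.nifi.serialization.record.MapRecord;
import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordField;
import org.apache.nifi.serialization.record.RecordFieldType;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.serialization.record.RecordSet;

final class InMemoryRecordSetSketch {

    public static void main(final String[] args) throws IOException {
        // Schema with the same shape as the test data in Example 2.
        final List<RecordField> fields = Arrays.asList(
            new RecordField("person_id", RecordFieldType.LONG.getDataType()),
            new RecordField("name", RecordFieldType.STRING.getDataType()),
            new RecordField("age", RecordFieldType.INT.getDataType()));
        final RecordSchema schema = new SimpleRecordSchema(fields);

        final Map<String, Object> values = new LinkedHashMap<>();
        values.put("person_id", 101L);
        values.put("name", "John Doe");
        values.put("age", 48);
        final Record record = new MapRecord(schema, values);

        // RecordSet.of wraps a fixed array of records in the forward-only interface.
        final RecordSet recordSet = RecordSet.of(schema, record);

        // Same consumption idiom as the write(RecordSet) loop above.
        Record next;
        while ((next = recordSet.next()) != null) {
            for (final String fieldName : next.getSchema().getFieldNames()) {
                System.out.println(fieldName + " = " + next.getAsString(fieldName));
            }
        }
    }
}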

Example 4 with RecordSet

Use of org.apache.nifi.serialization.record.RecordSet in project nifi by apache.

The class SplitRecord, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile original = session.get();
    if (original == null) {
        return;
    }
    final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
    final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
    final int maxRecords = context.getProperty(RECORDS_PER_SPLIT).evaluateAttributeExpressions(original).asInteger();
    final List<FlowFile> splits = new ArrayList<>();
    final Map<String, String> originalAttributes = original.getAttributes();
    try {
        session.read(original, new InputStreamCallback() {

            @Override
            public void process(final InputStream in) throws IOException {
                try (final RecordReader reader = readerFactory.createRecordReader(originalAttributes, in, getLogger())) {
                    final RecordSchema schema = writerFactory.getSchema(originalAttributes, reader.getSchema());
                    final RecordSet recordSet = reader.createRecordSet();
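                    // Wrap the RecordSet so the loop can peek ahead and avoid creating an empty split.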
                    final PushBackRecordSet pushbackSet = new PushBackRecordSet(recordSet);
                    while (pushbackSet.isAnotherRecord()) {
                        FlowFile split = session.create(original);
                        try {
                            final Map<String, String> attributes = new HashMap<>();
                            final WriteResult writeResult;
                            try (final OutputStream out = session.write(split);
                                final RecordSetWriter writer = writerFactory.createWriter(getLogger(), schema, out)) {
                                if (maxRecords == 1) {
                                    final Record record = pushbackSet.next();
                                    writeResult = writer.write(record);
                                } else {
                                    final RecordSet limitedSet = pushbackSet.limit(maxRecords);
                                    writeResult = writer.write(limitedSet);
                                }
                                attributes.put("record.count", String.valueOf(writeResult.getRecordCount()));
                                attributes.put(CoreAttributes.MIME_TYPE.key(), writer.getMimeType());
                                attributes.putAll(writeResult.getAttributes());
                                session.adjustCounter("Records Split", writeResult.getRecordCount(), false);
                            }
                            split = session.putAllAttributes(split, attributes);
                        } finally {
                            splits.add(split);
                        }
                    }
                } catch (final SchemaNotFoundException | MalformedRecordException e) {
                    throw new ProcessException("Failed to parse incoming data", e);
                }
            }
        });
    } catch (final ProcessException pe) {
        getLogger().error("Failed to split {}", new Object[] { original, pe });
        session.remove(splits);
        session.transfer(original, REL_FAILURE);
        return;
    }
    session.transfer(original, REL_ORIGINAL);
    session.transfer(splits, REL_SPLITS);
    getLogger().info("Successfully split {} into {} FlowFiles, each containing up to {} records", new Object[] { original, splits.size(), maxRecords });
}
Also used: FlowFile (org.apache.nifi.flowfile.FlowFile), InputStream (java.io.InputStream), RecordReader (org.apache.nifi.serialization.RecordReader), OutputStream (java.io.OutputStream), ArrayList (java.util.ArrayList), IOException (java.io.IOException), RecordSetWriter (org.apache.nifi.serialization.RecordSetWriter), RecordReaderFactory (org.apache.nifi.serialization.RecordReaderFactory), ProcessException (org.apache.nifi.processor.exception.ProcessException), WriteResult (org.apache.nifi.serialization.WriteResult), RecordSetWriterFactory (org.apache.nifi.serialization.RecordSetWriterFactory), InputStreamCallback (org.apache.nifi.processor.io.InputStreamCallback), Record (org.apache.nifi.serialization.record.Record), PushBackRecordSet (org.apache.nifi.serialization.record.PushBackRecordSet), RecordSet (org.apache.nifi.serialization.record.RecordSet), RecordSchema (org.apache.nifi.serialization.record.RecordSchema), HashMap (java.util.HashMap), Map (java.util.Map)
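
The splitting logic hinges on PushBackRecordSet: isAnotherRecord() peeks at the next record (pushing it back) so no empty split is created, and limit(n) exposes a view that ends after at most n records while advancing the underlying set. Below is a minimal sketch of that chunking loop in isolation; the ChunkHandler callback is a hypothetical stand-in for the per-split writer logic above.

import java.io.IOException;

import org.apache.nifi.serialization.record.PushBackRecordSet;
import org.apache.nifi.serialization.record.Record;
import org.apache.nifi.serialization.record.RecordSet;

final class RecordSetChunkingSketch {

    // Hypothetical callback standing in for "write this chunk to a new FlowFile".
    interface ChunkHandler {
        void onRecord(Record record) throws IOException;

        void onChunkComplete(int recordsInChunk) throws IOException;
    }

    // Drains the RecordSet in chunks of at most maxRecords, the same way
    // SplitRecord drives its per-split writers.
    static void splitIntoChunks(final RecordSet recordSet, final int maxRecords,
            final ChunkHandler handler) throws IOException {
        final PushBackRecordSet pushbackSet = new PushBackRecordSet(recordSet);

        while (pushbackSet.isAnotherRecord()) {
            // limit() returns a view over the same underlying set that ends
            // after maxRecords records; consuming it advances the parent set.
            final RecordSet chunk = pushbackSet.limit(maxRecords);

            int count = 0;
            Record record;
            while ((record = chunk.next()) != null) {
                handler.onRecord(record);
                count++;
            }
            handler.onChunkComplete(count);
        }
    }
}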

Example 5 with RecordSet

Use of org.apache.nifi.serialization.record.RecordSet in project nifi by apache.

The class GetSolr, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    final AtomicBoolean continuePaging = new AtomicBoolean(true);
    final SolrQuery solrQuery = new SolrQuery();
    try {
        if (id_field == null) {
            id_field = getFieldNameOfUniqueKey();
        }
        final String dateField = context.getProperty(DATE_FIELD).getValue();
        final Map<String, String> stateMap = new HashMap<String, String>();
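        // Restore the paging state (cursor mark and last date value seen) from cluster-scoped state.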
        stateMap.putAll(context.getStateManager().getState(Scope.CLUSTER).toMap());
        solrQuery.setQuery("*:*");
        final String query = context.getProperty(SOLR_QUERY).getValue();
        if (!StringUtils.isBlank(query) && !query.equals("*:*")) {
            solrQuery.addFilterQuery(query);
        }
        final StringBuilder automatedFilterQuery = (new StringBuilder()).append(dateField).append(":[").append(stateMap.get(STATE_MANAGER_FILTER)).append(" TO *]");
        solrQuery.addFilterQuery(automatedFilterQuery.toString());
        final List<String> fieldList = new ArrayList<String>();
        final String returnFields = context.getProperty(RETURN_FIELDS).getValue();
        if (!StringUtils.isBlank(returnFields)) {
            fieldList.addAll(Arrays.asList(returnFields.trim().split("[,]")));
            if (!fieldList.contains(dateField)) {
                fieldList.add(dateField);
                dateFieldNotInSpecifiedFieldsList.set(true);
            }
            for (String returnField : fieldList) {
                solrQuery.addField(returnField.trim());
            }
        }
        solrQuery.setParam(CursorMarkParams.CURSOR_MARK_PARAM, stateMap.get(STATE_MANAGER_CURSOR_MARK));
        solrQuery.setRows(context.getProperty(BATCH_SIZE).asInteger());
        final StringBuilder sortClause = (new StringBuilder()).append(dateField).append(" asc,").append(id_field).append(" asc");
        solrQuery.setParam("sort", sortClause.toString());
        while (continuePaging.get()) {
            final QueryRequest req = new QueryRequest(solrQuery);
            if (isBasicAuthEnabled()) {
                req.setBasicAuthCredentials(getUsername(), getPassword());
            }
            logger.debug(solrQuery.toQueryString());
            final QueryResponse response = req.process(getSolrClient());
            final SolrDocumentList documentList = response.getResults();
            if (response.getResults().size() > 0) {
                final SolrDocument lastSolrDocument = documentList.get(response.getResults().size() - 1);
                final String latestDateValue = df.format(lastSolrDocument.get(dateField));
                final String newCursorMark = response.getNextCursorMark();
                solrQuery.setParam(CursorMarkParams.CURSOR_MARK_PARAM, newCursorMark);
                stateMap.put(STATE_MANAGER_CURSOR_MARK, newCursorMark);
                stateMap.put(STATE_MANAGER_FILTER, latestDateValue);
                FlowFile flowFile = session.create();
                flowFile = session.putAttribute(flowFile, "solrQuery", solrQuery.toString());
                if (context.getProperty(RETURN_TYPE).getValue().equals(MODE_XML.getValue())) {
                    if (dateFieldNotInSpecifiedFieldsList.get()) {
                        for (SolrDocument doc : response.getResults()) {
                            doc.removeFields(dateField);
                        }
                    }
                    flowFile = session.write(flowFile, SolrUtils.getOutputStreamCallbackToTransformSolrResponseToXml(response));
                    flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), "application/xml");
                } else {
                    final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
                    final RecordSchema schema = writerFactory.getSchema(null, null);
                    final RecordSet recordSet = SolrUtils.solrDocumentsToRecordSet(response.getResults(), schema);
                    final StringBuffer mimeType = new StringBuffer();
                    flowFile = session.write(flowFile, new OutputStreamCallback() {

                        @Override
                        public void process(final OutputStream out) throws IOException {
                            try {
                                final RecordSetWriter writer = writerFactory.createWriter(getLogger(), schema, out);
                                writer.write(recordSet);
                                writer.flush();
                                mimeType.append(writer.getMimeType());
                            } catch (SchemaNotFoundException e) {
                                throw new ProcessException("Could not parse Solr response", e);
                            }
                        }
                    });
                    flowFile = session.putAttribute(flowFile, CoreAttributes.MIME_TYPE.key(), mimeType.toString());
                }
                session.transfer(flowFile, REL_SUCCESS);
            }
            continuePaging.set(response.getResults().size() == Integer.parseInt(context.getProperty(BATCH_SIZE).getValue()));
        }
        context.getStateManager().setState(stateMap, Scope.CLUSTER);
    } catch (SolrServerException | SchemaNotFoundException | IOException e) {
        context.yield();
        session.rollback();
        logger.error("Failed to execute query {} due to {}", new Object[] { solrQuery.toString(), e }, e);
        throw new ProcessException(e);
    } catch (final Throwable t) {
        context.yield();
        session.rollback();
        logger.error("Failed to execute query {} due to {}", new Object[] { solrQuery.toString(), t }, t);
        throw t;
    }
}
Also used: HashMap (java.util.HashMap), OutputStream (java.io.OutputStream), SolrServerException (org.apache.solr.client.solrj.SolrServerException), ArrayList (java.util.ArrayList), RecordSetWriter (org.apache.nifi.serialization.RecordSetWriter), SolrQuery (org.apache.solr.client.solrj.SolrQuery), SolrDocument (org.apache.solr.common.SolrDocument), RecordSetWriterFactory (org.apache.nifi.serialization.RecordSetWriterFactory), RecordSet (org.apache.nifi.serialization.record.RecordSet), OutputStreamCallback (org.apache.nifi.processor.io.OutputStreamCallback), RecordSchema (org.apache.nifi.serialization.record.RecordSchema), FlowFile (org.apache.nifi.flowfile.FlowFile), QueryRequest (org.apache.solr.client.solrj.request.QueryRequest), SolrDocumentList (org.apache.solr.common.SolrDocumentList), IOException (java.io.IOException), ComponentLog (org.apache.nifi.logging.ComponentLog), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), ProcessException (org.apache.nifi.processor.exception.ProcessException), QueryResponse (org.apache.solr.client.solrj.response.QueryResponse), SchemaNotFoundException (org.apache.nifi.schema.access.SchemaNotFoundException)
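
The else-branch above follows the general writer-side pattern for record-oriented processors: ask the RecordSetWriterFactory for a writer bound to the FlowFile's OutputStream, write the whole RecordSet, then use the WriteResult and getMimeType() to populate attributes. Below is a compact sketch of just that serialization step, detached from Solr and the ProcessSession; the helper name and parameters are illustrative.

import java.io.IOException;
import java.io.OutputStream;

import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.schema.access.SchemaNotFoundException;
import org.apache.nifi.serialization.RecordSetWriter;
import org.apache.nifi.serialization.RecordSetWriterFactory;
import org.apache.nifi.serialization.WriteResult;
import org.apache.nifi.serialization.record.RecordSchema;
import org.apache.nifi.serialization.record.RecordSet;

final class RecordSetSerializationSketch {

    // Writes the entire RecordSet to the stream and returns the record count.
    // A caller would typically store the count in a record.count attribute and
    // writer.getMimeType() in mime.type, as GetSolr and SplitRecord do above.
    static int writeRecordSet(final RecordSet recordSet, final RecordSetWriterFactory writerFactory,
            final RecordSchema schema, final OutputStream out, final ComponentLog logger)
            throws IOException, SchemaNotFoundException {

        try (final RecordSetWriter writer = writerFactory.createWriter(logger, schema, out)) {
            final WriteResult result = writer.write(recordSet);
            writer.flush();
            return result.getRecordCount();
        }
    }
}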

Aggregations

RecordSet (org.apache.nifi.serialization.record.RecordSet): 14 usages
RecordSchema (org.apache.nifi.serialization.record.RecordSchema): 10 usages
IOException (java.io.IOException): 9 usages
FlowFile (org.apache.nifi.flowfile.FlowFile): 9 usages
HashMap (java.util.HashMap): 8 usages
RecordReader (org.apache.nifi.serialization.RecordReader): 8 usages
Record (org.apache.nifi.serialization.record.Record): 8 usages
InputStream (java.io.InputStream): 7 usages
ProcessException (org.apache.nifi.processor.exception.ProcessException): 7 usages
RecordReaderFactory (org.apache.nifi.serialization.RecordReaderFactory): 7 usages
ArrayList (java.util.ArrayList): 6 usages
RecordSetWriterFactory (org.apache.nifi.serialization.RecordSetWriterFactory): 6 usages
Test (org.junit.Test): 6 usages
BufferedInputStream (java.io.BufferedInputStream): 5 usages
ComponentLog (org.apache.nifi.logging.ComponentLog): 5 usages
SchemaNotFoundException (org.apache.nifi.schema.access.SchemaNotFoundException): 5 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 4 usages
LinkedHashMap (java.util.LinkedHashMap): 4 usages
InputStreamCallback (org.apache.nifi.processor.io.InputStreamCallback): 4 usages
RecordSetWriter (org.apache.nifi.serialization.RecordSetWriter): 4 usages