Search in sources:

Example 46 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project apache-kafka-on-k8s by banzaicloud.

The class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnFlushIfASendFailedWithContinueExceptionHandler.

@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnFlushIfASendFailedWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        // Stub out send(...) so every record "fails" by completing the callback with an exception.
        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new AlwaysContinueProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
}
Also used : AlwaysContinueProductionExceptionHandler(org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler) MockProducer(org.apache.kafka.clients.producer.MockProducer) Callback(org.apache.kafka.clients.producer.Callback) DefaultPartitioner(org.apache.kafka.clients.producer.internals.DefaultPartitioner) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Future(java.util.concurrent.Future) KafkaException(org.apache.kafka.common.KafkaException) StreamsException(org.apache.kafka.streams.errors.StreamsException) Test(org.junit.Test)
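The handler in this test unconditionally continues after a send failure. As a minimal sketch (not taken from the project above), the same behavior could be wired into a real Streams application through a custom ProductionExceptionHandler; the class name ContinueOnSendErrorHandler is hypothetical:

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

// Hypothetical handler mirroring AlwaysContinueProductionExceptionHandler's behavior.
public class ContinueOnSendErrorHandler implements ProductionExceptionHandler {

    @Override
    public void configure(final Map<String, ?> configs) {
        // Nothing to configure in this sketch.
    }

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record, final Exception exception) {
        // Swallow the failure and keep the task running.
        return ProductionExceptionHandlerResponse.CONTINUE;
    }
}

It would then be registered via props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG, ContinueOnSendErrorHandler.class).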

Example 47 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project incubator-rya by apache.

The class KafkaLoadStatements, method fromFile.

@Override
public void fromFile(final Path statementsPath, final String visibilities) throws RyaStreamsException {
    requireNonNull(statementsPath);
    requireNonNull(visibilities);
    if (!statementsPath.toFile().exists()) {
        throw new RyaStreamsException("Could not load statements at path '" + statementsPath + "' because that file does not exist. Make sure you've entered the correct path.");
    }
    // Create an RDF Parser whose format is derived from the statementsPath's file extension.
    final RDFFormat format = RDFFormat.forFileName(statementsPath.getFileName().toString());
    final RDFParser parser = Rio.createParser(format);
    // Set a handler that writes the statements to the specified kafka topic.
    parser.setRDFHandler(new RDFHandlerBase() {

        @Override
        public void startRDF() throws RDFHandlerException {
            log.trace("Starting loading statements.");
        }

        @Override
        public void handleStatement(final Statement stmnt) throws RDFHandlerException {
            final VisibilityStatement visiStatement = new VisibilityStatement(stmnt, visibilities);
            producer.send(new ProducerRecord<>(topic, visiStatement));
        }

        @Override
        public void endRDF() throws RDFHandlerException {
            producer.flush();
            log.trace("Done.");
        }
    });
    // Do the parse and load.
    try {
        parser.parse(Files.newInputStream(statementsPath), "");
    } catch (RDFParseException | RDFHandlerException | IOException e) {
        throw new RyaStreamsException("Could not load the RDF file's Statements into Rya Streams.", e);
    }
}
Also used : RDFHandlerException(org.openrdf.rio.RDFHandlerException) RyaStreamsException(org.apache.rya.streams.api.exception.RyaStreamsException) Statement(org.openrdf.model.Statement) VisibilityStatement(org.apache.rya.api.model.VisibilityStatement) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) RDFHandlerBase(org.openrdf.rio.helpers.RDFHandlerBase) IOException(java.io.IOException) RDFParser(org.openrdf.rio.RDFParser) VisibilityStatement(org.apache.rya.api.model.VisibilityStatement) RDFFormat(org.openrdf.rio.RDFFormat) RDFParseException(org.openrdf.rio.RDFParseException)
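The snippet assumes a producer field that can serialize VisibilityStatement values. A minimal sketch of how such a producer might be constructed; the value serializer class named here is an assumption for illustration, not necessarily the one the Rya project ships:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.rya.api.model.VisibilityStatement;

final Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
// Hypothetical serializer for VisibilityStatement values; substitute the project's actual implementation.
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.rya.streams.kafka.serialization.VisibilityStatementSerializer");
final KafkaProducer<String, VisibilityStatement> producer = new KafkaProducer<>(props);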

Example 48 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project incubator-rya by apache.

The class KafkaRyaSubGraphExporter, method export.

/**
 * Exports the {@link RyaSubGraph} to the Kafka topic whose name matches the result of {@link RyaSubGraph#getId()}.
 * @param constructID - rowID of the result that is exported. Used for logging purposes.
 * @param subGraph - the RyaSubGraph exported to Kafka.
 */
@Override
public void export(final String constructID, final RyaSubGraph subGraph) throws ResultExportException {
    checkNotNull(constructID);
    checkNotNull(subGraph);
    try {
        // Send the result to the topic whose name matches the PCJ ID.
        final ProducerRecord<String, RyaSubGraph> rec = new ProducerRecord<>(subGraph.getId(), subGraph);
        final Future<RecordMetadata> future = producer.send(rec);
        // Don't let the export return until the result has been written to the topic. Otherwise we may lose results.
        future.get();
        log.debug("Producer successfully sent record with id: {} and statements: {}", constructID, subGraph.getStatements());
    } catch (final Throwable e) {
        throw new ResultExportException("A result could not be exported to Kafka.", e);
    }
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) RyaSubGraph(org.apache.rya.api.domain.RyaSubGraph) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) ResultExportException(org.apache.rya.indexing.pcj.fluo.app.export.IncrementalBindingSetExporter.ResultExportException)
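The unbounded future.get() above blocks indefinitely if the broker never acknowledges the write. A small variation (a sketch, not from the Rya code) bounds the wait so a stuck export fails fast:

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.RecordMetadata;

// Wait at most 30 seconds for the broker's acknowledgement before giving up.
final Future<RecordMetadata> future = producer.send(rec);
final RecordMetadata metadata = future.get(30, TimeUnit.SECONDS);

A timeout surfaces as a TimeoutException, which the surrounding catch (final Throwable e) would already convert into a ResultExportException.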

Example 49 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project auratrainingproject by liuqinghua666.

The class JavaKafkaEventProducer, method main.

public static void main(String[] args) throws Exception {
    String dataPath = "D:\\bigdata\\source\\auratrainingproject\\spark\\data\\IJCAI17_dataset";
    String topic = KafkaRedisConfig.KAFKA_USER_PAY_TOPIC;
    Properties props = getConfig();
    Producer<String, String> producer = new KafkaProducer<String, String>(props);
    // Prepare the file path, allowing an override from the command line.
    if (args.length > 0) {
        dataPath = args[0];
    }
    String fileName = JavaSQLAliPayAnalyzer.getOSPath(dataPath + "/user_pay.txt");
    // Use a RateLimiter for flow control.
    int maxRatePerSecond = 10;
    RateLimiter limiter = RateLimiter.create(maxRatePerSecond);
    File file = new File(fileName);
    BufferedReader reader = null;
    try {
        System.out.println("Reading the file line by line, one whole line at a time:");
        reader = new BufferedReader(new FileReader(file));
        String tempString = null;
        int line = 1;
        // Read one line at a time; a null read marks the end of the file.
        while ((tempString = reader.readLine()) != null) {
            String[] row = tempString.split(",");
            if (row.length >= 3) {
                // Block until a send permit is available (at most maxRatePerSecond messages per second).
                limiter.acquire();
                // user_id
                String key = "" + row[0];
                // shop_id + "," + time_stamp
                String value = "" + row[1] + "," + row[2];
                // Push the record to Kafka.
                producer.send(new ProducerRecord<String, String>(topic, key, value));
                System.out.println("Message[" + line + "] sent: " + key + "=>" + value);
                line++;
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e1) {
            }
        }
        producer.close();
    }
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Properties(java.util.Properties) RateLimiter(org.spark_project.guava.util.concurrent.RateLimiter)
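The throttling pattern above generalizes beyond this file loader: Guava's RateLimiter hands out permits at a fixed rate, and acquire() blocks until one is free. A minimal standalone sketch of the same idea:

import org.spark_project.guava.util.concurrent.RateLimiter;

public class RateLimiterSketch {
    public static void main(String[] args) {
        // Allow at most 10 permits per second; acquire() sleeps as needed to honor the rate.
        final RateLimiter limiter = RateLimiter.create(10.0);
        for (int i = 0; i < 100; i++) {
            limiter.acquire(); // blocks roughly 100 ms between permits at 10/sec
            System.out.println("tick " + i);
        }
    }
}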

Example 50 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka-streams-examples by confluentinc.

The class TopArticlesExampleDriver, method produceInputs.

private static void produceInputs(String bootstrapServers, String schemaRegistryUrl) throws IOException {
    final String[] users = { "erica", "bob", "joe", "damian", "tania", "phil", "sam", "lauren", "joseph" };
    final String[] industries = { "engineering", "telco", "finance", "health", "science" };
    final String[] pages = { "index.html", "news.html", "contact.html", "about.html", "stuff.html" };
    final Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, io.confluent.kafka.serializers.KafkaAvroSerializer.class);
    props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl);
    final KafkaProducer<String, GenericRecord> producer = new KafkaProducer<>(props);
    final GenericRecordBuilder pageViewBuilder = new GenericRecordBuilder(loadSchema("pageview.avsc"));
    final Random random = new Random();
    for (String user : users) {
        pageViewBuilder.set("industry", industries[random.nextInt(industries.length)]);
        pageViewBuilder.set("flags", "ARTICLE");
        // For each user generate some page views
        IntStream.range(0, random.nextInt(10)).mapToObj(value -> {
            pageViewBuilder.set("user", user);
            pageViewBuilder.set("page", pages[random.nextInt(pages.length)]);
            return pageViewBuilder.build();
        }).forEach(record -> producer.send(new ProducerRecord<>(TopArticlesLambdaExample.PAGE_VIEWS, null, record)));
    }
    producer.flush();
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) GenericRecordBuilder(org.apache.avro.generic.GenericRecordBuilder) IntStream(java.util.stream.IntStream) GenericRecord(org.apache.avro.generic.GenericRecord) Schema(org.apache.avro.Schema) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Properties(java.util.Properties) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) IOException(java.io.IOException) Random(java.util.Random) AbstractKafkaAvroSerDeConfig(io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) WindowedDeserializer(org.apache.kafka.streams.kstream.internals.WindowedDeserializer) StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) Windowed(org.apache.kafka.streams.kstream.Windowed) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Serdes(org.apache.kafka.common.serialization.Serdes) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Deserializer(org.apache.kafka.common.serialization.Deserializer) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) Collections(java.util.Collections) KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) InputStream(java.io.InputStream)
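loadSchema("pageview.avsc") reads the Avro schema from the classpath. A plausible shape for that schema, inferred purely from the pageViewBuilder.set(...) calls above (the actual file in the confluentinc examples may differ):

import org.apache.avro.Schema;

// Hypothetical reconstruction of pageview.avsc; field names come from the builder calls.
final Schema pageViewSchema = new Schema.Parser().parse(
    "{\"type\": \"record\", \"name\": \"pageview\", \"fields\": ["
  + " {\"name\": \"user\", \"type\": \"string\"},"
  + " {\"name\": \"page\", \"type\": \"string\"},"
  + " {\"name\": \"industry\", \"type\": \"string\"},"
  + " {\"name\": \"flags\", \"type\": [\"null\", \"string\"], \"default\": null}]}");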

Aggregations

ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 193
Test (org.junit.Test): 90
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 57
Properties (java.util.Properties): 50
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 40
ArrayList (java.util.ArrayList): 39
Callback (org.apache.kafka.clients.producer.Callback): 30
Future (java.util.concurrent.Future): 26
TopicPartition (org.apache.kafka.common.TopicPartition): 24
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 21
HashMap (java.util.HashMap): 20
Random (java.util.Random): 19
IOException (java.io.IOException): 16
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 16
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 16
KafkaException (org.apache.kafka.common.KafkaException): 16
List (java.util.List): 13
MockProducer (org.apache.kafka.clients.producer.MockProducer): 13
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 12
StreamsException (org.apache.kafka.streams.errors.StreamsException): 12
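Across these usages, ProducerRecord is built through a handful of public constructor overloads. A quick reference sketch of the most common ones:

import org.apache.kafka.clients.producer.ProducerRecord;

// Value only: the configured partitioner chooses the partition.
final ProducerRecord<String, String> valueOnly = new ProducerRecord<>("topic", "value");
// Key and value: records with the same key land on the same partition.
final ProducerRecord<String, String> keyed = new ProducerRecord<>("topic", "key", "value");
// Explicit partition, key, and value: bypasses the partitioner entirely.
final ProducerRecord<String, String> pinned = new ProducerRecord<>("topic", 0, "key", "value");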