
Example 6 with StringSerializer

use of org.apache.kafka.common.serialization.StringSerializer in project samza by apache.

the class StandaloneIntegrationTestHarness method setUp.

@Override
public void setUp() {
    super.setUp();
    producer = TestUtils.createNewProducer(
        bootstrapServers(),          // bootstrap-server url
        1,                           // acks
        60 * 1000L,                  // maxBlockMs
        1024L * 1024L,               // buffer size
        0,                           // numRetries
        0L,                          // lingerMs
        5 * 1000L,                   // requestTimeout
        SecurityProtocol.PLAINTEXT,
        null,
        Option$.MODULE$.<Properties>apply(new Properties()),
        new StringSerializer(),
        new ByteArraySerializer(),
        Option$.MODULE$.<Properties>apply(new Properties()));
    consumer = TestUtils.createNewConsumer(
        bootstrapServers(),
        "group",                     // groupId
        "earliest",                  // auto-offset-reset
        4096L,                       // per-partition fetch size
        "org.apache.kafka.clients.consumer.RangeAssignor",  // partition assigner
        30000,
        SecurityProtocol.PLAINTEXT,
        Option$.MODULE$.<File>empty(),
        Option$.MODULE$.<Properties>empty(),
        Option$.MODULE$.<Properties>empty());
}
Also used: Properties (java.util.Properties), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), ByteArraySerializer (org.apache.kafka.common.serialization.ByteArraySerializer), File (java.io.File)
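
TestUtils.createNewProducer above is a Scala test helper from the Kafka test jar. For readers who want the equivalent without that helper, a minimal plain-Java sketch using the same serializer pair (StringSerializer for keys, ByteArraySerializer for values) might look like the following; the broker address and topic name are placeholders, not values taken from the harness.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class PlainProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Placeholder address; the harness would supply bootstrapServers() here.
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.ACKS_CONFIG, "1");

        // Serializer instances passed to the constructor make the
        // key.serializer / value.serializer config entries unnecessary.
        try (KafkaProducer<String, byte[]> producer =
                 new KafkaProducer<>(props, new StringSerializer(), new ByteArraySerializer())) {
            producer.send(new ProducerRecord<>("test-topic", "key", "value".getBytes()));
        }
    }
}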

Example 7 with StringSerializer

use of org.apache.kafka.common.serialization.StringSerializer in project samza by apache.

the class StreamApplicationIntegrationTestHarness method setUp.

/**
   * Starts a single kafka broker, and a single embedded zookeeper server in their own threads.
   * Sub-classes should invoke {@link #zkConnect()} and {@link #bootstrapUrl()} to
   * obtain the URLs (and ports) of the started zookeeper and kafka broker.
   */
@Override
public void setUp() {
    super.setUp();
    Properties consumerDeserializerProperties = new Properties();
    consumerDeserializerProperties.setProperty("key.deserializer", DEFAULT_DESERIALIZER);
    consumerDeserializerProperties.setProperty("value.deserializer", DEFAULT_DESERIALIZER);
    producer = TestUtils.createNewProducer(
        bootstrapServers(),          // bootstrap-server url
        1,                           // acks
        60 * 1000L,                  // maxBlockMs
        1024L * 1024L,               // buffer size
        0,                           // numRetries
        0L,                          // lingerMs
        5 * 1000L,                   // requestTimeout
        SecurityProtocol.PLAINTEXT,
        null,
        Option.apply(new Properties()),
        new StringSerializer(),
        new StringSerializer(),
        Option.apply(new Properties()));
    consumer = TestUtils.createNewConsumer(
        bootstrapServers(),
        "group",                     // groupId
        "earliest",                  // auto-offset-reset
        4096L,                       // per-partition fetch size
        "org.apache.kafka.clients.consumer.RangeAssignor",  // partition assigner
        30000,
        SecurityProtocol.PLAINTEXT,
        Option$.MODULE$.<File>empty(),
        Option$.MODULE$.<Properties>empty(),
        Option$.MODULE$.<Properties>apply(consumerDeserializerProperties));
}
Also used: Properties (java.util.Properties), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), File (java.io.File)
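
Note that while the producer in this harness gets StringSerializer instances passed directly, the consumer gets its deserializers by class name through consumerDeserializerProperties. A minimal standalone consumer configured the same way could look roughly like the sketch below; the broker address, group id and topic are placeholders, and poll(Duration) assumes a reasonably recent client.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class PlainConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group");
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Deserializers supplied by class name, mirroring consumerDeserializerProperties above.
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("test-topic"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("%s -> %s%n", record.key(), record.value());
            }
        }
    }
}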

Example 8 with StringSerializer

use of org.apache.kafka.common.serialization.StringSerializer in project spark-dataflow by cloudera.

the class KafkaStreamingTest method init.

@BeforeClass
public static void init() throws IOException {
    EMBEDDED_ZOOKEEPER.startup();
    EMBEDDED_KAFKA_CLUSTER.startup();
    // write to Kafka
    Properties producerProps = new Properties();
    producerProps.putAll(EMBEDDED_KAFKA_CLUSTER.getProps());
    producerProps.put("request.required.acks", 1);
    producerProps.put("bootstrap.servers", EMBEDDED_KAFKA_CLUSTER.getBrokerList());
    Serializer<String> stringSerializer = new StringSerializer();
    try (@SuppressWarnings("unchecked") KafkaProducer<String, String> kafkaProducer = new KafkaProducer(producerProps, stringSerializer, stringSerializer)) {
        for (Map.Entry<String, String> en : KAFKA_MESSAGES.entrySet()) {
            kafkaProducer.send(new ProducerRecord<>(TOPIC, en.getKey(), en.getValue()));
        }
    }
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), Properties (java.util.Properties), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), ImmutableMap (com.google.cloud.dataflow.sdk.repackaged.com.google.common.collect.ImmutableMap), Map (java.util.Map), BeforeClass (org.junit.BeforeClass)
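
Incidentally, the raw-type KafkaProducer construction above is why the snippet needs @SuppressWarnings("unchecked"); with the diamond operator the same pattern (one StringSerializer instance reused for both key and value) is type-safe. A minimal self-contained sketch, with placeholder broker address and topic:

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.Serializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class TypedProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address

        // A single StringSerializer instance can serve as both key and value serializer.
        Serializer<String> stringSerializer = new StringSerializer();
        try (KafkaProducer<String, String> producer =
                 new KafkaProducer<>(props, stringSerializer, stringSerializer)) {
            producer.send(new ProducerRecord<>("test-topic", "key", "value"));
        }
    }
}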

Example 9 with StringSerializer

use of org.apache.kafka.common.serialization.StringSerializer in project kafka by apache.

the class KafkaProducerTest method testMetadataFetchOnStaleMetadata.

@PrepareOnlyThisForTest(Metadata.class)
@Test
public void testMetadataFetchOnStaleMetadata() throws Exception {
    Properties props = new Properties();
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
    Metadata metadata = PowerMock.createNiceMock(Metadata.class);
    MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata);
    String topic = "topic";
    ProducerRecord<String, String> initialRecord = new ProducerRecord<>(topic, "value");
    // Create a record with a partition higher than the initial (outdated) partition range
    ProducerRecord<String, String> extendedRecord = new ProducerRecord<>(topic, 2, null, "value");
    Collection<Node> nodes = Collections.singletonList(new Node(0, "host1", 1000));
    final Cluster emptyCluster = new Cluster(
        null,
        nodes,
        Collections.<PartitionInfo>emptySet(),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
    final Cluster initialCluster = new Cluster(
        "dummy",
        Collections.singletonList(new Node(0, "host1", 1000)),
        Arrays.asList(new PartitionInfo(topic, 0, null, null, null)),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
    final Cluster extendedCluster = new Cluster(
        "dummy",
        Collections.singletonList(new Node(0, "host1", 1000)),
        Arrays.asList(
            new PartitionInfo(topic, 0, null, null, null),
            new PartitionInfo(topic, 1, null, null, null),
            new PartitionInfo(topic, 2, null, null, null)),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
    // Expect exactly one fetch for each attempt to refresh while topic metadata is not available
    final int refreshAttempts = 5;
    EasyMock.expect(metadata.fetch()).andReturn(emptyCluster).times(refreshAttempts - 1);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(initialRecord);
    PowerMock.verify(metadata);
    // Expect exactly one fetch if topic metadata is available and records are still within range
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(initialRecord, null);
    PowerMock.verify(metadata);
    // Expect exactly two fetches if topic metadata is available but metadata response still returns
    // the same partition size (either because metadata are still stale at the broker too or because
    // there weren't any partitions added in the first place).
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    try {
        producer.send(extendedRecord, null);
        fail("Expected KafkaException to be raised");
    } catch (KafkaException e) {
        // expected
    }
    PowerMock.verify(metadata);
    // Expect exactly two fetches if topic metadata is available but outdated for the given record
    PowerMock.reset(metadata);
    EasyMock.expect(metadata.fetch()).andReturn(initialCluster).once();
    EasyMock.expect(metadata.fetch()).andReturn(extendedCluster).once();
    EasyMock.expect(metadata.fetch()).andThrow(new IllegalStateException("Unexpected call to metadata.fetch()")).anyTimes();
    PowerMock.replay(metadata);
    producer.send(extendedRecord, null);
    PowerMock.verify(metadata);
}
Also used: Node (org.apache.kafka.common.Node), Metadata (org.apache.kafka.clients.Metadata), Cluster (org.apache.kafka.common.Cluster), Properties (java.util.Properties), KafkaException (org.apache.kafka.common.KafkaException), PartitionInfo (org.apache.kafka.common.PartitionInfo), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), PrepareOnlyThisForTest (org.powermock.core.classloader.annotations.PrepareOnlyThisForTest), Test (org.junit.Test)
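
The extendedRecord above uses the ProducerRecord(topic, partition, key, value) constructor to target partition 2 explicitly, which is what forces the extra metadata fetch in the test. Outside a mocked test, the same explicit-partition send looks roughly like this sketch; the broker address and topic are placeholders.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ExplicitPartitionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

        try (KafkaProducer<String, String> producer =
                 new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            // Explicit partition and null key: the partitioner is bypassed, and the test
            // above expects a KafkaException if partition 2 never appears in refreshed metadata.
            producer.send(new ProducerRecord<>("topic", 2, null, "value"));
        }
    }
}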

Example 10 with StringSerializer

use of org.apache.kafka.common.serialization.StringSerializer in project druid by druid-io.

the class ITKafkaIndexingServiceTest method testKafka.

@Test
public void testKafka() {
    LOG.info("Starting test: ITKafkaIndexingServiceTest");
    // create topic
    try {
        int sessionTimeoutMs = 10000;
        int connectionTimeoutMs = 10000;
        String zkHosts = config.getZookeeperHosts();
        zkClient = new ZkClient(zkHosts, sessionTimeoutMs, connectionTimeoutMs, ZKStringSerializer$.MODULE$);
        int numPartitions = 4;
        int replicationFactor = 1;
        Properties topicConfig = new Properties();
        AdminUtils.createTopic(zkClient, TOPIC_NAME, numPartitions, replicationFactor, topicConfig);
    } catch (TopicExistsException e) {
        // it's ok if the topic already exists
    } catch (Exception e) {
        throw new ISE(e, "could not create kafka topic");
    }
    String spec;
    try {
        LOG.info("supervisorSpec name: [%s]", INDEXER_FILE);
        spec = getTaskAsString(INDEXER_FILE)
            .replaceAll("%%DATASOURCE%%", DATASOURCE)
            .replaceAll("%%TOPIC%%", TOPIC_NAME)
            .replaceAll("%%KAFKA_BROKER%%", config.getKafkaHost());
        LOG.info("supervisorSpec: [%s]\n", spec);
    } catch (Exception e) {
        LOG.error("could not read file [%s]", INDEXER_FILE);
        throw new ISE(e, "could not read file [%s]", INDEXER_FILE);
    }
    // start supervisor
    supervisorId = indexer.submitSupervisor(spec);
    LOG.info("Submitted supervisor");
    // set up kafka producer
    Properties properties = new Properties();
    properties.put("bootstrap.servers", config.getKafkaHost());
    LOG.info("Kafka bootstrap.servers: [%s]", config.getKafkaHost());
    properties.put("acks", "all");
    properties.put("retries", "3");
    KafkaProducer<String, String> producer = new KafkaProducer<>(properties, new StringSerializer(), new StringSerializer());
    DateTimeZone zone = DateTimeZone.forID("UTC");
    // format for putting into events
    DateTimeFormatter event_fmt = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'");
    // timestamp to put on events
    DateTime dt = new DateTime(zone);
    // timestamp of 1st event
    dtFirst = dt;
    // timestamp of last event
    dtLast = dt;
    // these are used to compute the expected aggregations
    int added = 0;
    int num_events = 0;
    // send data to kafka
    while (num_events < NUM_EVENTS_TO_SEND) {
        num_events++;
        added += num_events;
        // construct the event to send
        String event = String.format(event_template, event_fmt.print(dt), num_events, 0, num_events);
        LOG.info("sending event: [%s]", event);
        try {
            producer.send(new ProducerRecord<String, String>(TOPIC_NAME, event)).get();
        } catch (Exception ioe) {
            throw Throwables.propagate(ioe);
        }
        dtLast = dt;
        dt = new DateTime(zone);
    }
    producer.close();
    LOG.info("Waiting for [%s] millis for Kafka indexing tasks to consume events", WAIT_TIME_MILLIS);
    try {
        Thread.sleep(WAIT_TIME_MILLIS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    }
    InputStream is = ITKafkaIndexingServiceTest.class.getResourceAsStream(QUERIES_FILE);
    if (null == is) {
        throw new ISE("could not open query file: %s", QUERIES_FILE);
    }
    // put the timestamps into the query structure
    String query_response_template;
    try {
        query_response_template = IOUtils.toString(is, "UTF-8");
    } catch (IOException e) {
        throw new ISE(e, "could not read query file: %s", QUERIES_FILE);
    }
    String queryStr = query_response_template
        .replaceAll("%%DATASOURCE%%", DATASOURCE)
        .replace("%%TIMEBOUNDARY_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
        .replace("%%TIMEBOUNDARY_RESPONSE_MAXTIME%%", TIMESTAMP_FMT.print(dtLast))
        .replace("%%TIMEBOUNDARY_RESPONSE_MINTIME%%", TIMESTAMP_FMT.print(dtFirst))
        .replace("%%TIMESERIES_QUERY_START%%", INTERVAL_FMT.print(dtFirst))
        .replace("%%TIMESERIES_QUERY_END%%", INTERVAL_FMT.print(dtLast.plusMinutes(2)))
        .replace("%%TIMESERIES_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))
        .replace("%%TIMESERIES_ADDED%%", Integer.toString(added))
        .replace("%%TIMESERIES_NUMEVENTS%%", Integer.toString(num_events));
    // this query will probably be answered from the indexing tasks, but possibly from 2 historical segments / 2 indexing tasks
    try {
        this.queryHelper.testQueriesFromString(queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    LOG.info("Shutting down Kafka Supervisor");
    indexer.shutdownSupervisor(supervisorId);
    // wait for all kafka indexing tasks to finish
    LOG.info("Waiting for all kafka indexing tasks to finish");
    RetryUtil.retryUntilTrue(new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return (indexer.getPendingTasks().size() + indexer.getRunningTasks().size() + indexer.getWaitingTasks().size()) == 0;
        }
    }, "Waiting for Tasks Completion");
    // wait for segments to be handed off
    try {
        RetryUtil.retryUntil(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                return coordinator.areSegmentsLoaded(DATASOURCE);
            }
        }, true, 30000, 10, "Real-time generated segments loaded");
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
    LOG.info("segments are present");
    segmentsExist = true;
    // this query will be answered by at least 1 historical segment, most likely 2, and possibly up to all 4
    try {
        this.queryHelper.testQueriesFromString(queryStr, 2);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
Also used: ZkClient (org.I0Itec.zkclient.ZkClient), KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), InputStream (java.io.InputStream), IOException (java.io.IOException), Properties (java.util.Properties), TopicExistsException (kafka.common.TopicExistsException), DateTimeZone (org.joda.time.DateTimeZone), DateTime (org.joda.time.DateTime), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), ISE (io.druid.java.util.common.ISE), StringSerializer (org.apache.kafka.common.serialization.StringSerializer), DateTimeFormatter (org.joda.time.format.DateTimeFormatter), Test (org.testng.annotations.Test)
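
The druid test sends each event synchronously by calling get() on the Future returned by send(), with acks=all and retries set, so delivery is confirmed before the next event goes out. Stripped of the test harness, that pattern is roughly the following sketch; the broker address, topic and payload are placeholders.

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class SynchronousSendSketch {
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        properties.put("acks", "all");
        properties.put("retries", "3");

        try (KafkaProducer<String, String> producer =
                 new KafkaProducer<>(properties, new StringSerializer(), new StringSerializer())) {
            // get() blocks until the broker acknowledges the write (or the send fails),
            // mirroring producer.send(...).get() in the druid test above.
            producer.send(new ProducerRecord<>("test-topic", "example event")).get();
        }
    }
}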

Aggregations

StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 11
Properties (java.util.Properties): 10
Test (org.junit.Test): 5
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 4
PrepareOnlyThisForTest (org.powermock.core.classloader.annotations.PrepareOnlyThisForTest): 4
Cluster (org.apache.kafka.common.Cluster): 3
Node (org.apache.kafka.common.Node): 3
PartitionInfo (org.apache.kafka.common.PartitionInfo): 3
ISE (io.druid.java.util.common.ISE): 2
File (java.io.File): 2
IOException (java.io.IOException): 2
InputStream (java.io.InputStream): 2
Map (java.util.Map): 2
TopicExistsException (kafka.common.TopicExistsException): 2
ZkClient (org.I0Itec.zkclient.ZkClient): 2
Metadata (org.apache.kafka.clients.Metadata): 2
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 2
DateTime (org.joda.time.DateTime): 2
DateTimeZone (org.joda.time.DateTimeZone): 2
DateTimeFormatter (org.joda.time.format.DateTimeFormatter): 2