Search in sources:

Example 31 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

The class MockConsumerInterceptor, method onConsume.

/**
 * Interceptor hook invoked on every batch of consumed records.
 * <p>
 * Records the cluster id the first time it runs, then returns a copy of the
 * batch in which every record value has been upper-cased; all other record
 * fields (topic, partition, offset, timestamp, checksum, sizes, key) are
 * preserved unchanged.
 *
 * @param records the batch handed to the interceptor by the consumer
 * @return a new {@code ConsumerRecords} with upper-cased values
 */
@Override
public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
    // Capture the cluster metadata only on the first invocation: once the value
    // is no longer NO_CLUSTER_ID, subsequent compareAndSet operations fail.
    CLUSTER_ID_BEFORE_ON_CONSUME.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> recordMap = new HashMap<>();
    for (TopicPartition tp : records.partitions()) {
        List<ConsumerRecord<String, String>> upperCased = new ArrayList<>();
        for (ConsumerRecord<String, String> record : records.records(tp)) {
            // Rebuild the record field-for-field, transforming only the value.
            // Locale.ROOT keeps the upper-casing locale-independent.
            upperCased.add(new ConsumerRecord<>(record.topic(), record.partition(), record.offset(),
                record.timestamp(), record.timestampType(), record.checksum(),
                record.serializedKeySize(), record.serializedValueSize(),
                record.key(), record.value().toUpperCase(Locale.ROOT)));
        }
        recordMap.put(tp, upperCased);
    }
    // Diamond operator, consistent with the diamond usage elsewhere in this method.
    return new ConsumerRecords<>(recordMap);
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord)

Example 32 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

The class FileStreamSinkTaskTest, method testPutFlush.

@Test
public void testPutFlush() {
    // Verifies that put() appends records to the output stream and that lines
    // written by an earlier batch survive subsequent put/flush cycles.
    final String sep = System.getProperty("line.separator");
    HashMap<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    // task.start() is deliberately not called here — it would override the output stream.
    // First batch: a single record on topic1.
    task.put(Arrays.asList(new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1)));
    offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L));
    task.flush(offsets);
    assertEquals("line1" + sep, os.toString());
    // Second batch: one more record on topic1 plus a record on topic2.
    task.put(Arrays.asList(
        new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line2", 2),
        new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line3", 1)));
    offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(2L));
    offsets.put(new TopicPartition("topic2", 0), new OffsetAndMetadata(1L));
    task.flush(offsets);
    assertEquals("line1" + sep + "line2" + sep + "line3" + sep, os.toString());
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) SinkRecord(org.apache.kafka.connect.sink.SinkRecord) Test(org.junit.Test)

Example 33 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

The class RequestResponseTest, method createUpdateMetadataRequest.

/**
 * Builds a sample UpdateMetadataRequest covering three partitions and two live
 * brokers. For request versions above 0 the second broker additionally carries
 * SSL endpoints, one of them under a custom listener name.
 *
 * @param version the request version to build
 * @param rack the rack string assigned to both brokers
 * @return the built request
 */
private UpdateMetadataRequest createUpdateMetadataRequest(int version, String rack) {
    // All partitions share the same ISR and replica sets.
    List<Integer> isr = Arrays.asList(1, 2);
    List<Integer> replicas = Arrays.asList(1, 2, 3, 4);
    Map<TopicPartition, PartitionState> partitionStates = new HashMap<>();
    partitionStates.put(new TopicPartition("topic5", 105),
        new PartitionState(0, 2, 1, new ArrayList<>(isr), 2, new HashSet<>(replicas)));
    partitionStates.put(new TopicPartition("topic5", 1),
        new PartitionState(1, 1, 1, new ArrayList<>(isr), 2, new HashSet<>(replicas)));
    partitionStates.put(new TopicPartition("topic20", 1),
        new PartitionState(1, 0, 1, new ArrayList<>(isr), 2, new HashSet<>(replicas)));
    // Both brokers expose a plaintext endpoint on host1.
    SecurityProtocol plaintext = SecurityProtocol.PLAINTEXT;
    List<UpdateMetadataRequest.EndPoint> endPoints1 = new ArrayList<>();
    endPoints1.add(new UpdateMetadataRequest.EndPoint(
        "host1", 1223, plaintext, ListenerName.forSecurityProtocol(plaintext)));
    List<UpdateMetadataRequest.EndPoint> endPoints2 = new ArrayList<>();
    endPoints2.add(new UpdateMetadataRequest.EndPoint(
        "host1", 1244, plaintext, ListenerName.forSecurityProtocol(plaintext)));
    if (version > 0) {
        // Newer versions also advertise SSL endpoints on the second broker.
        SecurityProtocol ssl = SecurityProtocol.SSL;
        endPoints2.add(new UpdateMetadataRequest.EndPoint(
            "host2", 1234, ssl, ListenerName.forSecurityProtocol(ssl)));
        endPoints2.add(new UpdateMetadataRequest.EndPoint(
            "host2", 1334, ssl, new ListenerName("CLIENT")));
    }
    Set<UpdateMetadataRequest.Broker> liveBrokers = new HashSet<>(Arrays.asList(
        new UpdateMetadataRequest.Broker(0, endPoints1, rack),
        new UpdateMetadataRequest.Broker(1, endPoints2, rack)));
    return new UpdateMetadataRequest.Builder((short) version, 1, 10, partitionStates, liveBrokers).build();
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) SecurityProtocol(org.apache.kafka.common.protocol.SecurityProtocol) ListenerName(org.apache.kafka.common.network.ListenerName) TopicPartition(org.apache.kafka.common.TopicPartition) HashSet(java.util.HashSet)

Example 34 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

The class RequestResponseTest, method createOffsetCommitResponse.

/**
 * Builds a sample OffsetCommitResponse for a single partition with no error.
 *
 * @return the built response
 */
private OffsetCommitResponse createOffsetCommitResponse() {
    Map<TopicPartition, Errors> errorsByPartition = new HashMap<>();
    errorsByPartition.put(new TopicPartition("test", 0), Errors.NONE);
    return new OffsetCommitResponse(errorsByPartition);
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TopicPartition(org.apache.kafka.common.TopicPartition)

Example 35 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

The class RequestResponseTest, method createFetchRequest.

/**
 * Builds a sample consumer FetchRequest covering one partition on each of two
 * topics, capped at 1000 bytes.
 *
 * @param version the request version to build against
 * @return the built request
 */
private FetchRequest createFetchRequest(int version) {
    // LinkedHashMap preserves the insertion order of the fetched partitions.
    LinkedHashMap<TopicPartition, FetchRequest.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(new TopicPartition("test1", 0), new FetchRequest.PartitionData(100, 1000000));
    partitions.put(new TopicPartition("test2", 0), new FetchRequest.PartitionData(200, 1000000));
    return FetchRequest.Builder.forConsumer(100, 100000, partitions)
        .setMaxBytes(1000)
        .build((short) version);
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) LinkedHashMap(java.util.LinkedHashMap)

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition)257 HashMap (java.util.HashMap)135 Test (org.junit.Test)97 Map (java.util.Map)64 ArrayList (java.util.ArrayList)50 HashSet (java.util.HashSet)44 LinkedHashMap (java.util.LinkedHashMap)39 PartitionInfo (org.apache.kafka.common.PartitionInfo)34 TaskId (org.apache.kafka.streams.processor.TaskId)33 OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata)29 Set (java.util.Set)28 List (java.util.List)26 Metrics (org.apache.kafka.common.metrics.Metrics)20 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)19 Node (org.apache.kafka.common.Node)19 StreamsConfig (org.apache.kafka.streams.StreamsConfig)19 Properties (java.util.Properties)18 MockTime (org.apache.kafka.common.utils.MockTime)17 PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest)15 Collection (java.util.Collection)13