Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.
The class StreamsMetadataStateTest, method shouldGetInstanceWithKey.
@Test
public void shouldGetInstanceWithKey() throws Exception {
    final TopicPartition tp4 = new TopicPartition("topic-three", 1);
    hostToPartitions.put(hostTwo, Utils.mkSet(topic2P0, tp4));
    discovery.onChange(hostToPartitions,
        cluster.withPartitions(Collections.singletonMap(tp4,
            new PartitionInfo("topic-three", 1, null, null, null))));
    // the default partitioner routes "the-key" to partition 0 of topic-three,
    // which is hosted on host-three
    final StreamsMetadata expected = new StreamsMetadata(hostThree,
        Utils.mkSet(globalTable, "table-three"),
        Collections.singleton(topic3P0));
    final StreamsMetadata actual = discovery.getMetadataWithKey("table-three",
        "the-key", Serdes.String().serializer());
    assertEquals(expected, actual);
}
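The tests pass null for every Node argument of PartitionInfo because only the topic/partition coordinates matter to metadata discovery. For reference, a minimal sketch of the full constructor with a hypothetical single-node cluster (assumes org.apache.kafka.common.Node is imported):

// PartitionInfo(topic, partition, leader, replicas, inSyncReplicas)
Node leader = new Node(0, "localhost", 9092);
PartitionInfo info = new PartitionInfo("topic-three", 1, leader,
    new Node[] { leader }, new Node[] { leader });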
Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.
The class StreamsMetadataStateTest, method before.
@Before
public void before() {
    builder = new KStreamBuilder();
    final KStream<Object, Object> one = builder.stream("topic-one");
    one.groupByKey().count("table-one");
    final KStream<Object, Object> two = builder.stream("topic-two");
    two.groupByKey().count("table-two");
    builder.stream("topic-three").groupByKey().count("table-three");
    builder.merge(one, two).groupByKey().count("merged-table");
    builder.stream("topic-four").mapValues(new ValueMapper<Object, Object>() {
        @Override
        public Object apply(final Object value) {
            return value;
        }
    });
    builder.globalTable("global-topic", "global-table");
    builder.setApplicationId("appId");

    topic1P0 = new TopicPartition("topic-one", 0);
    topic1P1 = new TopicPartition("topic-one", 1);
    topic2P0 = new TopicPartition("topic-two", 0);
    topic2P1 = new TopicPartition("topic-two", 1);
    topic3P0 = new TopicPartition("topic-three", 0);
    topic4P0 = new TopicPartition("topic-four", 0);

    hostOne = new HostInfo("host-one", 8080);
    hostTwo = new HostInfo("host-two", 9090);
    hostThree = new HostInfo("host-three", 7070);

    hostToPartitions = new HashMap<>();
    hostToPartitions.put(hostOne, Utils.mkSet(topic1P0, topic2P1, topic4P0));
    hostToPartitions.put(hostTwo, Utils.mkSet(topic2P0, topic1P1));
    hostToPartitions.put(hostThree, Collections.singleton(topic3P0));

    partitionInfos = Arrays.asList(
        new PartitionInfo("topic-one", 0, null, null, null),
        new PartitionInfo("topic-one", 1, null, null, null),
        new PartitionInfo("topic-two", 0, null, null, null),
        new PartitionInfo("topic-two", 1, null, null, null),
        new PartitionInfo("topic-three", 0, null, null, null),
        new PartitionInfo("topic-four", 0, null, null, null));
    cluster = new Cluster(null, Collections.<Node>emptyList(), partitionInfos,
        Collections.<String>emptySet(), Collections.<String>emptySet());

    discovery = new StreamsMetadataState(builder, hostOne);
    discovery.onChange(hostToPartitions, cluster);

    partitioner = new StreamPartitioner<String, Object>() {
        @Override
        public Integer partition(final String key, final Object value, final int numPartitions) {
            return 1;
        }
    };
}
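The tests simulate topology changes through Cluster#withPartitions, which returns a copy of the cluster with the extra partitions merged in, leaving the original untouched. A minimal sketch (the new topic-one partition is hypothetical):

Map<TopicPartition, PartitionInfo> extra = Collections.singletonMap(
    new TopicPartition("topic-one", 2),
    new PartitionInfo("topic-one", 2, null, null, null));
Cluster updated = cluster.withPartitions(extra); // `cluster` itself is unchanged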
Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.
The class StreamsMetadataStateTest, method shouldGetInstanceWithKeyWithMergedStreams.
@Test
public void shouldGetInstanceWithKeyWithMergedStreams() throws Exception {
    final TopicPartition topic2P2 = new TopicPartition("topic-two", 2);
    hostToPartitions.put(hostTwo, Utils.mkSet(topic2P0, topic1P1, topic2P2));
    discovery.onChange(hostToPartitions,
        cluster.withPartitions(Collections.singletonMap(topic2P2,
            new PartitionInfo("topic-two", 2, null, null, null))));
    final StreamsMetadata expected = new StreamsMetadata(hostTwo,
        Utils.mkSet("global-table", "table-two", "table-one", "merged-table"),
        Utils.mkSet(topic2P0, topic1P1, topic2P2));
    // the custom partitioner pins the key to partition 2, which lives on host-two
    final StreamsMetadata actual = discovery.getMetadataWithKey("merged-table", "123",
        new StreamPartitioner<String, Object>() {
            @Override
            public Integer partition(final String key, final Object value, final int numPartitions) {
                return 2;
            }
        });
    assertEquals(expected, actual);
}
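Since StreamPartitioner declares a single abstract method, the anonymous class above can be written as a lambda on Java 8+. A minimal equivalent sketch:

StreamPartitioner<String, Object> pinToPartitionTwo =
    (key, value, numPartitions) -> 2;
final StreamsMetadata actual =
    discovery.getMetadataWithKey("merged-table", "123", pinToPartitionTwo);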
Use of org.apache.kafka.common.PartitionInfo in project flink by apache.
The class KafkaProducerTest, method testPropagateExceptions.
@Test
@SuppressWarnings("unchecked")
public void testPropagateExceptions() {
    try {
        // mock kafka producer
        KafkaProducer<?, ?> kafkaProducerMock = mock(KafkaProducer.class);

        // partition setup: return an unmodifiable list to mimic
        // KafkaProducer#partitionsFor() behaviour
        when(kafkaProducerMock.partitionsFor(anyString())).thenReturn(
            Collections.singletonList(new PartitionInfo("mock_topic", 42, null, null, null)));

        // failure when trying to send an element
        when(kafkaProducerMock.send(any(ProducerRecord.class), any(Callback.class)))
            .thenAnswer(new Answer<Future<RecordMetadata>>() {
                @Override
                public Future<RecordMetadata> answer(InvocationOnMock invocation) throws Throwable {
                    Callback callback = (Callback) invocation.getArguments()[1];
                    callback.onCompletion(null, new Exception("Test error"));
                    return null;
                }
            });

        // make sure the FlinkKafkaProducer instantiates our mock producer
        whenNew(KafkaProducer.class).withAnyArguments().thenReturn(kafkaProducerMock);

        // (1) producer that propagates errors
        FlinkKafkaProducer09<String> producerPropagating = new FlinkKafkaProducer09<>(
            "mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), null);
        OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink(producerPropagating));
        testHarness.open();
        try {
            testHarness.processElement(new StreamRecord<>("value"));
            testHarness.processElement(new StreamRecord<>("value"));
            fail("This should fail with an exception");
        } catch (Exception e) {
            assertNotNull(e.getCause());
            assertNotNull(e.getCause().getMessage());
            assertTrue(e.getCause().getMessage().contains("Test error"));
        }

        // (2) producer that only logs errors
        FlinkKafkaProducer09<String> producerLogging = new FlinkKafkaProducer09<>(
            "mock_topic", new SimpleStringSchema(), FakeStandardProducerConfig.get(), null);
        producerLogging.setLogFailuresOnly(true);
        testHarness = new OneInputStreamOperatorTestHarness<>(new StreamSink(producerLogging));
        testHarness.open();
        testHarness.processElement(new StreamRecord<>("value"));
        testHarness.processElement(new StreamRecord<>("value"));
        testHarness.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
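whenNew comes from PowerMockito and only intercepts constructor calls made from classes listed in @PrepareForTest. The surrounding test class therefore needs a setup roughly like the following sketch; the prepared class is an assumption, since it is not visible in the snippet above:

@RunWith(PowerMockRunner.class)
@PrepareForTest(FlinkKafkaProducerBase.class) // assumed: the class that invokes `new KafkaProducer(...)`
public class KafkaProducerTest {
    // test methods as above
}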
Use of org.apache.kafka.common.PartitionInfo in project apex-malhar by apache.
The class OneToManyPartitioner, method assign.
@Override
List<Set<PartitionMeta>> assign(Map<String, Map<String, List<PartitionInfo>>> metadata) {
    if (prototypeOperator.getInitialPartitionCount() <= 0) {
        throw new IllegalArgumentException("Number of partitions should be greater than or equal to 1");
    }
    int partitionCount = prototypeOperator.getInitialPartitionCount();
    ArrayList<Set<PartitionMeta>> eachPartitionAssignment =
        new ArrayList<>(prototypeOperator.getInitialPartitionCount());
    int i = 0;
    // round-robin every (cluster, topic, partition) triple over the operator partitions
    for (Map.Entry<String, Map<String, List<PartitionInfo>>> clusterMap : metadata.entrySet()) {
        for (Map.Entry<String, List<PartitionInfo>> topicPartition : clusterMap.getValue().entrySet()) {
            for (PartitionInfo pif : topicPartition.getValue()) {
                int index = i++ % partitionCount;
                if (index >= eachPartitionAssignment.size()) {
                    eachPartitionAssignment.add(new HashSet<PartitionMeta>());
                }
                eachPartitionAssignment.get(index).add(
                    new PartitionMeta(clusterMap.getKey(), topicPartition.getKey(), pif.partition()));
            }
        }
    }
    return eachPartitionAssignment;
}
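The i++ % partitionCount line spreads Kafka partitions round-robin across the operator partitions. A standalone sketch of the same spread, with plain integers standing in for PartitionMeta and hypothetical counts (2 operator partitions, 5 Kafka partitions):

int partitionCount = 2; // operator partitions
List<Set<Integer>> buckets = new ArrayList<>();
int i = 0;
for (int kafkaPartition = 0; kafkaPartition < 5; kafkaPartition++) {
    int index = i++ % partitionCount;
    if (index >= buckets.size()) {
        buckets.add(new HashSet<Integer>());
    }
    buckets.get(index).add(kafkaPartition);
}
// buckets is now [[0, 2, 4], [1, 3]]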