Use of org.apache.kafka.streams.processor.internals.assignment.AssignorConfiguration.AssignmentListener in project kafka by apache.
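AssignmentListener is a single-method callback interface nested in AssignorConfiguration, which is why both examples below can supply it as a lambda. Judging from those lambdas, the callback receives one boolean telling the listener whether the rebalance it just observed produced a stable assignment (i.e., no follow-up probing rebalance is expected). A minimal sketch of that shape, with the method name assumed for illustration rather than copied from the Kafka source:

public interface AssignmentListener {
    // Called once per completed assignment; 'stable' signals that no further
    // probing rebalances are pending. (The method name here is an assumption.)
    void onAssignmentComplete(final boolean stable);
}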
The first example is the class HighAvailabilityTaskAssignorIntegrationTest, method shouldScaleOutWithWarmupTasks:
private void shouldScaleOutWithWarmupTasks(final Function<String, Materialized<Object, Object, KeyValueStore<Bytes, byte[]>>> materializedFunction) throws InterruptedException {
    final String testId = safeUniqueTestName(getClass(), testName);
    final String appId = "appId_" + System.currentTimeMillis() + "_" + testId;
    final String inputTopic = "input" + testId;
    final Set<TopicPartition> inputTopicPartitions = mkSet(
        new TopicPartition(inputTopic, 0),
        new TopicPartition(inputTopic, 1)
    );
    final String storeName = "store" + testId;
    final String storeChangelog = appId + "-store" + testId + "-changelog";
    final Set<TopicPartition> changelogTopicPartitions = mkSet(
        new TopicPartition(storeChangelog, 0),
        new TopicPartition(storeChangelog, 1)
    );
    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, 2, inputTopic, storeChangelog);

    final ReentrantLock assignmentLock = new ReentrantLock();
    final AtomicInteger assignmentsCompleted = new AtomicInteger(0);
    final Map<Integer, Boolean> assignmentsStable = new ConcurrentHashMap<>();
    final AtomicBoolean assignmentStable = new AtomicBoolean(false);
    final AssignmentListener assignmentListener = stable -> {
        assignmentLock.lock();
        try {
            final int thisAssignmentIndex = assignmentsCompleted.incrementAndGet();
            assignmentsStable.put(thisAssignmentIndex, stable);
            assignmentStable.set(stable);
        } finally {
            assignmentLock.unlock();
        }
    };

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(inputTopic, materializedFunction.apply(storeName));
    final Topology topology = builder.build();

    final int numberOfRecords = 500;
    produceTestData(inputTopic, numberOfRecords);

    try (final KafkaStreams kafkaStreams0 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
         final Consumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties())) {
        kafkaStreams0.start();

        // sanity check: just make sure we actually wrote all the input records
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(inputTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the input topic: " + getEndOffsetSum(inputTopicPartitions, consumer)
        );

        // wait until all the input records are in the changelog
        TestUtils.waitForCondition(
            () -> getEndOffsetSum(changelogTopicPartitions, consumer) == numberOfRecords,
            120_000L,
            () -> "Input records haven't all been written to the changelog: " + getEndOffsetSum(changelogTopicPartitions, consumer)
        );

        final AtomicLong instance1TotalRestored = new AtomicLong(-1);
        final AtomicLong instance1NumRestored = new AtomicLong(-1);
        final CountDownLatch restoreCompleteLatch = new CountDownLatch(1);
        kafkaStreams1.setGlobalStateRestoreListener(new StateRestoreListener() {
            @Override
            public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
            }

            @Override
            public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
                instance1NumRestored.accumulateAndGet(numRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
            }

            @Override
            public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
                instance1TotalRestored.accumulateAndGet(totalRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
                restoreCompleteLatch.countDown();
            }
        });

        final int assignmentsBeforeScaleOut = assignmentsCompleted.get();
        kafkaStreams1.start();
        TestUtils.waitForCondition(
            () -> {
                assignmentLock.lock();
                try {
                    if (assignmentsCompleted.get() > assignmentsBeforeScaleOut) {
                        assertFalseNoRetry(
                            assignmentsStable.get(assignmentsBeforeScaleOut + 1),
                            "the first assignment after adding a node should be unstable while we warm up the state."
                        );
                        return true;
                    } else {
                        return false;
                    }
                } finally {
                    assignmentLock.unlock();
                }
            },
            120_000L,
            "Never saw a first assignment after scale out: " + assignmentsCompleted.get()
        );

        TestUtils.waitForCondition(
            assignmentStable::get,
            120_000L,
            "Assignment hasn't become stable: " + assignmentsCompleted.get()
                + " Note, if this does fail, check and see if the new instance just failed to catch up within"
                + " the probing rebalance interval. A full minute should be long enough to read ~500 records"
                + " in any test environment, but you never know..."
        );

        restoreCompleteLatch.await();
        // We should finalize the restoration without having restored any records (because they're
        // already in the store). Otherwise, we failed to properly re-use the state from the standby.
        assertThat(instance1TotalRestored.get(), is(0L));
        // Belt-and-suspenders check that we never even attempt to restore any records.
        assertThat(instance1NumRestored.get(), is(-1L));
    }
}
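Two helpers used above are defined elsewhere in the test class and are not part of this listing. streamsProperties(appId, assignmentListener) presumably builds the streams configuration and passes the listener in through the internal config key that the second example below uses explicitly; a hypothetical sketch (the helper's real property set is not shown here, so everything beyond the listener entry is an assumption):

private static Properties streamsProperties(final String appId, final AssignmentListener assignmentListener) {
    final Properties properties = new Properties();
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
    properties.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    // The listener is an internal, test-only hook, so it is stored as an object
    // reference rather than as a string-valued config.
    properties.put(StreamsConfig.InternalConfig.ASSIGNMENT_LISTENER, assignmentListener);
    return properties;
}

Similarly, getEndOffsetSum plausibly sums the end offsets of the given partitions so the test can tell when all records have landed in a topic; a sketch under the same caveat:

private static long getEndOffsetSum(final Set<TopicPartition> partitions, final Consumer<String, String> consumer) {
    long sum = 0;
    // endOffsets returns the current log-end offset per partition
    for (final long endOffset : consumer.endOffsets(partitions).values()) {
        sum += endOffset;
    }
    return sum;
}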
Use of org.apache.kafka.streams.processor.internals.assignment.AssignorConfiguration.AssignmentListener in project kafka by apache.
The second example is the class TaskAssignorIntegrationTest, method shouldProperlyConfigureTheAssignor:
@SuppressWarnings("unchecked")
@Test
public void shouldProperlyConfigureTheAssignor() throws NoSuchFieldException, IllegalAccessException {
    // This test uses reflection to check and make sure that all the expected configurations really
    // make it all the way to configure the task assignor. There's no other use case for being able
    // to extract all these fields, so reflection is a good choice until we find that the maintenance
    // burden is too high.
    //
    // Also note that this is an integration test because so many components have to come together to
    // ensure these configurations wind up where they belong, and any number of future code changes
    // could silently break this wiring.
    final String testId = safeUniqueTestName(getClass(), testName);
    final String appId = "appId_" + testId;
    final String inputTopic = "input" + testId;
    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, inputTopic);

    // Maybe I'm paranoid, but I don't want the compiler deciding that my lambdas are equal to the identity
    // function and defeating my identity check
    final AtomicInteger compilerDefeatingReference = new AtomicInteger(0);

    // the implementation doesn't matter, we're just going to verify the reference.
    final AssignmentListener configuredAssignmentListener = stable -> compilerDefeatingReference.incrementAndGet();

    final Properties properties = mkObjectProperties(mkMap(
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, appId),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
        mkEntry(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, "5"),
        mkEntry(StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG, "6"),
        mkEntry(StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG, "7"),
        mkEntry(StreamsConfig.PROBING_REBALANCE_INTERVAL_MS_CONFIG, "480000"),
        mkEntry(StreamsConfig.InternalConfig.ASSIGNMENT_LISTENER, configuredAssignmentListener),
        mkEntry(StreamsConfig.InternalConfig.INTERNAL_TASK_ASSIGNOR_CLASS, MyTaskAssignor.class.getName())
    ));

    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic);

    try (final KafkaStreams kafkaStreams = new KafkaStreams(builder.build(), properties)) {
        kafkaStreams.start();

        final Field threads = KafkaStreams.class.getDeclaredField("threads");
        threads.setAccessible(true);
        final List<StreamThread> streamThreads = (List<StreamThread>) threads.get(kafkaStreams);
        final StreamThread streamThread = streamThreads.get(0);

        final Field mainConsumer = StreamThread.class.getDeclaredField("mainConsumer");
        mainConsumer.setAccessible(true);
        final KafkaConsumer<?, ?> consumer = (KafkaConsumer<?, ?>) mainConsumer.get(streamThread);

        final Field assignors = KafkaConsumer.class.getDeclaredField("assignors");
        assignors.setAccessible(true);
        final List<ConsumerPartitionAssignor> consumerPartitionAssignors = (List<ConsumerPartitionAssignor>) assignors.get(consumer);
        final StreamsPartitionAssignor streamsPartitionAssignor = (StreamsPartitionAssignor) consumerPartitionAssignors.get(0);

        final Field assignmentConfigs = StreamsPartitionAssignor.class.getDeclaredField("assignmentConfigs");
        assignmentConfigs.setAccessible(true);
        final AssignorConfiguration.AssignmentConfigs configs = (AssignorConfiguration.AssignmentConfigs) assignmentConfigs.get(streamsPartitionAssignor);

        final Field assignmentListenerField = StreamsPartitionAssignor.class.getDeclaredField("assignmentListener");
        assignmentListenerField.setAccessible(true);
        final AssignmentListener actualAssignmentListener = (AssignmentListener) assignmentListenerField.get(streamsPartitionAssignor);

        final Field taskAssignorSupplierField = StreamsPartitionAssignor.class.getDeclaredField("taskAssignorSupplier");
        taskAssignorSupplierField.setAccessible(true);
        final Supplier<TaskAssignor> taskAssignorSupplier = (Supplier<TaskAssignor>) taskAssignorSupplierField.get(streamsPartitionAssignor);
        final TaskAssignor taskAssignor = taskAssignorSupplier.get();

        assertThat(configs.numStandbyReplicas, is(5));
        assertThat(configs.acceptableRecoveryLag, is(6L));
        assertThat(configs.maxWarmupReplicas, is(7));
        assertThat(configs.probingRebalanceIntervalMs, is(480000L));
        assertThat(actualAssignmentListener, sameInstance(configuredAssignmentListener));
        assertThat(taskAssignor, instanceOf(MyTaskAssignor.class));
    }
}
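MyTaskAssignor is a fixture defined elsewhere in the test class; the final assertion only checks instanceOf, so a trivial subclass of the built-in high-availability assignor would satisfy it. One plausible definition (an assumption, not the listing's source):

// Inherits all behavior from HighAvailabilityTaskAssignor; it exists only so the
// test can verify that the class named in INTERNAL_TASK_ASSIGNOR_CLASS is the one
// actually instantiated by the partition assignor. (Sketch; the real definition
// lives in TaskAssignorIntegrationTest.)
public static class MyTaskAssignor extends HighAvailabilityTaskAssignor { }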