Usage example of org.apache.kafka.clients.admin.DeleteRecordsResult in the project apache-kafka-on-k8s by banzaicloud: the TaskManagerTest method shouldSendPurgeData.
@Test
public void shouldSendPurgeData() {
    // A future that is already completed, so every purge attempt is free to fire.
    final KafkaFutureImpl<DeletedRecords> deleteFuture = new KafkaFutureImpl<>();
    deleteFuture.complete(null);

    final Map<TopicPartition, RecordsToDelete> expectedRequest =
        Collections.singletonMap(t1p1, RecordsToDelete.beforeOffset(5L));
    final DeleteRecordsResult deleteResult = new DeleteRecordsResult(
        Collections.<TopicPartition, KafkaFuture<DeletedRecords>>singletonMap(t1p1, deleteFuture));

    EasyMock.expect(active.recordsToDelete()).andReturn(Collections.singletonMap(t1p1, 5L)).times(2);
    EasyMock.expect(adminClient.deleteRecords(expectedRequest)).andReturn(deleteResult).times(2);
    replay();

    // Both calls should issue a delete request, since the first one's future
    // has already completed by the time the second call runs.
    taskManager.maybePurgeCommitedRecords();
    taskManager.maybePurgeCommitedRecords();

    verify(active, adminClient);
}
Usage example of org.apache.kafka.clients.admin.DeleteRecordsResult in the project apache-kafka-on-k8s by banzaicloud: the TaskManagerTest method shouldIgnorePurgeDataErrors.
@Test
public void shouldIgnorePurgeDataErrors() {
    // A future that has already failed; the task manager must swallow the error
    // and keep purging on subsequent calls.
    final KafkaFutureImpl<DeletedRecords> failedFuture = new KafkaFutureImpl<>();
    failedFuture.completeExceptionally(new Exception("KABOOM!"));

    final Map<TopicPartition, RecordsToDelete> expectedRequest =
        Collections.singletonMap(t1p1, RecordsToDelete.beforeOffset(5L));
    final DeleteRecordsResult deleteResult = new DeleteRecordsResult(
        Collections.<TopicPartition, KafkaFuture<DeletedRecords>>singletonMap(t1p1, failedFuture));

    EasyMock.expect(active.recordsToDelete()).andReturn(Collections.singletonMap(t1p1, 5L)).times(2);
    EasyMock.expect(adminClient.deleteRecords(expectedRequest)).andReturn(deleteResult).times(2);
    replay();

    // Two purge attempts are expected despite the failed first request.
    taskManager.maybePurgeCommitedRecords();
    taskManager.maybePurgeCommitedRecords();

    verify(active, adminClient);
}
Usage example of org.apache.kafka.clients.admin.DeleteRecordsResult in the project apache-kafka-on-k8s by banzaicloud: the TaskManagerTest method shouldNotSendPurgeDataIfPreviousNotDone.
@Test
public void shouldNotSendPurgeDataIfPreviousNotDone() {
    // Deliberately never completed: simulates an in-flight delete-records request.
    final KafkaFuture<DeletedRecords> pendingFuture = new KafkaFutureImpl<>();

    final Map<TopicPartition, RecordsToDelete> expectedRequest =
        Collections.singletonMap(t1p1, RecordsToDelete.beforeOffset(5L));
    final DeleteRecordsResult deleteResult =
        new DeleteRecordsResult(Collections.singletonMap(t1p1, pendingFuture));

    // Exactly one round trip is expected; the mocks would fail verification
    // if a second request were issued.
    EasyMock.expect(active.recordsToDelete()).andReturn(Collections.singletonMap(t1p1, 5L)).once();
    EasyMock.expect(adminClient.deleteRecords(expectedRequest)).andReturn(deleteResult).once();
    replay();

    taskManager.maybePurgeCommitedRecords();
    // second call should be no-op as the previous one is not done yet
    taskManager.maybePurgeCommitedRecords();

    verify(active, adminClient);
}
Usage example of org.apache.kafka.clients.admin.DeleteRecordsResult in the project kafka by apache: the TaskManagerTest method shouldSendPurgeData.
@Test
public void shouldSendPurgeData() {
    // Strict mock: the two delete-records requests below must arrive in order.
    resetToStrict(adminClient);
    expect(adminClient.deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(5L))))
        .andReturn(new DeleteRecordsResult(singletonMap(t1p1, completedFuture())));
    expect(adminClient.deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(17L))))
        .andReturn(new DeleteRecordsResult(singletonMap(t1p1, completedFuture())));
    replay(adminClient);

    // The task reports whatever this mutable map currently holds.
    final Map<TopicPartition, Long> offsetsToPurge = new HashMap<>();
    final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true) {
        @Override
        public Map<TopicPartition, Long> purgeableOffsets() {
            return offsetsToPurge;
        }
    };

    expectRestoreToBeCompleted(consumer, changeLogReader);
    expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment)))
        .andStubReturn(singletonList(task00));
    replay(activeTaskCreator, consumer, changeLogReader);

    taskManager.handleAssignment(taskId00Assignment, emptyMap());
    assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), null), is(true));
    assertThat(task00.state(), is(Task.State.RUNNING));

    // Each purge call should forward the task's current purgeable offset,
    // since the previous request's future completed immediately.
    offsetsToPurge.put(t1p1, 5L);
    taskManager.maybePurgeCommittedRecords();
    offsetsToPurge.put(t1p1, 17L);
    taskManager.maybePurgeCommittedRecords();

    verify(adminClient);
}
Usage example of org.apache.kafka.clients.admin.DeleteRecordsResult in the project kafka by apache: the TaskManagerTest method shouldNotSendPurgeDataIfPreviousNotDone.
@Test
public void shouldNotSendPurgeDataIfPreviousNotDone() {
    // Strict mock with a single expectation: any second request fails verification.
    resetToStrict(adminClient);
    // Never completed, so the first request stays "in flight" for the whole test.
    final KafkaFutureImpl<DeletedRecords> pendingDeleteFuture = new KafkaFutureImpl<>();
    expect(adminClient.deleteRecords(singletonMap(t1p1, RecordsToDelete.beforeOffset(5L))))
        .andReturn(new DeleteRecordsResult(singletonMap(t1p1, pendingDeleteFuture)));
    replay(adminClient);

    // The task reports whatever this mutable map currently holds.
    final Map<TopicPartition, Long> offsetsToPurge = new HashMap<>();
    final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true) {
        @Override
        public Map<TopicPartition, Long> purgeableOffsets() {
            return offsetsToPurge;
        }
    };

    expectRestoreToBeCompleted(consumer, changeLogReader);
    expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment)))
        .andStubReturn(singletonList(task00));
    replay(activeTaskCreator, consumer, changeLogReader);

    taskManager.handleAssignment(taskId00Assignment, emptyMap());
    assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), null), is(true));
    assertThat(task00.state(), is(Task.State.RUNNING));

    offsetsToPurge.put(t1p1, 5L);
    taskManager.maybePurgeCommittedRecords();
    // this call should be a no-op.
    // this is verified, as there is no expectation on adminClient for this second call,
    // so it would fail verification if we invoke the admin client again.
    offsetsToPurge.put(t1p1, 17L);
    taskManager.maybePurgeCommittedRecords();

    verify(adminClient);
}
Aggregations