Use of org.apache.kafka.common.utils.Time in project kafka by apache.
From the class KafkaProducerTest, method testCommitTransactionWithRecordTooLargeException.
@Test
public void testCommitTransactionWithRecordTooLargeException() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1000);
    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    when(metadata.fetch()).thenReturn(onePartitionCluster);
    String largeString = IntStream.range(0, 1000).mapToObj(i -> "*").collect(Collectors.joining());
    ProducerRecord<String, String> largeRecord = new ProducerRecord<>(topic, "large string", largeString);
    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        client.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        // The 1000-character value pushes the record past max.request.size, so the send future fails.
        TestUtils.assertFutureError(producer.send(largeRecord), RecordTooLargeException.class);
        // Committing after a failed send surfaces the failure as a KafkaException.
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
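A note on the MockTime(1) constructor used above: the argument is an auto-tick in milliseconds, so every read of the clock advances it, which lets the producer's internal waits make progress without real sleeps. A minimal sketch of that behavior (illustrative, not part of the test):

// org.apache.kafka.common.utils.MockTime / Time
Time time = new MockTime(1);
long first = time.milliseconds();
long second = time.milliseconds();  // second == first + 1: each read auto-advances the mock clock by 1 ms
time.sleep(50);                     // advances the mock clock by 50 ms immediately; no real sleeping happens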
Use of org.apache.kafka.common.utils.Time in project kafka by apache.
From the class KafkaProducerTest, method testCloseIsForcedOnPendingInitProducerId.
@Test
public void testCloseIsForcedOnPendingInitProducerId() throws InterruptedException {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "this-is-a-transactional-id");
    Time time = new MockTime();
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("testTopic", 1));
    ProducerMetadata metadata = newMetadata(0, Long.MAX_VALUE);
    metadata.updateWithCurrentRequestVersion(initialUpdateResponse, false, time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    Producer<String, String> producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), metadata, client, null, time);
    ExecutorService executorService = Executors.newSingleThreadExecutor();
    CountDownLatch assertionDoneLatch = new CountDownLatch(1);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "this-is-a-transactional-id", NODE));
    executorService.submit(() -> {
        // Only the FindCoordinator response is prepared, so initTransactions blocks waiting for
        // InitProducerId until the forced close below aborts it with a KafkaException.
        assertThrows(KafkaException.class, producer::initTransactions);
        assertionDoneLatch.countDown();
    });
    client.waitForRequests(1, 2000);
    producer.close(Duration.ofMillis(1000));
    assertionDoneLatch.await(5000, TimeUnit.MILLISECONDS);
}
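One small teardown the snippet omits: the latch result is discarded and the helper executor is left running. A hedged sketch of the cleanup a caller would typically append (assumed, not taken from the Kafka test):

assertTrue(assertionDoneLatch.await(5000, TimeUnit.MILLISECONDS));  // fail the test if the background assertion never completed
executorService.shutdownNow();                                      // stop the helper thread that called initTransactions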
Use of org.apache.kafka.common.utils.Time in project kafka by apache.
From the class MeteredTimestampedKeyValueStoreTest, method before.
@Before
public void before() {
    final Time mockTime = new MockTime();
    metered = new MeteredTimestampedKeyValueStore<>(inner, "scope", mockTime, Serdes.String(), new ValueAndTimestampSerde<>(Serdes.String()));
    metrics.config().recordLevel(Sensor.RecordingLevel.DEBUG);
    expect(context.applicationId()).andStubReturn(APPLICATION_ID);
    expect(context.metrics()).andStubReturn(new StreamsMetricsImpl(metrics, "test", StreamsConfig.METRICS_LATEST, mockTime));
    expect(context.taskId()).andStubReturn(taskId);
    expect(context.changelogFor(STORE_NAME)).andStubReturn(CHANGELOG_TOPIC);
    expectSerdes();
    expect(inner.name()).andStubReturn(STORE_NAME);
    expect(context.appConfigs()).andStubReturn(CONFIGS);
    tags = mkMap(mkEntry(THREAD_ID_TAG_KEY, threadId), mkEntry("task-id", taskId.toString()), mkEntry(STORE_TYPE + "-state-id", STORE_NAME));
}
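The mockTime handed to the wrapper is what the metered store reads when it times operations. A simplified, hypothetical sketch of that measurement pattern (the names putSensor, keyBytes, and valueBytes are invented for illustration; the real logic lives inside MeteredTimestampedKeyValueStore):

final long startNs = mockTime.nanoseconds();
inner.put(keyBytes, valueBytes);                      // delegate the write to the wrapped inner store
putSensor.record(mockTime.nanoseconds() - startNs);   // record the elapsed nanoseconds on the latency sensor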
Use of org.apache.kafka.common.utils.Time in project kafka by apache.
From the class TransactionsCommand, method execute.
static void execute(String[] args, Function<Namespace, Admin> adminSupplier, PrintStream out, Time time) throws Exception {
    List<TransactionsCommand> commands = Arrays.asList(new ListTransactionsCommand(time), new DescribeTransactionsCommand(time), new DescribeProducersCommand(time), new AbortTransactionCommand(time), new FindHangingTransactionsCommand(time));
    ArgumentParser parser = buildBaseParser();
    Subparsers subparsers = parser.addSubparsers().dest("command").title("commands").metavar("COMMAND");
    commands.forEach(command -> command.addSubparser(subparsers));
    final Namespace ns;
    try {
        ns = parser.parseArgs(args);
    } catch (ArgumentParserException e) {
        parser.handleError(e);
        Exit.exit(1);
        return;
    }
    Admin admin = adminSupplier.apply(ns);
    String commandName = ns.getString("command");
    Optional<TransactionsCommand> commandOpt = commands.stream().filter(cmd -> cmd.name().equals(commandName)).findFirst();
    if (!commandOpt.isPresent()) {
        // Unknown subcommand: print an error and terminate, so the get() below is only reached for valid commands.
        printErrorAndExit("Unexpected command " + commandName);
    }
    TransactionsCommand command = commandOpt.get();
    command.execute(admin, ns, out);
    Exit.exit(0);
}
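For context, this execute method is meant to be driven from a main entry point that supplies a real Admin client and the system clock. A hedged sketch of that wiring (the argument name "bootstrap-server" and the exact Admin construction are assumptions; Kafka's actual main method may differ):

public static void main(String[] args) throws Exception {
    execute(args, ns -> {
        Properties config = new Properties();
        // "bootstrap-server" is assumed to be the dest name of the CLI option defined by buildBaseParser()
        config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, ns.getString("bootstrap-server"));
        return Admin.create(config);
    }, System.out, Time.SYSTEM);
}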
Use of org.apache.kafka.common.utils.Time in project kafka by apache.
From the class KafkaStreamsTest, method testStateThreadClose.
@Test
public void testStateThreadClose() throws Exception {
    // make sure we have the global state thread running too
    final StreamsBuilder builder = getBuilderWithSource();
    builder.globalTable("anyTopic");
    try (final KafkaStreams streams = new KafkaStreams(builder.build(), props, supplier, time)) {
        assertEquals(NUM_THREADS, streams.threads.size());
        assertEquals(KafkaStreams.State.CREATED, streams.state());
        streams.start();
        waitForCondition(() -> streams.state() == KafkaStreams.State.RUNNING, "Streams never started.");
        // Shut down every stream thread individually and wait for each one to die.
        for (int i = 0; i < NUM_THREADS; i++) {
            final StreamThread tmpThread = streams.threads.get(i);
            tmpThread.shutdown();
            waitForCondition(() -> tmpThread.state() == StreamThread.State.DEAD, "Thread never stopped.");
            streams.threads.get(i).join();
        }
        waitForCondition(() -> streams.metadataForLocalThreads().stream().allMatch(t -> t.threadState().equals("DEAD")), "Streams never stopped");
        streams.close();
        waitForCondition(() -> streams.state() == KafkaStreams.State.NOT_RUNNING, "Streams never stopped.");
        assertNull(streams.globalStreamThread);
    }
}
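Worth noting for callers of close(): the overload that takes a Duration returns whether shutdown completed within the timeout, which can be asserted directly. A small hedged sketch (not part of the test above):

final boolean cleanlyClosed = streams.close(Duration.ofSeconds(30));  // returns false if the threads did not stop in time
assertTrue(cleanlyClosed);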