Example 21 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From the class RecordAccumulatorTest, method testIdempotenceWithOldMagic.

@Test
public void testIdempotenceWithOldMagic() {
    // Simulate talking to an older broker, i.e. one that only supports a lower message format (magic) version.
    ApiVersions apiVersions = new ApiVersions();
    int batchSize = 1025;
    int deliveryTimeoutMs = 3200;
    int lingerMs = 10;
    long retryBackoffMs = 100L;
    long totalSize = 10 * batchSize;
    String metricGrpName = "producer-metrics";
    apiVersions.update("foobar", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));
    TransactionManager transactionManager = new TransactionManager(new LogContext(), null, 0, retryBackoffMs, apiVersions);
    RecordAccumulator accum = new RecordAccumulator(logContext, batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD,
        CompressionType.NONE, lingerMs, retryBackoffMs, deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions,
        transactionManager, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName));
    // Idempotence requires the v2 message format, so the append must be rejected.
    assertThrows(UnsupportedVersionException.class,
        () -> accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds()));
}
Also used : NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) LogContext(org.apache.kafka.common.utils.LogContext) Test(org.junit.jupiter.api.Test)
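
The rejection above is driven by ApiVersions.maxUsableProduceMagic(), which derives the highest usable message format from the PRODUCE versions registered via update(). A minimal probe of that behavior, sketched under the assumption that the version-to-magic mapping of current Kafka clients applies (the mapping is an internal detail, and the node id "node-1" is illustrative):

import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.NodeApiVersions;
import org.apache.kafka.common.protocol.ApiKeys;

public class MagicProbe {
    public static void main(String[] args) {
        ApiVersions apiVersions = new ApiVersions();
        // No broker versions registered yet: the client assumes the current magic (v2).
        System.out.println(apiVersions.maxUsableProduceMagic());
        // Register a broker that tops out at PRODUCE v2, as in the test above;
        // the usable magic drops below v2, which is why the idempotent append fails.
        apiVersions.update("node-1", NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));
        System.out.println(apiVersions.maxUsableProduceMagic());
    }
}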

Example 22 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From the class RecordAccumulatorTest, method testAppendLargeOldMessageFormat.

private void testAppendLargeOldMessageFormat(CompressionType compressionType) throws Exception {
    int batchSize = 512;
    // A value twice the batch size guarantees a single-record batch larger than batchSize.
    byte[] value = new byte[2 * batchSize];
    ApiVersions apiVersions = new ApiVersions();
    // Restrict node1 to PRODUCE v0-v2 so the accumulator falls back to the old message format.
    apiVersions.update(node1.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, (short) 0, (short) 2));
    RecordAccumulator accum = createTestRecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * 1024, compressionType, 0);
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
    assertEquals(Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes, "Our partition's leader should be ready");
    // The oversized record should land alone: one batch, one record, at offset 0.
    Deque<ProducerBatch> batches = accum.batches().get(tp1);
    assertEquals(1, batches.size());
    ProducerBatch producerBatch = batches.peek();
    List<MutableRecordBatch> recordBatches = TestUtils.toList(producerBatch.records().batches());
    assertEquals(1, recordBatches.size());
    MutableRecordBatch recordBatch = recordBatches.get(0);
    assertEquals(0L, recordBatch.baseOffset());
    List<Record> records = TestUtils.toList(recordBatch);
    assertEquals(1, records.size());
    Record record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(ByteBuffer.wrap(key), record.key());
    assertEquals(ByteBuffer.wrap(value), record.value());
    assertEquals(0L, record.timestamp());
}
Also used : MutableRecordBatch(org.apache.kafka.common.record.MutableRecordBatch) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) DefaultRecord(org.apache.kafka.common.record.DefaultRecord) Record(org.apache.kafka.common.record.Record)
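
Being private, this helper only runs through test methods that supply a compression type. A plausible pair of callers, sketched with illustrative names (the real test class may name or parameterize them differently):

@Test
public void testAppendLargeOldMessageFormat() throws Exception {
    testAppendLargeOldMessageFormat(CompressionType.NONE);
}

@Test
public void testAppendLargeCompressedOldMessageFormat() throws Exception {
    testAppendLargeOldMessageFormat(CompressionType.GZIP);
}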

Example 23 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From the class RecordAccumulatorTest, method testRetryBackoff.

@Test
public void testRetryBackoff() throws Exception {
    int lingerMs = Integer.MAX_VALUE / 16;
    long retryBackoffMs = Integer.MAX_VALUE / 8;
    int deliveryTimeoutMs = Integer.MAX_VALUE;
    long totalSize = 10 * 1024;
    int batchSize = 1024 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD;
    String metricGrpName = "producer-metrics";
    final RecordAccumulator accum = new RecordAccumulator(logContext, batchSize, CompressionType.NONE, lingerMs,
        retryBackoffMs, deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null,
        new BufferPool(totalSize, batchSize, metrics, time, metricGrpName));
    long now = time.milliseconds();
    accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, now + lingerMs + 1);
    assertEquals(Collections.singleton(node1), result.readyNodes, "Node1 should be ready");
    Map<Integer, List<ProducerBatch>> batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1);
    assertEquals(1, batches.size(), "Node1 should be the only ready node.");
    assertEquals(1, batches.get(0).size(), "Partition 0 should only have one batch drained.");
    // Re-enqueue the batch, simulating a failed send that starts the retry backoff clock.
    now = time.milliseconds();
    accum.reenqueue(batches.get(0).get(0), now);
    // Put a message for partition 1 (tp2) into the accumulator.
    accum.append(tp2, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, false, time.milliseconds());
    result = accum.ready(cluster, now + lingerMs + 1);
    assertEquals(Collections.singleton(node1), result.readyNodes, "Node1 should be ready");
    // tp1 should back off while tp2 should not.
    batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + lingerMs + 1);
    assertEquals(1, batches.size(), "Node1 should be the only ready node.");
    assertEquals(1, batches.get(0).size(), "Node1 should only have one batch drained.");
    assertEquals(tp2, batches.get(0).get(0).topicPartition, "Node1 should only have one batch for partition 1.");
    // Partition 0 can be drained after retry backoff
    result = accum.ready(cluster, now + retryBackoffMs + 1);
    assertEquals(Collections.singleton(node1), result.readyNodes, "Node1 should be ready");
    batches = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, now + retryBackoffMs + 1);
    assertEquals(1, batches.size(), "Node1 should be the only ready node.");
    assertEquals(1, batches.get(0).size(), "Node1 should only have one batch drained.");
    assertEquals(tp1, batches.get(0).get(0).topicPartition, "Node1 should only have one batch for partition 0.");
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) NodeApiVersions(org.apache.kafka.clients.NodeApiVersions) ApiVersions(org.apache.kafka.clients.ApiVersions) Arrays.asList(java.util.Arrays.asList) List(java.util.List) ArrayList(java.util.ArrayList) Test(org.junit.jupiter.api.Test)
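
Outside the test harness, the backoff window exercised here comes from the producer's retry.backoff.ms setting, and linger.ms feeds lingerMs the same way. A minimal configuration sketch, with an illustrative bootstrap address:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class BackoffConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        props.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, "100"); // becomes retryBackoffMs in RecordAccumulator
        props.put(ProducerConfig.LINGER_MS_CONFIG, "10");         // becomes lingerMs
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            // Failed batches are re-enqueued and honor the backoff, as asserted above.
        }
    }
}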

Example 24 with ApiVersions

Use of org.apache.kafka.clients.ApiVersions in project kafka by apache.

From the class KafkaAdminClient, method createInternal.

static KafkaAdminClient createInternal(AdminClientConfig config, TimeoutProcessorFactory timeoutProcessorFactory, HostResolver hostResolver) {
    Metrics metrics = null;
    NetworkClient networkClient = null;
    Time time = Time.SYSTEM;
    String clientId = generateClientId(config);
    ChannelBuilder channelBuilder = null;
    Selector selector = null;
    ApiVersions apiVersions = new ApiVersions();
    LogContext logContext = createLogContext(clientId);
    try {
        // Since we only request node information, it's safe to pass true for allowAutoTopicCreation (and it
        // simplifies communication with older brokers)
        AdminMetadataManager metadataManager = new AdminMetadataManager(logContext,
            config.getLong(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG),
            config.getLong(AdminClientConfig.METADATA_MAX_AGE_CONFIG));
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(
            config.getList(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG),
            config.getString(AdminClientConfig.CLIENT_DNS_LOOKUP_CONFIG));
        metadataManager.update(Cluster.bootstrap(addresses), time.milliseconds());
        List<MetricsReporter> reporters = config.getConfiguredInstances(AdminClientConfig.METRIC_REPORTER_CLASSES_CONFIG,
            MetricsReporter.class, Collections.singletonMap(AdminClientConfig.CLIENT_ID_CONFIG, clientId));
        Map<String, String> metricTags = Collections.singletonMap("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig()
            .samples(config.getInt(AdminClientConfig.METRICS_NUM_SAMPLES_CONFIG))
            .timeWindow(config.getLong(AdminClientConfig.METRICS_SAMPLE_WINDOW_MS_CONFIG), TimeUnit.MILLISECONDS)
            .recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG)))
            .tags(metricTags);
        JmxReporter jmxReporter = new JmxReporter();
        jmxReporter.configure(config.originals());
        reporters.add(jmxReporter);
        MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX));
        metrics = new Metrics(metricConfig, reporters, time, metricsContext);
        String metricGrpPrefix = "admin-client";
        channelBuilder = ClientUtils.createChannelBuilder(config, time, logContext);
        selector = new Selector(config.getLong(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG), metrics, time, metricGrpPrefix, channelBuilder, logContext);
        networkClient = new NetworkClient(metadataManager.updater(), null, selector, clientId, 1,
            config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MS_CONFIG),
            config.getLong(AdminClientConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            config.getInt(AdminClientConfig.SEND_BUFFER_CONFIG),
            config.getInt(AdminClientConfig.RECEIVE_BUFFER_CONFIG),
            (int) TimeUnit.HOURS.toMillis(1),
            config.getLong(AdminClientConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG),
            config.getLong(AdminClientConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG),
            time, true, apiVersions, null, logContext,
            (hostResolver == null) ? new DefaultHostResolver() : hostResolver);
        return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, networkClient, timeoutProcessorFactory, logContext);
    } catch (Throwable exc) {
        closeQuietly(metrics, "Metrics");
        closeQuietly(networkClient, "NetworkClient");
        closeQuietly(selector, "Selector");
        closeQuietly(channelBuilder, "ChannelBuilder");
        throw new KafkaException("Failed to create new KafkaAdminClient", exc);
    }
}
Also used : MetricConfig(org.apache.kafka.common.metrics.MetricConfig) AdminMetadataManager(org.apache.kafka.clients.admin.internals.AdminMetadataManager) InetSocketAddress(java.net.InetSocketAddress) KafkaMetricsContext(org.apache.kafka.common.metrics.KafkaMetricsContext) MetricsContext(org.apache.kafka.common.metrics.MetricsContext) LogContext(org.apache.kafka.common.utils.LogContext) Time(org.apache.kafka.common.utils.Time) JmxReporter(org.apache.kafka.common.metrics.JmxReporter) Metrics(org.apache.kafka.common.metrics.Metrics) NetworkClient(org.apache.kafka.clients.NetworkClient) DefaultHostResolver(org.apache.kafka.clients.DefaultHostResolver) MetricsReporter(org.apache.kafka.common.metrics.MetricsReporter) KafkaMetricsContext(org.apache.kafka.common.metrics.KafkaMetricsContext) ApiVersions(org.apache.kafka.clients.ApiVersions) KafkaException(org.apache.kafka.common.KafkaException) ChannelBuilder(org.apache.kafka.common.network.ChannelBuilder) Selector(org.apache.kafka.common.network.Selector)
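
createInternal is package-private plumbing; applications reach it through the public Admin.create factory. A minimal usage sketch, with an illustrative bootstrap address:

import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class AdminClientSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative
        // Admin.create ends up in KafkaAdminClient.createInternal, wiring the
        // ApiVersions, Selector, NetworkClient and Metrics shown above.
        try (Admin admin = Admin.create(props)) {
            admin.describeCluster().nodes().get()
                 .forEach(node -> System.out.println(node.idString() + " @ " + node.host()));
        }
    }
}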

Aggregations

ApiVersions (org.apache.kafka.clients.ApiVersions): 24 usages
NodeApiVersions (org.apache.kafka.clients.NodeApiVersions): 22
Node (org.apache.kafka.common.Node): 13
Test (org.junit.jupiter.api.Test): 12
LogContext (org.apache.kafka.common.utils.LogContext): 10
NetworkClient (org.apache.kafka.clients.NetworkClient): 8
Cluster (org.apache.kafka.common.Cluster): 7
Metrics (org.apache.kafka.common.metrics.Metrics): 7
ByteBuffer (java.nio.ByteBuffer): 6
ClientRequest (org.apache.kafka.clients.ClientRequest): 6
MetricName (org.apache.kafka.common.MetricName): 6
KafkaMetric (org.apache.kafka.common.metrics.KafkaMetric): 6
Sensor (org.apache.kafka.common.metrics.Sensor): 6
NetworkReceive (org.apache.kafka.common.network.NetworkReceive): 6
ApiVersionsResponse (org.apache.kafka.common.requests.ApiVersionsResponse): 6
DelayedReceive (org.apache.kafka.test.DelayedReceive): 6
MockSelector (org.apache.kafka.test.MockSelector): 6
TopicPartition (org.apache.kafka.common.TopicPartition): 5
ArrayList (java.util.ArrayList): 4
Arrays.asList (java.util.Arrays.asList): 4