
Example 21 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

From class GlobalStateManagerImplTest, method shouldRetryWhenPartitionsForThrowsTimeoutException.
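Before the test itself, a note on the class these examples center on: LogContext wraps slf4j logger creation so that every message from a logger it hands out carries a fixed prefix, which is what the "mock" and "test" strings passed to its constructor below become. A minimal standalone sketch (the class name and prefix here are illustrative, not from the project):

import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

public class LogContextPrimer {

    public static void main(final String[] args) {
        // Loggers created from this context prefix every message with "[demo] ".
        final LogContext logContext = new LogContext("[demo] ");
        final Logger log = logContext.logger(LogContextPrimer.class);
        // Rendered as: [demo] connected
        log.info("connected");
    }
}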

@Test
public void shouldRetryWhenPartitionsForThrowsTimeoutException() {
    final int retries = 2;
    final AtomicInteger numberOfCalls = new AtomicInteger(0);
    // Every partitionsFor() call counts one attempt and then times out.
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public synchronized List<PartitionInfo> partitionsFor(String topic) {
            numberOfCalls.incrementAndGet();
            throw new TimeoutException();
        }
    };
    // Give the state manager a bounded retry budget via RETRIES_CONFIG.
    streamsConfig = new StreamsConfig(new Properties() {

        {
            put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
            put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
            put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
            put(StreamsConfig.RETRIES_CONFIG, retries);
        }
    });
    try {
        // Construction calls partitionsFor(); it should retry until the budget is exhausted and then throw.
        new GlobalStateManagerImpl(new LogContext("mock"), topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
        fail("expected a StreamsException once the retries were exhausted");
    } catch (final StreamsException expected) {
        assertEquals(retries, numberOfCalls.get());
    }
}
Also used: AtomicInteger (java.util.concurrent.atomic.AtomicInteger), StreamsException (org.apache.kafka.streams.errors.StreamsException), LogContext (org.apache.kafka.common.utils.LogContext), ArrayList (java.util.ArrayList), List (java.util.List), Properties (java.util.Properties), OffsetCheckpoint (org.apache.kafka.streams.state.internals.OffsetCheckpoint), TimeoutException (org.apache.kafka.common.errors.TimeoutException), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
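The behavior pinned down above follows a common retry pattern: invoke a metadata call, count attempts, and convert a persistent TimeoutException into a StreamsException once the budget is spent. A minimal sketch of that loop; the PartitionLookup interface and withRetries helper are hypothetical stand-ins for consumer.partitionsFor(), not the actual GlobalStateManagerImpl code:

import java.util.List;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.streams.errors.StreamsException;

final class PartitionMetadataFetcher {

    // Hypothetical stand-in for consumer.partitionsFor(topic).
    interface PartitionLookup {
        List<PartitionInfo> fetchPartitions(String topic);
    }

    static List<PartitionInfo> withRetries(final PartitionLookup lookup, final String topic, final int retries) {
        for (int attempt = 1; attempt <= retries; attempt++) {
            try {
                return lookup.fetchPartitions(topic);
            } catch (final TimeoutException swallowed) {
                // TimeoutException is retriable; try again until the budget runs out.
            }
        }
        throw new StreamsException("could not fetch partitions for topic " + topic + " after " + retries + " attempts");
    }
}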

Example 22 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

From class GlobalStateManagerImplTest, method before.

@Before
public void before() throws IOException {
    // Map each global store to the topic that backs it.
    final Map<String, String> storeToTopic = new HashMap<>();
    storeToTopic.put(storeName1, t1.topic());
    storeToTopic.put(storeName2, t2.topic());
    storeToTopic.put(storeName3, t3.topic());
    storeToTopic.put(storeName4, t4.topic());
    store1 = new NoOpReadOnlyStore<>(storeName1, true);
    store2 = new NoOpReadOnlyStore<>(storeName2, true);
    store3 = new NoOpReadOnlyStore<>(storeName3);
    store4 = new NoOpReadOnlyStore<>(storeName4);
    // Build a topology consisting only of the four global stores registered above.
    topology = ProcessorTopology.withGlobalStores(Utils.<StateStore>mkList(store1, store2, store3, store4), storeToTopic);
    streamsConfig = new StreamsConfig(new Properties() {

        {
            put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
            put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
            put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
        }
    });
    stateDirectory = new StateDirectory(streamsConfig, time);
    consumer = new MockConsumer<>(OffsetResetStrategy.NONE);
    stateManager = new GlobalStateManagerImpl(new LogContext("test"), topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    processorContext = new InternalMockProcessorContext(stateDirectory.globalStateDir(), streamsConfig);
    stateManager.setGlobalProcessorContext(processorContext);
    checkpointFile = new File(stateManager.baseDir(), ProcessorStateManager.CHECKPOINT_FILE_NAME);
}
Also used: HashMap (java.util.HashMap), StateStore (org.apache.kafka.streams.processor.StateStore), LogContext (org.apache.kafka.common.utils.LogContext), Properties (java.util.Properties), InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext), File (java.io.File), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Before (org.junit.Before)
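The fixture builds its Properties with the double-brace idiom, which creates an anonymous subclass per use. Plain sequential puts are the more conventional way to assemble a test StreamsConfig; an equivalent sketch using the same three settings (the TestConfig class name is illustrative):

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.test.TestUtils;

final class TestConfig {

    static StreamsConfig newTestConfig() {
        // Same settings as the fixture above, without the anonymous Properties subclass.
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
        props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
        return new StreamsConfig(props);
    }
}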

Example 23 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

From class ProcessorNodeTest, method testMetrics.

@Test
public void testMetrics() {
    final StateSerdes anyStateSerde = StateSerdes.withBuiltinTypes("anyName", Bytes.class, Bytes.class);
    final Metrics metrics = new Metrics();
    final InternalMockProcessorContext context = new InternalMockProcessorContext(
        anyStateSerde,
        new RecordCollectorImpl(null, null, new LogContext("processnode-test "), new DefaultProductionExceptionHandler()),
        metrics);
    final ProcessorNode node = new ProcessorNode("name", new NoOpProcessor(), Collections.emptySet());
    node.init(context);
    // The node registers latency sensors for these operations and a throughput sensor for "forward".
    String[] latencyOperations = { "process", "punctuate", "create", "destroy" };
    String throughputOperation = "forward";
    String groupName = "stream-processor-node-metrics";
    final Map<String, String> metricTags = new LinkedHashMap<>();
    metricTags.put("processor-node-id", node.name());
    metricTags.put("task-id", context.taskId().toString());
    for (String operation : latencyOperations) {
        assertNotNull(metrics.getSensor(operation));
    }
    assertNotNull(metrics.getSensor(throughputOperation));
    for (String opName : latencyOperations) {
        testSpecificMetrics(metrics, groupName, opName, metricTags);
    }
    assertNotNull(metrics.metrics().get(metrics.metricName(
        throughputOperation + "-rate",
        groupName,
        "The average number of occurrence of " + throughputOperation + " operation per second.",
        metricTags)));
    // Repeat the checks against the "all" aggregate of processor nodes.
    metricTags.put("processor-node-id", "all");
    for (String opName : latencyOperations) {
        testSpecificMetrics(metrics, groupName, opName, metricTags);
    }
    assertNotNull(metrics.metrics().get(metrics.metricName(
        throughputOperation + "-rate",
        groupName,
        "The average number of occurrence of " + throughputOperation + " operation per second.",
        metricTags)));
    context.close();
}
Also used: Metrics (org.apache.kafka.common.metrics.Metrics), DefaultProductionExceptionHandler (org.apache.kafka.streams.errors.DefaultProductionExceptionHandler), LogContext (org.apache.kafka.common.utils.LogContext), StateSerdes (org.apache.kafka.streams.state.StateSerdes), InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext), LinkedHashMap (java.util.LinkedHashMap), Test (org.junit.Test)
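The names the test asserts on come from Kafka's Metrics/Sensor model: a Sensor is registered per operation, and each stat added to it is exposed under a MetricName such as "<operation>-rate". A minimal sketch of registering and reading one such metric; the group, tag, and class names here are illustrative:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Rate;

public class SensorSketch {

    public static void main(final String[] args) {
        final Metrics metrics = new Metrics();
        final Map<String, String> tags = new LinkedHashMap<>();
        tags.put("processor-node-id", "demo-node");

        // One sensor per operation; the "-rate" suffix mirrors the names asserted above.
        final Sensor sensor = metrics.sensor("forward");
        final MetricName rateName = metrics.metricName("forward-rate", "demo-group", "Calls to forward per second.", tags);
        sensor.add(rateName, new Rate());

        // Record one "forward" occurrence and read the measured rate back.
        sensor.record();
        System.out.println(metrics.metrics().get(rateName).metricValue());
    }
}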

Example 24 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

From class RecordDeserializerTest, method shouldReturnNewConsumerRecordWithDeserializedValueWhenNoExceptions.

@SuppressWarnings("deprecation")
@Test
public void shouldReturnNewConsumerRecordWithDeserializedValueWhenNoExceptions() {
    final RecordDeserializer recordDeserializer = new RecordDeserializer(
        new TheSourceNode(false, false, "key", "value"), null, new LogContext());
    final ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(null, rawRecord);
    assertEquals(rawRecord.topic(), record.topic());
    assertEquals(rawRecord.partition(), record.partition());
    assertEquals(rawRecord.offset(), record.offset());
    assertEquals(rawRecord.checksum(), record.checksum());
    assertEquals("key", record.key());
    assertEquals("value", record.value());
    assertEquals(rawRecord.timestamp(), record.timestamp());
    assertEquals(TimestampType.CREATE_TIME, record.timestampType());
}
Also used: LogContext (org.apache.kafka.common.utils.LogContext), Test (org.junit.Test)
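The rawRecord fixture is defined elsewhere in RecordDeserializerTest; a record like it can be built with ConsumerRecord's short constructor, which defaults the timestamp and checksum fields the longer constructors set explicitly. A sketch (topic, key, and value are illustrative):

import org.apache.kafka.clients.consumer.ConsumerRecord;

public class RawRecordSketch {

    public static void main(final String[] args) {
        // topic, partition, offset, key, value; the timestamp defaults to NO_TIMESTAMP.
        final ConsumerRecord<byte[], byte[]> rawRecord =
            new ConsumerRecord<>("topic", 0, 0L, "key".getBytes(), "value".getBytes());
        System.out.println(rawRecord);
    }
}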

Example 25 with LogContext

Use of org.apache.kafka.common.utils.LogContext in project apache-kafka-on-k8s by banzaicloud.

From class RecordQueueTest, method shouldDropOnNegativeTimestamp.

@Test
public void shouldDropOnNegativeTimestamp() {
    // One record whose timestamp is -1L, which is invalid under CREATE_TIME semantics.
    final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList(
        new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue));
    final RecordQueue queue = new RecordQueue(
        new TopicPartition(topics[0], 1),
        new MockSourceNode<>(topics, intDeserializer, intDeserializer),
        new LogAndSkipOnInvalidTimestamp(),
        new LogAndContinueExceptionHandler(),
        null,
        new LogContext());
    queue.addRawRecords(records);
    assertEquals(0, queue.size());
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), LogAndSkipOnInvalidTimestamp (org.apache.kafka.streams.processor.LogAndSkipOnInvalidTimestamp), LogContext (org.apache.kafka.common.utils.LogContext), LogAndContinueExceptionHandler (org.apache.kafka.streams.errors.LogAndContinueExceptionHandler), ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord), Test (org.junit.Test)
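The drop happens because the queue discards records whose extracted timestamp is negative; LogAndSkipOnInvalidTimestamp logs the invalid value and passes it through rather than throwing. A custom TimestampExtractor that produces such timestamps might look like this (illustrative only, not part of the project):

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;

// Returns the record's own timestamp even when negative, so the queue
// will drop records carrying invalid timestamps, as asserted above.
final class PassThroughTimestampExtractor implements TimestampExtractor {

    @Override
    public long extract(final ConsumerRecord<Object, Object> record, final long previousTimestamp) {
        return record.timestamp();
    }
}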

Aggregations

Types most often used together with LogContext across the indexed examples, with usage counts:

LogContext (org.apache.kafka.common.utils.LogContext): 53
Metrics (org.apache.kafka.common.metrics.Metrics): 28
Test (org.junit.Test): 27
Before (org.junit.Before): 17
InternalMockProcessorContext (org.apache.kafka.test.InternalMockProcessorContext): 14
MockStreamsMetrics (org.apache.kafka.streams.processor.internals.MockStreamsMetrics): 13
NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector): 10
HashMap (java.util.HashMap): 8
MockTime (org.apache.kafka.common.utils.MockTime): 8
InetSocketAddress (java.net.InetSocketAddress): 7
Properties (java.util.Properties): 7
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 7
TopicPartition (org.apache.kafka.common.TopicPartition): 6
File (java.io.File): 5
NetworkClient (org.apache.kafka.clients.NetworkClient): 5
InternalStreamsBuilderTest (org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest): 5
ServerSocketChannel (java.nio.channels.ServerSocketChannel): 4
SocketChannel (java.nio.channels.SocketChannel): 4
Metadata (org.apache.kafka.clients.Metadata): 4
MockClient (org.apache.kafka.clients.MockClient): 4