
Example 6 with TimeoutException

Use of org.apache.kafka.common.errors.TimeoutException in project kafka by apache.

The class KafkaStatusBackingStoreTest, method putConnectorStateRetriableFailure.

@Test
public void putConnectorStateRetriableFailure() {
    KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    Converter converter = mock(Converter.class);
    KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);
    byte[] value = new byte[0];
    expect(converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), anyObject(Struct.class))).andStubReturn(value);
    final Capture<Callback> callbackCapture = newCapture();
    kafkaBasedLog.send(eq("status-connector-conn"), eq(value), capture(callbackCapture));
    // The first send attempt completes with a retriable TimeoutException;
    // the store is expected to retry, and the second attempt succeeds.
    expectLastCall().andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, new TimeoutException());
            return null;
        }
    }).andAnswer(new IAnswer<Void>() {

        @Override
        public Void answer() throws Throwable {
            callbackCapture.getValue().onCompletion(null, null);
            return null;
        }
    });
    replayAll();
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertNull(store.get(CONNECTOR));
    verifyAll();
}
Also used: Schema (org.apache.kafka.connect.data.Schema), ConnectorStatus (org.apache.kafka.connect.runtime.ConnectorStatus), Struct (org.apache.kafka.connect.data.Struct), IAnswer (org.easymock.IAnswer), Callback (org.apache.kafka.clients.producer.Callback), MockTime (org.apache.kafka.common.utils.MockTime), TimeoutException (org.apache.kafka.common.errors.TimeoutException), Test (org.junit.Test)
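The pattern this test verifies (resending a status record when the producer callback reports a retriable error such as TimeoutException) can be sketched roughly as follows. This is a minimal illustration, not the actual KafkaStatusBackingStore implementation; the class name RetryingStatusWriter is hypothetical.

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.connect.util.KafkaBasedLog;

// Hypothetical sketch of the retry-on-retriable-error pattern exercised above.
class RetryingStatusWriter {

    private final KafkaBasedLog<String, byte[]> log;

    RetryingStatusWriter(KafkaBasedLog<String, byte[]> log) {
        this.log = log;
    }

    void send(final String key, final byte[] value) {
        log.send(key, value, new Callback() {

            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception instanceof RetriableException) {
                    // TimeoutException extends RetriableException, so resend the record
                    send(key, value);
                }
                // a non-retriable failure would be logged and dropped here
            }
        });
    }
}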

Example 7 with TimeoutException

Use of org.apache.kafka.common.errors.TimeoutException in project pinot by linkedin.

The class SimpleConsumerWrapper, method fetchPartitionOffset.

/**
 * Fetches the numeric Kafka offset for this partition for a symbolic name ("largest" or "smallest").
 *
 * @param requestedOffset Either "largest" or "smallest"
 * @param timeoutMillis Timeout in milliseconds
 * @return An offset
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within
 * {@code timeoutMillis} milliseconds
 */
public synchronized long fetchPartitionOffset(String requestedOffset, int timeoutMillis) throws java.util.concurrent.TimeoutException {
    Preconditions.checkNotNull(requestedOffset);
    final long offsetRequestTime;
    if (requestedOffset.equalsIgnoreCase("largest")) {
        offsetRequestTime = kafka.api.OffsetRequest.LatestTime();
    } else if (requestedOffset.equalsIgnoreCase("smallest")) {
        offsetRequestTime = kafka.api.OffsetRequest.EarliestTime();
    } else if (requestedOffset.equalsIgnoreCase("testDummy")) {
        // sentinel value (apparently a test hook); skip the Kafka request entirely
        return -1L;
    } else {
        throw new IllegalArgumentException("Unknown initial offset value " + requestedOffset);
    }
    int kafkaErrorCount = 0;
    final int MAX_KAFKA_ERROR_COUNT = 10;
    final long endTime = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < endTime) {
        // Try to get into a state where we're connected to Kafka
        while (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER && System.currentTimeMillis() < endTime) {
            _currentState.process();
        }
        if (_currentState.getStateValue() != ConsumerState.CONNECTED_TO_PARTITION_LEADER && endTime <= System.currentTimeMillis()) {
            throw new TimeoutException();
        }
        // Send the offset request to Kafka
        OffsetRequest request = new OffsetRequest(Collections.singletonMap(new TopicAndPartition(_topic, _partition), new PartitionOffsetRequestInfo(offsetRequestTime, 1)), kafka.api.OffsetRequest.CurrentVersion(), _clientId);
        OffsetResponse offsetResponse;
        try {
            offsetResponse = _simpleConsumer.getOffsetsBefore(request);
        } catch (Exception e) {
            _currentState.handleConsumerException(e);
            continue;
        }
        final short errorCode = offsetResponse.errorCode(_topic, _partition);
        if (errorCode == Errors.NONE.code()) {
            long offset = offsetResponse.offsets(_topic, _partition)[0];
            if (offset == 0L) {
                LOGGER.warn("Fetched offset of 0 for topic {} and partition {}, is this a newly created topic?", _topic, _partition);
            }
            return offset;
        } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
            // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        } else {
            // Retry after a short delay
            kafkaErrorCount++;
            if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
                throw exceptionForKafkaErrorCode(errorCode);
            }
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        }
    }
    // Note: this is the unchecked org.apache.kafka.common.errors.TimeoutException,
    // not the checked java.util.concurrent.TimeoutException declared in the signature.
    throw new TimeoutException();
}
Also used: OffsetResponse (kafka.javaapi.OffsetResponse), PartitionOffsetRequestInfo (kafka.api.PartitionOffsetRequestInfo), TopicAndPartition (kafka.common.TopicAndPartition), TimeoutException (org.apache.kafka.common.errors.TimeoutException), IOException (java.io.IOException), OffsetRequest (kafka.javaapi.OffsetRequest)
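Note the quirk visible in the source above: the signature declares the checked java.util.concurrent.TimeoutException, but both throw statements raise the unchecked org.apache.kafka.common.errors.TimeoutException (a RuntimeException), so the declared exception is never actually thrown. A defensive caller handles both; a minimal hypothetical sketch (fetchStartOffsetOrFail is an invented name):

// Hypothetical caller: catch both timeout types, since the declared checked
// exception differs from the unchecked Kafka exception the body throws.
static long fetchStartOffsetOrFail(SimpleConsumerWrapper consumer) {
    try {
        return consumer.fetchPartitionOffset("smallest", 10000);
    } catch (java.util.concurrent.TimeoutException e) {
        throw new RuntimeException("Offset fetch timed out (declared checked exception)", e);
    } catch (org.apache.kafka.common.errors.TimeoutException e) {
        throw new RuntimeException("Offset fetch timed out (unchecked Kafka exception)", e);
    }
}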

Example 8 with TimeoutException

Use of org.apache.kafka.common.errors.TimeoutException in project pinot by linkedin.

The class SimpleConsumerWrapper, method getPartitionCount.

/**
 * Fetches the partition count for the given topic, retrying until {@code timeoutMillis} milliseconds elapse.
 */
public synchronized int getPartitionCount(String topic, long timeoutMillis) {
    int unknownTopicReplyCount = 0;
    final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10;
    int kafkaErrorCount = 0;
    final int MAX_KAFKA_ERROR_COUNT = 10;
    final long endTime = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < endTime) {
        // Try to get into a state where we're connected to Kafka
        while (!_currentState.isConnectedToKafkaBroker() && System.currentTimeMillis() < endTime) {
            _currentState.process();
        }
        if (endTime <= System.currentTimeMillis() && !_currentState.isConnectedToKafkaBroker()) {
            throw new TimeoutException("Failed to get the partition count for topic " + topic + " within " + timeoutMillis + " ms");
        }
        // Send the metadata request to Kafka
        TopicMetadataResponse topicMetadataResponse = null;
        try {
            topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(topic)));
        } catch (Exception e) {
            _currentState.handleConsumerException(e);
            continue;
        }
        final TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0);
        final short errorCode = topicMetadata.errorCode();
        if (errorCode == Errors.NONE.code()) {
            return topicMetadata.partitionsMetadata().size();
        } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
            // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        } else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) {
            throw new RuntimeException("Invalid topic name " + topic);
        } else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
            if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) {
                throw new RuntimeException("Topic " + topic + " does not exist");
            } else {
                // Kafka topic creation can sometimes take some time, so we'll retry after a little bit
                unknownTopicReplyCount++;
                Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
            }
        } else {
            // Retry after a short delay
            kafkaErrorCount++;
            if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
                throw exceptionForKafkaErrorCode(errorCode);
            }
            Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
        }
    }
    throw new TimeoutException("Failed to get the partition count for topic " + topic + " within " + timeoutMillis + " ms");
}
Also used: TopicMetadataRequest (kafka.javaapi.TopicMetadataRequest), TopicMetadataResponse (kafka.javaapi.TopicMetadataResponse), TimeoutException (org.apache.kafka.common.errors.TimeoutException), IOException (java.io.IOException), TopicMetadata (kafka.javaapi.TopicMetadata)
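Both fetchPartitionOffset and getPartitionCount follow the same deadline-driven retry skeleton: attempt a request, tolerate a bounded number of errors with a short sleep between attempts, and give up with a TimeoutException at the deadline. Distilled into a generic helper, it might look like this (a sketch; DeadlineRetry and retryUntil are hypothetical names, and the real methods additionally distinguish retriable from fatal Kafka error codes):

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.apache.kafka.common.errors.TimeoutException;

// Hypothetical distillation of the retry loop shared by both methods above.
final class DeadlineRetry {

    static <T> T retryUntil(Supplier<T> attempt, long timeoutMillis, int maxErrors) {
        final long endTime = System.currentTimeMillis() + timeoutMillis;
        int errorCount = 0;
        while (System.currentTimeMillis() < endTime) {
            try {
                return attempt.get(); // success: return immediately
            } catch (RuntimeException e) {
                if (++errorCount > maxErrors) {
                    throw e; // too many hard errors: propagate the failure
                }
            }
            // transient failure: back off briefly before the next attempt
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new TimeoutException("Interrupted while retrying", ie);
            }
        }
        throw new TimeoutException("Deadline of " + timeoutMillis + " ms exceeded");
    }
}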

Example 9 with TimeoutException

Use of org.apache.kafka.common.errors.TimeoutException in project flink by apache.

The class KafkaConsumerTestBase, method runFailOnNoBrokerTest.

// ------------------------------------------------------------------------
//  Suite of Tests
//
//  The tests here are not activated by an @Test annotation; they must be
//  invoked from the extending classes, which lets each class select the
//  tests to run.
// ------------------------------------------------------------------------
/**
 * Ensures that the KafkaConsumer fails properly if the topic doesn't exist
 * and a wrong broker was specified.
 *
 * @throws Exception on any test failure
 */
public void runFailOnNoBrokerTest() throws Exception {
    try {
        Properties properties = new Properties();
        StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
        see.getConfig().disableSysoutLogging();
        see.setRestartStrategy(RestartStrategies.noRestart());
        see.setParallelism(1);
        // use wrong ports for the consumers
        properties.setProperty("bootstrap.servers", "localhost:80");
        properties.setProperty("zookeeper.connect", "localhost:80");
        properties.setProperty("group.id", "test");
        // let the test fail fast
        properties.setProperty("request.timeout.ms", "3000");
        properties.setProperty("socket.timeout.ms", "3000");
        properties.setProperty("session.timeout.ms", "2000");
        properties.setProperty("fetch.max.wait.ms", "2000");
        properties.setProperty("heartbeat.interval.ms", "1000");
        properties.putAll(secureProps);
        FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer("doesntexist", new SimpleStringSchema(), properties);
        DataStream<String> stream = see.addSource(source);
        stream.print();
        see.execute("No broker test");
    } catch (ProgramInvocationException pie) {
        if (kafkaServer.getVersion().equals("0.9") || kafkaServer.getVersion().equals("0.10")) {
            assertTrue(pie.getCause() instanceof JobExecutionException);
            JobExecutionException jee = (JobExecutionException) pie.getCause();
            assertTrue(jee.getCause() instanceof TimeoutException);
            TimeoutException te = (TimeoutException) jee.getCause();
            assertEquals("Timeout expired while fetching topic metadata", te.getMessage());
        } else {
            assertTrue(pie.getCause() instanceof JobExecutionException);
            JobExecutionException jee = (JobExecutionException) pie.getCause();
            assertTrue(jee.getCause() instanceof RuntimeException);
            RuntimeException re = (RuntimeException) jee.getCause();
            assertTrue(re.getMessage().contains("Unable to retrieve any partitions for the requested topics [doesntexist]"));
        }
    }
}
Also used: JobExecutionException (org.apache.flink.runtime.client.JobExecutionException), SimpleStringSchema (org.apache.flink.streaming.util.serialization.SimpleStringSchema), ProgramInvocationException (org.apache.flink.client.program.ProgramInvocationException), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment), Properties (java.util.Properties), TimeoutException (org.apache.kafka.common.errors.TimeoutException)
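The version-specific unwrapping in the catch block (a ProgramInvocationException wrapping a JobExecutionException wrapping the root cause) generalizes to a small cause-chain walk. A hypothetical helper, not part of the Flink test base:

import org.apache.kafka.common.errors.TimeoutException;

// Hypothetical helper: report whether a Kafka TimeoutException appears
// anywhere in an exception's cause chain.
static boolean causedByKafkaTimeout(Throwable t) {
    for (Throwable cause = t; cause != null; cause = cause.getCause()) {
        if (cause instanceof TimeoutException) {
            return true;
        }
    }
    return false;
}

With such a helper, the 0.9/0.10 branch could assert causedByKafkaTimeout(pie) without casting each layer, at the cost of the exact-message check.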

Example 10 with TimeoutException

Use of org.apache.kafka.common.errors.TimeoutException in project kafka by apache.

The class BufferPoolTest, method testBlockTimeout.

/**
 * Tests that a TimeoutException is thrown when there is not enough memory to allocate
 * and the elapsed time exceeds the maximum specified block time, and verifies that the
 * allocation finishes soon after maxBlockTimeMs.
 */
@Test
public void testBlockTimeout() throws Exception {
    BufferPool pool = new BufferPool(10, 1, metrics, Time.SYSTEM, metricGroup);
    ByteBuffer buffer1 = pool.allocate(1, maxBlockTimeMs);
    ByteBuffer buffer2 = pool.allocate(1, maxBlockTimeMs);
    ByteBuffer buffer3 = pool.allocate(1, maxBlockTimeMs);
    // First two buffers will be de-allocated within maxBlockTimeMs since the most recent de-allocation
    delayedDeallocate(pool, buffer1, maxBlockTimeMs / 2);
    delayedDeallocate(pool, buffer2, maxBlockTimeMs);
    // The third buffer will be de-allocated after maxBlockTimeMs since the most recent de-allocation
    delayedDeallocate(pool, buffer3, maxBlockTimeMs / 2 * 5);
    long beginTimeMs = Time.SYSTEM.milliseconds();
    try {
        pool.allocate(10, maxBlockTimeMs);
        fail("allocate() should have thrown a TimeoutException, since buffer3 is not freed until well after maxBlockTimeMs");
    } catch (TimeoutException e) {
        // expected: not enough memory was freed within maxBlockTimeMs
    }
    long endTimeMs = Time.SYSTEM.milliseconds();
    assertTrue("Allocation should finish not much later than maxBlockTimeMs", endTimeMs - beginTimeMs < maxBlockTimeMs + 1000);
}
Also used: ByteBuffer (java.nio.ByteBuffer), TimeoutException (org.apache.kafka.common.errors.TimeoutException), Test (org.junit.Test)
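Outside of tests, the same bound is exposed as the producer's max.block.ms setting, which limits how long send() may block waiting for metadata or for buffer memory. A rough sketch (the broker address and topic name are placeholders; depending on the client version, the TimeoutException may be thrown synchronously from send() or delivered through the returned future):

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.TimeoutException;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.ByteArraySerializer");
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
        "org.apache.kafka.common.serialization.ByteArraySerializer");
props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "3000"); // bound the blocking time

try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
    producer.send(new ProducerRecord<byte[], byte[]>("some-topic", new byte[512])).get();
} catch (TimeoutException e) {
    // thrown synchronously when the metadata wait or allocation exceeds max.block.ms
} catch (ExecutionException e) {
    // or delivered through the future, with the TimeoutException as the cause
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
}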

Aggregations

TimeoutException (org.apache.kafka.common.errors.TimeoutException): 18
Test (org.junit.Test): 8
Callback (org.apache.kafka.clients.producer.Callback): 5
StreamsException (org.apache.kafka.streams.errors.StreamsException): 4
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 3
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 3
PartitionInfo (org.apache.kafka.common.PartitionInfo): 3
MockTime (org.apache.kafka.common.utils.MockTime): 3
IOException (java.io.IOException): 2
ByteBuffer (java.nio.ByteBuffer): 2
List (java.util.List): 2
Future (java.util.concurrent.Future): 2
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 2
MockProducer (org.apache.kafka.clients.producer.MockProducer): 2
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 2
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 2
Cluster (org.apache.kafka.common.Cluster): 2
KafkaException (org.apache.kafka.common.KafkaException): 2
TopicPartition (org.apache.kafka.common.TopicPartition): 2
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 2