
Example 1 with StoreInitializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in the schema-registry project by confluentinc.

From the class KafkaStore, method createOrVerifySchemaTopic:

private void createOrVerifySchemaTopic() throws StoreInitializationException {
    if (this.skipSchemaTopicValidation) {
        log.info("Skipping auto topic creation and verification");
        return;
    }
    Properties props = new Properties();
    addSchemaRegistryConfigsToClientProperties(this.config, props);
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
    try (AdminClient admin = AdminClient.create(props)) {
        // List the existing topics to decide whether the schema topic must be created or merely verified.
        Set<String> allTopics = admin.listTopics().names().get(initTimeout, TimeUnit.MILLISECONDS);
        if (allTopics.contains(topic)) {
            verifySchemaTopic(admin);
        } else {
            createSchemaTopic(admin);
        }
    } catch (TimeoutException e) {
        throw new StoreInitializationException("Timed out trying to create or validate schema topic configuration", e);
    } catch (InterruptedException | ExecutionException e) {
        throw new StoreInitializationException("Failed trying to create or validate schema topic configuration", e);
    }
}
Also used: StoreInitializationException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException), Properties (java.util.Properties), ExecutionException (java.util.concurrent.ExecutionException), AdminClient (org.apache.kafka.clients.admin.AdminClient), TimeoutException (java.util.concurrent.TimeoutException), StoreTimeoutException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException)
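
Not part of the original class, but a minimal self-contained sketch of the same listTopics-with-bounded-timeout pattern that createOrVerifySchemaTopic relies on. The bootstrap address, the 60-second timeout, and the default _schemas topic name are assumptions here, not values taken from the project.

import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class SchemaTopicExistenceCheck {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder bootstrap address; point this at your own cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient admin = AdminClient.create(props)) {
            // The bounded get() mirrors the initTimeout pattern used above.
            Set<String> allTopics = admin.listTopics().names().get(60, TimeUnit.SECONDS);
            System.out.println(allTopics.contains("_schemas")
                ? "_schemas exists, so it would be verified"
                : "_schemas is missing, so it would be created");
        } catch (TimeoutException | ExecutionException e) {
            // KafkaStore wraps these in StoreInitializationException; a standalone tool can just report them.
            System.err.println("Could not inspect topics: " + e);
        }
    }
}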

Example 2 with StoreInitializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in the schema-registry project by confluentinc.

From the class KafkaStore, method verifySchemaTopic:

private void verifySchemaTopic(AdminClient admin) throws StoreInitializationException, InterruptedException, ExecutionException, TimeoutException {
    log.info("Validating schemas topic {}", topic);
    Set<String> topics = Collections.singleton(topic);
    Map<String, TopicDescription> topicDescription = admin.describeTopics(topics).all().get(initTimeout, TimeUnit.MILLISECONDS);
    TopicDescription description = topicDescription.get(topic);
    final int numPartitions = description.partitions().size();
    if (numPartitions != 1) {
        throw new StoreInitializationException("The schema topic " + topic + " should have only 1 " + "partition but has " + numPartitions);
    }
    if (description.partitions().get(0).replicas().size() < desiredReplicationFactor) {
        log.warn("The replication factor of the schema topic " + topic + " is less than the desired one of " + desiredReplicationFactor + ". If this is a production environment, it's crucial to add more brokers and " + "increase the replication factor of the topic.");
    }
    ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
    Map<ConfigResource, Config> configs = admin.describeConfigs(Collections.singleton(topicResource)).all().get(initTimeout, TimeUnit.MILLISECONDS);
    Config topicConfigs = configs.get(topicResource);
    String retentionPolicy = topicConfigs.get(TopicConfig.CLEANUP_POLICY_CONFIG).value();
    if (retentionPolicy == null || !TopicConfig.CLEANUP_POLICY_COMPACT.equals(retentionPolicy)) {
        log.error("The retention policy of the schema topic " + topic + " is incorrect. " + "You must configure the topic to 'compact' cleanup policy to avoid Kafka " + "deleting your schemas after a week. " + "Refer to Kafka documentation for more details on cleanup policies");
        throw new StoreInitializationException("The retention policy of the schema topic " + topic + " is incorrect. Expected cleanup.policy to be " + "'compact' but it is " + retentionPolicy);
    }
}
Also used: StoreInitializationException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException), Config (org.apache.kafka.clients.admin.Config), SchemaRegistryConfig (io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig), RestConfig (io.confluent.rest.RestConfig), ProducerConfig (org.apache.kafka.clients.producer.ProducerConfig), TopicConfig (org.apache.kafka.common.config.TopicConfig), AdminClientConfig (org.apache.kafka.clients.admin.AdminClientConfig), TopicDescription (org.apache.kafka.clients.admin.TopicDescription), ConfigResource (org.apache.kafka.common.config.ConfigResource)
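
verifySchemaTopic only reports a wrong cleanup.policy and fails initialization; it does not repair it. As a complement, here is a hedged sketch, not taken from the project, that sets cleanup.policy=compact on the topic through the Admin API's incrementalAlterConfigs. The bootstrap address and the default _schemas topic name are assumptions.

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.TopicConfig;

public class FixSchemaTopicCleanupPolicy {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (AdminClient admin = AdminClient.create(props)) {
            ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, "_schemas");
            // Force cleanup.policy back to compact so Kafka never deletes schema records via retention.
            AlterConfigOp setCompact = new AlterConfigOp(
                new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT),
                AlterConfigOp.OpType.SET);
            Map<ConfigResource, Collection<AlterConfigOp>> updates =
                Collections.singletonMap(topicResource, Collections.singletonList(setCompact));
            admin.incrementalAlterConfigs(updates).all().get(60, TimeUnit.SECONDS);
        }
    }
}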

Example 3 with StoreInitializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in the schema-registry project by confluentinc.

From the class KafkaSchemaRegistry, method init:

@Override
public void init() throws SchemaRegistryException {
    try {
        kafkaStore.init();
    } catch (StoreInitializationException e) {
        throw new SchemaRegistryInitializationException("Error initializing kafka store while initializing schema registry", e);
    }
    try {
        config.checkBootstrapServers();
        log.info("Joining schema registry with Kafka-based coordination");
        leaderElector = new KafkaGroupLeaderElector(config, myIdentity, this);
        leaderElector.init();
    } catch (SchemaRegistryStoreException e) {
        throw new SchemaRegistryInitializationException("Error electing leader while initializing schema registry", e);
    } catch (SchemaRegistryTimeoutException e) {
        throw new SchemaRegistryInitializationException(e);
    }
}
Also used: StoreInitializationException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException), KafkaGroupLeaderElector (io.confluent.kafka.schemaregistry.leaderelector.kafka.KafkaGroupLeaderElector), SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException), SchemaRegistryInitializationException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryInitializationException), SchemaRegistryTimeoutException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException)
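
A short sketch of the same wrap-and-rethrow pattern seen from the caller's side. The startRegistry helper below is hypothetical and not part of the project; it only illustrates that any SchemaRegistryException raised while calling init() is treated as a fatal startup error.

import io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryException;
import io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryInitializationException;
import io.confluent.kafka.schemaregistry.storage.KafkaSchemaRegistry;

public class RegistryStartup {
    // Hypothetical helper: funnel every init failure into one fatal startup exception type.
    static void startRegistry(KafkaSchemaRegistry registry) throws SchemaRegistryInitializationException {
        try {
            registry.init(); // may throw SchemaRegistryException subtypes, as shown above
        } catch (SchemaRegistryInitializationException e) {
            throw e; // already the fatal startup type, rethrow unchanged
        } catch (SchemaRegistryException e) {
            // Anything else is still fatal during startup, so wrap it uniformly.
            throw new SchemaRegistryInitializationException("Schema registry failed to start", e);
        }
    }
}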

Example 4 with StoreInitializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in the schema-registry project by confluentinc.

From the class KafkaStore, method createSchemaTopic:

private void createSchemaTopic(AdminClient admin) throws StoreInitializationException, InterruptedException, ExecutionException, TimeoutException {
    log.info("Creating schemas topic {}", topic);
    int numLiveBrokers = admin.describeCluster().nodes().get(initTimeout, TimeUnit.MILLISECONDS).size();
    if (numLiveBrokers <= 0) {
        throw new StoreInitializationException("No live Kafka brokers");
    }
    int schemaTopicReplicationFactor = Math.min(numLiveBrokers, desiredReplicationFactor);
    if (schemaTopicReplicationFactor < desiredReplicationFactor) {
        log.warn("Creating the schema topic " + topic + " using a replication factor of " + schemaTopicReplicationFactor + ", which is less than the desired one of " + desiredReplicationFactor + ". If this is a production environment, it's " + "crucial to add more brokers and increase the replication factor of the topic.");
    }
    NewTopic schemaTopicRequest = new NewTopic(topic, 1, (short) schemaTopicReplicationFactor);
    Map topicConfigs = new HashMap(config.originalsWithPrefix("kafkastore.topic.config."));
    topicConfigs.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
    schemaTopicRequest.configs(topicConfigs);
    try {
        admin.createTopics(Collections.singleton(schemaTopicRequest)).all().get(initTimeout, TimeUnit.MILLISECONDS);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof TopicExistsException) {
            // If topic already exists, ensure that it is configured correctly.
            verifySchemaTopic(admin);
        } else {
            throw e;
        }
    }
}
Also used: StoreInitializationException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException), HashMap (java.util.HashMap), NewTopic (org.apache.kafka.clients.admin.NewTopic), ExecutionException (java.util.concurrent.ExecutionException), TopicExistsException (org.apache.kafka.common.errors.TopicExistsException), Map (java.util.Map)
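
A small standalone sketch, not from the project, of the same broker-count clamping rule used above: never request a higher replication factor than there are live brokers. The bootstrap address and the desired factor of 3 are assumptions for illustration.

import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class ReplicationFactorProbe {
    public static void main(String[] args) throws Exception {
        int desiredReplicationFactor = 3; // assumed desired value
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        try (AdminClient admin = AdminClient.create(props)) {
            // describeCluster().nodes() reports the brokers currently registered in the cluster.
            int numLiveBrokers = admin.describeCluster().nodes().get(30, TimeUnit.SECONDS).size();
            // Same clamping as createSchemaTopic: a topic cannot have more replicas than brokers.
            int effectiveReplicationFactor = Math.min(numLiveBrokers, desiredReplicationFactor);
            System.out.printf("live brokers=%d, effective replication factor=%d%n",
                numLiveBrokers, effectiveReplicationFactor);
        }
    }
}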

Example 5 with StoreInitializationException

Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in the schema-registry project by confluentinc.

From the class KafkaStore, method init:

@Override
public void init() throws StoreInitializationException {
    if (initialized.get()) {
        throw new StoreInitializationException("Illegal state while initializing store. Store was already initialized");
    }
    localStore.init();
    createOrVerifySchemaTopic();
    // set the producer properties and initialize a Kafka producer client
    Properties props = new Properties();
    addSchemaRegistryConfigsToClientProperties(this.config, props);
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
    props.put(ProducerConfig.ACKS_CONFIG, "-1");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.ByteArraySerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, org.apache.kafka.common.serialization.ByteArraySerializer.class);
    // Producer should not retry
    props.put(ProducerConfig.RETRIES_CONFIG, 0);
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);
    producer = new KafkaProducer<byte[], byte[]>(props);
    // start the background thread that subscribes to the Kafka topic and applies updates.
    // the thread must be created after the schema topic has been created.
    this.kafkaTopicReader = new KafkaStoreReaderThread<>(this.bootstrapBrokers, topic, groupId, this.storeUpdateHandler, serializer, this.localStore, this.producer, this.noopKey, this.initialized, this.config);
    this.kafkaTopicReader.start();
    try {
        waitUntilKafkaReaderReachesLastOffset(initTimeout);
    } catch (StoreException e) {
        throw new StoreInitializationException(e);
    }
    boolean isInitialized = initialized.compareAndSet(false, true);
    if (!isInitialized) {
        throw new StoreInitializationException("Illegal state while initializing store. Store " + "was already initialized");
    }
    this.storeUpdateHandler.cacheInitialized(new HashMap<>(kafkaTopicReader.checkpoints()));
    initLatch.countDown();
}
Also used: StoreInitializationException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException), Properties (java.util.Properties), StoreException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreException)
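
For context, a minimal standalone producer configured with the same durability settings init() applies: acks=-1, no retries, and idempotence disabled because idempotence would require retries. The bootstrap address and the test-topic name are placeholders, and the payload is a dummy byte array rather than a serialized schema record.

import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class StoreProducerSettingsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.ACKS_CONFIG, "-1");                // wait for all in-sync replicas
        props.put(ProducerConfig.RETRIES_CONFIG, 0);                // fail fast instead of retrying
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false); // idempotence needs retries enabled
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            // Placeholder topic; do not write arbitrary bytes into a live _schemas topic.
            producer.send(new ProducerRecord<>("test-topic", new byte[0], new byte[0]))
                    .get(30, TimeUnit.SECONDS);
        }
    }
}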

Aggregations

StoreInitializationException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException): 5 uses
Properties (java.util.Properties): 2 uses
ExecutionException (java.util.concurrent.ExecutionException): 2 uses
SchemaRegistryInitializationException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryInitializationException): 1 use
SchemaRegistryStoreException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryStoreException): 1 use
SchemaRegistryTimeoutException (io.confluent.kafka.schemaregistry.exceptions.SchemaRegistryTimeoutException): 1 use
KafkaGroupLeaderElector (io.confluent.kafka.schemaregistry.leaderelector.kafka.KafkaGroupLeaderElector): 1 use
SchemaRegistryConfig (io.confluent.kafka.schemaregistry.rest.SchemaRegistryConfig): 1 use
StoreException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreException): 1 use
StoreTimeoutException (io.confluent.kafka.schemaregistry.storage.exceptions.StoreTimeoutException): 1 use
RestConfig (io.confluent.rest.RestConfig): 1 use
HashMap (java.util.HashMap): 1 use
Map (java.util.Map): 1 use
TimeoutException (java.util.concurrent.TimeoutException): 1 use
AdminClient (org.apache.kafka.clients.admin.AdminClient): 1 use
AdminClientConfig (org.apache.kafka.clients.admin.AdminClientConfig): 1 use
Config (org.apache.kafka.clients.admin.Config): 1 use
NewTopic (org.apache.kafka.clients.admin.NewTopic): 1 use
TopicDescription (org.apache.kafka.clients.admin.TopicDescription): 1 use
ProducerConfig (org.apache.kafka.clients.producer.ProducerConfig): 1 use