Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in project schema-registry by confluentinc.
Class KafkaStore, method createOrVerifySchemaTopic.
private void createOrVerifySchemaTopic() throws StoreInitializationException {
  if (this.skipSchemaTopicValidation) {
    log.info("Skipping auto topic creation and verification");
    return;
  }
  Properties props = new Properties();
  addSchemaRegistryConfigsToClientProperties(this.config, props);
  props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
  try (AdminClient admin = AdminClient.create(props)) {
    // List all topic names, then either verify the existing schema topic or create it.
    Set<String> allTopics = admin.listTopics().names().get(initTimeout, TimeUnit.MILLISECONDS);
    if (allTopics.contains(topic)) {
      verifySchemaTopic(admin);
    } else {
      createSchemaTopic(admin);
    }
  } catch (TimeoutException e) {
    throw new StoreInitializationException(
        "Timed out trying to create or validate schema topic configuration", e);
  } catch (InterruptedException | ExecutionException e) {
    throw new StoreInitializationException(
        "Failed trying to create or validate schema topic configuration", e);
  }
}
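For context, the same existence check can be exercised in isolation with a bare AdminClient. The following is a minimal, self-contained sketch rather than code from the project; the bootstrap address and the "_schemas" topic name are placeholder assumptions.

import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;

public class TopicExistsCheck {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // Placeholder bootstrap address; adjust for your cluster.
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    try (AdminClient admin = AdminClient.create(props)) {
      // listTopics().names() returns a KafkaFuture<Set<String>> of all topic names.
      Set<String> allTopics = admin.listTopics().names().get(30, TimeUnit.SECONDS);
      System.out.println(allTopics.contains("_schemas")
          ? "schema topic exists" : "schema topic missing");
    }
  }
}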
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in project schema-registry by confluentinc.
Class KafkaStore, method verifySchemaTopic.
private void verifySchemaTopic(AdminClient admin)
    throws StoreInitializationException, InterruptedException, ExecutionException,
        TimeoutException {
  log.info("Validating schemas topic {}", topic);
  Set<String> topics = Collections.singleton(topic);
  Map<String, TopicDescription> topicDescription =
      admin.describeTopics(topics).all().get(initTimeout, TimeUnit.MILLISECONDS);
  TopicDescription description = topicDescription.get(topic);
  final int numPartitions = description.partitions().size();
  if (numPartitions != 1) {
    throw new StoreInitializationException("The schema topic " + topic
        + " should have only 1 partition but has " + numPartitions);
  }
  if (description.partitions().get(0).replicas().size() < desiredReplicationFactor) {
    log.warn("The replication factor of the schema topic " + topic
        + " is less than the desired one of " + desiredReplicationFactor
        + ". If this is a production environment, it's crucial to add more brokers and "
        + "increase the replication factor of the topic.");
  }
  ConfigResource topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
  Map<ConfigResource, Config> configs =
      admin.describeConfigs(Collections.singleton(topicResource))
          .all().get(initTimeout, TimeUnit.MILLISECONDS);
  Config topicConfigs = configs.get(topicResource);
  String retentionPolicy = topicConfigs.get(TopicConfig.CLEANUP_POLICY_CONFIG).value();
  if (retentionPolicy == null || !TopicConfig.CLEANUP_POLICY_COMPACT.equals(retentionPolicy)) {
    log.error("The retention policy of the schema topic " + topic + " is incorrect. "
        + "You must configure the topic to 'compact' cleanup policy to avoid Kafka "
        + "deleting your schemas after a week. "
        + "Refer to Kafka documentation for more details on cleanup policies.");
    throw new StoreInitializationException("The retention policy of the schema topic " + topic
        + " is incorrect. Expected cleanup.policy to be 'compact' but it is " + retentionPolicy);
  }
}
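Note that verifySchemaTopic only reports a wrong cleanup.policy; it does not repair it. If an operator wanted to fix the policy in place, a helper along these lines could do it with incrementalAlterConfigs. This is a hypothetical sketch, assuming altering the live topic is acceptable; forceCompactPolicy is not a method in the project.

private void forceCompactPolicy(AdminClient admin, String topic) throws Exception {
  ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topic);
  Config current = admin.describeConfigs(Collections.singleton(resource))
      .all().get(30, TimeUnit.SECONDS).get(resource);
  String policy = current.get(TopicConfig.CLEANUP_POLICY_CONFIG).value();
  if (!TopicConfig.CLEANUP_POLICY_COMPACT.equals(policy)) {
    // OpType.SET overwrites the existing cleanup.policy value on the topic.
    AlterConfigOp op = new AlterConfigOp(
        new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT),
        AlterConfigOp.OpType.SET);
    admin.incrementalAlterConfigs(
        Collections.singletonMap(resource, Collections.singletonList(op)))
        .all().get(30, TimeUnit.SECONDS);
  }
}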
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in project schema-registry by confluentinc.
Class KafkaSchemaRegistry, method init.
@Override
public void init() throws SchemaRegistryException {
  try {
    kafkaStore.init();
  } catch (StoreInitializationException e) {
    throw new SchemaRegistryInitializationException(
        "Error initializing kafka store while initializing schema registry", e);
  }
  try {
    config.checkBootstrapServers();
    log.info("Joining schema registry with Kafka-based coordination");
    leaderElector = new KafkaGroupLeaderElector(config, myIdentity, this);
    leaderElector.init();
  } catch (SchemaRegistryStoreException e) {
    throw new SchemaRegistryInitializationException(
        "Error electing leader while initializing schema registry", e);
  } catch (SchemaRegistryTimeoutException e) {
    throw new SchemaRegistryInitializationException(e);
  }
}
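Because the store and leader-election failures are all rewrapped here, a launcher only has to handle the declared SchemaRegistryException. A minimal, hypothetical startup sketch (not code from the project):

try {
  schemaRegistry.init();
} catch (SchemaRegistryException e) {
  // Fail fast rather than serving requests with a broken store.
  log.error("Schema registry failed to initialize; shutting down", e);
  System.exit(1);
}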
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in project schema-registry by confluentinc.
Class KafkaStore, method createSchemaTopic.
private void createSchemaTopic(AdminClient admin)
    throws StoreInitializationException, InterruptedException, ExecutionException,
        TimeoutException {
  log.info("Creating schemas topic {}", topic);
  int numLiveBrokers = admin.describeCluster().nodes()
      .get(initTimeout, TimeUnit.MILLISECONDS).size();
  if (numLiveBrokers <= 0) {
    throw new StoreInitializationException("No live Kafka brokers");
  }
  int schemaTopicReplicationFactor = Math.min(numLiveBrokers, desiredReplicationFactor);
  if (schemaTopicReplicationFactor < desiredReplicationFactor) {
    log.warn("Creating the schema topic " + topic + " using a replication factor of "
        + schemaTopicReplicationFactor + ", which is less than the desired one of "
        + desiredReplicationFactor + ". If this is a production environment, it's "
        + "crucial to add more brokers and increase the replication factor of the topic.");
  }
  NewTopic schemaTopicRequest = new NewTopic(topic, 1, (short) schemaTopicReplicationFactor);
  Map topicConfigs = new HashMap(config.originalsWithPrefix("kafkastore.topic.config."));
  topicConfigs.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
  schemaTopicRequest.configs(topicConfigs);
  try {
    admin.createTopics(Collections.singleton(schemaTopicRequest))
        .all().get(initTimeout, TimeUnit.MILLISECONDS);
  } catch (ExecutionException e) {
    if (e.getCause() instanceof TopicExistsException) {
      // If the topic already exists, ensure that it is configured correctly.
      verifySchemaTopic(admin);
    } else {
      throw e;
    }
  }
}
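Note the originalsWithPrefix("kafkastore.topic.config.") call: any schema registry setting carrying that prefix is passed through as a topic-level config on the new topic, and cleanup.policy is then forced back to compact regardless of overrides. As a hypothetical illustration (these values are not defaults), a schema registry properties file could carry overrides such as:

# Illustrative overrides; any Kafka topic config can be passed through this prefix.
kafkastore.topic.config.min.insync.replicas=2
kafkastore.topic.config.segment.ms=604800000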
Use of io.confluent.kafka.schemaregistry.storage.exceptions.StoreInitializationException in project schema-registry by confluentinc.
Class KafkaStore, method init.
@Override
public void init() throws StoreInitializationException {
  if (initialized.get()) {
    throw new StoreInitializationException(
        "Illegal state while initializing store. Store was already initialized");
  }
  localStore.init();
  createOrVerifySchemaTopic();
  // Set the producer properties and initialize a Kafka producer client.
  Properties props = new Properties();
  addSchemaRegistryConfigsToClientProperties(this.config, props);
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapBrokers);
  props.put(ProducerConfig.ACKS_CONFIG, "-1");
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
      org.apache.kafka.common.serialization.ByteArraySerializer.class);
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
      org.apache.kafka.common.serialization.ByteArraySerializer.class);
  // Producer should not retry.
  props.put(ProducerConfig.RETRIES_CONFIG, 0);
  props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);
  producer = new KafkaProducer<byte[], byte[]>(props);
  // Start the background thread that subscribes to the Kafka topic and applies updates.
  // The thread must be created after the schema topic has been created.
  this.kafkaTopicReader = new KafkaStoreReaderThread<>(this.bootstrapBrokers, topic, groupId,
      this.storeUpdateHandler, serializer, this.localStore, this.producer, this.noopKey,
      this.initialized, this.config);
  this.kafkaTopicReader.start();
  try {
    waitUntilKafkaReaderReachesLastOffset(initTimeout);
  } catch (StoreException e) {
    throw new StoreInitializationException(e);
  }
  boolean isInitialized = initialized.compareAndSet(false, true);
  if (!isInitialized) {
    throw new StoreInitializationException(
        "Illegal state while initializing store. Store was already initialized");
  }
  this.storeUpdateHandler.cacheInitialized(new HashMap<>(kafkaTopicReader.checkpoints()));
  initLatch.countDown();
}
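The initialized flag and initLatch form a one-shot gate: init() flips the flag exactly once via compareAndSet and then opens the latch, so code that needs a ready store can simply block on the latch. A hypothetical sketch of the waiting side (awaitInitialized is not a method in the project):

private void awaitInitialized(long timeoutMs) throws StoreException {
  try {
    // CountDownLatch.await returns false if the timeout elapses before countDown().
    if (!initLatch.await(timeoutMs, TimeUnit.MILLISECONDS)) {
      throw new StoreException("KafkaStore failed to initialize within " + timeoutMs + " ms");
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new StoreException("Interrupted while waiting for KafkaStore initialization", e);
  }
}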