Search in sources :

Example 6 with KafkaConfig

use of org.folio.kafka.KafkaConfig in project mod-inventory by folio-org.

The following example shows the start method of the DataImportConsumerVerticle class.

@Override
public void start(Promise<Void> startPromise) {
    // Deployment config passed to this verticle at deploy time.
    JsonObject config = vertx.getOrCreateContext().config();
    // Build Kafka settings from the deployment config. Integer.parseInt throws
    // NumberFormatException (failing startup) when the numeric values are absent or invalid,
    // which is the desired fail-fast behavior for misconfiguration.
    KafkaConfig kafkaConfig = KafkaConfig.builder()
        .envId(config.getString(KAFKA_ENV))
        .kafkaHost(config.getString(KAFKA_HOST))
        .kafkaPort(config.getString(KAFKA_PORT))
        .okapiUrl(config.getString(OKAPI_URL))
        .replicationFactor(Integer.parseInt(config.getString(KAFKA_REPLICATION_FACTOR)))
        .maxRequestSize(Integer.parseInt(config.getString(KAFKA_MAX_REQUEST_SIZE)))
        .build();
    // Parameterized logging (consistent with the other consumer verticles) defers
    // message construction until the INFO level is known to be enabled.
    LOGGER.info("kafkaConfig: {}", kafkaConfig);
    EventManager.registerKafkaEventPublisher(kafkaConfig, vertx, maxDistributionNumber);
    HttpClientOptions params = new HttpClientOptions().setConnectTimeout(DEFAULT_HTTP_TIMEOUT_IN_MILLISECONDS);
    HttpClient client = vertx.createHttpClient(params);
    Storage storage = Storage.basedUpon(vertx, config, client);
    String profileSnapshotExpirationTime = getCacheEnvVariable(config, "inventory.profile-snapshot-cache.expiration.time.seconds");
    String mappingMetadataExpirationTime = getCacheEnvVariable(config, "inventory.mapping-metadata-cache.expiration.time.seconds");
    ProfileSnapshotCache profileSnapshotCache = new ProfileSnapshotCache(vertx, client, Long.parseLong(profileSnapshotExpirationTime));
    MappingMetadataCache mappingMetadataCache = new MappingMetadataCache(vertx, client, Long.parseLong(mappingMetadataExpirationTime));
    DataImportKafkaHandler dataImportKafkaHandler = new DataImportKafkaHandler(vertx, storage, client, profileSnapshotCache, kafkaConfig, mappingMetadataCache);
    // One consumer wrapper per data-import event type. Raw List<Future> is required by
    // the CompositeFuture.all(List<Future>) signature in this Vert.x version.
    List<Future> futures = EVENT_TYPES.stream()
        .map(eventType -> createKafkaConsumerWrapper(kafkaConfig, eventType, dataImportKafkaHandler))
        .collect(Collectors.toList());
    CompositeFuture.all(futures).onFailure(startPromise::fail).onSuccess(ar -> {
        // All consumers started; retain them so stop() can close them later.
        futures.forEach(future -> consumerWrappers.add((KafkaConsumerWrapper<String, String>) future.result()));
        startPromise.complete();
    });
}
Also used : StringUtils(org.apache.commons.lang.StringUtils) DI_SRS_MARC_BIB_RECORD_NOT_MATCHED(org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_NOT_MATCHED) DI_SRS_MARC_AUTHORITY_RECORD_MODIFIED_READY_FOR_POST_PROCESSING(org.folio.DataImportEventTypes.DI_SRS_MARC_AUTHORITY_RECORD_MODIFIED_READY_FOR_POST_PROCESSING) DI_INVENTORY_INSTANCE_MATCHED(org.folio.DataImportEventTypes.DI_INVENTORY_INSTANCE_MATCHED) DI_SRS_MARC_BIB_RECORD_MATCHED_READY_FOR_POST_PROCESSING(org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_MATCHED_READY_FOR_POST_PROCESSING) JsonObject(io.vertx.core.json.JsonObject) EventManager(org.folio.processing.events.EventManager) ProfileSnapshotCache(org.folio.inventory.dataimport.cache.ProfileSnapshotCache) DI_INVENTORY_INSTANCE_UPDATED(org.folio.DataImportEventTypes.DI_INVENTORY_INSTANCE_UPDATED) DI_INVENTORY_ITEM_MATCHED(org.folio.DataImportEventTypes.DI_INVENTORY_ITEM_MATCHED) Collectors(java.util.stream.Collectors) Future(io.vertx.core.Future) String.format(java.lang.String.format) DI_SRS_MARC_AUTHORITY_RECORD_NOT_MATCHED(org.folio.DataImportEventTypes.DI_SRS_MARC_AUTHORITY_RECORD_NOT_MATCHED) Storage(org.folio.inventory.storage.Storage) List(java.util.List) SubscriptionDefinition(org.folio.kafka.SubscriptionDefinition) Logger(org.apache.logging.log4j.Logger) DI_INVENTORY_INSTANCE_CREATED(org.folio.DataImportEventTypes.DI_INVENTORY_INSTANCE_CREATED) AbstractVerticle(io.vertx.core.AbstractVerticle) KAFKA_REPLICATION_FACTOR(org.folio.inventory.dataimport.util.KafkaConfigConstants.KAFKA_REPLICATION_FACTOR) DI_SRS_MARC_AUTHORITY_RECORD_CREATED(org.folio.DataImportEventTypes.DI_SRS_MARC_AUTHORITY_RECORD_CREATED) DI_SRS_MARC_BIB_RECORD_MODIFIED_READY_FOR_POST_PROCESSING(org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_MODIFIED_READY_FOR_POST_PROCESSING) ConsumerWrapperUtil(org.folio.inventory.dataimport.util.ConsumerWrapperUtil) HttpClient(io.vertx.core.http.HttpClient) 
KAFKA_MAX_REQUEST_SIZE(org.folio.inventory.dataimport.util.KafkaConfigConstants.KAFKA_MAX_REQUEST_SIZE) DataImportEventTypes(org.folio.DataImportEventTypes) DI_INVENTORY_HOLDING_CREATED(org.folio.DataImportEventTypes.DI_INVENTORY_HOLDING_CREATED) KafkaConsumerWrapper(org.folio.kafka.KafkaConsumerWrapper) DI_INVENTORY_HOLDING_NOT_MATCHED(org.folio.DataImportEventTypes.DI_INVENTORY_HOLDING_NOT_MATCHED) ArrayList(java.util.ArrayList) CompositeFuture(io.vertx.core.CompositeFuture) GlobalLoadSensor(org.folio.kafka.GlobalLoadSensor) DI_INVENTORY_ITEM_CREATED(org.folio.DataImportEventTypes.DI_INVENTORY_ITEM_CREATED) KAFKA_PORT(org.folio.inventory.dataimport.util.KafkaConfigConstants.KAFKA_PORT) KafkaTopicNameHelper(org.folio.kafka.KafkaTopicNameHelper) HttpClientOptions(io.vertx.core.http.HttpClientOptions) DI_SRS_MARC_HOLDING_RECORD_CREATED(org.folio.DataImportEventTypes.DI_SRS_MARC_HOLDING_RECORD_CREATED) Promise(io.vertx.core.Promise) DI_INVENTORY_HOLDING_MATCHED(org.folio.DataImportEventTypes.DI_INVENTORY_HOLDING_MATCHED) KAFKA_ENV(org.folio.inventory.dataimport.util.KafkaConfigConstants.KAFKA_ENV) DI_SRS_MARC_BIB_RECORD_CREATED(org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_CREATED) AsyncRecordHandler(org.folio.kafka.AsyncRecordHandler) DI_INVENTORY_HOLDING_UPDATED(org.folio.DataImportEventTypes.DI_INVENTORY_HOLDING_UPDATED) DI_SRS_MARC_AUTHORITY_RECORD_DELETED(org.folio.DataImportEventTypes.DI_SRS_MARC_AUTHORITY_RECORD_DELETED) DI_INVENTORY_INSTANCE_NOT_MATCHED(org.folio.DataImportEventTypes.DI_INVENTORY_INSTANCE_NOT_MATCHED) DI_SRS_MARC_BIB_RECORD_MODIFIED(org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_MODIFIED) DI_INVENTORY_ITEM_NOT_MATCHED(org.folio.DataImportEventTypes.DI_INVENTORY_ITEM_NOT_MATCHED) DI_SRS_MARC_BIB_RECORD_MATCHED(org.folio.DataImportEventTypes.DI_SRS_MARC_BIB_RECORD_MATCHED) LogManager(org.apache.logging.log4j.LogManager) KAFKA_HOST(org.folio.inventory.dataimport.util.KafkaConfigConstants.KAFKA_HOST) 
MappingMetadataCache(org.folio.inventory.dataimport.cache.MappingMetadataCache) KafkaConfig(org.folio.kafka.KafkaConfig) OKAPI_URL(org.folio.inventory.dataimport.util.KafkaConfigConstants.OKAPI_URL) DataImportKafkaHandler(org.folio.inventory.dataimport.consumers.DataImportKafkaHandler) KafkaConsumerWrapper(org.folio.kafka.KafkaConsumerWrapper) JsonObject(io.vertx.core.json.JsonObject) ProfileSnapshotCache(org.folio.inventory.dataimport.cache.ProfileSnapshotCache) HttpClientOptions(io.vertx.core.http.HttpClientOptions) Storage(org.folio.inventory.storage.Storage) MappingMetadataCache(org.folio.inventory.dataimport.cache.MappingMetadataCache) HttpClient(io.vertx.core.http.HttpClient) Future(io.vertx.core.Future) CompositeFuture(io.vertx.core.CompositeFuture) DataImportKafkaHandler(org.folio.inventory.dataimport.consumers.DataImportKafkaHandler) KafkaConfig(org.folio.kafka.KafkaConfig)

Example 7 with KafkaConfig

use of org.folio.kafka.KafkaConfig in project mod-inventory by folio-org.

The following example shows the start method of the MarcHridSetConsumerVerticle class.

@Override
public void start(Promise<Void> startPromise) {
    // Configuration handed to this verticle at deployment.
    JsonObject deploymentConfig = vertx.getOrCreateContext().config();
    // Assemble the Kafka connection settings from the deployment config.
    KafkaConfig kafkaConfig = KafkaConfig.builder()
        .envId(deploymentConfig.getString(KAFKA_ENV))
        .kafkaHost(deploymentConfig.getString(KAFKA_HOST))
        .kafkaPort(deploymentConfig.getString(KAFKA_PORT))
        .okapiUrl(deploymentConfig.getString(OKAPI_URL))
        .replicationFactor(Integer.parseInt(deploymentConfig.getString(KAFKA_REPLICATION_FACTOR)))
        .maxRequestSize(Integer.parseInt(deploymentConfig.getString(KAFKA_MAX_REQUEST_SIZE)))
        .build();
    LOGGER.info("kafkaConfig: {}", kafkaConfig);
    // One consumer per HRID-set event type (MARC bib and MARC holdings).
    marcBibConsumerWrapper = createConsumerByEvent(kafkaConfig, DI_SRS_MARC_BIB_INSTANCE_HRID_SET);
    marcHoldingsConsumerWrapper = createConsumerByEvent(kafkaConfig, DI_SRS_MARC_HOLDINGS_HOLDING_HRID_SET);
    // Shared HTTP client backing storage access and the metadata cache.
    HttpClient httpClient = vertx.createHttpClient();
    Storage inventoryStorage = Storage.basedUpon(vertx, deploymentConfig, httpClient);
    InstanceUpdateDelegate instanceDelegate = new InstanceUpdateDelegate(inventoryStorage);
    HoldingsUpdateDelegate holdingsDelegate = new HoldingsUpdateDelegate(inventoryStorage);
    String cacheTtlSeconds = getCacheEnvVariable(deploymentConfig, "inventory.mapping-metadata-cache.expiration.time.seconds");
    MappingMetadataCache metadataCache = new MappingMetadataCache(vertx, httpClient, Long.parseLong(cacheTtlSeconds));
    MarcBibInstanceHridSetKafkaHandler bibHandler = new MarcBibInstanceHridSetKafkaHandler(instanceDelegate, metadataCache);
    MarcHoldingsRecordHridSetKafkaHandler holdingsHandler = new MarcHoldingsRecordHridSetKafkaHandler(holdingsDelegate, metadataCache);
    // Start both consumers; the verticle is up only once both have started.
    Future bibStarted = marcBibConsumerWrapper.start(bibHandler, constructModuleName());
    Future holdingsStarted = marcHoldingsConsumerWrapper.start(holdingsHandler, constructModuleName());
    CompositeFuture.all(bibStarted, holdingsStarted)
        .onFailure(startPromise::fail)
        .onSuccess(ar -> startPromise.complete());
}
Also used : MarcBibInstanceHridSetKafkaHandler(org.folio.inventory.dataimport.consumers.MarcBibInstanceHridSetKafkaHandler) Storage(org.folio.inventory.storage.Storage) MarcHoldingsRecordHridSetKafkaHandler(org.folio.inventory.dataimport.consumers.MarcHoldingsRecordHridSetKafkaHandler) MappingMetadataCache(org.folio.inventory.dataimport.cache.MappingMetadataCache) HttpClient(io.vertx.core.http.HttpClient) InstanceUpdateDelegate(org.folio.inventory.dataimport.handlers.actions.InstanceUpdateDelegate) JsonObject(io.vertx.core.json.JsonObject) HoldingsUpdateDelegate(org.folio.inventory.dataimport.handlers.actions.HoldingsUpdateDelegate) KafkaConfig(org.folio.kafka.KafkaConfig)

Example 8 with KafkaConfig

use of org.folio.kafka.KafkaConfig in project mod-inventory by folio-org.

The following example shows the start method of the QuickMarcConsumerVerticle class.

@Override
public void start(Promise<Void> startPromise) {
    // Configuration supplied at deployment time.
    JsonObject deploymentConfig = vertx.getOrCreateContext().config();
    KafkaConfig kafkaConfig = getKafkaConfig(deploymentConfig);
    // HTTP client shared by storage access and the titles helper.
    HttpClient httpClient = vertx.createHttpClient();
    Storage inventoryStorage = Storage.basedUpon(vertx, deploymentConfig, httpClient);
    PrecedingSucceedingTitlesHelper titlesHelper = new PrecedingSucceedingTitlesHelper(WebClient.wrap(httpClient));
    QuickMarcKafkaHandler updateHandler =
        new QuickMarcKafkaHandler(vertx, inventoryStorage, maxDistributionNumber, kafkaConfig, titlesHelper);
    // Subscribe to quickMARC record-update events; keep the consumer for shutdown.
    createKafkaConsumer(kafkaConfig, QMEventTypes.QM_SRS_MARC_RECORD_UPDATED, updateHandler)
        .onFailure(startPromise::fail)
        .onSuccess(startedConsumer -> {
            consumer = startedConsumer;
            startPromise.complete();
        });
}
Also used : Storage(org.folio.inventory.storage.Storage) HttpClient(io.vertx.core.http.HttpClient) PrecedingSucceedingTitlesHelper(org.folio.inventory.dataimport.handlers.actions.PrecedingSucceedingTitlesHelper) QuickMarcKafkaHandler(org.folio.inventory.dataimport.consumers.QuickMarcKafkaHandler) JsonObject(io.vertx.core.json.JsonObject) KafkaConfig(org.folio.kafka.KafkaConfig)

Example 9 with KafkaConfig

use of org.folio.kafka.KafkaConfig in project mod-source-record-storage by folio-org.

The following example shows the setUpClass method of the AbstractLBServiceTest class.

@BeforeClass
public static void setUpClass(TestContext context) throws Exception {
    Async async = context.async();
    vertx = Vertx.vertx();
    // Point the module at the embedded Kafka broker via system properties.
    String[] hostAndPort = cluster.getBrokerList().split(":");
    System.setProperty(KAFKA_HOST, hostAndPort[0]);
    System.setProperty(KAFKA_PORT, hostAndPort[1]);
    System.setProperty(KAFKA_ENV, KAFKA_ENV_ID);
    System.setProperty(KAFKA_MAX_REQUEST_SIZE, String.valueOf(KAFKA_MAX_REQUEST_SIZE_VAL));
    System.setProperty(OKAPI_URL_ENV, OKAPI_URL);
    kafkaConfig = KafkaConfig.builder()
        .kafkaHost(hostAndPort[0])
        .kafkaPort(hostAndPort[1])
        .envId(KAFKA_ENV_ID)
        .maxRequestSize(KAFKA_MAX_REQUEST_SIZE_VAL)
        .build();
    // Plain Jackson mapper for RestAssured (no extra modules registered).
    RestAssured.config = RestAssuredConfig.config().objectMapperConfig(new ObjectMapperConfig().jackson2ObjectMapperFactory(new Jackson2ObjectMapperFactory() {

        @Override
        public ObjectMapper create(Type arg0, String arg1) {
            ObjectMapper objectMapper = new ObjectMapper();
            return objectMapper;
        }
    }));
    // Spin up a disposable Postgres and expose its connection details to the module.
    PostgresClient.setPostgresTester(new PostgresTesterContainer());
    JsonObject pgClientConfig = PostgresClient.getInstance(vertx).getConnectionConfig();
    Envs.setEnv(pgClientConfig.getString(PostgresClientFactory.HOST), pgClientConfig.getInteger(PostgresClientFactory.PORT), pgClientConfig.getString(PostgresClientFactory.USERNAME), pgClientConfig.getString(PostgresClientFactory.PASSWORD), pgClientConfig.getString(PostgresClientFactory.DATABASE));
    TenantClient tenantClient = new TenantClient(OKAPI_URL, TENANT_ID, TOKEN);
    DeploymentOptions restVerticleDeploymentOptions = new DeploymentOptions().setConfig(new JsonObject().put("http.port", PORT));
    vertx.deployVerticle(RestVerticle.class.getName(), restVerticleDeploymentOptions, deployResponse -> {
        // Fail fast if the REST verticle itself could not be deployed.
        if (deployResponse.failed()) {
            context.fail(deployResponse.cause());
            return;
        }
        try {
            tenantClient.postTenant(new TenantAttributes().withModuleTo("3.2.0"), res2 -> {
                postgresClientFactory = new PostgresClientFactory(vertx);
                if (res2.result().statusCode() == 204) {
                    // Tenant init finished synchronously (no async job): setup is done.
                    // BUG FIX: the original returned without completing the Async,
                    // which would make setup hang until the test timeout on this path.
                    async.complete();
                    return;
                }
                if (res2.result().statusCode() == 201) {
                    // 201 means an async tenant job was created; poll it to completion.
                    tenantClient.getTenantByOperationId(res2.result().bodyAsJson(TenantJob.class).getId(), 60000, context.asyncAssertSuccess(res3 -> {
                        context.assertTrue(res3.bodyAsJson(TenantJob.class).getComplete());
                        String error = res3.bodyAsJson(TenantJob.class).getError();
                        if (error != null) {
                            // This particular error is expected in the test environment.
                            context.assertTrue(error.contains("EventDescriptor was not registered for eventType"));
                        }
                    }));
                } else {
                    context.assertEquals("Failed to make post tenant. Received status code 400", res2.result().bodyAsString());
                }
                async.complete();
            });
        } catch (Exception e) {
            // Fail the setup instead of silently completing: running the test class
            // against a half-initialized environment only produces confusing failures.
            context.fail(e);
        }
    });
}
Also used : RestVerticle(org.folio.rest.RestVerticle) TestContext(io.vertx.ext.unit.TestContext) Async(io.vertx.ext.unit.Async) RestVerticle(org.folio.rest.RestVerticle) BeforeClass(org.junit.BeforeClass) EmbeddedKafkaCluster(net.mguenther.kafka.junit.EmbeddedKafkaCluster) PostgresTesterContainer(org.folio.postgres.testing.PostgresTesterContainer) Jackson2ObjectMapperFactory(io.restassured.path.json.mapper.factory.Jackson2ObjectMapperFactory) TenantClient(org.folio.rest.client.TenantClient) JsonObject(io.vertx.core.json.JsonObject) Metadata(org.folio.rest.jaxrs.model.Metadata) ClassRule(org.junit.ClassRule) TenantJob(org.folio.rest.jaxrs.model.TenantJob) RestAssuredConfig(io.restassured.config.RestAssuredConfig) ObjectMapperConfig(io.restassured.config.ObjectMapperConfig) EmbeddedKafkaClusterConfig.useDefaults(net.mguenther.kafka.junit.EmbeddedKafkaClusterConfig.useDefaults) AfterClass(org.junit.AfterClass) Envs(org.folio.rest.tools.utils.Envs) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Vertx(io.vertx.core.Vertx) EmbeddedKafkaCluster.provisionWith(net.mguenther.kafka.junit.EmbeddedKafkaCluster.provisionWith) PostgresClient(org.folio.rest.persist.PostgresClient) NetworkUtils(org.folio.rest.tools.utils.NetworkUtils) DeploymentOptions(io.vertx.core.DeploymentOptions) Type(java.lang.reflect.Type) PostgresClientFactory(org.folio.dao.PostgresClientFactory) TenantAttributes(org.folio.rest.jaxrs.model.TenantAttributes) RestAssured(io.restassured.RestAssured) KafkaConfig(org.folio.kafka.KafkaConfig) Jackson2ObjectMapperFactory(io.restassured.path.json.mapper.factory.Jackson2ObjectMapperFactory) JsonObject(io.vertx.core.json.JsonObject) TenantJob(org.folio.rest.jaxrs.model.TenantJob) ObjectMapperConfig(io.restassured.config.ObjectMapperConfig) Type(java.lang.reflect.Type) DeploymentOptions(io.vertx.core.DeploymentOptions) TenantAttributes(org.folio.rest.jaxrs.model.TenantAttributes) PostgresClientFactory(org.folio.dao.PostgresClientFactory) 
Async(io.vertx.ext.unit.Async) TenantClient(org.folio.rest.client.TenantClient) PostgresTesterContainer(org.folio.postgres.testing.PostgresTesterContainer) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) BeforeClass(org.junit.BeforeClass)

Aggregations

KafkaConfig (org.folio.kafka.KafkaConfig)9 JsonObject (io.vertx.core.json.JsonObject)5 HttpClient (io.vertx.core.http.HttpClient)4 MappingMetadataCache (org.folio.inventory.dataimport.cache.MappingMetadataCache)3 Storage (org.folio.inventory.storage.Storage)3 DeploymentOptions (io.vertx.core.DeploymentOptions)2 HttpClientOptions (io.vertx.core.http.HttpClientOptions)2 Async (io.vertx.ext.unit.Async)2 BeforeClass (org.junit.BeforeClass)2 Bean (org.springframework.context.annotation.Bean)2 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)1 RegexPattern (com.github.tomakehurst.wiremock.matching.RegexPattern)1 UrlPathPattern (com.github.tomakehurst.wiremock.matching.UrlPathPattern)1 RestAssured (io.restassured.RestAssured)1 ObjectMapperConfig (io.restassured.config.ObjectMapperConfig)1 RestAssuredConfig (io.restassured.config.RestAssuredConfig)1 Jackson2ObjectMapperFactory (io.restassured.path.json.mapper.factory.Jackson2ObjectMapperFactory)1 AbstractVerticle (io.vertx.core.AbstractVerticle)1 CompositeFuture (io.vertx.core.CompositeFuture)1 Future (io.vertx.core.Future)1