Use of org.folio.kafka.KafkaConfig in project mod-inventory by folio-org.
The class DataImportConsumerVerticle, method start.
@Override
public void start(Promise<Void> startPromise) {
  JsonObject config = vertx.getOrCreateContext().config();
  KafkaConfig kafkaConfig = KafkaConfig.builder()
    .envId(config.getString(KAFKA_ENV))
    .kafkaHost(config.getString(KAFKA_HOST))
    .kafkaPort(config.getString(KAFKA_PORT))
    .okapiUrl(config.getString(OKAPI_URL))
    .replicationFactor(Integer.parseInt(config.getString(KAFKA_REPLICATION_FACTOR)))
    .maxRequestSize(Integer.parseInt(config.getString(KAFKA_MAX_REQUEST_SIZE)))
    .build();
  LOGGER.info(format("kafkaConfig: %s", kafkaConfig));
  EventManager.registerKafkaEventPublisher(kafkaConfig, vertx, maxDistributionNumber);
  HttpClientOptions params = new HttpClientOptions().setConnectTimeout(DEFAULT_HTTP_TIMEOUT_IN_MILLISECONDS);
  HttpClient client = vertx.createHttpClient(params);
  Storage storage = Storage.basedUpon(vertx, config, client);
  String profileSnapshotExpirationTime = getCacheEnvVariable(config, "inventory.profile-snapshot-cache.expiration.time.seconds");
  String mappingMetadataExpirationTime = getCacheEnvVariable(config, "inventory.mapping-metadata-cache.expiration.time.seconds");
  ProfileSnapshotCache profileSnapshotCache = new ProfileSnapshotCache(vertx, client, Long.parseLong(profileSnapshotExpirationTime));
  MappingMetadataCache mappingMetadataCache = new MappingMetadataCache(vertx, client, Long.parseLong(mappingMetadataExpirationTime));
  DataImportKafkaHandler dataImportKafkaHandler = new DataImportKafkaHandler(vertx, storage, client, profileSnapshotCache, kafkaConfig, mappingMetadataCache);
  List<Future> futures = EVENT_TYPES.stream()
    .map(eventType -> createKafkaConsumerWrapper(kafkaConfig, eventType, dataImportKafkaHandler))
    .collect(Collectors.toList());
  CompositeFuture.all(futures)
    .onFailure(startPromise::fail)
    .onSuccess(ar -> {
      futures.forEach(future -> consumerWrappers.add((KafkaConsumerWrapper<String, String>) future.result()));
      startPromise.complete();
    });
}
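The createKafkaConsumerWrapper helper referenced in the stream above is not shown on this page. Below is a minimal sketch of how such a helper is commonly written on top of org.folio.kafka (KafkaConsumerWrapper, SubscriptionDefinition, KafkaTopicNameHelper, GlobalLoadSensor); the String event-type parameter, the loadLimit value, and the reuse of constructModuleName() are assumptions, not the confirmed mod-inventory implementation:

private Future<KafkaConsumerWrapper<String, String>> createKafkaConsumerWrapper(KafkaConfig kafkaConfig, String eventType,
    AsyncRecordHandler<String, String> recordHandler) {
  // Subscribe to the topic derived from the env id, the default namespace and the event type
  SubscriptionDefinition subscriptionDefinition = KafkaTopicNameHelper.createSubscriptionDefinition(
    kafkaConfig.getEnvId(), KafkaTopicNameHelper.getDefaultNameSpace(), eventType);

  KafkaConsumerWrapper<String, String> consumerWrapper = KafkaConsumerWrapper.<String, String>builder()
    .context(vertx.getOrCreateContext())
    .vertx(vertx)
    .kafkaConfig(kafkaConfig)
    .loadLimit(5)                           // assumed load limit, normally read from configuration
    .globalLoadSensor(new GlobalLoadSensor())
    .subscriptionDefinition(subscriptionDefinition)
    .build();

  // start() subscribes the consumer; map back to the wrapper so callers can keep a reference to it
  return consumerWrapper.start(recordHandler, constructModuleName()).map(consumerWrapper);
}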
Use of org.folio.kafka.KafkaConfig in project mod-inventory by folio-org.
The class MarcHridSetConsumerVerticle, method start.
@Override
public void start(Promise<Void> startPromise) {
  JsonObject config = vertx.getOrCreateContext().config();
  KafkaConfig kafkaConfig = KafkaConfig.builder()
    .envId(config.getString(KAFKA_ENV))
    .kafkaHost(config.getString(KAFKA_HOST))
    .kafkaPort(config.getString(KAFKA_PORT))
    .okapiUrl(config.getString(OKAPI_URL))
    .replicationFactor(Integer.parseInt(config.getString(KAFKA_REPLICATION_FACTOR)))
    .maxRequestSize(Integer.parseInt(config.getString(KAFKA_MAX_REQUEST_SIZE)))
    .build();
  LOGGER.info("kafkaConfig: {}", kafkaConfig);
  marcBibConsumerWrapper = createConsumerByEvent(kafkaConfig, DI_SRS_MARC_BIB_INSTANCE_HRID_SET);
  marcHoldingsConsumerWrapper = createConsumerByEvent(kafkaConfig, DI_SRS_MARC_HOLDINGS_HOLDING_HRID_SET);
  HttpClient client = vertx.createHttpClient();
  Storage storage = Storage.basedUpon(vertx, config, client);
  InstanceUpdateDelegate instanceUpdateDelegate = new InstanceUpdateDelegate(storage);
  HoldingsUpdateDelegate holdingsRecordUpdateDelegate = new HoldingsUpdateDelegate(storage);
  String mappingMetadataExpirationTime = getCacheEnvVariable(config, "inventory.mapping-metadata-cache.expiration.time.seconds");
  MappingMetadataCache mappingMetadataCache = new MappingMetadataCache(vertx, client, Long.parseLong(mappingMetadataExpirationTime));
  MarcBibInstanceHridSetKafkaHandler marcBibInstanceHridSetKafkaHandler = new MarcBibInstanceHridSetKafkaHandler(instanceUpdateDelegate, mappingMetadataCache);
  MarcHoldingsRecordHridSetKafkaHandler marcHoldingsRecordHridSetKafkaHandler = new MarcHoldingsRecordHridSetKafkaHandler(holdingsRecordUpdateDelegate, mappingMetadataCache);
  CompositeFuture.all(
      marcBibConsumerWrapper.start(marcBibInstanceHridSetKafkaHandler, constructModuleName()),
      marcHoldingsConsumerWrapper.start(marcHoldingsRecordHridSetKafkaHandler, constructModuleName()))
    .onFailure(startPromise::fail)
    .onSuccess(ar -> startPromise.complete());
}
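Each of these consumer verticles reads every Kafka setting from the deployment config returned by vertx.getOrCreateContext().config(), so the deployer controls them per instance. A minimal sketch of such a deployment, assuming Vert.x 4's Future-returning deployVerticle; the literal values and instance count are placeholders:

JsonObject consumerConfig = new JsonObject()
  .put(KAFKA_HOST, "kafka")
  .put(KAFKA_PORT, "9092")
  .put(KAFKA_ENV, "folio")
  .put(OKAPI_URL, "http://okapi:9130")
  .put(KAFKA_REPLICATION_FACTOR, "1")
  .put(KAFKA_MAX_REQUEST_SIZE, "4194304");

DeploymentOptions options = new DeploymentOptions()
  .setConfig(consumerConfig)   // becomes vertx.getOrCreateContext().config() inside the verticle
  .setInstances(1);            // placeholder; actual deployment settings vary

vertx.deployVerticle(MarcHridSetConsumerVerticle.class.getName(), options)
  .onSuccess(deploymentId -> LOGGER.info("MarcHridSetConsumerVerticle deployed: {}", deploymentId))
  .onFailure(e -> LOGGER.error("Failed to deploy MarcHridSetConsumerVerticle", e));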
Use of org.folio.kafka.KafkaConfig in project mod-inventory by folio-org.
The class QuickMarcConsumerVerticle, method start.
@Override
public void start(Promise<Void> startPromise) {
  JsonObject config = vertx.getOrCreateContext().config();
  KafkaConfig kafkaConfig = getKafkaConfig(config);
  HttpClient client = vertx.createHttpClient();
  Storage storage = Storage.basedUpon(vertx, config, client);
  var precedingSucceedingTitlesHelper = new PrecedingSucceedingTitlesHelper(WebClient.wrap(client));
  var handler = new QuickMarcKafkaHandler(vertx, storage, maxDistributionNumber, kafkaConfig, precedingSucceedingTitlesHelper);
  var kafkaConsumerFuture = createKafkaConsumer(kafkaConfig, QMEventTypes.QM_SRS_MARC_RECORD_UPDATED, handler);
  kafkaConsumerFuture
    .onFailure(startPromise::fail)
    .onSuccess(ar -> {
      consumer = ar;
      startPromise.complete();
    });
}
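Here getKafkaConfig(config) is a local helper rather than an inline builder call. Given the identical builder chains used by the two verticles above, it plausibly looks like the sketch below (not the confirmed mod-inventory implementation):

private KafkaConfig getKafkaConfig(JsonObject config) {
  return KafkaConfig.builder()
    .envId(config.getString(KAFKA_ENV))
    .kafkaHost(config.getString(KAFKA_HOST))
    .kafkaPort(config.getString(KAFKA_PORT))
    .okapiUrl(config.getString(OKAPI_URL))
    .replicationFactor(Integer.parseInt(config.getString(KAFKA_REPLICATION_FACTOR)))
    .maxRequestSize(Integer.parseInt(config.getString(KAFKA_MAX_REQUEST_SIZE)))
    .build();
}

Keeping the chain in a helper only avoids repeating the six config lookups; the resulting KafkaConfig is the same as in the other examples.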
Use of org.folio.kafka.KafkaConfig in project mod-source-record-storage by folio-org.
The class AbstractLBServiceTest, method setUpClass.
@BeforeClass
public static void setUpClass(TestContext context) throws Exception {
  Async async = context.async();
  vertx = Vertx.vertx();
  String[] hostAndPort = cluster.getBrokerList().split(":");
  System.setProperty(KAFKA_HOST, hostAndPort[0]);
  System.setProperty(KAFKA_PORT, hostAndPort[1]);
  System.setProperty(KAFKA_ENV, KAFKA_ENV_ID);
  System.setProperty(KAFKA_MAX_REQUEST_SIZE, String.valueOf(KAFKA_MAX_REQUEST_SIZE_VAL));
  System.setProperty(OKAPI_URL_ENV, OKAPI_URL);
  kafkaConfig = KafkaConfig.builder()
    .kafkaHost(hostAndPort[0])
    .kafkaPort(hostAndPort[1])
    .envId(KAFKA_ENV_ID)
    .maxRequestSize(KAFKA_MAX_REQUEST_SIZE_VAL)
    .build();
  RestAssured.config = RestAssuredConfig.config().objectMapperConfig(
    new ObjectMapperConfig().jackson2ObjectMapperFactory(new Jackson2ObjectMapperFactory() {

      @Override
      public ObjectMapper create(Type arg0, String arg1) {
        ObjectMapper objectMapper = new ObjectMapper();
        return objectMapper;
      }
    }));
  PostgresClient.setPostgresTester(new PostgresTesterContainer());
  JsonObject pgClientConfig = PostgresClient.getInstance(vertx).getConnectionConfig();
  Envs.setEnv(
    pgClientConfig.getString(PostgresClientFactory.HOST),
    pgClientConfig.getInteger(PostgresClientFactory.PORT),
    pgClientConfig.getString(PostgresClientFactory.USERNAME),
    pgClientConfig.getString(PostgresClientFactory.PASSWORD),
    pgClientConfig.getString(PostgresClientFactory.DATABASE));
  TenantClient tenantClient = new TenantClient(OKAPI_URL, TENANT_ID, TOKEN);
  DeploymentOptions restVerticleDeploymentOptions = new DeploymentOptions().setConfig(new JsonObject().put("http.port", PORT));
  vertx.deployVerticle(RestVerticle.class.getName(), restVerticleDeploymentOptions, deployResponse -> {
    try {
      tenantClient.postTenant(new TenantAttributes().withModuleTo("3.2.0"), res2 -> {
        postgresClientFactory = new PostgresClientFactory(vertx);
        if (res2.result().statusCode() == 204) {
          return;
        }
        if (res2.result().statusCode() == 201) {
          tenantClient.getTenantByOperationId(res2.result().bodyAsJson(TenantJob.class).getId(), 60000, context.asyncAssertSuccess(res3 -> {
            context.assertTrue(res3.bodyAsJson(TenantJob.class).getComplete());
            String error = res3.bodyAsJson(TenantJob.class).getError();
            if (error != null) {
              context.assertTrue(error.contains("EventDescriptor was not registered for eventType"));
            }
          }));
        } else {
          context.assertEquals("Failed to make post tenant. Received status code 400", res2.result().bodyAsString());
        }
        async.complete();
      });
    } catch (Exception e) {
      e.printStackTrace();
      async.complete();
    }
  });
}
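With the system properties set and kafkaConfig pointed at the embedded broker, subclasses of this test can publish events to the topics the module consumes. A hedged sketch of such a helper, assuming KafkaConfig exposes getProducerProps() and KafkaTopicNameHelper.formatTopicName(...) as in folio-kafka-wrapper, and using the Vert.x Kafka client; the event type and payload are placeholders:

// Sketch of a test helper: publish an event to the embedded broker for the module to consume
protected static Future<RecordMetadata> sendEvent(String eventType, String key, String payload) {
  // Build the per-tenant topic name the same way the module's consumers do
  String topic = KafkaTopicNameHelper.formatTopicName(
    kafkaConfig.getEnvId(), KafkaTopicNameHelper.getDefaultNameSpace(), TENANT_ID, eventType);

  // Producer properties come from the KafkaConfig built in setUpClass against the embedded cluster
  KafkaProducer<String, String> producer = KafkaProducer.create(vertx, kafkaConfig.getProducerProps());

  Future<RecordMetadata> sent = producer.send(KafkaProducerRecord.create(topic, key, payload));
  sent.onComplete(ar -> producer.close());
  return sent;
}

A test would then call, for example, sendEvent("DI_SRS_MARC_BIB_RECORD_CREATED", recordId, eventPayloadJson) and assert on the module's resulting state.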