Use of org.folio.inventory.dataimport.cache.ProfileSnapshotCache in project mod-inventory by folio-org: class DataImportKafkaHandlerTest, method setUp.
@Before
public void setUp() {
  MockitoAnnotations.openMocks(this);
  // Point the handler at the embedded Kafka broker started for the test
  String[] hostAndPort = cluster.getBrokerList().split(":");
  // Stub the job profile snapshot endpoint so ProfileSnapshotCache can load entries over HTTP
  WireMock.stubFor(get(new UrlPathPattern(new RegexPattern(JOB_PROFILE_URL + "/.*"), true))
    .willReturn(WireMock.ok().withBody(Json.encode(profileSnapshotWrapper))));
  KafkaConfig kafkaConfig = KafkaConfig.builder()
    .kafkaHost(hostAndPort[0])
    .kafkaPort(hostAndPort[1])
    .maxRequestSize(1048576)
    .build();
  HttpClient client = vertx.createHttpClient(new HttpClientOptions().setConnectTimeout(3000));
  // Both caches are constructed with a TTL of 3600 seconds
  dataImportKafkaHandler = new DataImportKafkaHandler(vertx, mockedStorage, client,
    new ProfileSnapshotCache(vertx, client, 3600), kafkaConfig,
    new MappingMetadataCache(vertx, client, 3600));
  EventManager.clearEventHandlers();
  EventManager.registerKafkaEventPublisher(kafkaConfig, vertx, 1);
}
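For context, the third constructor argument of ProfileSnapshotCache is an expiration time in seconds. Below is a minimal sketch of the expiring async-cache pattern such a class can be built on, assuming a Caffeine AsyncCache keyed by snapshot id; the class name ExpiringSnapshotCache, the String value type, and the get method are illustrative, not the actual mod-inventory implementation.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

import com.github.benmanes.caffeine.cache.AsyncCache;
import com.github.benmanes.caffeine.cache.Caffeine;

import io.vertx.core.Vertx;

public class ExpiringSnapshotCache {

  private final AsyncCache<String, String> cache;

  public ExpiringSnapshotCache(Vertx vertx, long cacheExpirationTimeSeconds) {
    this.cache = Caffeine.newBuilder()
      // Drop entries that have not been read for the configured TTL
      .expireAfterAccess(cacheExpirationTimeSeconds, TimeUnit.SECONDS)
      // Run cache maintenance on the Vert.x event loop rather than the common ForkJoinPool
      .executor(task -> vertx.runOnContext(v -> task.run()))
      .buildAsync();
  }

  // Return the cached snapshot body, computing it with the loader on a miss
  public CompletableFuture<String> get(String snapshotId, Function<String, String> loader) {
    return cache.get(snapshotId, loader);
  }
}

Routing the cache's executor through vertx.runOnContext keeps loads and expirations on the event loop, which matters because the loader in a Vert.x application typically issues a non-blocking HTTP request rather than a blocking call.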
Use of org.folio.inventory.dataimport.cache.ProfileSnapshotCache in project mod-inventory by folio-org: class DataImportConsumerVerticle, method start.
@Override
public void start(Promise<Void> startPromise) {
  JsonObject config = vertx.getOrCreateContext().config();
  KafkaConfig kafkaConfig = KafkaConfig.builder()
    .envId(config.getString(KAFKA_ENV))
    .kafkaHost(config.getString(KAFKA_HOST))
    .kafkaPort(config.getString(KAFKA_PORT))
    .okapiUrl(config.getString(OKAPI_URL))
    .replicationFactor(Integer.parseInt(config.getString(KAFKA_REPLICATION_FACTOR)))
    .maxRequestSize(Integer.parseInt(config.getString(KAFKA_MAX_REQUEST_SIZE)))
    .build();
  LOGGER.info(format("kafkaConfig: %s", kafkaConfig));
  EventManager.registerKafkaEventPublisher(kafkaConfig, vertx, maxDistributionNumber);
  HttpClientOptions params = new HttpClientOptions().setConnectTimeout(DEFAULT_HTTP_TIMEOUT_IN_MILLISECONDS);
  HttpClient client = vertx.createHttpClient(params);
  Storage storage = Storage.basedUpon(vertx, config, client);
  // Cache TTLs are configurable; getCacheEnvVariable resolves each key from the verticle config
  String profileSnapshotExpirationTime = getCacheEnvVariable(config, "inventory.profile-snapshot-cache.expiration.time.seconds");
  String mappingMetadataExpirationTime = getCacheEnvVariable(config, "inventory.mapping-metadata-cache.expiration.time.seconds");
  ProfileSnapshotCache profileSnapshotCache = new ProfileSnapshotCache(vertx, client, Long.parseLong(profileSnapshotExpirationTime));
  MappingMetadataCache mappingMetadataCache = new MappingMetadataCache(vertx, client, Long.parseLong(mappingMetadataExpirationTime));
  DataImportKafkaHandler dataImportKafkaHandler = new DataImportKafkaHandler(vertx, storage, client,
    profileSnapshotCache, kafkaConfig, mappingMetadataCache);
  // Create one Kafka consumer per supported data-import event type, all sharing the same handler
  List<Future> futures = EVENT_TYPES.stream()
    .map(eventType -> createKafkaConsumerWrapper(kafkaConfig, eventType, dataImportKafkaHandler))
    .collect(Collectors.toList());
  // The verticle is started only once every consumer has been created successfully
  CompositeFuture.all(futures).onFailure(startPromise::fail).onSuccess(ar -> {
    futures.forEach(future -> consumerWrappers.add((KafkaConsumerWrapper<String, String>) future.result()));
    startPromise.complete();
  });
}
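The getCacheEnvVariable helper is referenced above but not shown. A plausible sketch, assuming it reads the named key from the verticle config and falls back to a default of 3600 seconds when the value is absent or blank; both the default and the StringUtils.isBlank check are assumptions, not confirmed mod-inventory code.

// Assumes org.apache.commons.lang3.StringUtils and io.vertx.core.json.JsonObject are imported
private String getCacheEnvVariable(JsonObject config, String name) {
  String cacheExpirationTime = config.getString(name);
  if (StringUtils.isBlank(cacheExpirationTime)) {
    // Assumed default: one hour, matching the TTL used in the test setup above
    cacheExpirationTime = "3600";
  }
  return cacheExpirationTime;
}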