use of io.vertx.kafka.client.producer.impl.KafkaHeaderImpl in project strimzi-kafka-bridge by strimzi.
the class ConsumerIT method receiveSimpleMessageWithHeaders.
@Test
void receiveSimpleMessageWithHeaders(VertxTestContext context) throws InterruptedException, ExecutionException, TimeoutException {
    KafkaFuture<Void> future = adminClientFacade.createTopic(topic, 1, 1);

    String sentBody = "Simple message";
    List<KafkaHeader> headers = new ArrayList<>();
    headers.add(new KafkaHeaderImpl("key1", "value1"));
    headers.add(new KafkaHeaderImpl("key1", "value1"));
    headers.add(new KafkaHeaderImpl("key2", "value2"));

    future.get();
    basicKafkaClient.sendJsonMessagesPlain(topic, 1, headers, sentBody, true);

    // create consumer
    // subscribe to a topic
    consumerService()
        .createConsumer(context, groupId, consumerJson)
        .subscribeConsumer(context, groupId, name, topic);

    CompletableFuture<Boolean> consume = new CompletableFuture<>();
    // consume records
    consumerService()
        .consumeRecordsRequest(groupId, name, BridgeContentType.KAFKA_JSON_JSON)
        .as(BodyCodec.jsonArray())
        .send(ar -> {
            context.verify(() -> {
                assertThat(ar.succeeded(), is(true));
                HttpResponse<JsonArray> response = ar.result();
                assertThat(response.statusCode(), is(HttpResponseStatus.OK.code()));
                assertThat(response.body().size(), is(1));

                JsonObject jsonResponse = response.body().getJsonObject(0);

                String kafkaTopic = jsonResponse.getString("topic");
                int kafkaPartition = jsonResponse.getInteger("partition");
                String key = jsonResponse.getString("key");
                String value = jsonResponse.getString("value");
                long offset = jsonResponse.getLong("offset");
                JsonArray kafkaHeaders = jsonResponse.getJsonArray("headers");

                assertThat(kafkaTopic, is(topic));
                assertThat(value, is(sentBody));
                assertThat(offset, is(0L));
                assertThat(kafkaPartition, notNullValue());
                assertThat(key, nullValue());
                assertThat(kafkaHeaders.size(), is(3));
                assertThat(kafkaHeaders.getJsonObject(0).getString("key"), is("key1"));
                assertThat(new String(DatatypeConverter.parseBase64Binary(kafkaHeaders.getJsonObject(0).getString("value"))), is("value1"));
                assertThat(kafkaHeaders.getJsonObject(1).getString("key"), is("key1"));
                assertThat(new String(DatatypeConverter.parseBase64Binary(kafkaHeaders.getJsonObject(1).getString("value"))), is("value1"));
                assertThat(kafkaHeaders.getJsonObject(2).getString("key"), is("key2"));
                assertThat(new String(DatatypeConverter.parseBase64Binary(kafkaHeaders.getJsonObject(2).getString("value"))), is("value2"));
            });
            consume.complete(true);
        });

    consume.get(TEST_TIMEOUT, TimeUnit.SECONDS);

    // consumer deletion
    consumerService().deleteConsumer(context, groupId, name);

    context.completeNow();
    assertThat(context.awaitCompletion(TEST_TIMEOUT, TimeUnit.SECONDS), is(true));
}
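The test above relies on the bridge returning header values Base64-encoded in its JSON response, which is why each assertion decodes with DatatypeConverter.parseBase64Binary. A minimal, self-contained sketch of that round trip, using only the two KafkaHeaderImpl constructors shown above; the class name and values here are illustrative, not taken from the test:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

import io.vertx.core.buffer.Buffer;
import io.vertx.kafka.client.producer.KafkaHeader;
import io.vertx.kafka.client.producer.impl.KafkaHeaderImpl;

public class HeaderRoundTripSketch {
    public static void main(String[] args) {
        // KafkaHeaderImpl accepts either a String or a Buffer value
        KafkaHeader fromString = new KafkaHeaderImpl("key1", "value1");
        KafkaHeader fromBuffer = new KafkaHeaderImpl("key2", Buffer.buffer("value2".getBytes(StandardCharsets.UTF_8)));

        // The bridge exposes header values Base64-encoded in its JSON responses,
        // so a client decodes them the same way the test does
        String encoded = Base64.getEncoder().encodeToString(fromBuffer.value().getBytes());
        String decoded = new String(Base64.getDecoder().decode(encoded), StandardCharsets.UTF_8);
        System.out.println(fromString.key() + " / " + decoded); // key1 / value2
    }
}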
use of io.vertx.kafka.client.producer.impl.KafkaHeaderImpl in project strimzi-kafka-bridge by strimzi.
the class ProducerIT method sendSimpleMessageWithHeaders.
@Test
void sendSimpleMessageWithHeaders(VertxTestContext context) throws ExecutionException, InterruptedException {
    KafkaFuture<Void> future = adminClientFacade.createTopic(topic, 2, 1);

    String value = "message-value";
    String sentBody = "Simple message";
    List<KafkaHeader> headers = new ArrayList<>();
    headers.add(new KafkaHeaderImpl("key1", DatatypeConverter.printBase64Binary("value1".getBytes())));
    headers.add(new KafkaHeaderImpl("key2", DatatypeConverter.printBase64Binary("value2".getBytes())));
    headers.add(new KafkaHeaderImpl("key2", DatatypeConverter.printBase64Binary("value2".getBytes())));

    JsonArray records = new JsonArray();
    JsonObject json = new JsonObject();
    json.put("value", value);

    JsonArray jsonHeaders = new JsonArray();
    for (KafkaHeader kafkaHeader : headers) {
        JsonObject header = new JsonObject();
        header.put("key", kafkaHeader.key());
        header.put("value", kafkaHeader.value().toString());
        jsonHeaders.add(header);
    }
    json.put("headers", jsonHeaders);
    records.add(json);

    JsonObject root = new JsonObject();
    root.put("records", records);

    future.get();
    producerService().sendRecordsRequest(topic, root, BridgeContentType.KAFKA_JSON_JSON).sendJsonObject(root, verifyOK(context));

    Properties consumerProperties = Consumer.fillDefaultProperties();
    consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaUri);

    KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, consumerProperties,
            new KafkaJsonDeserializer<>(String.class), new KafkaJsonDeserializer<>(String.class));
    consumer.handler(record -> {
        context.verify(() -> {
            assertThat(record.value(), is(value));
            assertThat(record.topic(), is(topic));
            assertThat(record.partition(), notNullValue());
            assertThat(record.offset(), is(0L));
            assertThat(record.headers().size(), is(3));
            assertThat(record.headers().get(0).key(), is("key1"));
            assertThat(record.headers().get(0).value().toString(), is("value1"));
            assertThat(record.headers().get(1).key(), is("key2"));
            assertThat(record.headers().get(1).value().toString(), is("value2"));
            assertThat(record.headers().get(2).key(), is("key2"));
            assertThat(record.headers().get(2).value().toString(), is("value2"));
        });
        LOGGER.info("Message consumed topic={} partition={} offset={}, key={}, value={}, headers={}",
                record.topic(), record.partition(), record.offset(), record.key(), record.value(), record.headers());
        consumer.close();
        context.completeNow();
    });

    consumer.subscribe(topic, done -> {
        if (!done.succeeded()) {
            context.failNow(done.cause());
        }
    });
}
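When producing through the bridge's JSON endpoint, the test wraps every header value with DatatypeConverter.printBase64Binary before putting it into the request body, since the bridge expects header values to be Base64-encoded. A standalone sketch of building such a payload; the keys and values are illustrative:

import java.util.Base64;

import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;

public class BridgeRecordPayloadSketch {
    public static void main(String[] args) {
        // One header, with its value Base64-encoded as the bridge expects
        JsonObject header = new JsonObject()
                .put("key", "key1")
                .put("value", Base64.getEncoder().encodeToString("value1".getBytes()));

        // One record carrying the value and its headers
        JsonObject record = new JsonObject()
                .put("value", "message-value")
                .put("headers", new JsonArray().add(header));

        // The request body wraps all records under "records"
        JsonObject root = new JsonObject().put("records", new JsonArray().add(record));
        System.out.println(root.encodePrettily());
    }
}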
use of io.vertx.kafka.client.producer.impl.KafkaHeaderImpl in project strimzi-kafka-bridge by strimzi.
the class HttpBinaryMessageConverter method toKafkaRecord.
@Override
public KafkaProducerRecord<byte[], byte[]> toKafkaRecord(String kafkaTopic, Integer partition, Buffer message) {
    Integer partitionFromBody = null;
    byte[] key = null;
    byte[] value = null;
    List<KafkaHeader> headers = new ArrayList<>();

    JsonObject json = message.toJsonObject();

    if (!json.isEmpty()) {
        if (json.containsKey("key")) {
            key = DatatypeConverter.parseBase64Binary(json.getString("key"));
        }
        if (json.containsKey("value")) {
            value = DatatypeConverter.parseBase64Binary(json.getString("value"));
        }
        if (json.containsKey("headers")) {
            for (Object obj : json.getJsonArray("headers")) {
                JsonObject jsonObject = (JsonObject) obj;
                headers.add(new KafkaHeaderImpl(jsonObject.getString("key"),
                        Buffer.buffer(DatatypeConverter.parseBase64Binary(jsonObject.getString("value")))));
            }
        }
        if (json.containsKey("partition")) {
            partitionFromBody = json.getInteger("partition");
        }
        if (partition != null && partitionFromBody != null) {
            throw new IllegalStateException("Partition specified in body and in request path");
        }
        if (partition != null) {
            partitionFromBody = partition;
        }
    }

    KafkaProducerRecord<byte[], byte[]> record = KafkaProducerRecord.create(kafkaTopic, key, value, partitionFromBody);
    record.addHeaders(headers);
    return record;
}
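A hedged usage sketch of this converter, assuming HttpBinaryMessageConverter has a no-argument constructor as in the bridge sources; the topic name and payload below are illustrative. It shows that key, value, and header values in the binary JSON format are all Base64-encoded byte arrays:

import java.util.Base64;

import io.vertx.core.buffer.Buffer;
import io.vertx.core.json.JsonArray;
import io.vertx.core.json.JsonObject;
import io.vertx.kafka.client.producer.KafkaProducerRecord;

public class BinaryConverterUsageSketch {
    public static void main(String[] args) {
        // Every field of the binary JSON format carries Base64-encoded bytes
        JsonObject body = new JsonObject()
                .put("key", Base64.getEncoder().encodeToString("my-key".getBytes()))
                .put("value", Base64.getEncoder().encodeToString("my-value".getBytes()))
                .put("headers", new JsonArray().add(new JsonObject()
                        .put("key", "trace-id")
                        .put("value", Base64.getEncoder().encodeToString("abc123".getBytes()))));

        HttpBinaryMessageConverter converter = new HttpBinaryMessageConverter(); // assumed no-arg constructor
        KafkaProducerRecord<byte[], byte[]> record =
                converter.toKafkaRecord("my-topic", null, Buffer.buffer(body.encode()));

        System.out.println(new String(record.value()) + ", headers=" + record.headers().size());
    }
}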
use of io.vertx.kafka.client.producer.impl.KafkaHeaderImpl in project mod-source-record-manager by folio-org.
the class ChangeEngineServiceImpl method populateError.
private void populateError(Record record, JobExecution jobExecution, OkapiConnectionParams okapiParams) {
    var eventPayload = getDataImportPayload(record, jobExecution, okapiParams);
    eventPayload.getContext().put(RECORD_ID_HEADER, record.getId());
    var key = String.valueOf(indexer.incrementAndGet() % maxDistributionNum);
    LOGGER.error(HOLDINGS_004_TAG_ERROR_MESSAGE);
    record.setParsedRecord(null);
    record.setErrorRecord(new ErrorRecord()
        .withContent(record.getRawRecord().getContent())
        .withDescription(new JsonObject().put(MESSAGE_KEY, HOLDINGS_004_TAG_ERROR_MESSAGE).encode()));
    var kafkaHeaders = KafkaHeaderUtils.kafkaHeadersFromMultiMap(okapiParams.getHeaders());
    kafkaHeaders.add(new KafkaHeaderImpl(RECORD_ID_HEADER, record.getId()));
    sendEventToKafka(okapiParams.getTenantId(), Json.encode(eventPayload), DI_ERROR.value(), kafkaHeaders, kafkaConfig, key)
        .onFailure(th -> LOGGER.error("Error publishing DI_ERROR event for MARC Holdings record with id {}", record.getId(), th));
}
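KafkaHeaderUtils.kafkaHeadersFromMultiMap is a FOLIO helper; conceptually it maps every entry of the Okapi header MultiMap onto a KafkaHeaderImpl, after which populateError appends one extra correlation header. A hedged sketch of that conversion; the header names and values below are illustrative, not the FOLIO constants:

import java.util.ArrayList;
import java.util.List;

import io.vertx.core.MultiMap;
import io.vertx.kafka.client.producer.KafkaHeader;
import io.vertx.kafka.client.producer.impl.KafkaHeaderImpl;

public class OkapiHeaderSketch {
    public static void main(String[] args) {
        MultiMap okapiHeaders = MultiMap.caseInsensitiveMultiMap()
                .add("x-okapi-tenant", "diku")
                .add("x-okapi-token", "token-value");

        // Conceptual equivalent of KafkaHeaderUtils.kafkaHeadersFromMultiMap(...)
        List<KafkaHeader> kafkaHeaders = new ArrayList<>();
        okapiHeaders.forEach(entry -> kafkaHeaders.add(new KafkaHeaderImpl(entry.getKey(), entry.getValue())));

        // Extra header carrying the record id, as populateError does with RECORD_ID_HEADER
        kafkaHeaders.add(new KafkaHeaderImpl("recordId", "some-record-id"));
        kafkaHeaders.forEach(h -> System.out.println(h.key() + " = " + h.value()));
    }
}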
use of io.vertx.kafka.client.producer.impl.KafkaHeaderImpl in project mod-source-record-manager by folio-org.
the class ChangeEngineServiceImpl method saveRecords.
/**
 * Saves parsed records in mod-source-record-storage
 *
 * @param params        - okapi params
 * @param jobExecution  - job execution related to records
 * @param parsedRecords - parsed records
 */
private Future<List<Record>> saveRecords(OkapiConnectionParams params, JobExecution jobExecution, List<Record> parsedRecords) {
    if (CollectionUtils.isEmpty(parsedRecords)) {
        return Future.succeededFuture();
    }
    RecordCollection recordCollection = new RecordCollection()
        .withRecords(parsedRecords)
        .withTotalRecords(parsedRecords.size());
    List<KafkaHeader> kafkaHeaders = KafkaHeaderUtils.kafkaHeadersFromMultiMap(params.getHeaders());
    kafkaHeaders.add(new KafkaHeaderImpl(JOB_EXECUTION_ID_HEADER, jobExecution.getId()));
    kafkaHeaders.add(new KafkaHeaderImpl(USER_ID_HEADER, jobExecution.getUserId()));
    String key = String.valueOf(indexer.incrementAndGet() % maxDistributionNum);
    return sendEventToKafka(params.getTenantId(), Json.encode(recordCollection), DI_RAW_RECORDS_CHUNK_PARSED.value(), kafkaHeaders, kafkaConfig, key)
        .map(parsedRecords);
}
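The trailing .map(parsedRecords) makes the returned Future resolve to the original record list once the Kafka send succeeds. A minimal hedged sketch of that pattern with a stand-in send method; sendToKafka below is illustrative, not the FOLIO API:

import java.util.List;

import io.vertx.core.Future;

public class MapToInputSketch {
    // Stand-in for sendEventToKafka: succeeds immediately in this sketch
    private static Future<Boolean> sendToKafka(String payload) {
        return Future.succeededFuture(true);
    }

    public static void main(String[] args) {
        List<String> parsedRecords = List.of("record-1", "record-2");

        // The Future completes with the same list that was sent, mirroring saveRecords
        Future<List<String>> saved = sendToKafka(String.join(",", parsedRecords)).map(parsedRecords);

        saved.onSuccess(records -> System.out.println("Saved " + records.size() + " records"));
    }
}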