Use of org.apache.kafka.common.utils.Bytes in the project kafka-streams-examples by confluentinc: class WordCountInteractiveQueriesExample, method createStreams.
static KafkaStreams createStreams(final Properties streamsConfiguration) {
  // One serde instance reused for both the grouping key and the value.
  final Serde<String> stringSerde = Serdes.String();
  final StreamsBuilder builder = new StreamsBuilder();

  final KStream<String, String> textLines =
      builder.stream(TEXT_LINES_TOPIC, Consumed.with(Serdes.String(), Serdes.String()));

  // Split each line into lowercase words, then group the stream by word.
  final KGroupedStream<String, String> groupedByWord = textLines
      .flatMapValues(value -> Arrays.asList(value.toLowerCase().split("\\W+")))
      .groupBy((key, word) -> word, Serialized.with(stringSerde, stringSerde));

  // State store holding the all-time count per word.
  groupedByWord.count(
      Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("word-count")
          .withValueSerde(Serdes.Long()));

  // Windowed state store holding the per-word count for each 1-minute window.
  groupedByWord
      .windowedBy(TimeWindows.of(60000))
      .count(
          Materialized.<String, Long, WindowStore<Bytes, byte[]>>as("windowed-word-count")
              .withValueSerde(Serdes.Long()));

  return new KafkaStreams(builder.build(), streamsConfiguration);
}
Use of org.apache.kafka.common.utils.Bytes in the project flink by apache: class KafkaContainerClient, method readMessages.
/**
 * Reads {@code expectedNumMessages} messages from the given topic, polling until they arrive
 * or a 120-second deadline expires.
 *
 * @param expectedNumMessages exact number of messages that must be read
 * @param groupId consumer group id to use
 * @param topic topic to read from
 * @param valueDeserializer deserializer for the message values
 * @return the messages read, in the order they were polled
 * @throws IOException if the number of messages read within the deadline differs from
 *     {@code expectedNumMessages}
 */
public <T> List<T> readMessages(int expectedNumMessages, String groupId, String topic, Deserializer<T> valueDeserializer) throws Exception {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, container.getBootstrapServers());
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
final List<T> messages = Collections.synchronizedList(new ArrayList<>(expectedNumMessages));
try (Consumer<Bytes, T> consumer = new KafkaConsumer<>(props, new BytesDeserializer(), valueDeserializer)) {
waitUntilTopicAvailableThenAssign(topic, consumer, Duration.ofSeconds(60));
// Keep polling until getting expected number of messages
final Deadline deadline = Deadline.fromNow(Duration.ofSeconds(120));
while (deadline.hasTimeLeft() && messages.size() < expectedNumMessages) {
LOG.info("Waiting for messages. Received {}/{}.", messages.size(), expectedNumMessages);
ConsumerRecords<Bytes, T> records = consumer.poll(Duration.ofMillis(1000));
for (ConsumerRecord<Bytes, T> record : records) {
messages.add(record.value());
}
}
if (messages.size() != expectedNumMessages) {
// Include the topic and both counts so a test failure is diagnosable from the message alone.
throw new IOException(
String.format(
"Could not read expected number of messages from topic '%s': expected %d but received %d within the deadline.",
topic, expectedNumMessages, messages.size()));
}
return messages;
}
}
Use of org.apache.kafka.common.utils.Bytes in the project flink by apache: class KafkaContainerClient, method sendMessages.
/**
 * Sends each of the given messages to {@code topic}, serializing values with
 * {@code valueSerializer} and requiring full acknowledgement ({@code acks=all}).
 */
public <T> void sendMessages(String topic, Serializer<T> valueSerializer, T... messages) {
final Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, container.getBootstrapServers());
props.put(ProducerConfig.ACKS_CONFIG, "all");
// try-with-resources closes the producer, which flushes any still-buffered sends.
try (Producer<Bytes, T> producer =
new KafkaProducer<>(props, new BytesSerializer(), valueSerializer)) {
for (T payload : messages) {
producer.send(new ProducerRecord<>(topic, payload));
}
}
}
Use of org.apache.kafka.common.utils.Bytes in the project kafka by apache: class InMemoryTimeOrderedKeyValueBuffer, method flush.
@Override
public void flush() {
    if (!loggingEnabled) {
        return;
    }
    // counting on this getting called before the record collector's flush
    for (final Bytes key : dirtyKeys) {
        final BufferKey bufferKey = index.get(key);
        if (bufferKey != null) {
            logValue(key, bufferKey, sortedMap.get(bufferKey));
        } else {
            // The record was evicted from the buffer. Send a tombstone.
            logTombstone(key);
        }
    }
    dirtyKeys.clear();
}
Use of org.apache.kafka.common.utils.Bytes in the project kafka by apache: class InMemoryTimeOrderedKeyValueBuffer, method restoreBatch.
/**
 * Rebuilds the buffer's in-memory state ({@code index}, {@code sortedMap}, size and timestamp
 * bookkeeping) from a batch of changelog records during state restoration.
 *
 * <p>Each record's value may be encoded in one of four changelog formats (V0–V3), selected by
 * the "v" record header. The version checks below are deliberately ordered V3 before V2:
 * releases 2.4.0, 2.4.1, and 2.5.0 wrote V3-formatted data under the V2 flag, so V2 records
 * must be duck-typed.
 *
 * @param batch changelog records for this buffer's partition; a null value is a tombstone
 * @throws IllegalStateException if a record belongs to a different partition
 * @throws IllegalArgumentException if a record carries an unrecognized version header
 */
private void restoreBatch(final Collection<ConsumerRecord<byte[], byte[]>> batch) {
for (final ConsumerRecord<byte[], byte[]> record : batch) {
// Sanity check: every record in the batch must belong to this buffer's partition.
if (record.partition() != partition) {
throw new IllegalStateException(String.format("record partition [%d] is being restored by the wrong suppress partition [%d]", record.partition(), partition));
}
final Bytes key = Bytes.wrap(record.key());
if (record.value() == null) {
// This was a tombstone. Delete the record.
final BufferKey bufferKey = index.remove(key);
if (bufferKey != null) {
final BufferValue removed = sortedMap.remove(bufferKey);
if (removed != null) {
// Keep the tracked memory footprint in sync with the removed entry.
memBufferSize -= computeRecordSize(bufferKey.key(), removed);
}
if (bufferKey.time() == minTimestamp) {
// The removed entry held the minimum timestamp; recompute it from the sorted map.
minTimestamp = sortedMap.isEmpty() ? Long.MAX_VALUE : sortedMap.firstKey().time();
}
}
} else {
// Non-tombstone: dispatch on the changelog format version carried in the "v" header.
final Header versionHeader = record.headers().lastHeader("v");
if (versionHeader == null) {
// Version 0 (no header):
// value:
// - buffer time
// - old value
// - new value
// V0 carries no prior value, so fall back to whatever is already buffered for this key.
final byte[] previousBufferedValue = index.containsKey(key) ? internalPriorValueForBuffered(key) : null;
final DeserializationResult deserializationResult = deserializeV0(record, key, previousBufferedValue);
cleanPut(deserializationResult.time(), deserializationResult.key(), deserializationResult.bufferValue());
} else if (Arrays.equals(versionHeader.value(), V_3_CHANGELOG_HEADER_VALUE)) {
// Version 3:
// value:
// - record context
// - prior value
// - old value
// - new value
// - buffer time
final DeserializationResult deserializationResult = deserializeV3(record, key);
cleanPut(deserializationResult.time(), deserializationResult.key(), deserializationResult.bufferValue());
} else if (Arrays.equals(versionHeader.value(), V_2_CHANGELOG_HEADER_VALUE)) {
// Version 2:
// value:
// - record context
// - old value
// - new value
// - prior value
// - buffer time
// NOTE: 2.4.0, 2.4.1, and 2.5.0 actually encode Version 3 formatted data,
// but still set the Version 2 flag, so to deserialize, we have to duck type.
final DeserializationResult deserializationResult = duckTypeV2(record, key);
cleanPut(deserializationResult.time(), deserializationResult.key(), deserializationResult.bufferValue());
} else if (Arrays.equals(versionHeader.value(), V_1_CHANGELOG_HEADER_VALUE)) {
// Version 1:
// value:
// - buffer time
// - record context
// - old value
// - new value
// Like V0, V1 carries no prior value; reuse the currently buffered one if present.
final byte[] previousBufferedValue = index.containsKey(key) ? internalPriorValueForBuffered(key) : null;
final DeserializationResult deserializationResult = deserializeV1(record, key, previousBufferedValue);
cleanPut(deserializationResult.time(), deserializationResult.key(), deserializationResult.bufferValue());
} else {
throw new IllegalArgumentException("Restoring apparently invalid changelog record: " + record);
}
}
}
// Refresh exported metrics once per batch rather than per record.
updateBufferMetrics();
}
Aggregations