Usage of org.apache.kafka.common.utils.Bytes in the project apache-kafka-on-k8s by banzaicloud:
the method shouldIterateCacheAndStore of the class CachingWindowStoreTest.
@Test
public void shouldIterateCacheAndStore() {
    // Seed the underlying store with one record ("a") and the cache layer with
    // another ("b"), then verify a single fetch over the covering time range
    // returns both entries in timestamp order and nothing else.
    final Bytes key = Bytes.wrap("1".getBytes());
    underlying.put(WindowKeySchema.toStoreKeyBinary(key, DEFAULT_TIMESTAMP, 0), "a".getBytes());
    cachingStore.put(key, bytesValue("b"), DEFAULT_TIMESTAMP + WINDOW_SIZE);
    final WindowStoreIterator<byte[]> results =
        cachingStore.fetch(bytesKey("1"), DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE);
    verifyKeyValue(results.next(), DEFAULT_TIMESTAMP, "a");
    verifyKeyValue(results.next(), DEFAULT_TIMESTAMP + WINDOW_SIZE, "b");
    assertFalse(results.hasNext());
}
Usage of org.apache.kafka.common.utils.Bytes in the project apache-kafka-on-k8s by banzaicloud:
the method shouldPeekNextKey of the class FilteredCacheIteratorTest.
@Test
public void shouldPeekNextKey() {
    // For every remaining entry, peekNextKey() must report exactly the key
    // that the subsequent call to next() returns.
    while (allIterator.hasNext()) {
        final Bytes peeked = allIterator.peekNextKey();
        assertThat(allIterator.next().key, equalTo(peeked));
    }
}
Usage of org.apache.kafka.common.utils.Bytes in the project apache-kafka-on-k8s by banzaicloud:
the method shouldPeekNextKey of the class MergedSortedCacheKeyValueBytesStoreIteratorTest.
@Test
public void shouldPeekNextKey() throws Exception {
    // Interleave single-byte keys between the store (even values) and the cache
    // (odd values), then verify that over the merged range [2, 9] peekNextKey()
    // always matches the key that next() subsequently yields.
    final KeyValueStore<Bytes, byte[]> kv =
        new InMemoryKeyValueStore<>("one", Serdes.Bytes(), Serdes.ByteArray());
    final ThreadCache cache =
        new ThreadCache(new LogContext("testCache "), 1000000L, new MockStreamsMetrics(new Metrics()));
    final byte[][] bytes = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 6 }, { 7 }, { 8 }, { 9 }, { 10 } };
    for (int i = 0; i < bytes.length - 1; i += 2) {
        kv.put(Bytes.wrap(bytes[i]), bytes[i]);
        cache.put(namespace, Bytes.wrap(bytes[i + 1]), new LRUCacheEntry(bytes[i + 1]));
    }
    final Bytes from = Bytes.wrap(new byte[] { 2 });
    final Bytes to = Bytes.wrap(new byte[] { 9 });
    final KeyValueIterator<Bytes, byte[]> storeIterator = kv.range(from, to);
    final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.range(namespace, from, to);
    final MergedSortedCacheKeyValueBytesStoreIterator iterator =
        new MergedSortedCacheKeyValueBytesStoreIterator(cacheIterator, storeIterator);
    // Close in a finally block so a failing assertion cannot leak the iterator.
    // (The original also accumulated the peeked keys into a `values` array that
    // was never read afterwards — that dead collection has been removed.)
    try {
        int bytesIndex = 2; // first key inside the [2, 9] range
        while (iterator.hasNext()) {
            final byte[] peekedKey = iterator.peekNextKey().get();
            assertArrayEquals(bytes[bytesIndex++], peekedKey);
            iterator.next();
        }
    } finally {
        iterator.close();
    }
}
Usage of org.apache.kafka.common.utils.Bytes in the project apache-kafka-on-k8s by banzaicloud:
the method shouldSkipAllDeletedFromCache of the class MergedSortedCacheKeyValueBytesStoreIteratorTest.
@Test
public void shouldSkipAllDeletedFromCache() throws Exception {
    // Populate store and cache with identical single-byte keys, then overwrite
    // a subset of cache entries with null-value tombstones. The merged iterator
    // must skip every tombstoned key and yield the survivors in order.
    final byte[][] bytes = { { 0 }, { 1 }, { 2 }, { 3 }, { 4 }, { 5 }, { 6 }, { 7 }, { 8 }, { 9 }, { 10 }, { 11 } };
    for (byte[] aByte : bytes) {
        final Bytes key = Bytes.wrap(aByte);
        store.put(key, aByte);
        cache.put(namespace, key, new LRUCacheEntry(aByte));
    }
    // Tombstone (delete) these keys in the cache only.
    for (final int deleted : new int[] { 1, 2, 3, 8, 11 }) {
        cache.put(namespace, Bytes.wrap(bytes[deleted]), new LRUCacheEntry(null));
    }
    final MergedSortedCacheKeyValueBytesStoreIterator iterator = createIterator();
    // Close in a finally block — the original never closed the iterator, unlike
    // the sibling shouldPeekNextKey test in this class.
    try {
        for (final int expected : new int[] { 0, 4, 5, 6, 7, 9, 10 }) {
            assertArrayEquals(bytes[expected], iterator.next().key.get());
        }
        assertFalse(iterator.hasNext());
    } finally {
        iterator.close();
    }
}
Usage of org.apache.kafka.common.utils.Bytes in the project kafka-streams-examples by confluentinc:
the method createStreams of the class GlobalKTablesExample.
/**
 * Builds the KafkaStreams instance for the global-tables example: an order
 * stream is enriched by joining against two fully-replicated GlobalKTables
 * (customers and products), and the result is written to the enriched-order
 * topic.
 */
public static KafkaStreams createStreams(final String bootstrapServers, final String schemaRegistryUrl, final String stateDir) {
    // --- Streams configuration ---------------------------------------------
    final Properties config = new Properties();
    // Application id must be unique within the target Kafka cluster.
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "global-tables-example");
    config.put(StreamsConfig.CLIENT_ID_CONFIG, "global-tables-example-client");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    config.put(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    // Start from the earliest offset so no pre-existing records are missed.
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    // --- Avro serdes, all pointing at the same schema registry -------------
    final Map<String, String> serdeConfig =
        Collections.singletonMap(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl);
    final SpecificAvroSerde<Order> orderSerde = new SpecificAvroSerde<>();
    orderSerde.configure(serdeConfig, false);
    final SpecificAvroSerde<Customer> customerSerde = new SpecificAvroSerde<>();
    customerSerde.configure(serdeConfig, false);
    final SpecificAvroSerde<Product> productSerde = new SpecificAvroSerde<>();
    productSerde.configure(serdeConfig, false);
    final SpecificAvroSerde<EnrichedOrder> enrichedOrderSerde = new SpecificAvroSerde<>();
    enrichedOrderSerde.configure(serdeConfig, false);

    // --- Topology ----------------------------------------------------------
    final StreamsBuilder builder = new StreamsBuilder();
    // Incoming orders keyed by order id.
    final KStream<Long, Order> orders =
        builder.stream(ORDER_TOPIC, Consumed.with(Serdes.Long(), orderSerde));
    // Global tables are fully replicated on every application instance, which
    // lets us do non-key joins below without repartitioning the order stream.
    final GlobalKTable<Long, Customer> customers = builder.globalTable(
        CUSTOMER_TOPIC,
        Materialized.<Long, Customer, KeyValueStore<Bytes, byte[]>>as(CUSTOMER_STORE)
            .withKeySerde(Serdes.Long())
            .withValueSerde(customerSerde));
    final GlobalKTable<Long, Product> products = builder.globalTable(
        PRODUCT_TOPIC,
        Materialized.<Long, Product, KeyValueStore<Bytes, byte[]>>as(PRODUCT_STORE)
            .withKeySerde(Serdes.Long())
            .withValueSerde(productSerde));

    // Join orders with customers (keyed by the order's customer id) ...
    final KStream<Long, CustomerOrder> customerOrders = orders.join(
        customers,
        (orderId, order) -> order.getCustomerId(),
        (order, customer) -> new CustomerOrder(customer, order));
    // ... then with products (keyed by the order's product id).
    final KStream<Long, EnrichedOrder> enrichedOrders = customerOrders.join(
        products,
        (orderId, customerOrder) -> customerOrder.productId(),
        (customerOrder, product) -> new EnrichedOrder(product, customerOrder.customer, customerOrder.order));

    // Emit the fully enriched orders.
    enrichedOrders.to(ENRICHED_ORDER_TOPIC, Produced.with(Serdes.Long(), enrichedOrderSerde));
    return new KafkaStreams(builder.build(), new StreamsConfig(config));
}
Aggregations