Use of org.apache.kafka.streams.StreamsConfig in project incubator-rya by apache.
The class KafkaRunQuery, method run.
@Override
public void run(final UUID queryId) throws RyaStreamsException {
    requireNonNull(queryId);

    // Fetch the query from the repository. Throw an exception if it isn't present.
    final Optional<StreamsQuery> query = queryRepo.get(queryId);
    if (!query.isPresent()) {
        throw new RyaStreamsException("Could not run the Query with ID " + queryId +
                " because no such query is currently registered.");
    }

    // Build a processing topology using the SPARQL, provided statements topic, and provided results topic.
    final String sparql = query.get().getSparql();
    final TopologyBuilder topologyBuilder;
    try {
        topologyBuilder = topologyFactory.build(sparql, statementsTopic, resultsTopic, new RandomUUIDFactory());
    } catch (final Exception e) {
        throw new RyaStreamsException("Could not run the Query with ID " + queryId +
                " because a processing topology could not be built for the SPARQL " + sparql, e);
    }

    // Set up the Kafka Streams program.
    final Properties streamsProps = new Properties();
    streamsProps.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaHostname + ":" + kafkaPort);

    // Use the Query ID as the Application ID to ensure we resume where we left off
    // the last time this command was run.
    streamsProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "KafkaRunQuery-" + queryId);

    // Always start at the beginning of the input topic.
    streamsProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    final KafkaStreams streams = new KafkaStreams(topologyBuilder, new StreamsConfig(streamsProps));

    // If an unhandled exception is thrown, log the problem and kill the program.
    streams.setUncaughtExceptionHandler((t, e) -> {
        log.error("Unhandled exception while processing the Rya Streams query. Shutting down.", e);
        System.exit(1);
    });

    // Set up a shutdown hook that releases the latch at shutdown.
    final CountDownLatch awaitTermination = new CountDownLatch(1);
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            awaitTermination.countDown();
        }
    });

    // Run the streams program and wait for termination.
    streams.start();
    try {
        awaitTermination.await();
    } catch (final InterruptedException e) {
        log.warn("Interrupted while waiting for termination. Shutting down.");
    }
    streams.close();
}
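The shutdown-latch pattern above (uncaught-exception handler, shutdown hook, await, close) is a common way to keep a Streams program in the foreground until the JVM is asked to exit. A minimal self-contained sketch of the same lifecycle, assuming a trivial pass-through topology; the topic names, application ID, and broker address are illustrative placeholders:

import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class RunUntilShutdown {
    public static void main(final String[] args) throws InterruptedException {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "run-until-shutdown-example");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // A trivial topology: copy the "input" topic to the "output" topic.
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
               .to("output", Produced.with(Serdes.String(), Serdes.String()));
        final KafkaStreams streams = new KafkaStreams(builder.build(), props);

        // The hook only releases the latch; the main thread performs the close.
        final CountDownLatch awaitTermination = new CountDownLatch(1);
        Runtime.getRuntime().addShutdownHook(new Thread(awaitTermination::countDown));

        streams.start();
        awaitTermination.await();
        streams.close();
    }
}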
Use of org.apache.kafka.streams.StreamsConfig in project kafka-streams-examples by confluentinc.
The class GlobalKTablesExample, method createStreams.
public static KafkaStreams createStreams(final String bootstrapServers,
                                         final String schemaRegistryUrl,
                                         final String stateDir) {
    final Properties streamsConfiguration = new Properties();
    // Give the Streams application a unique name. The name must be unique in the Kafka cluster
    // against which the application is run.
    streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "global-tables-example");
    streamsConfiguration.put(StreamsConfig.CLIENT_ID_CONFIG, "global-tables-example-client");
    // Where to find Kafka broker(s).
    streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    // Set to earliest so we don't miss any data that arrived in the topics before the process started.
    streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    // Create and configure the SpecificAvroSerdes required in this example.
    final SpecificAvroSerde<Order> orderSerde = new SpecificAvroSerde<>();
    final Map<String, String> serdeConfig =
        Collections.singletonMap(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl);
    orderSerde.configure(serdeConfig, false);
    final SpecificAvroSerde<Customer> customerSerde = new SpecificAvroSerde<>();
    customerSerde.configure(serdeConfig, false);
    final SpecificAvroSerde<Product> productSerde = new SpecificAvroSerde<>();
    productSerde.configure(serdeConfig, false);
    final SpecificAvroSerde<EnrichedOrder> enrichedOrdersSerde = new SpecificAvroSerde<>();
    enrichedOrdersSerde.configure(serdeConfig, false);

    final StreamsBuilder builder = new StreamsBuilder();
    // Get the stream of orders.
    final KStream<Long, Order> ordersStream =
        builder.stream(ORDER_TOPIC, Consumed.with(Serdes.Long(), orderSerde));

    // Create a global table for customers. The data from this global table
    // will be fully replicated on each instance of this application.
    final GlobalKTable<Long, Customer> customers = builder.globalTable(
        CUSTOMER_TOPIC,
        Materialized.<Long, Customer, KeyValueStore<Bytes, byte[]>>as(CUSTOMER_STORE)
            .withKeySerde(Serdes.Long())
            .withValueSerde(customerSerde));

    // Create a global table for products. The data from this global table
    // will be fully replicated on each instance of this application.
    final GlobalKTable<Long, Product> products = builder.globalTable(
        PRODUCT_TOPIC,
        Materialized.<Long, Product, KeyValueStore<Bytes, byte[]>>as(PRODUCT_STORE)
            .withKeySerde(Serdes.Long())
            .withValueSerde(productSerde));

    // Join the orders stream to the customer global table. As this is a global table,
    // we can use a non-key based join without needing to repartition the input stream.
    final KStream<Long, CustomerOrder> customerOrdersStream = ordersStream.join(
        customers,
        (orderId, order) -> order.getCustomerId(),
        (order, customer) -> new CustomerOrder(customer, order));

    // Join the enriched customer order stream with the product global table. As this is a global table,
    // we can use a non-key based join without needing to repartition the input stream.
    final KStream<Long, EnrichedOrder> enrichedOrdersStream = customerOrdersStream.join(
        products,
        (orderId, customerOrder) -> customerOrder.productId(),
        (customerOrder, product) -> new EnrichedOrder(product, customerOrder.customer, customerOrder.order));

    // Write the enriched order to the enriched-order topic.
    enrichedOrdersStream.to(ENRICHED_ORDER_TOPIC, Produced.with(Serdes.Long(), enrichedOrdersSerde));

    return new KafkaStreams(builder.build(), new StreamsConfig(streamsConfiguration));
}
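For context, a caller would drive the returned client roughly as follows; the broker address, schema registry URL, and state directory below are illustrative placeholders, not values taken from the snippet above:

// A sketch of driving the returned client (illustrative values).
final KafkaStreams streams = createStreams(
    "localhost:9092",           // bootstrapServers
    "http://localhost:8081",    // schemaRegistryUrl
    "/tmp/kafka-streams-global-tables");
// Wipe local state from earlier runs; convenient for a demo, rarely wanted in production.
streams.cleanUp();
streams.start();
// Close the client cleanly when the JVM shuts down.
Runtime.getRuntime().addShutdownHook(new Thread(streams::close));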
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class InternalStreamsBuilderTest, method shouldNotMaterializeSourceKTableIfNotRequired.
@Test
public void shouldNotMaterializeSourceKTableIfNotRequired() {
    final MaterializedInternal<String, String, KeyValueStore<Bytes, byte[]>> materializedInternal =
        new MaterializedInternal<>(Materialized.with(null, null), builder, storePrefix);
    final KTable<String, String> table1 = builder.table("topic2", consumed, materializedInternal);
    builder.buildAndOptimizeTopology();
    final ProcessorTopology topology = builder.internalTopologyBuilder
        .rewriteTopology(new StreamsConfig(StreamsTestUtils.getStreamsConfig(APP_ID)))
        .buildTopology();
    assertEquals(0, topology.stateStores().size());
    assertEquals(0, topology.storeToChangelogTopic().size());
    assertNull(table1.queryableStoreName());
}
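For contrast, a sketch against the public StreamsBuilder API, assuming string serdes and an illustrative store name: naming the store through Materialized.as(...) does force materialization, so the resulting table reports a queryable store name.

final StreamsBuilder publicBuilder = new StreamsBuilder();
final KTable<String, String> materializedTable = publicBuilder.table(
    "topic2",
    Consumed.with(Serdes.String(), Serdes.String()),
    Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as("queryable-store"));
// Because the store was explicitly named, the source KTable is materialized and queryable.
assertEquals("queryable-store", materializedTable.queryableStoreName());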
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class InternalStreamsBuilderTest, method shouldHaveNullTimestampExtractorWhenNoneSupplied.
@Test
public void shouldHaveNullTimestampExtractorWhenNoneSupplied() {
    builder.stream(Collections.singleton("topic"), consumed);
    builder.buildAndOptimizeTopology();
    builder.internalTopologyBuilder.rewriteTopology(
        new StreamsConfig(StreamsTestUtils.getStreamsConfig(APP_ID)));
    final ProcessorTopology processorTopology = builder.internalTopologyBuilder.buildTopology();
    assertNull(processorTopology.source("topic").getTimestampExtractor());
}
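The extractor is null here only because none was supplied. With the public API, a per-source TimestampExtractor can be attached via Consumed, for example using the built-in WallclockTimestampExtractor (topic name illustrative):

final StreamsBuilder publicBuilder = new StreamsBuilder();
final KStream<String, String> withExtractor = publicBuilder.stream(
    "topic",
    Consumed.with(Serdes.String(), Serdes.String())
            .withTimestampExtractor(new WallclockTimestampExtractor()));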
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class MeteredTimestampedWindowStoreTest, method setUp.
@Before
public void setUp() {
    final StreamsMetricsImpl streamsMetrics =
        new StreamsMetricsImpl(metrics, "test", StreamsConfig.METRICS_LATEST, new MockTime());
    context = new InternalMockProcessorContext<>(
        TestUtils.tempDirectory(),
        Serdes.String(),
        Serdes.Long(),
        streamsMetrics,
        new StreamsConfig(StreamsTestUtils.getStreamsConfig()),
        MockRecordCollector::new,
        new ThreadCache(new LogContext("testCache "), 0, streamsMetrics),
        Time.SYSTEM,
        taskId);
}
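StreamsConfig.METRICS_LATEST is the value an application would normally supply through the built-in metrics version setting; a minimal sketch, assuming the StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG key available in recent Kafka versions (the application ID and broker address are illustrative):

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "metrics-version-example"); // illustrative
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");       // illustrative
// Select the current built-in metrics structure by name.
props.put(StreamsConfig.BUILT_IN_METRICS_VERSION_CONFIG, StreamsConfig.METRICS_LATEST);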