use of org.apache.kafka.common.serialization.StringDeserializer in project druid by druid-io.
the class KafkaLookupExtractorFactory method getConsumer.
// Overridden in tests
Consumer<String, String> getConsumer() {
    // Workaround for Kafka String Serializer could not be found
    // Adopted from org.apache.druid.indexing.kafka.KafkaRecordSupplier#getKafkaConsumer
    ClassLoader currCtxCl = Thread.currentThread().getContextClassLoader();
    final Properties properties = getConsumerProperties();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        return new KafkaConsumer<>(properties, new StringDeserializer(), new StringDeserializer());
    } finally {
        Thread.currentThread().setContextClassLoader(currCtxCl);
    }
}
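The interesting part of this snippet is the context-classloader swap: Kafka resolves deserializers named in the config by class name, which can fail inside Druid's extension classloaders, so the consumer is built with the extension's own classloader active and with StringDeserializer instances passed in directly. A minimal sketch of driving the returned consumer follows; the topic name, target map, and poll loop are illustrative assumptions rather than Druid source, and a Kafka client new enough to offer poll(Duration) is assumed.

// Hypothetical driver loop; "lookup-topic" and lookupMap are illustrative only.
Consumer<String, String> consumer = getConsumer();
Map<String, String> lookupMap = new ConcurrentHashMap<>();
consumer.subscribe(Collections.singletonList("lookup-topic"));
while (!Thread.currentThread().isInterrupted()) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> record : records) {
        // Latest value for a key wins.
        lookupMap.put(record.key(), record.value());
    }
}
consumer.close();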
use of org.apache.kafka.common.serialization.StringDeserializer in project wikidata-query-rdf by wikimedia.
the class KafkaStreamConsumer method build.
public static KafkaStreamConsumer build(String brokers, String topic, int partition, String consumerId,
        int maxBatchLength, RDFChunkDeserializer deser,
        @Nullable BiConsumer<Consumer<String, MutationEventData>, TopicPartition> offsetReset,
        KafkaStreamConsumerMetricsListener metrics, int bufferedInputMessages,
        Predicate<MutationEventData> filter) {
    Map<String, Object> props = new HashMap<>();
    props.put("bootstrap.servers", brokers);
    props.put("group.id", consumerId);
    props.put("max.poll.interval.ms", "600000");
    props.put("enable.auto.commit", "false");
    props.put("isolation.level", "read_committed");
    props.put("max.poll.records", bufferedInputMessages);
    if (offsetReset == null) {
        props.put("auto.offset.reset", "earliest");
    } else {
        props.put("auto.offset.reset", "none");
    }
    // Allow roughly 10 very large messages (120k each) per partition fetch.
    props.put("max.partition.fetch.bytes", 10 * 120 * 1024);
    KafkaConsumer<String, MutationEventData> consumer = new KafkaConsumer<>(props, new StringDeserializer(),
            new JsonDeserializer<>(singletonMap(topic, MutationEventData.class)));
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    consumer.assign(singleton(topicPartition));
    try {
        // Fetching the position will fail if no offsets have been committed yet for this consumerId.
        // This pattern only works because we know that we have a single consumer per blazegraph host.
        // If it were a group of consumers, as is usually the case, this strategy would make no sense.
        consumer.position(topicPartition);
    } catch (InvalidOffsetException ioe) {
        if (offsetReset == null) {
            throw new IllegalStateException("Failed to find earliest offsets for [" + topicPartition + "]", ioe);
        }
        offsetReset.accept(consumer, topicPartition);
    }
    return new KafkaStreamConsumer(consumer, topicPartition, deser, maxBatchLength, metrics, filter);
}
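When no offset has been committed yet for the consumerId, the caller-supplied offsetReset hook decides where to start. A hedged usage sketch follows; the broker address, topic, and consumer id are illustrative, and the deser, metricsListener, and filter arguments are placeholders assumed to be constructed elsewhere rather than the project's actual wiring.

// Reset hook: start from the beginning of the partition when no offset exists yet.
BiConsumer<Consumer<String, MutationEventData>, TopicPartition> resetToBeginning =
        (consumer, partition) -> consumer.seekToBeginning(singleton(partition));

KafkaStreamConsumer streamConsumer = KafkaStreamConsumer.build(
        "broker1:9092",        // brokers (illustrative)
        "mutation-topic",      // topic (illustrative)
        0,                     // partition
        "wdqs-host-1",         // consumerId (illustrative)
        250,                   // maxBatchLength
        rdfChunkDeserializer,  // assumed RDFChunkDeserializer built elsewhere
        resetToBeginning,
        metricsListener,       // assumed KafkaStreamConsumerMetricsListener
        1000,                  // bufferedInputMessages
        event -> true);        // filter: accept every MutationEventData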
use of org.apache.kafka.common.serialization.StringDeserializer in project kafka by apache.
the class KafkaConsumerTest method newConsumer.
private KafkaConsumer<String, String> newConsumer(Time time, KafkaClient client, Metadata metadata, PartitionAssignor assignor,
        int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, boolean autoCommitEnabled, int autoCommitIntervalMs) {
    // create a consumer with mocked time and mocked network
    String clientId = "mock-consumer";
    String groupId = "mock-group";
    String metricGroupPrefix = "consumer";
    long retryBackoffMs = 100;
    long requestTimeoutMs = 30000;
    boolean excludeInternalTopics = true;
    int minBytes = 1;
    int maxBytes = Integer.MAX_VALUE;
    int maxWaitMs = 500;
    int fetchSize = 1024 * 1024;
    int maxPollRecords = Integer.MAX_VALUE;
    boolean checkCrcs = true;
    Deserializer<String> keyDeserializer = new StringDeserializer();
    Deserializer<String> valueDeserializer = new StringDeserializer();
    OffsetResetStrategy autoResetStrategy = OffsetResetStrategy.EARLIEST;
    List<PartitionAssignor> assignors = Arrays.asList(assignor);
    ConsumerInterceptors<String, String> interceptors = null;
    Metrics metrics = new Metrics();
    SubscriptionState subscriptions = new SubscriptionState(autoResetStrategy);
    ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(client, metadata, time, retryBackoffMs, requestTimeoutMs);
    ConsumerCoordinator consumerCoordinator = new ConsumerCoordinator(consumerClient, groupId, rebalanceTimeoutMs, sessionTimeoutMs,
            heartbeatIntervalMs, assignors, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs,
            autoCommitEnabled, autoCommitIntervalMs, interceptors, excludeInternalTopics);
    Fetcher<String, String> fetcher = new Fetcher<>(consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize, maxPollRecords,
            checkCrcs, keyDeserializer, valueDeserializer, metadata, subscriptions, metrics, metricGroupPrefix, time, retryBackoffMs);
    return new KafkaConsumer<>(clientId, consumerCoordinator, keyDeserializer, valueDeserializer, fetcher, interceptors,
            time, consumerClient, metrics, subscriptions, metadata, retryBackoffMs, requestTimeoutMs);
}
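This is an internal, test-only constructor that wires mocked time and network components directly; application code never calls it. For comparison, the same key/value pairing of StringDeserializer instances is normally supplied through the public constructor, as in this minimal sketch (broker address and group id are illustrative):

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");
// Deserializer instances are passed directly, so no key/value.deserializer config entries are needed.
KafkaConsumer<String, String> consumer =
        new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer());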
use of org.apache.kafka.common.serialization.StringDeserializer in project incubator-rya by apache.
the class TopologyFactory method build.
@Override
public TopologyBuilder build(final String sparqlQuery, final String statementsTopic, final String resultsTopic,
        final BNodeIdFactory bNodeIdFactory) throws MalformedQueryException, TopologyBuilderException {
    requireNonNull(sparqlQuery);
    requireNonNull(statementsTopic);
    requireNonNull(resultsTopic);
    final ParsedQuery parsedQuery = new SPARQLParser().parseQuery(sparqlQuery, null);
    final TopologyBuilder builder = new TopologyBuilder();
    final TupleExpr expr = parsedQuery.getTupleExpr();
    final QueryVisitor visitor = new QueryVisitor(bNodeIdFactory);
    expr.visit(visitor);
    processorEntryList = visitor.getProcessorEntryList();
    final Map<TupleExpr, String> idMap = visitor.getIDs();
    // Add the source node.
    builder.addSource(SOURCE, new StringDeserializer(), new VisibilityStatementDeserializer(), statementsTopic);
    // Processing the processor entry list in reverse order means we go from leaf nodes -> parent nodes.
    // So, when the parent processing nodes get added, the upstream processing node will already exist.
    ProcessorEntry entry = null;
    for (int ii = processorEntryList.size() - 1; ii >= 0; ii--) {
        entry = processorEntryList.get(ii);
        // Statement patterns need to be connected to the Source.
        if (entry.getNode() instanceof StatementPattern) {
            builder.addProcessor(entry.getID(), entry.getSupplier(), SOURCE);
        } else {
            final List<TupleExpr> parents = entry.getUpstreamNodes();
            final String[] parentIDs = new String[parents.size()];
            for (int id = 0; id < parents.size(); id++) {
                parentIDs[id] = idMap.get(parents.get(id));
            }
            builder.addProcessor(entry.getID(), entry.getSupplier(), parentIDs);
        }
        // Add a state store for any node type that requires one.
        if (entry.getNode() instanceof Join || entry.getNode() instanceof LeftJoin || entry.getNode() instanceof Group) {
            // Add a state store for the join processor.
            final StateStoreSupplier joinStoreSupplier = Stores.create(entry.getID())
                    .withStringKeys()
                    .withValues(new VisibilityBindingSetSerde())
                    .persistent()
                    .build();
            builder.addStateStore(joinStoreSupplier, entry.getID());
        }
    }
    // Add a formatter that converts the ProcessorResults into the output format.
    final SinkEntry<?, ?> sinkEntry = visitor.getSinkEntry();
    builder.addProcessor("OUTPUT_FORMATTER", sinkEntry.getFormatterSupplier(), entry.getID());
    // Add the sink.
    builder.addSink(SINK, resultsTopic, sinkEntry.getKeySerializer(), sinkEntry.getValueSerializer(), "OUTPUT_FORMATTER");
    return builder;
}
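The returned TopologyBuilder belongs to the older Kafka Streams Processor API (the same generation as the Stores.create builder used above). A hedged sketch of how the built topology might be started, assuming that older API, illustrative application id and broker address, and a hypothetical factory instance named topologyFactory with the inputs already in scope:

Properties streamsProps = new Properties();
streamsProps.put(StreamsConfig.APPLICATION_ID_CONFIG, "rya-query-app");     // illustrative
streamsProps.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // illustrative
TopologyBuilder builder = topologyFactory.build(sparqlQuery, statementsTopic, resultsTopic, bNodeIdFactory);
// Older Kafka Streams versions accept a TopologyBuilder directly.
KafkaStreams streams = new KafkaStreams(builder, streamsProps);
streams.start();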
use of org.apache.kafka.common.serialization.StringDeserializer in project incubator-rya by apache.
the class PeriodicCommandNotificationConsumerIT method kafkaNotificationMillisProviderTest.
@Test
public void kafkaNotificationMillisProviderTest() throws InterruptedException {
    BasicConfigurator.configure();
    final BlockingQueue<TimestampedNotification> notifications = new LinkedBlockingQueue<>();
    final Properties props = createKafkaConfig();
    final KafkaProducer<String, CommandNotification> producer = new KafkaProducer<>(props);
    final String topic = rule.getKafkaTopicName();
    rule.createTopic(topic);
    registration = new KafkaNotificationRegistrationClient(topic, producer);
    coord = new PeriodicNotificationCoordinatorExecutor(1, notifications);
    provider = new KafkaNotificationProvider(topic, new StringDeserializer(), new CommandNotificationSerializer(), props, coord, 1);
    provider.start();
    registration.addNotification("1", 1000, 0, TimeUnit.MILLISECONDS);
    Thread.sleep(4000);
    // Check that notifications are being added to the blocking queue.
    Assert.assertTrue(notifications.size() > 0);
    registration.deleteNotification("1");
    Thread.sleep(2000);
    final int size = notifications.size();
    // Sleep for 2 more seconds to ensure no more messages are being produced.
    Thread.sleep(2000);
    Assert.assertEquals(size, notifications.size());
    tearDown();
}
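The test relies on createKafkaConfig(), which is not shown above. A hedged sketch of a minimal property set consistent with the (de)serializers used in the test follows; the property values and the exact property list are assumptions for illustration, not the project's actual helper.

// Assumed shape of the config helper; values are illustrative only.
private static Properties createKafkaConfig() {
    final Properties props = new Properties();
    props.setProperty("bootstrap.servers", "localhost:9092");   // illustrative
    props.setProperty("group.id", "notification-consumer");     // illustrative
    // Producer side: String keys, CommandNotification values.
    props.setProperty("key.serializer", StringSerializer.class.getName());
    props.setProperty("value.serializer", CommandNotificationSerializer.class.getName());
    // Consumer side: the KafkaNotificationProvider above receives deserializer
    // instances directly, so this entry is a defensive default only.
    props.setProperty("key.deserializer", StringDeserializer.class.getName());
    props.setProperty("auto.offset.reset", "earliest");
    return props;
}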