use of org.apache.kafka.clients.producer.ProducerRecord in project apache-kafka-on-k8s by banzaicloud.
the class RecordCollectorTest method shouldNotThrowStreamsExceptionOnFlushIfASendFailedWithContinueExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnFlushIfASendFailedWithContinueExceptionHandler() {
final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
@Override
public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
callback.onCompletion(null, new Exception());
return null;
}
}, "test", logContext, new AlwaysContinueProductionExceptionHandler());
collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
collector.flush();
}
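The test above overrides MockProducer.send so that every send completes with an error, which is exactly what the continue handler must tolerate. For comparison, a minimal sketch of using MockProducer on its own to verify producer-side logic without a broker (class name and record values are illustrative, not from the original test):

import java.util.List;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerSketch {
    public static void main(final String[] args) {
        // autoComplete=true makes every send succeed immediately, no broker required.
        final MockProducer<String, String> producer = new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        producer.send(new ProducerRecord<>("topic1", "3", "0"));
        // history() exposes every record the code under test handed to the producer.
        final List<ProducerRecord<String, String>> sent = producer.history();
        System.out.println("Records sent: " + sent.size());
    }
}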
use of org.apache.kafka.clients.producer.ProducerRecord in project incubator-rya by apache.
the class KafkaLoadStatements method fromFile.
@Override
public void fromFile(final Path statementsPath, final String visibilities) throws RyaStreamsException {
requireNonNull(statementsPath);
requireNonNull(visibilities);
if (!statementsPath.toFile().exists()) {
throw new RyaStreamsException("Could not load statements at path '" + statementsPath + "' because that path does not exist. Make sure you've entered the correct path.");
}
// Create an RDF Parser whose format is derived from the statementPath's file extension.
final RDFFormat format = RDFFormat.forFileName(statementsPath.getFileName().toString());
final RDFParser parser = Rio.createParser(format);
// Set a handler that writes the statements to the specified kafka topic.
parser.setRDFHandler(new RDFHandlerBase() {
@Override
public void startRDF() throws RDFHandlerException {
log.trace("Starting loading statements.");
}
@Override
public void handleStatement(final Statement stmnt) throws RDFHandlerException {
final VisibilityStatement visiStatement = new VisibilityStatement(stmnt, visibilities);
producer.send(new ProducerRecord<>(topic, visiStatement));
}
@Override
public void endRDF() throws RDFHandlerException {
producer.flush();
log.trace("Done.");
}
});
// Do the parse and load.
try {
parser.parse(Files.newInputStream(statementsPath), "");
} catch (RDFParseException | RDFHandlerException | IOException e) {
throw new RyaStreamsException("Could not load the RDF file's Statements into Rya Streams.", e);
}
}
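The handler above uses the two-argument ProducerRecord constructor, so each statement is sent without a key and the producer's partitioner spreads records across the topic's partitions; endRDF then calls producer.flush() so the asynchronous sends are actually written before the load completes. A minimal sketch of the same key-less send plus flush pattern with plain String values (topic name, broker address, and payloads are assumptions):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class KeylessSendSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            for (int i = 0; i < 10; i++) {
                // Two-argument constructor: no key, so the partitioner picks the partition.
                producer.send(new ProducerRecord<>("statements", "statement-" + i)); // topic name is an assumption
            }
            // Block until every buffered record has been transmitted, mirroring endRDF() above.
            producer.flush();
        }
    }
}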
use of org.apache.kafka.clients.producer.ProducerRecord in project incubator-rya by apache.
the class KafkaRyaSubGraphExporter method export.
/**
* Exports the RyaSubGraph to a Kafka topic whose name is the value returned by {@link RyaSubGraph#getId()}.
* @param constructID - rowID of the result that is exported. Used for logging purposes.
* @param subGraph - the RyaSubGraph exported to Kafka.
*/
@Override
public void export(final String constructID, final RyaSubGraph subGraph) throws ResultExportException {
checkNotNull(constructID);
checkNotNull(subGraph);
try {
// Send the result to the topic whose name matches the PCJ ID.
final ProducerRecord<String, RyaSubGraph> rec = new ProducerRecord<>(subGraph.getId(), subGraph);
final Future<RecordMetadata> future = producer.send(rec);
// Don't let the export return until the result has been written to the topic. Otherwise we may lose results.
future.get();
log.debug("Producer successfully sent record with id: {} and statements: {}", constructID, subGraph.getStatements());
} catch (final Throwable e) {
throw new ResultExportException("A result could not be exported to Kafka.", e);
}
}
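The blocking future.get() above is what makes the export synchronous, so a failed write surfaces as an exception instead of a silently dropped result. A minimal sketch of the same pattern with a timeout added, using String values in place of RyaSubGraph (topic name, broker address, and payload are assumptions):

import java.util.Properties;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class SynchronousSendSketch {
    public static void main(final String[] args) throws Exception {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // The topic name stands in for subGraph.getId() above.
            final ProducerRecord<String, String> rec = new ProducerRecord<>("pcj-1234", "subgraph payload");
            // Block until the broker acknowledges the write, but give up after ten seconds.
            final RecordMetadata metadata = producer.send(rec).get(10, TimeUnit.SECONDS);
            System.out.println("Written to " + metadata.topic() + "-" + metadata.partition() + "@" + metadata.offset());
        }
    }
}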
use of org.apache.kafka.clients.producer.ProducerRecord in project auratrainingproject by liuqinghua666.
the class JavaKafkaEventProducer method main.
public static void main(String[] args) throws Exception {
String dataPath = "D:\\bigdata\\source\\auratrainingproject\\spark\\data\\IJCAI17_dataset";
String topic = KafkaRedisConfig.KAFKA_USER_PAY_TOPIC;
Properties props = getConfig();
Producer<String, String> producer = new KafkaProducer<String, String>(props);
// Prepare the file path
if (args.length > 0) {
dataPath = args[0];
}
String fileName = JavaSQLAliPayAnalyzer.getOSPath(dataPath + "/user_pay.txt");
// Use a RateLimiter to throttle the send rate
int maxRatePerSecond = 10;
RateLimiter limiter = RateLimiter.create(maxRatePerSecond);
File file = new File(fileName);
BufferedReader reader = null;
try {
System.out.println("以行为单位读取文件内容,一次读一整行:");
reader = new BufferedReader(new FileReader(file));
String tempString = null;
int line = 1;
// Read one line at a time; readLine() returns null at end of file
while ((tempString = reader.readLine()) != null) {
// Show the line number
// System.out.println("line[" + line + "]=" + tempString);
// Prepare the data
String[] row = tempString.split(",");
if (row.length >= 3) {
// Throttle: allow at most maxRatePerSecond messages per second
limiter.acquire();
// user_id
String key = "" + row[0];
// shop_id + "," + time_stamp
String value = "" + row[1] + "," + row[2];
// Send the message
producer.send(new ProducerRecord<>(topic, key, value));
System.out.println("Message[" + line + "] sent: " + key + "=>" + value);
line++;
// Thread.sleep(10);
}
}
reader.close();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e1) {
}
}
}
}
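The getConfig() helper is not shown in this excerpt; for the String-keyed, String-valued producer above it would at minimum have to supply the bootstrap servers and both String serializers. A hypothetical sketch of such a configuration (broker address and tuning values are assumptions, not the project's actual settings):

import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerConfigSketch {
    // Hypothetical stand-in for the getConfig() helper used above.
    static Properties getConfig() {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumption
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.ACKS_CONFIG, "all"); // assumption: wait for full acknowledgement
        return props;
    }

    public static void main(final String[] args) {
        System.out.println(getConfig());
    }
}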
use of org.apache.kafka.clients.producer.ProducerRecord in project kafka-streams-examples by confluentinc.
the class TopArticlesExampleDriver method produceInputs.
private static void produceInputs(String bootstrapServers, String schemaRegistryUrl) throws IOException {
final String[] users = { "erica", "bob", "joe", "damian", "tania", "phil", "sam", "lauren", "joseph" };
final String[] industries = { "engineering", "telco", "finance", "health", "science" };
final String[] pages = { "index.html", "news.html", "contact.html", "about.html", "stuff.html" };
final Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, io.confluent.kafka.serializers.KafkaAvroSerializer.class);
props.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, schemaRegistryUrl);
final KafkaProducer<String, GenericRecord> producer = new KafkaProducer<>(props);
final GenericRecordBuilder pageViewBuilder = new GenericRecordBuilder(loadSchema("pageview.avsc"));
final Random random = new Random();
for (String user : users) {
pageViewBuilder.set("industry", industries[random.nextInt(industries.length)]);
pageViewBuilder.set("flags", "ARTICLE");
// For each user generate some page views
IntStream.range(0, random.nextInt(10)).mapToObj(value -> {
pageViewBuilder.set("user", user);
pageViewBuilder.set("page", pages[random.nextInt(pages.length)]);
return pageViewBuilder.build();
}).forEach(record -> producer.send(new ProducerRecord<>(TopArticlesLambdaExample.PAGE_VIEWS, null, record)));
}
producer.flush();
}
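loadSchema("pageview.avsc") is also not shown; judging from the fields the loop sets, the schema is roughly a record with string fields user, page, industry, and flags. A minimal sketch that builds an equivalent schema and record programmatically (record name and field types are assumptions):

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.generic.GenericRecordBuilder;

public class PageViewSchemaSketch {
    public static void main(final String[] args) {
        // Assumed shape of pageview.avsc: a record with four string fields.
        final Schema pageViewSchema = SchemaBuilder.record("pageview").fields()
                .requiredString("user")
                .requiredString("page")
                .requiredString("industry")
                .requiredString("flags")
                .endRecord();
        final GenericRecord view = new GenericRecordBuilder(pageViewSchema)
                .set("user", "erica")
                .set("page", "index.html")
                .set("industry", "engineering")
                .set("flags", "ARTICLE")
                .build();
        System.out.println(view);
    }
}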