use of kafka.javaapi.producer.Producer in project pinot by linkedin.
the class BaseClusterIntegrationTest method pushAvroIntoKafka.
public static void pushAvroIntoKafka(List<File> avroFiles, String kafkaBroker, String kafkaTopic, final byte[] header) {
  Properties properties = new Properties();
  properties.put("metadata.broker.list", kafkaBroker);
  properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
  properties.put("request.required.acks", "1");
  ProducerConfig producerConfig = new ProducerConfig(properties);
  Producer<byte[], byte[]> producer = new Producer<byte[], byte[]>(producerConfig);
  for (File avroFile : avroFiles) {
    try {
      ByteArrayOutputStream outputStream = new ByteArrayOutputStream(65536);
      DataFileStream<GenericRecord> reader = AvroUtils.getAvroReader(avroFile);
      BinaryEncoder binaryEncoder = new EncoderFactory().directBinaryEncoder(outputStream, null);
      GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(reader.getSchema());
      int recordCount = 0;
      List<KeyedMessage<byte[], byte[]>> messagesToWrite = new ArrayList<KeyedMessage<byte[], byte[]>>(10000);
      int messagesInThisBatch = 0;
      for (GenericRecord genericRecord : reader) {
        outputStream.reset();
        if (header != null && 0 < header.length) {
          outputStream.write(header);
        }
        datumWriter.write(genericRecord, binaryEncoder);
        binaryEncoder.flush();
        byte[] bytes = outputStream.toByteArray();
        KeyedMessage<byte[], byte[]> data = new KeyedMessage<byte[], byte[]>(kafkaTopic, Longs.toByteArray(System.currentTimeMillis()), bytes);
        if (BATCH_KAFKA_MESSAGES) {
          messagesToWrite.add(data);
          messagesInThisBatch++;
          if (MAX_MESSAGES_PER_BATCH <= messagesInThisBatch) {
            LOGGER.debug("Sending a batch of {} records to Kafka", messagesInThisBatch);
            messagesInThisBatch = 0;
            producer.send(messagesToWrite);
            messagesToWrite.clear();
          }
        } else {
          producer.send(data);
        }
        recordCount += 1;
      }
      if (BATCH_KAFKA_MESSAGES) {
        LOGGER.info("Sending last batch of {} records to Kafka", messagesToWrite.size());
        producer.send(messagesToWrite);
      }
      outputStream.close();
      reader.close();
      LOGGER.info("Finished writing " + recordCount + " records from file " + avroFile.getName() + " into Kafka topic " + kafkaTopic);
      int totalRecordCount = totalAvroRecordWrittenCount.addAndGet(recordCount);
      LOGGER.info("Total records written so far " + totalRecordCount);
    } catch (Exception e) {
      e.printStackTrace();
      throw new RuntimeException(e);
    }
  }
}
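A consumer of these messages has to undo the framing above: skip the optional header bytes, then decode the remainder as Avro binary with the writer's schema. A minimal sketch, assuming the schema and the header length are known to the reader out of band (decodeAvroMessage is a hypothetical helper, not part of the pinot code; it additionally uses org.apache.avro.io.DecoderFactory and BinaryDecoder):
public static GenericRecord decodeAvroMessage(byte[] payload, int headerLength, Schema schema) throws IOException {
  // Skip the optional header prefix, then decode the rest as Avro binary.
  BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(payload, headerLength, payload.length - headerLength, null);
  return new GenericDatumReader<GenericRecord>(schema).read(null, decoder);
}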
use of kafka.javaapi.producer.Producer in project druid by druid-io.
the class TestKafkaExtractionCluster method testSimpleRename.
@Test(timeout = 60_000L)
public void testSimpleRename() throws InterruptedException {
  final Properties kafkaProducerProperties = makeProducerProperties();
  final Producer<byte[], byte[]> producer = new Producer<>(new ProducerConfig(kafkaProducerProperties));
  closer.register(new Closeable() {
    @Override
    public void close() throws IOException {
      producer.close();
    }
  });
  checkServer();
  assertUpdated(null, "foo");
  assertReverseUpdated(ImmutableList.<String>of(), "foo");
  long events = factory.getCompletedEventCount();
  log.info("------------------------- Sending foo bar -------------------------------");
  producer.send(new KeyedMessage<>(topicName, StringUtils.toUtf8("foo"), StringUtils.toUtf8("bar")));
  long start = System.currentTimeMillis();
  while (events == factory.getCompletedEventCount()) {
    Thread.sleep(100);
    if (System.currentTimeMillis() > start + 60_000) {
      throw new ISE("Took too long to update event");
    }
  }
  log.info("------------------------- Checking foo bar -------------------------------");
  assertUpdated("bar", "foo");
  assertReverseUpdated(Collections.singletonList("foo"), "bar");
  assertUpdated(null, "baz");
  checkServer();
  events = factory.getCompletedEventCount();
  log.info("------------------------- Sending baz bat -------------------------------");
  producer.send(new KeyedMessage<>(topicName, StringUtils.toUtf8("baz"), StringUtils.toUtf8("bat")));
  // Reset the deadline for the second wait; reusing the first `start` would cut this timeout short.
  start = System.currentTimeMillis();
  while (events == factory.getCompletedEventCount()) {
    Thread.sleep(10);
    if (System.currentTimeMillis() > start + 60_000) {
      throw new ISE("Took too long to update event");
    }
  }
  log.info("------------------------- Checking baz bat -------------------------------");
  Assert.assertEquals("bat", factory.get().apply("baz"));
  Assert.assertEquals(Collections.singletonList("baz"), factory.get().unapply("bat"));
}
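Both waits above follow the same poll-until-the-counter-changes pattern. Factored out, it could look like the sketch below (awaitCountChange is a hypothetical helper, not part of the Druid test):
// Hypothetical helper: block until the counter moves past its initial value,
// polling every 100 ms, and fail once the deadline passes.
private static void awaitCountChange(java.util.function.LongSupplier counter, long initial, long timeoutMillis) throws InterruptedException {
  long deadline = System.currentTimeMillis() + timeoutMillis;
  while (counter.getAsLong() == initial) {
    Thread.sleep(100);
    if (System.currentTimeMillis() > deadline) {
      throw new ISE("Took too long to update event");
    }
  }
}
Both loops would then collapse to a call such as awaitCountChange(factory::getCompletedEventCount, events, 60_000).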
use of kafka.javaapi.producer.Producer in project avro-kafka-storm by ransilberman.
the class MainTest method testCompiledDatumRecord.
@Test
public void testCompiledDatumRecord() throws IOException, InterruptedException {
  Schema.Parser parser = new Schema.Parser();
  Schema schema = parser.parse(getClass().getResourceAsStream("LPEvent.avsc"));
  LPEvent datum = new LPEvent();
  datum.setRevision(1L);
  datum.setSiteId("28280110");
  datum.setEventType("PLine");
  datum.setTimeStamp(System.currentTimeMillis());
  datum.setSessionId("123456II");
  pline plineDatum = new pline();
  plineDatum.setText("Hello, I am your agent");
  plineDatum.setLineType(2);
  plineDatum.setRepId("REPID7777");
  datum.setSubrecord(plineDatum);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  DatumWriter<LPEvent> writer = new SpecificDatumWriter<LPEvent>(LPEvent.class);
  Encoder encoder = EncoderFactory.get().binaryEncoder(out, null);
  writer.write(datum, encoder);
  encoder.flush();
  out.close();
  Message message = new Message(out.toByteArray());
  Properties props = new Properties();
  props.put("zk.connect", zkConnection);
  Producer<Message, Message> producer = new kafka.javaapi.producer.Producer<Message, Message>(new ProducerConfig(props));
  producer.send(new ProducerData<Message, Message>(topic, message));
}
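The test only exercises the producer side; decoding the payload back into the generated LPEvent class is the symmetric operation. A sketch, assuming the bytes come from the received Message's payload (decodeLPEvent is a hypothetical helper using org.apache.avro.io.DecoderFactory and org.apache.avro.specific.SpecificDatumReader):
public static LPEvent decodeLPEvent(byte[] bytes) throws IOException {
  // SpecificDatumReader resolves the writer schema from the generated class.
  BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(bytes, null);
  return new SpecificDatumReader<LPEvent>(LPEvent.class).read(null, decoder);
}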
use of kafka.javaapi.producer.Producer in project jdepth by Crab2died.
the class Kafka method main.
public static void main(String... args) {
  String brokerList = "xjtz234:9091,xjtz234:9092,xjtz234:9093";
  Properties props = new Properties();
  props.put("metadata.broker.list", brokerList);
  /*
   * 0: do not wait for any acknowledgement from the broker
   * 1: wait until at least one server has acknowledged the write
   * -1: wait for acknowledgements from all replicas, i.e. a synchronous write
   */
  props.put("request.required.acks", "1");
  /*
   * Whether the producer sends data synchronously or asynchronously
   * sync: synchronous (the default)
   * async: asynchronous
   */
  props.put("producer.type", "async");
  /*
   * Serializer class
   * optional: kafka.serializer.StringEncoder
   * default: kafka.serializer.DefaultEncoder
   */
  props.put("serializer.class", "kafka.serializer.StringEncoder");
  /*
   * Partitioner class: assigns messages to partitions by key
   * default: kafka.producer.DefaultPartitioner ==> partitions by the hash of the key
   * optional: kafka.serializer.ByteArrayPartitioner ==> converts the key to a byte array and hashes that
   */
  // props.put("partitioner.class", "com.github.jms.kafka.KafkaProducerPartitioner");
  // Maximum number of send retries
  props.put("message.send.max.retries", "3");
  // Number of records submitted per batch when sending asynchronously (async)
  props.put("batch.num.messages", "200");
  // Socket send buffer size (100 KB here)
  props.put("send.buffer.bytes", "102400");
  // 2. Build the Kafka producer configuration
  ProducerConfig config = new ProducerConfig(props);
  // 3. Build the Producer
  final Producer<String, String> producer = new Producer<>(config);
  int numThreads = 10;
  ExecutorService pool = Executors.newFixedThreadPool(numThreads);
  CountDownLatch latch = new CountDownLatch(5);
  for (int i = 0; i < 5; i++) {
    int finalI = i;
    // Submit the task directly; wrapping it in a Thread only to hand it to the
    // pool would discard the thread name anyway.
    pool.submit(() -> {
      // Send data
      KeyedMessage<String, String> message = new KeyedMessage<>("topic1", "key1" + finalI, "v1");
      producer.send(message);
      System.out.println("Sent message: " + message);
      latch.countDown();
    });
  }
  try {
    latch.await();
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
  // Without shutting the pool down, its non-daemon worker threads keep the JVM alive.
  pool.shutdown();
  producer.close();
}
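The commented-out partitioner.class line points at a custom partitioner. Under the old (0.8-era) Scala producer, such a class implements kafka.producer.Partitioner and must expose a constructor taking kafka.utils.VerifiableProperties, because the producer instantiates it reflectively. A sketch of what com.github.jms.kafka.KafkaProducerPartitioner might look like (the body is an assumption; only the interface contract comes from Kafka):
public class KafkaProducerPartitioner implements kafka.producer.Partitioner {

  // Required constructor: the old producer instantiates partitioners
  // reflectively and passes in the producer configuration.
  public KafkaProducerPartitioner(kafka.utils.VerifiableProperties props) {
  }

  @Override
  public int partition(Object key, int numPartitions) {
    // Hash the key, masking the sign bit so Integer.MIN_VALUE cannot
    // produce a negative partition index.
    return (key.hashCode() & Integer.MAX_VALUE) % numPartitions;
  }
}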
use of kafka.javaapi.producer.Producer in project druid by druid-io.
the class TestKafkaExtractionCluster method setUp.
@Before
public void setUp() throws Exception {
  zkTestServer = new TestingServer(-1, temporaryFolder.newFolder(), true);
  zkTestServer.start();
  closer.register(new Closeable() {
    @Override
    public void close() throws IOException {
      zkTestServer.stop();
    }
  });
  zkClient = new ZkClient(zkTestServer.getConnectString(), 10000, 10000, ZKStringSerializer$.MODULE$);
  closer.register(new Closeable() {
    @Override
    public void close() throws IOException {
      zkClient.close();
    }
  });
  if (!zkClient.exists("/kafka")) {
    zkClient.create("/kafka", null, CreateMode.PERSISTENT);
  }
  log.info("---------------------------Started ZK---------------------------");
  final String zkKafkaPath = "/kafka";
  final Properties serverProperties = new Properties();
  serverProperties.putAll(kafkaProperties);
  serverProperties.put("broker.id", "0");
  serverProperties.put("log.dir", temporaryFolder.newFolder().getAbsolutePath());
  serverProperties.put("log.cleaner.enable", "true");
  serverProperties.put("host.name", "127.0.0.1");
  serverProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
  serverProperties.put("zookeeper.session.timeout.ms", "10000");
  serverProperties.put("zookeeper.sync.time.ms", "200");
  kafkaConfig = new KafkaConfig(serverProperties);
  final long time = DateTime.parse("2015-01-01").getMillis();
  kafkaServer = new KafkaServer(kafkaConfig, new Time() {
    @Override
    public long milliseconds() {
      return time;
    }

    @Override
    public long nanoseconds() {
      return milliseconds() * 1_000_000;
    }

    @Override
    public void sleep(long ms) {
      try {
        Thread.sleep(ms);
      } catch (InterruptedException e) {
        throw Throwables.propagate(e);
      }
    }
  });
  kafkaServer.startup();
  closer.register(new Closeable() {
    @Override
    public void close() throws IOException {
      kafkaServer.shutdown();
      kafkaServer.awaitShutdown();
    }
  });
  int sleepCount = 0;
  while (!kafkaServer.kafkaController().isActive()) {
    Thread.sleep(100);
    if (++sleepCount > 10) {
      throw new InterruptedException("Controller took too long to awaken");
    }
  }
  log.info("---------------------------Started Kafka Server---------------------------");
  final ZkClient zkClient = new ZkClient(zkTestServer.getConnectString() + zkKafkaPath, 10000, 10000, ZKStringSerializer$.MODULE$);
  try (final AutoCloseable autoCloseable = new AutoCloseable() {
    @Override
    public void close() throws Exception {
      if (zkClient.exists(zkKafkaPath)) {
        try {
          zkClient.deleteRecursive(zkKafkaPath);
        } catch (org.I0Itec.zkclient.exception.ZkException ex) {
          log.warn(ex, "error deleting %s zk node", zkKafkaPath);
        }
      }
      zkClient.close();
    }
  }) {
    final Properties topicProperties = new Properties();
    topicProperties.put("cleanup.policy", "compact");
    if (!AdminUtils.topicExists(zkClient, topicName)) {
      AdminUtils.createTopic(zkClient, topicName, 1, 1, topicProperties);
    }
    log.info("---------------------------Created topic---------------------------");
    Assert.assertTrue(AdminUtils.topicExists(zkClient, topicName));
  }
  final Properties kafkaProducerProperties = makeProducerProperties();
  final Producer<byte[], byte[]> producer = new Producer<>(new ProducerConfig(kafkaProducerProperties));
  try (final AutoCloseable autoCloseable = new AutoCloseable() {
    @Override
    public void close() throws Exception {
      producer.close();
    }
  }) {
    producer.send(new KeyedMessage<>(topicName, StringUtils.toUtf8("abcdefg"), StringUtils.toUtf8("abcdefg")));
  }
  System.setProperty("druid.extensions.searchCurrentClassloader", "false");
  injector = Initialization.makeInjectorWithModules(GuiceInjectors.makeStartupInjector(), ImmutableList.of(new Module() {
    @Override
    public void configure(Binder binder) {
      binder.bindConstant().annotatedWith(Names.named("serviceName")).to("test");
      binder.bindConstant().annotatedWith(Names.named("servicePort")).to(0);
    }
  }, // These injections fail under IntelliJ but are required for maven
  new NamespaceExtractionModule(), new KafkaExtractionNamespaceModule()));
  mapper = injector.getInstance(ObjectMapper.class);
  log.info("--------------------------- placed default item via producer ---------------------------");
  final Map<String, String> consumerProperties = new HashMap<>(kafkaProperties);
  consumerProperties.put("zookeeper.connect", zkTestServer.getConnectString() + zkKafkaPath);
  consumerProperties.put("zookeeper.session.timeout.ms", "10000");
  consumerProperties.put("zookeeper.sync.time.ms", "200");
  final KafkaLookupExtractorFactory kafkaLookupExtractorFactory = new KafkaLookupExtractorFactory(null, topicName, consumerProperties);
  factory = (KafkaLookupExtractorFactory) mapper.readValue(mapper.writeValueAsString(kafkaLookupExtractorFactory), LookupExtractorFactory.class);
  Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaTopic(), factory.getKafkaTopic());
  Assert.assertEquals(kafkaLookupExtractorFactory.getKafkaProperties(), factory.getKafkaProperties());
  factory.start();
  closer.register(new Closeable() {
    @Override
    public void close() throws IOException {
      factory.close();
    }
  });
  log.info("--------------------------- started rename manager ---------------------------");
}
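To verify the seeded "abcdefg" message independently of the lookup factory, one could read it back with the 0.8-era high-level consumer. A sketch reusing the consumerProperties built above; the group.id and offset-reset settings are assumptions added for illustration:
Properties props = new Properties();
props.putAll(consumerProperties);
props.put("group.id", "setup-verifier"); // hypothetical consumer group
props.put("auto.offset.reset", "smallest"); // start from the beginning of the topic
ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
// Ask for a single stream on the test topic and take its first message.
Map<String, Integer> topicCountMap = ImmutableMap.of(topicName, 1);
KafkaStream<byte[], byte[]> stream = connector.createMessageStreams(topicCountMap).get(topicName).get(0);
MessageAndMetadata<byte[], byte[]> first = stream.iterator().next();
Assert.assertEquals("abcdefg", StringUtils.fromUtf8(first.message()));
connector.shutdown();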