Use of org.apache.avro.generic.GenericDatumWriter in project pinot by linkedin.
In the class BaseClusterIntegrationTest, the method pushRandomAvroIntoKafka:
public static void pushRandomAvroIntoKafka(File avroFile, String kafkaBroker, String kafkaTopic, int rowCount, Random random) {
    Properties properties = new Properties();
    properties.put("metadata.broker.list", kafkaBroker);
    properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
    properties.put("request.required.acks", "1");
    ProducerConfig producerConfig = new ProducerConfig(properties);
    Producer<String, byte[]> producer = new Producer<String, byte[]>(producerConfig);
    try {
        ByteArrayOutputStream outputStream = new ByteArrayOutputStream(65536);
        DataFileStream<GenericRecord> reader = AvroUtils.getAvroReader(avroFile);
        BinaryEncoder binaryEncoder = new EncoderFactory().directBinaryEncoder(outputStream, null);
        Schema avroSchema = reader.getSchema();
        GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(avroSchema);
        int recordCount = 0;
        int rowsRemaining = rowCount;
        int messagesInThisBatch = 0;
        while (rowsRemaining > 0) {
            int rowsInThisBatch = Math.min(rowsRemaining, MAX_MESSAGES_PER_BATCH);
            List<KeyedMessage<String, byte[]>> messagesToWrite = new ArrayList<KeyedMessage<String, byte[]>>(rowsInThisBatch);
            GenericRecord genericRecord = new GenericData.Record(avroSchema);
            for (int i = 0; i < rowsInThisBatch; ++i) {
                generateRandomRecord(genericRecord, avroSchema, random);
                outputStream.reset();
                datumWriter.write(genericRecord, binaryEncoder);
                binaryEncoder.flush();
                byte[] bytes = outputStream.toByteArray();
                KeyedMessage<String, byte[]> data = new KeyedMessage<String, byte[]>(kafkaTopic, bytes);
                if (BATCH_KAFKA_MESSAGES) {
                    messagesToWrite.add(data);
                    messagesInThisBatch++;
                    if (MAX_MESSAGES_PER_BATCH <= messagesInThisBatch) {
                        messagesInThisBatch = 0;
                        producer.send(messagesToWrite);
                        messagesToWrite.clear();
                        Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
                    }
                } else {
                    producer.send(data);
                }
                recordCount += 1;
            }
            if (BATCH_KAFKA_MESSAGES) {
                producer.send(messagesToWrite);
            }
            // System.out.println("rowsRemaining = " + rowsRemaining);
            rowsRemaining -= rowsInThisBatch;
        }
        outputStream.close();
        reader.close();
        LOGGER.info("Finished writing " + recordCount + " records from " + avroFile.getName() + " into Kafka topic " + kafkaTopic);
        int totalRecordCount = totalAvroRecordWrittenCount.addAndGet(recordCount);
        LOGGER.info("Total records written so far " + totalRecordCount);
    } catch (Exception e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    }
}
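
This producer writes each record as raw Avro binary, so the Kafka payload carries no schema and a consumer has to already hold the writer schema. A minimal consumer-side decoding sketch (not part of the pinot code; class and method names are illustrative):

import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DecoderFactory;

// Hypothetical consumer-side counterpart: decodes the byte[] payloads that
// pushRandomAvroIntoKafka produces, assuming the same Avro schema is known.
public class RandomAvroMessageDecoder {
    private final GenericDatumReader<GenericRecord> datumReader;
    private BinaryDecoder reusableDecoder;

    public RandomAvroMessageDecoder(Schema writerSchema) {
        datumReader = new GenericDatumReader<GenericRecord>(writerSchema);
    }

    public GenericRecord decode(byte[] payload) throws IOException {
        // Reuse the decoder between messages to avoid per-message allocation.
        reusableDecoder = DecoderFactory.get().binaryDecoder(payload, reusableDecoder);
        return datumReader.read(null, reusableDecoder);
    }
}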
Use of org.apache.avro.generic.GenericDatumWriter in project databus by linkedin.
In the class DummySuccessfulErrorCountingConsumer, the method createSampleSchema1Events:
static DbusEventInfo[] createSampleSchema1Events(int eventsNum) throws IOException {
    Random rng = new Random();
    DbusEventInfo[] result = new DbusEventInfo[eventsNum];
    GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(SOURCE1_SCHEMA);
    for (int i = 0; i < eventsNum; ++i) {
        GenericRecord r = new GenericData.Record(SOURCE1_SCHEMA);
        String s = RngUtils.randomString(rng.nextInt(100));
        r.put("s", s);
        ByteArrayOutputStream baos = new ByteArrayOutputStream(s.length() + 100);
        BinaryEncoder out = new BinaryEncoder(baos);
        try {
            writer.write(r, out);
            out.flush();
            result[i] = new DbusEventInfo(DbusOpcode.UPSERT, 1, (short) 1, (short) 1, System.nanoTime(), (short) 1, SOURCE1_SCHEMAID, baos.toByteArray(), false, true);
            result[i].setEventSerializationVersion(_eventFactory.getVersion());
        } finally {
            baos.close();
        }
    }
    return result;
}
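
This test builds against an older Avro release where BinaryEncoder still has a public constructor. In Avro 1.5 and later, encoders are obtained from EncoderFactory instead; a rough sketch of the same per-record serialization under the newer API, assuming the schema and the "s" field match the ones above:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.EncoderFactory;

// Illustrative only: a modern-API equivalent of serializing one sample record.
public class Schema1RecordSerializer {
    public static byte[] serializeRecord(Schema schema, String s) throws IOException {
        GenericRecord record = new GenericData.Record(schema);
        record.put("s", s);
        GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(schema);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        // binaryEncoder() returns a buffered encoder, so flush() is required
        // before reading the bytes back out of the stream.
        BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(baos, null);
        writer.write(record, encoder);
        encoder.flush();
        return baos.toByteArray();
    }
}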
Use of org.apache.avro.generic.GenericDatumWriter in project avro-kafka-storm by ransilberman.
In the class MainTest, the method testGenericRecord:
@Test
public void testGenericRecord() throws IOException, InterruptedException {
    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(getClass().getResourceAsStream("LPEvent.avsc"));
    GenericRecord datum = new GenericData.Record(schema);
    datum.put("revision", 1L);
    datum.put("siteId", "28280110");
    datum.put("eventType", "PLine");
    datum.put("timeStamp", System.currentTimeMillis());
    datum.put("sessionId", "123456II");
    Map<String, Schema> unions = new HashMap<String, Schema>();
    List<Schema> typeList = schema.getField("subrecord").schema().getTypes();
    for (Schema sch : typeList) {
        unions.put(sch.getName(), sch);
    }
    GenericRecord plineDatum = new GenericData.Record(unions.get("pline"));
    plineDatum.put("text", "How can I help you?");
    plineDatum.put("lineType", 1);
    plineDatum.put("repId", "REPID12345");
    datum.put("subrecord", plineDatum);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    DatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(schema);
    Encoder encoder = EncoderFactory.get().binaryEncoder(out, null);
    writer.write(datum, encoder);
    encoder.flush();
    out.close();
    Message message = new Message(out.toByteArray());
    Properties props = new Properties();
    props.put("zk.connect", zkConnection);
    Producer<Message, Message> producer = new kafka.javaapi.producer.Producer<Message, Message>(new ProducerConfig(props));
    producer.send(new ProducerData<Message, Message>(topic, message));
}
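
Because subrecord is a union field, a datum built against the wrong branch schema only fails once the writer tries to encode it. One way to surface the problem earlier is GenericData.validate; a small illustrative helper (the class and method names are hypothetical) that could be called on datum before writing:

import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

// Illustrative helper: check a datum against its schema before encoding it,
// so union-branch mistakes surface as a clear error rather than a write failure.
public class DatumValidator {
    public static void requireValid(Schema schema, GenericRecord datum) {
        if (!GenericData.get().validate(schema, datum)) {
            throw new IllegalArgumentException("Datum does not conform to schema " + schema.getFullName());
        }
    }
}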
Use of org.apache.avro.generic.GenericDatumWriter in project avro-kafka-storm by ransilberman.
In the class MainTest, the method testDataFile:
@Test
public void testDataFile() throws IOException {
    File fileOut = new File("data.avro");
    File fileIn = new File("data.avro");
    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(getClass().getResourceAsStream("LPEvent.avsc"));
    GenericRecord datum = new GenericData.Record(schema);
    datum.put("revision", 1L);
    datum.put("siteId", "28280110");
    datum.put("eventType", "PLine");
    datum.put("timeStamp", System.currentTimeMillis());
    datum.put("sessionId", "123456II");
    Map<String, Schema> unions = new HashMap<String, Schema>();
    List<Schema> typeList = schema.getField("subrecord").schema().getTypes();
    for (Schema sch : typeList) {
        unions.put(sch.getName(), sch);
    }
    GenericRecord plineDatum = new GenericData.Record(unions.get("pline"));
    plineDatum.put("text", "How can I help you?");
    plineDatum.put("lineType", 1);
    plineDatum.put("repId", "REPID12345");
    datum.put("subrecord", plineDatum);
    // write the file
    DatumWriter<GenericRecord> writer = new GenericDatumWriter<GenericRecord>(schema);
    DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<GenericRecord>(writer);
    dataFileWriter.create(schema, fileOut);
    dataFileWriter.append(datum);
    dataFileWriter.append(datum);
    dataFileWriter.append(datum);
    dataFileWriter.close();
    // read the file
    DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
    DataFileReader<GenericRecord> dataFileReader = new DataFileReader<GenericRecord>(fileIn, reader);
    assertThat("Schema is the same", schema, is(dataFileReader.getSchema()));
    for (GenericRecord record : dataFileReader) {
        assertThat(record.get("siteId").toString(), is("28280110"));
        assertThat(record.get("eventType").toString(), is("PLine"));
    }
}
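
Since the container file written by this test embeds the writer schema, it can also be consumed from a plain InputStream when a seekable File is not available. A short sketch using DataFileStream against the same data.avro file (illustrative, not part of the test):

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;

public class ReadDataFileAsStream {
    public static void main(String[] args) throws IOException {
        InputStream in = new FileInputStream("data.avro");
        // The schema is read from the container header, so no reader schema
        // needs to be supplied up front.
        DataFileStream<GenericRecord> stream = new DataFileStream<GenericRecord>(in, new GenericDatumReader<GenericRecord>());
        try {
            System.out.println("Writer schema: " + stream.getSchema());
            for (GenericRecord record : stream) {
                System.out.println(record.get("eventType"));
            }
        } finally {
            stream.close();
        }
    }
}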
Use of org.apache.avro.generic.GenericDatumWriter in project core by s4.
In the class AvroSerDeser, the method serialize:
public static byte[] serialize(Schema schema, GenericRecord content) throws IOException {
    GenericDatumWriter<GenericRecord> serveWriter = new GenericDatumWriter<GenericRecord>(schema);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    serveWriter.write(content, new BinaryEncoder(out));
    return out.toByteArray();
}
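
This serialize method uses the older public BinaryEncoder constructor, which in that Avro API writes straight through to the stream, so no flush is needed before toByteArray(). The matching deserializer is not part of the snippet; a minimal sketch, assuming the reader holds the same schema the bytes were written with and a current Avro version:

import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DecoderFactory;

// Illustrative inverse of serialize(): decode Avro binary bytes back into a record.
public class AvroDeser {
    public static GenericRecord deserialize(Schema schema, byte[] bytes) throws IOException {
        GenericDatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>(schema);
        return reader.read(null, DecoderFactory.get().binaryDecoder(bytes, null));
    }
}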