Use of org.apache.hadoop.io.Writable in the project goldenorb by jzachr.
From the class MessageThread, the method testSingleVertex:
/**
* Tests mapping 2 messages manually to a single Vertex.
*
* @throws Exception
*/
@Test
public void testSingleVertex() throws Exception {
  InboundMessageQueue imqTest = new InboundMessageQueue();
  Message<Text> msg1 = new Message<Text>(Text.class);
  msg1.setDestinationVertex("Test Vertex");
  msg1.setMessageValue(new Text("Test Message"));
  imqTest.addMessage(msg1);
  Message<Text> msg2 = new Message<Text>(Text.class);
  msg2.setDestinationVertex("Test Vertex");
  msg2.setMessageValue(new Text("testtesttest"));
  imqTest.addMessage(msg2);
  List<Message<? extends Writable>> list = imqTest.getMessage("Test Vertex");
  assertTrue(list.get(0) == msg1);
  assertTrue(list.get(1) == msg2);
}
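The test above relies on the queue returning, for a given destination vertex, the messages in the order they were added. For orientation only, a structure along the following lines would satisfy that contract; it is an illustrative sketch, not the actual goldenorb implementation, and it assumes Message exposes a getDestinationVertex() accessor matching the setter used above.

// Illustrative sketch only: a vertex-keyed inbound queue (names and layout assumed).
// Requires java.util.* and java.util.concurrent.* imports.
class SketchInboundMessageQueue {

  private final ConcurrentHashMap<String, List<Message<? extends Writable>>> messagesByVertex =
      new ConcurrentHashMap<>();

  public void addMessage(Message<? extends Writable> message) {
    // group messages by destination vertex, preserving per-vertex insertion order
    messagesByVertex
        .computeIfAbsent(message.getDestinationVertex(),
                         key -> Collections.synchronizedList(new ArrayList<>()))
        .add(message);
  }

  public List<Message<? extends Writable>> getMessage(String vertexID) {
    return messagesByVertex.get(vertexID);
  }

  public Set<String> getVerticesWithMessages() {
    return messagesByVertex.keySet();
  }
}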
Use of org.apache.hadoop.io.Writable in the project goldenorb by jzachr.
From the class MessageThread, the method testInboundMessageQueue:
/**
* Tests mapping many messages to many Vertices using threads.
*
* @throws Exception
*/
@Test
public void testInboundMessageQueue() throws Exception {
  int numOfThreads = 100;
  int numOfMessages = 10000;
  InboundMessageQueue imq = new InboundMessageQueue();
  CountDownLatch startLatch = new CountDownLatch(1);
  CountDownLatch everyoneDoneLatch = new CountDownLatch(numOfThreads);
  // create new MessageThreads that add the passed message to the inbound message queue
  for (int i = 0; i < numOfThreads; i++) {
    Messages msgs = new Messages(TextMessage.class);
    for (int p = 0; p < numOfMessages; p++) {
      TextMessage txtmsg = new TextMessage(Integer.toString(i), new Text("test message " + Integer.toString(p)));
      msgs.add(txtmsg);
    }
    MessageThread mThread = new MessageThread(msgs, imq, startLatch, everyoneDoneLatch);
    mThread.start();
  }
  // start the threads simultaneously
  startLatch.countDown();
  // wait until all threads are done
  everyoneDoneLatch.await();
  Iterator<String> iter = imq.getVerticesWithMessages().iterator();
  int count = 0;
  while (iter.hasNext()) {
    iter.next();
    count++;
  }
  // check a random Vertex
  int randomVertex = (int) (Math.random() * (numOfThreads));
  Iterator<Message<? extends Writable>> iter2 = imq.getMessage(Integer.toString(randomVertex)).iterator();
  int count2 = 0;
  while (iter2.hasNext()) {
    iter2.next();
    count2++;
  }
  assertTrue(count == numOfThreads);
  assertTrue(count2 == numOfMessages);
  assertThat(imq.getMessage(Integer.toString(randomVertex)), notNullValue());
}
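The MessageThread helper used above is not shown on this page. A minimal sketch of such a worker, assuming Messages is iterable over its Message entries and that the constructor takes the batch, the shared queue, and the two latches in that order, could look like this (again, a sketch rather than the actual goldenorb class):

// Sketch of a worker thread that drains one Messages batch into the shared queue.
// Requires java.util.concurrent.CountDownLatch and org.apache.hadoop.io.Writable imports.
class MessageThreadSketch extends Thread {

  private final Messages messages;
  private final InboundMessageQueue queue;
  private final CountDownLatch startLatch;
  private final CountDownLatch doneLatch;

  MessageThreadSketch(Messages messages, InboundMessageQueue queue,
                      CountDownLatch startLatch, CountDownLatch doneLatch) {
    this.messages = messages;
    this.queue = queue;
    this.startLatch = startLatch;
    this.doneLatch = doneLatch;
  }

  @Override
  public void run() {
    try {
      // block until the test releases every worker at once, maximizing contention
      startLatch.await();
      for (Message<? extends Writable> message : messages) {
        queue.addMessage(message);
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    } finally {
      // signal completion even if interrupted, so the test's await() cannot hang
      doneLatch.countDown();
    }
  }
}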
Use of org.apache.hadoop.io.Writable in the project mavuno by metzlerd.
From the class Tuple, the method readFields:
/**
* Deserializes the Tuple.
*
* @param in
* source for raw byte representation
*/
public void readFields(DataInput in) throws IOException {
  int numFields = in.readInt();
  mObjects = new Object[numFields];
  mSymbols = new String[numFields];
  mFields = new String[numFields];
  mTypes = new Class[numFields];
  // field names are written first, followed by one tagged value per field
  for (int i = 0; i < numFields; i++) {
    mFields[i] = in.readUTF();
  }
  for (int i = 0; i < numFields; i++) {
    byte type = in.readByte();
    if (type == SYMBOL) {
      String className = in.readUTF();
      try {
        mTypes[i] = Class.forName(className);
      } catch (Exception e) {
        e.printStackTrace();
      }
      mObjects[i] = null;
      mSymbols[i] = in.readUTF();
    } else if (type == INT) {
      mTypes[i] = Integer.class;
      mObjects[i] = in.readInt();
    } else if (type == BOOLEAN) {
      mTypes[i] = Boolean.class;
      mObjects[i] = in.readBoolean();
    } else if (type == LONG) {
      mTypes[i] = Long.class;
      mObjects[i] = in.readLong();
    } else if (type == FLOAT) {
      mTypes[i] = Float.class;
      mObjects[i] = in.readFloat();
    } else if (type == DOUBLE) {
      mTypes[i] = Double.class;
      mObjects[i] = in.readDouble();
    } else if (type == STRING) {
      mTypes[i] = String.class;
      mObjects[i] = in.readUTF();
    } else {
      // any other tag: a nested Writable serialized as class name, byte length, raw bytes
      try {
        String className = in.readUTF();
        mTypes[i] = Class.forName(className);
        int sz = in.readInt();
        byte[] bytes = new byte[sz];
        in.readFully(bytes);
        Writable obj = (Writable) mTypes[i].newInstance();
        obj.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
        mObjects[i] = obj;
      } catch (Exception e) {
        e.printStackTrace();
      }
    }
  }
}
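Read back-to-front, the wire format is: the field count, then all field names, then one tagged value per field. A write() counterpart consistent with that format might look as follows; the tag constants (SYMBOL, INT, ..., plus an assumed WRITABLE tag for the fall-through case) and the m* fields are taken from the class above, so treat this as a sketch rather than the actual mavuno implementation.

public void write(DataOutput out) throws IOException {
  out.writeInt(mFields.length);
  // field names first, matching the first loop in readFields()
  for (String field : mFields) {
    out.writeUTF(field);
  }
  // then one tag byte plus payload per field
  for (int i = 0; i < mFields.length; i++) {
    if (mObjects[i] == null) {
      out.writeByte(SYMBOL);
      out.writeUTF(mTypes[i].getName());
      out.writeUTF(mSymbols[i]);
    } else if (mTypes[i] == Integer.class) {
      out.writeByte(INT);
      out.writeInt((Integer) mObjects[i]);
    } else if (mTypes[i] == Boolean.class) {
      out.writeByte(BOOLEAN);
      out.writeBoolean((Boolean) mObjects[i]);
    } else if (mTypes[i] == Long.class) {
      out.writeByte(LONG);
      out.writeLong((Long) mObjects[i]);
    } else if (mTypes[i] == Float.class) {
      out.writeByte(FLOAT);
      out.writeFloat((Float) mObjects[i]);
    } else if (mTypes[i] == Double.class) {
      out.writeByte(DOUBLE);
      out.writeDouble((Double) mObjects[i]);
    } else if (mTypes[i] == String.class) {
      out.writeByte(STRING);
      out.writeUTF((String) mObjects[i]);
    } else {
      // arbitrary Writable: class name, byte length, then the raw serialized bytes
      out.writeByte(WRITABLE); // WRITABLE is an assumed tag constant
      out.writeUTF(mTypes[i].getName());
      ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
      ((Writable) mObjects[i]).write(new DataOutputStream(bytesOut));
      byte[] bytes = bytesOut.toByteArray();
      out.writeInt(bytes.length);
      out.write(bytes);
    }
  }
}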
Use of org.apache.hadoop.io.Writable in the project akela by mozilla-metrics.
From the class MultiScanTableMapReduceUtil, the method convertStringToScanArray:
/**
 * Converts a Base64-encoded scan string back into a Scan array.
 * @param base64 the Base64-encoded, serialized Scan array
 * @return the decoded Scan array
 * @throws IOException
 */
public static Scan[] convertStringToScanArray(final String base64) throws IOException {
  final DataInputStream dis = new DataInputStream(new ByteArrayInputStream(Base64.decode(base64)));
  ArrayWritable aw = new ArrayWritable(Scan.class);
  aw.readFields(dis);
  Writable[] writables = aw.get();
  Scan[] scans = new Scan[writables.length];
  for (int i = 0; i < writables.length; i++) {
    scans[i] = (Scan) writables[i];
  }
  return scans;
}
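The inverse direction (producing the Base64 string in the first place) is not shown here. Assuming the same Base64 utility also offers an encodeBytes(byte[]) method, and relying on the older HBase API where Scan implements Writable (the same assumption the reading code already makes), a serializing counterpart could be sketched as:

// Hypothetical counterpart: serialize a Scan[] into the Base64 form expected above.
public static String convertScanArrayToString(final Scan[] scans) throws IOException {
  final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  final DataOutputStream dos = new DataOutputStream(baos);
  final ArrayWritable aw = new ArrayWritable(Scan.class, scans);
  aw.write(dos);
  dos.flush();
  return Base64.encodeBytes(baos.toByteArray());
}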
Use of org.apache.hadoop.io.Writable in the project presto by prestodb.
From the class AbstractTestHiveFileFormats, the method createTestFile:
public static FileSplit createTestFile(String filePath, HiveStorageFormat storageFormat, HiveCompressionCodec compressionCodec, List<TestColumn> testColumns, int numRows) throws Exception {
  HiveOutputFormat<?, ?> outputFormat = newInstance(storageFormat.getOutputFormat(), HiveOutputFormat.class);
  @SuppressWarnings("deprecation")
  SerDe serDe = newInstance(storageFormat.getSerDe(), SerDe.class);
  // filter out partition keys, which are not written to the file
  testColumns = ImmutableList.copyOf(filter(testColumns, not(TestColumn::isPartitionKey)));
  Properties tableProperties = new Properties();
  tableProperties.setProperty("columns", Joiner.on(',').join(transform(testColumns, TestColumn::getName)));
  tableProperties.setProperty("columns.types", Joiner.on(',').join(transform(testColumns, TestColumn::getType)));
  serDe.initialize(new Configuration(), tableProperties);
  JobConf jobConf = new JobConf();
  configureCompression(jobConf, compressionCodec);
  RecordWriter recordWriter = outputFormat.getHiveRecordWriter(jobConf, new Path(filePath), Text.class, compressionCodec != HiveCompressionCodec.NONE, tableProperties, () -> {});
  try {
    serDe.initialize(new Configuration(), tableProperties);
    SettableStructObjectInspector objectInspector = getStandardStructObjectInspector(ImmutableList.copyOf(transform(testColumns, TestColumn::getName)), ImmutableList.copyOf(transform(testColumns, TestColumn::getObjectInspector)));
    Object row = objectInspector.create();
    List<StructField> fields = ImmutableList.copyOf(objectInspector.getAllStructFieldRefs());
    for (int rowNumber = 0; rowNumber < numRows; rowNumber++) {
      for (int i = 0; i < testColumns.size(); i++) {
        Object writeValue = testColumns.get(i).getWriteValue();
        if (writeValue instanceof Slice) {
          writeValue = ((Slice) writeValue).getBytes();
        }
        objectInspector.setStructFieldData(row, fields.get(i), writeValue);
      }
      Writable record = serDe.serialize(row, objectInspector);
      recordWriter.write(record);
    }
  } finally {
    recordWriter.close(false);
  }
  // todo to test with compression, the file must be renamed with the compression extension
  Path path = new Path(filePath);
  path.getFileSystem(new Configuration()).setVerifyChecksum(true);
  File file = new File(filePath);
  return new FileSplit(path, 0, file.length(), new String[0]);
}
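For orientation, a call to this helper from a test might look like the following; the column list, path, and chosen formats are placeholders, not values taken from the Presto test suite.

// Illustrative invocation only; getTestColumns() is a hypothetical helper.
List<TestColumn> testColumns = getTestColumns();
FileSplit split = createTestFile(
    "/tmp/presto_format_test",    // assumed scratch path
    HiveStorageFormat.ORC,
    HiveCompressionCodec.NONE,
    testColumns,
    1000);
// 'split' can then be handed to the reader implementation under test.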