Use of org.apache.hadoop.io.LongWritable in the Apache Hadoop project.
The class TestJoinTupleWritable, method testWritable:
@Test
public void testWritable() throws Exception {
  Random r = new Random();
  Writable[] writs = {
    new BooleanWritable(r.nextBoolean()),
    new FloatWritable(r.nextFloat()),
    new FloatWritable(r.nextFloat()),
    new IntWritable(r.nextInt()),
    new LongWritable(r.nextLong()),
    new BytesWritable("dingo".getBytes()),
    new LongWritable(r.nextLong()),
    new IntWritable(r.nextInt()),
    new BytesWritable("yak".getBytes()),
    new IntWritable(r.nextInt())
  };
  TupleWritable sTuple = makeTuple(writs);
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  sTuple.write(new DataOutputStream(out));
  ByteArrayInputStream in = new ByteArrayInputStream(out.toByteArray());
  TupleWritable dTuple = new TupleWritable();
  dTuple.readFields(new DataInputStream(in));
  assertTrue("Failed to write/read tuple", sTuple.equals(dTuple));
}
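The makeTuple helper is not shown above. A minimal flat sketch of what it has to do, assuming same-package access to TupleWritable's package-private setWritten (the project's actual helper also nests sub-tuples):

private TupleWritable makeTuple(Writable[] writs) {
  TupleWritable tuple = new TupleWritable(writs);
  // Only positions marked as written are serialized by write()/readFields().
  for (int i = 0; i < writs.length; ++i) {
    tuple.setWritten(i);
  }
  return tuple;
}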
Use of org.apache.hadoop.io.LongWritable in the Apache Hadoop project.
The class TestFixedLengthInputFormat, method testZeroRecordLength:
/**
 * Test with record length set to 0.
 */
@Test(timeout = 5000)
public void testZeroRecordLength() throws Exception {
  localFs.delete(workDir, true);
  Path file = new Path(workDir, "testFormat.txt");
  createFile(file, null, 10, 10);
  Job job = Job.getInstance(defaultConf);
  // Set the fixed length record length config property
  FixedLengthInputFormat format = new FixedLengthInputFormat();
  format.setRecordLength(job.getConfiguration(), 0);
  FileInputFormat.setInputPaths(job, workDir);
  List<InputSplit> splits = format.getSplits(job);
  boolean exceptionThrown = false;
  for (InputSplit split : splits) {
    try {
      TaskAttemptContext context =
          MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
      RecordReader<LongWritable, BytesWritable> reader =
          format.createRecordReader(split, context);
      MapContext<LongWritable, BytesWritable, LongWritable, BytesWritable> mcontext =
          new MapContextImpl<LongWritable, BytesWritable, LongWritable, BytesWritable>(
              job.getConfiguration(), context.getTaskAttemptID(), reader,
              null, null, MapReduceTestUtil.createDummyReporter(), split);
      reader.initialize(split, mcontext);
    } catch (IOException ioe) {
      exceptionThrown = true;
      LOG.info("Exception message: " + ioe.getMessage());
    }
  }
  assertTrue("Exception for zero record length:", exceptionThrown);
}
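For context, setRecordLength is a thin wrapper around a single configuration key; the call above could equivalently be written as below (a sketch, assuming the public FIXED_RECORD_LENGTH constant of FixedLengthInputFormat):

// Equivalent direct configuration; 0 is the invalid value this test
// expects to be rejected, a positive byte count is what real jobs set.
job.getConfiguration().setInt(FixedLengthInputFormat.FIXED_RECORD_LENGTH, 0);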
Use of org.apache.hadoop.io.LongWritable in the Apache Hadoop project.
The class TestNLineInputFormat, method checkFormat:
void checkFormat(Job job, int expectedN, int lastN)
    throws IOException, InterruptedException {
  NLineInputFormat format = new NLineInputFormat();
  List<InputSplit> splits = format.getSplits(job);
  int count = 0;
  for (int i = 0; i < splits.size(); i++) {
    assertEquals("There are no split locations", 0,
        splits.get(i).getLocations().length);
    TaskAttemptContext context =
        MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
    RecordReader<LongWritable, Text> reader =
        format.createRecordReader(splits.get(i), context);
    Class<?> clazz = reader.getClass();
    assertEquals("reader class is not LineRecordReader",
        LineRecordReader.class, clazz);
    MapContext<LongWritable, Text, LongWritable, Text> mcontext =
        new MapContextImpl<LongWritable, Text, LongWritable, Text>(
            job.getConfiguration(), context.getTaskAttemptID(), reader,
            null, null, MapReduceTestUtil.createDummyReporter(), splits.get(i));
    reader.initialize(splits.get(i), mcontext);
    try {
      count = 0;
      while (reader.nextKeyValue()) {
        count++;
      }
    } finally {
      reader.close();
    }
    if (i == splits.size() - 1) {
      assertEquals("number of lines in split(" + i + ") is wrong", lastN, count);
    } else {
      assertEquals("number of lines in split(" + i + ") is wrong", expectedN, count);
    }
  }
}
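A caller drives checkFormat by configuring the lines-per-split count on the job first; a minimal sketch (the input path and counts are illustrative):

// Ask for 5 lines per split; the last split may come up short.
Job job = Job.getInstance(new Configuration());
NLineInputFormat.setNumLinesPerSplit(job, 5);
FileInputFormat.setInputPaths(job, new Path("/tmp/nline-input"));
// A 12-line input file then splits as 5 + 5 + 2 lines:
checkFormat(job, 5, 2);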
Use of org.apache.hadoop.io.LongWritable in the Apache Hadoop project.
The class DBRecordReader, method nextKeyValue:
/** {@inheritDoc} */
public boolean nextKeyValue() throws IOException {
  try {
    if (key == null) {
      key = new LongWritable();
    }
    if (value == null) {
      value = createValue();
    }
    if (null == this.results) {
      // First time into this method, run the query.
      this.results = executeQuery(getSelectQuery());
    }
    if (!results.next()) {
      return false;
    }
    // Set the key field value as the output key value
    key.set(pos + split.getStart());
    value.readFields(results);
    pos++;
  } catch (SQLException e) {
    throw new IOException("SQLException in nextKeyValue", e);
  }
  return true;
}
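This reader is normally created by DBInputFormat; a minimal wiring sketch (the JDBC details, table name, and the MyRecord DBWritable class are illustrative, not from the source):

Job job = Job.getInstance(new Configuration());
DBConfiguration.configureDB(job.getConfiguration(),
    "com.mysql.jdbc.Driver",                  // JDBC driver class
    "jdbc:mysql://localhost/db", "user", "pass");
DBInputFormat.setInput(job, MyRecord.class,   // MyRecord implements DBWritable
    "employees", null /* conditions */, "id" /* orderBy */, "id", "name");
// Each mapper then receives a LongWritable key holding the global row
// offset (pos + split.getStart() above) and a MyRecord value.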
Use of org.apache.hadoop.io.LongWritable in the Apache Hadoop project.
The class TestFileOutputCommitter, method writeMapFileOutput:
private void writeMapFileOutput(RecordWriter theRecordWriter,
    TaskAttemptContext context) throws IOException, InterruptedException {
  try {
    int key = 0;
    for (int i = 0; i < 10; ++i) {
      key = i;
      Text val = (i % 2 == 1) ? val1 : val2;
      theRecordWriter.write(new LongWritable(key), val);
    }
  } finally {
    theRecordWriter.close(context);
  }
}
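The val1 and val2 fields and the writer come from elsewhere in the test class; plausible definitions with illustrative contents (the writer would come from MapFileOutputFormat, whose keys must be WritableComparable and arrive in sorted order, which the ascending LongWritable keys above satisfy):

private static final Text val1 = new Text("val1");
private static final Text val2 = new Text("val2");

// Obtaining a matching writer (the raw type matches the method signature):
RecordWriter theRecordWriter = new MapFileOutputFormat().getRecordWriter(context);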