Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestMerger, method readOnDiskMapOutput:
private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path,
    List<String> keys, List<String> values) throws IOException {
  // Wraps the stream with a decrypting stream if encrypted intermediate data is enabled.
  FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));
  IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in,
      fs.getFileStatus(path).getLen(), null, null);
  DataInputBuffer keyBuff = new DataInputBuffer();
  DataInputBuffer valueBuff = new DataInputBuffer();
  Text key = new Text();
  Text value = new Text();
  // The reader points each buffer at the raw record bytes; Text.readFields decodes them.
  while (reader.nextRawKey(keyBuff)) {
    key.readFields(keyBuff);
    keys.add(key.toString());
    reader.nextRawValue(valueBuff);
    value.readFields(valueBuff);
    values.add(value.toString());
  }
}
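The reader above reuses a single DataInputBuffer per key and per value; the same buffer mechanics can be exercised in isolation with a DataOutputBuffer. A minimal stand-alone sketch, assuming only hadoop-common on the classpath (the class name DataInputBufferRoundTrip is made up for illustration):

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class DataInputBufferRoundTrip {
  public static void main(String[] args) throws IOException {
    Text original = new Text("hello");
    // Serialize the Writable into an in-memory output buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);
    // Point a DataInputBuffer at the serialized bytes and read them back.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    Text copy = new Text();
    copy.readFields(in);
    System.out.println(copy); // prints "hello"
  }
}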
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestIFileStreams, method testBadLength:
@Test
public void testBadLength() throws Exception {
  final int DLEN = 100;
  DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
  IFileOutputStream ifos = new IFileOutputStream(dob);
  for (int i = 0; i < DLEN; ++i) {
    ifos.write(i);
  }
  ifos.close();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), DLEN + 4);
  // Declare a stream length (100) that is 4 bytes shorter than the buffer,
  // so checksum verification is expected to fail before the data is consumed.
  IFileInputStream ifis = new IFileInputStream(dib, 100, new Configuration());
  int i = 0;
  try {
    while (i < DLEN - 8) {
      assertEquals(i++, ifis.read());
    }
    ifis.close();
  } catch (ChecksumException e) {
    assertEquals("Checksum before close", i, DLEN - 8);
    return;
  }
  fail("Did not detect bad data in checksum");
}
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestIFileStreams, method testBadIFileStream:
@Test
public void testBadIFileStream() throws Exception {
  final int DLEN = 100;
  DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
  IFileOutputStream ifos = new IFileOutputStream(dob);
  for (int i = 0; i < DLEN; ++i) {
    ifos.write(i);
  }
  ifos.close();
  DataInputBuffer dib = new DataInputBuffer();
  final byte[] b = dob.getData();
  // Corrupt a single payload byte before reading the stream back.
  ++b[17];
  dib.reset(b, DLEN + 4);
  IFileInputStream ifis = new IFileInputStream(dib, 104, new Configuration());
  int i = 0;
  try {
    while (i < DLEN) {
      if (17 == i) {
        assertEquals(18, ifis.read());
      } else {
        assertEquals(i, ifis.read());
      }
      ++i;
    }
    ifis.close();
  } catch (ChecksumException e) {
    assertEquals("Unexpected bad checksum", DLEN - 1, i);
    return;
  }
  fail("Did not detect bad data in checksum");
}
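Both tests depend on the checksum trailer that IFileOutputStream writes after the payload (the DLEN + 4 buffer sizing suggests a 4-byte trailer). For contrast, a hedged sketch of the happy path, patterned directly on the tests above: the full length is declared and nothing is corrupted, so every byte reads back and close() verifies cleanly.

final int DLEN = 100;
DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
IFileOutputStream ifos = new IFileOutputStream(dob);
for (int i = 0; i < DLEN; ++i) {
  ifos.write(i);
}
ifos.close();
DataInputBuffer dib = new DataInputBuffer();
// No corruption, and the declared length covers the data plus the checksum trailer.
dib.reset(dob.getData(), DLEN + 4);
IFileInputStream ifis = new IFileInputStream(dib, DLEN + 4, new Configuration());
for (int i = 0; i < DLEN; ++i) {
  assertEquals(i, ifis.read());
}
ifis.close();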
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestCounters, method testResetOnDeserialize:
@Test
public void testResetOnDeserialize() throws IOException {
  // Allow only one counter group.
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.COUNTER_GROUPS_MAX_KEY, 1);
  Limits.init(conf);
  Counters countersWithOneGroup = new Counters();
  countersWithOneGroup.findCounter("firstOf1Allowed", "First group");
  boolean caughtExpectedException = false;
  try {
    countersWithOneGroup.findCounter("secondIsTooMany", "Second group");
  } catch (LimitExceededException e) {
    caughtExpectedException = true;
  }
  assertTrue("Did not throw expected exception", caughtExpectedException);
  // Serialize an empty Counters instance and deserialize it over the full one.
  Counters countersWithZeroGroups = new Counters();
  DataOutputBuffer out = new DataOutputBuffer();
  countersWithZeroGroups.write(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  countersWithOneGroup.readFields(in);
  // After the reset one should be able to add a group again.
  countersWithOneGroup.findCounter("firstGroupAfterReset", "After reset " + "limit should be set back to zero");
}
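The write / reset / readFields sequence in the test is the standard way to round-trip any Writable through memory. A small helper sketch that mirrors it (the method name roundTrip is made up; it assumes org.apache.hadoop.io.Writable and the two buffer classes are imported):

// Sketch: serialize one Writable and deserialize it into another instance,
// the same sequence applied to countersWithZeroGroups / countersWithOneGroup above.
static void roundTrip(Writable src, Writable dst) throws IOException {
  DataOutputBuffer out = new DataOutputBuffer();
  src.write(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  dst.readFields(in);
}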
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
From the class TestDelegationToken, method testDelegationToken:
@SuppressWarnings("deprecation")
@Test
public void testDelegationToken() throws Exception {
  final JobClient client;
  client = user1.doAs(new PrivilegedExceptionAction<JobClient>() {

    @Override
    public JobClient run() throws Exception {
      return new JobClient(cluster.createJobConf());
    }
  });
  final JobClient bobClient;
  bobClient = user2.doAs(new PrivilegedExceptionAction<JobClient>() {

    @Override
    public JobClient run() throws Exception {
      return new JobClient(cluster.createJobConf());
    }
  });
  final Token<DelegationTokenIdentifier> token =
      client.getDelegationToken(new Text(user1.getUserName()));
  DataInputBuffer inBuf = new DataInputBuffer();
  byte[] bytes = token.getIdentifier();
  inBuf.reset(bytes, bytes.length);
  DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
  ident.readFields(inBuf);
  assertEquals("alice", ident.getUser().getUserName());
  long createTime = ident.getIssueDate();
  long maxTime = ident.getMaxDate();
  long currentTime = System.currentTimeMillis();
  System.out.println("create time: " + createTime);
  System.out.println("current time: " + currentTime);
  System.out.println("max time: " + maxTime);
  assertTrue("createTime < current", createTime < currentTime);
  assertTrue("current < maxTime", currentTime < maxTime);
  // renew should work as user alice
  user1.doAs(new PrivilegedExceptionAction<Void>() {

    @Override
    public Void run() throws Exception {
      client.renewDelegationToken(token);
      client.renewDelegationToken(token);
      return null;
    }
  });
  // bob should fail to renew
  user2.doAs(new PrivilegedExceptionAction<Void>() {

    @Override
    public Void run() throws Exception {
      try {
        bobClient.renewDelegationToken(token);
        Assert.fail("bob renew");
      } catch (AccessControlException ace) {
        // PASS
      }
      return null;
    }
  });
  // bob should fail to cancel
  user2.doAs(new PrivilegedExceptionAction<Void>() {

    @Override
    public Void run() throws Exception {
      try {
        bobClient.cancelDelegationToken(token);
        Assert.fail("bob cancel");
      } catch (AccessControlException ace) {
        // PASS
      }
      return null;
    }
  });
  // alice should be able to cancel, but only cancel once
  user1.doAs(new PrivilegedExceptionAction<Void>() {

    @Override
    public Void run() throws Exception {
      client.cancelDelegationToken(token);
      try {
        client.cancelDelegationToken(token);
        Assert.fail("second alice cancel");
      } catch (InvalidToken it) {
        // PASS
      }
      return null;
    }
  });
}