Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestContainerManagerSecurity, method testContainerTokenWithEpoch.
/**
* This tests whether a containerId is serialized/deserialized with epoch.
*
* @throws IOException
* @throws InterruptedException
* @throws YarnException
*/
private void testContainerTokenWithEpoch(Configuration conf)
    throws IOException, InterruptedException, YarnException {
  LOG.info("Running test for serializing/deserializing containerIds");
  NMTokenSecretManagerInRM nmTokenSecretManagerInRM =
      yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
  ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
  NodeManager nm = yarnCluster.getNodeManager(0);
  NMTokenSecretManagerInNM nmTokenSecretManagerInNM =
      nm.getNMContext().getNMTokenSecretManager();
  String user = "test";
  waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
  NodeId nodeId = nm.getNMContext().getNodeId();
  // Both key ids should be equal.
  Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(),
      nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
  // Create a normal container token.
  RMContainerTokenSecretManager containerTokenSecretManager =
      yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
  Resource r = Resource.newInstance(1230, 2);
  Token containerToken = containerTokenSecretManager.createContainerToken(
      cId, 0, nodeId, user, r, Priority.newInstance(0), 0);
  // Deserialize the token identifier from its raw bytes and verify that the
  // containerId (including the epoch encoded in its high bits) survives.
  ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier();
  byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
  containerTokenIdentifier.readFields(dib);
  Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
  Assert.assertEquals(cId.toString(),
      containerTokenIdentifier.getContainerID().toString());
  Token nmToken = nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
  YarnRPC rpc = YarnRPC.create(conf);
  testStartContainer(rpc, appAttemptId, nodeId, containerToken, nmToken, false);
  List<ContainerId> containerIds = new LinkedList<ContainerId>();
  containerIds.add(cId);
  ContainerManagementProtocol proxy =
      getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
  GetContainerStatusesResponse res = proxy.getContainerStatuses(
      GetContainerStatusesRequest.newInstance(containerIds));
  Assert.assertNotNull(res.getContainerStatuses().get(0));
  Assert.assertEquals(cId, res.getContainerStatuses().get(0).getContainerId());
  Assert.assertEquals(cId.toString(),
      res.getContainerStatuses().get(0).getContainerId().toString());
}
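The deserialization step above is the generic Writable round-trip pattern: write an object's fields to a byte buffer, then reset a DataInputBuffer over those bytes and call readFields on a fresh instance. A minimal standalone sketch of that pattern follows, using Text in place of ContainerTokenIdentifier so it runs without a YARN mini-cluster; the class name and sample string are illustrative.

// A minimal sketch of the Writable round trip the test relies on: serialize
// into a DataOutputBuffer, rehydrate from a DataInputBuffer, and compare.
// (Class name and sample string are illustrative, not from the test.)
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class WritableRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Text original = new Text("container_e05_0000000001_0001_00_000003");

    // Serialize the Writable into an in-memory byte buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);

    // Point a DataInputBuffer at exactly the bytes that were written.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    // Deserialize into a fresh instance and check the round trip.
    Text copy = new Text();
    copy.readFields(in);
    if (!original.equals(copy)) {
      throw new AssertionError("Round trip lost data: " + copy);
    }
    System.out.println("Round trip OK: " + copy);
  }
}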
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestMRSequenceFileAsBinaryInputFormat, method testBinary.
@Test
public void testBinary() throws IOException, InterruptedException {
  Job job = Job.getInstance();
  FileSystem fs = FileSystem.getLocal(job.getConfiguration());
  Path dir = new Path(System.getProperty("test.build.data", ".") + "/mapred");
  Path file = new Path(dir, "testbinary.seq");
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  fs.delete(dir, true);
  FileInputFormat.setInputPaths(job, dir);
  Text tkey = new Text();
  Text tval = new Text();
  // Write RECORDS random Text key/value pairs to a SequenceFile.
  SequenceFile.Writer writer =
      new SequenceFile.Writer(fs, job.getConfiguration(), file, Text.class, Text.class);
  try {
    for (int i = 0; i < RECORDS; ++i) {
      tkey.set(Integer.toString(r.nextInt(), 36));
      tval.set(Long.toString(r.nextLong(), 36));
      writer.append(tkey, tval);
    }
  } finally {
    writer.close();
  }
  TaskAttemptContext context =
      MapReduceTestUtil.createDummyMapTaskAttemptContext(job.getConfiguration());
  InputFormat<BytesWritable, BytesWritable> bformat =
      new SequenceFileAsBinaryInputFormat();
  int count = 0;
  // Re-seed so the expected keys/values are regenerated in the same order.
  r.setSeed(seed);
  BytesWritable bkey = new BytesWritable();
  BytesWritable bval = new BytesWritable();
  Text cmpkey = new Text();
  Text cmpval = new Text();
  DataInputBuffer buf = new DataInputBuffer();
  FileInputFormat.setInputPaths(job, file);
  for (InputSplit split : bformat.getSplits(job)) {
    RecordReader<BytesWritable, BytesWritable> reader =
        bformat.createRecordReader(split, context);
    MapContext<BytesWritable, BytesWritable, BytesWritable, BytesWritable> mcontext =
        new MapContextImpl<BytesWritable, BytesWritable, BytesWritable, BytesWritable>(
            job.getConfiguration(), context.getTaskAttemptID(), reader, null, null,
            MapReduceTestUtil.createDummyReporter(), split);
    reader.initialize(split, mcontext);
    try {
      while (reader.nextKeyValue()) {
        bkey = reader.getCurrentKey();
        bval = reader.getCurrentValue();
        tkey.set(Integer.toString(r.nextInt(), 36));
        tval.set(Long.toString(r.nextLong(), 36));
        // Decode the raw bytes back into Text via the DataInputBuffer.
        buf.reset(bkey.getBytes(), bkey.getLength());
        cmpkey.readFields(buf);
        buf.reset(bval.getBytes(), bval.getLength());
        cmpval.readFields(buf);
        assertTrue("Keys don't match: " + "*" + cmpkey.toString() + ":" + tkey.toString() + "*",
            cmpkey.toString().equals(tkey.toString()));
        assertTrue("Vals don't match: " + "*" + cmpval.toString() + ":" + tval.toString() + "*",
            cmpval.toString().equals(tval.toString()));
        ++count;
      }
    } finally {
      reader.close();
    }
  }
  assertEquals("Some records not found", RECORDS, count);
}
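The decode step inside the read loop is worth isolating: SequenceFileAsBinaryInputFormat hands back raw BytesWritable records, and the test rehydrates them by resetting a DataInputBuffer over getBytes()/getLength(). A compact sketch of just that step follows; the class name and sample value are chosen here for illustration.

// Sketch of the decode step used in the loop above: a BytesWritable carrying a
// serialized Text is turned back into a Text by resetting a DataInputBuffer
// over its backing array. (Names are illustrative.)
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class BinaryRecordDecodeSketch {
  public static void main(String[] args) throws Exception {
    // Build a BytesWritable that wraps the serialized form of a Text, the way
    // the binary input format would hand one back.
    Text original = new Text("hello-sequence-file");
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);
    BytesWritable raw = new BytesWritable();
    raw.set(out.getData(), 0, out.getLength());

    // Decode: reset over getBytes()/getLength() -- not getBytes().length,
    // which may include unused padding in the backing array.
    DataInputBuffer buf = new DataInputBuffer();
    buf.reset(raw.getBytes(), raw.getLength());
    Text decoded = new Text();
    decoded.readFields(buf);

    System.out.println(original.equals(decoded) ? "match" : "mismatch");
  }
}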
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestBufferPushPull, method testPull.
@Test
public void testPull() throws Exception {
  final byte[] buff = new byte[BUFFER_LENGTH];
  final InputBuffer input = new InputBuffer(buff);
  final OutputBuffer out = new OutputBuffer(buff);
  final Class<BytesWritable> iKClass = BytesWritable.class;
  final Class<BytesWritable> iVClass = BytesWritable.class;
  final NativeHandlerForPull handler = new NativeHandlerForPull(input, out);
  final KeyValueIterator iter = new KeyValueIterator();
  final BufferPullee pullee = new BufferPullee(iKClass, iVClass, iter, handler);
  handler.setDataLoader(pullee);
  final BufferPuller puller = new BufferPuller(handler);
  handler.setDataReceiver(puller);
  int count = 0;
  while (puller.next()) {
    // Each pulled record exposes its key and value as DataInputBuffers
    // positioned over the serialized bytes.
    final DataInputBuffer key = puller.getKey();
    final DataInputBuffer value = puller.getValue();
    final BytesWritable keyBytes = new BytesWritable();
    final BytesWritable valueBytes = new BytesWritable();
    keyBytes.readFields(key);
    valueBytes.readFields(value);
    Assert.assertEquals(dataInput[count].key.toString(), keyBytes.toString());
    Assert.assertEquals(dataInput[count].value.toString(), valueBytes.toString());
    count++;
  }
  puller.close();
  pullee.close();
}
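The pull loop works because BytesWritable's serialized form is length-prefixed, so each readFields call on a DataInputBuffer consumes exactly one record and leaves the buffer positioned at the next. A small sketch of that streaming-decode behavior, independent of the native-task puller; names here are illustrative.

// Sketch of why readFields(DataInputBuffer) works as a streaming decode:
// records are length-prefixed, so successive readFields calls on one buffer
// consume one record each. (Illustrative; no native-task code involved.)
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class SequentialBytesWritableSketch {
  public static void main(String[] args) throws Exception {
    BytesWritable first = new BytesWritable("key-0".getBytes("UTF-8"));
    BytesWritable second = new BytesWritable("value-0".getBytes("UTF-8"));

    // Write two length-prefixed records back to back.
    DataOutputBuffer out = new DataOutputBuffer();
    first.write(out);
    second.write(out);

    // One DataInputBuffer over the concatenated bytes; each readFields call
    // advances past exactly one record.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    BytesWritable a = new BytesWritable();
    BytesWritable b = new BytesWritable();
    a.readFields(in);
    b.readFields(in);

    System.out.println(new String(a.copyBytes(), "UTF-8")); // key-0
    System.out.println(new String(b.copyBytes(), "UTF-8")); // value-0
  }
}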
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestMerger, method getKeyAnswer.
private Answer<?> getKeyAnswer(final String segmentName, final boolean isCompressedInput) {
  return new Answer<Object>() {
    int i = 0;

    @SuppressWarnings("unchecked")
    public Boolean answer(InvocationOnMock invocation) {
      // Simulate a segment that yields three keys, then reports end of input.
      if (i++ == 3) {
        return false;
      }
      Reader<Text, Text> mock = (Reader<Text, Text>) invocation.getMock();
      int multiplier = isCompressedInput ? 100 : 1;
      mock.bytesRead += 10 * multiplier;
      Object[] args = invocation.getArguments();
      // Fill the caller-supplied key buffer in place: expose 20 bytes of the
      // generated key string through the DataInputBuffer.
      DataInputBuffer key = (DataInputBuffer) args[0];
      key.reset(("Segment Key " + segmentName + i).getBytes(), 20);
      return true;
    }
  };
}
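Note that reset(byte[], int) takes the number of readable bytes, not the array capacity, so the mocked answer deliberately exposes only the first 20 bytes of the generated key string. A sketch of that behavior in isolation; the class name and sample string are illustrative.

// Sketch of DataInputBuffer.reset(byte[], int) semantics: the second argument
// is how many bytes readers may see, regardless of the array's full size.
// (Illustrative names and data.)
import org.apache.hadoop.io.DataInputBuffer;

public class ResetLengthSketch {
  public static void main(String[] args) throws Exception {
    byte[] data = "Segment Key mock-segment-1".getBytes("UTF-8");

    DataInputBuffer key = new DataInputBuffer();
    key.reset(data, 20); // expose only the first 20 bytes

    byte[] visible = new byte[key.getLength() - key.getPosition()];
    key.readFully(visible);
    System.out.println(new String(visible, "UTF-8")); // "Segment Key mock-seg"
  }
}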
Use of org.apache.hadoop.io.DataInputBuffer in project hadoop by apache.
The class TestIFileStreams, method testIFileStream.
@Test
public void testIFileStream() throws Exception {
  final int DLEN = 100;
  DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
  IFileOutputStream ifos = new IFileOutputStream(dob);
  for (int i = 0; i < DLEN; ++i) {
    ifos.write(i);
  }
  ifos.close();
  // The buffer now holds the DLEN payload bytes plus the 4-byte checksum
  // trailer written by IFileOutputStream; expose all 104 (= DLEN + 4) of them
  // to the checksum-verifying input stream.
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), DLEN + 4);
  IFileInputStream ifis = new IFileInputStream(dib, 104, new Configuration());
  for (int i = 0; i < DLEN; ++i) {
    assertEquals(i, ifis.read());
  }
  ifis.close();
}
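The same DataOutputBuffer-to-DataInputBuffer handoff can be shown without IFile's checksumming at all; the key detail is that reset(data, len) bounds the stream, so reads stop at the declared length even when the backing array is larger. A standalone sketch follows, with the class name chosen here for illustration.

// Sketch of the DataOutputBuffer -> DataInputBuffer handoff: reset(data, len)
// bounds the stream, so read() returns -1 once the declared length is
// exhausted, even if the backing array has spare capacity. (Illustrative.)
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class BoundedReadSketch {
  public static void main(String[] args) throws Exception {
    final int DLEN = 100;

    DataOutputBuffer dob = new DataOutputBuffer(DLEN + 4);
    for (int i = 0; i < DLEN; ++i) {
      dob.write(i);
    }

    DataInputBuffer dib = new DataInputBuffer();
    // Declare only the DLEN bytes actually written as readable, even though
    // the backing array was sized DLEN + 4.
    dib.reset(dob.getData(), DLEN);

    int n = 0;
    while (dib.read() != -1) {
      ++n;
    }
    System.out.println("bytes visible: " + n); // 100
  }
}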