Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
The class TestMaintenanceState, method getFirstBlockReplicasDatanodeInfos.
private static DatanodeInfo[] getFirstBlockReplicasDatanodeInfos(FileSystem fileSys,
    Path name) throws IOException {
  // Need a raw HDFS stream; a plain FSDataInputStream cannot list blocks.
  assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
  // try-with-resources so the stream does not leak once the block list is fetched.
  final Collection<LocatedBlock> dinfo;
  try (HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name)) {
    dinfo = dis.getAllBlocks();
  }
  Iterator<LocatedBlock> it = dinfo.iterator();
  if (it.hasNext()) {
    // Return the replica locations of the first block only.
    return it.next().getLocations();
  }
  return null;
}
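For context, a test might use this helper to check where the first block's replicas landed after writing a file. The sketch below is hypothetical: the path, file size, and the running MiniDFSCluster behind fileSys are assumptions, not part of the source.

// Hypothetical usage sketch; assumes a MiniDFSCluster is up and fileSys is its FileSystem.
Path file = new Path("/test/maintenance-file");
DFSTestUtil.createFile(fileSys, file, 1024L, (short) 3, 0L);  // 1 KB file, replication 3
DatanodeInfo[] replicas = getFirstBlockReplicasDatanodeInfos(fileSys, file);
if (replicas != null) {
  for (DatanodeInfo dn : replicas) {
    LOG.info("First-block replica on " + dn.getXferAddr());
  }
}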
Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
The class TestMaintenanceState, method checkFile.
/**
 * Verify that the number of replicas is as expected for each block in
 * the given file.
 *
 * @return null if no failure is found, else an error message string.
 */
static String checkFile(FSNamesystem ns, FileSystem fileSys, Path name, int repl,
    DatanodeInfo expectedExcludedNode, DatanodeInfo expectedMaintenanceNode)
    throws IOException {
  // Need a raw HDFS stream.
  assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
  BlockManager bm = ns.getBlockManager();
  // try-with-resources so the stream does not leak once the block list is fetched.
  final Collection<LocatedBlock> dinfo;
  try (HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name)) {
    dinfo = dis.getAllBlocks();
  }
  String output;
  for (LocatedBlock blk : dinfo) {
    // Check each replica location of this block.
    DatanodeInfo[] nodes = blk.getLocations();
    for (int j = 0; j < nodes.length; j++) {
      if (expectedExcludedNode != null && nodes[j].equals(expectedExcludedNode)) {
        // The excluded node must not appear in the LocatedBlock.
        output = "For block " + blk.getBlock() + " replica on " + nodes[j]
            + " found in LocatedBlock.";
        LOG.info(output);
        return output;
      } else if (nodes[j].isInMaintenance()) {
        // An IN_MAINTENANCE node must not appear in the LocatedBlock either.
        output = "For block " + blk.getBlock() + " replica on " + nodes[j]
            + " which is in maintenance state.";
        LOG.info(output);
        return output;
      }
    }
    if (repl != nodes.length) {
      output = "Wrong number of replicas for block " + blk.getBlock()
          + ": expected " + repl + ", got " + nodes.length + ", ";
      for (int j = 0; j < nodes.length; j++) {
        // List every replica location in the error message.
        output += nodes[j] + ",";
      }
      output += " pending block # " + ns.getPendingReplicationBlocks() + ",";
      output += " under replicated # " + ns.getUnderReplicatedBlocks() + ",";
      if (expectedExcludedNode != null) {
        output += " excluded node " + expectedExcludedNode;
      }
      LOG.info(output);
      return output;
    }
    // Verify the block has the expected maintenance replica(s).
    Iterator<DatanodeStorageInfo> storageInfoIter =
        bm.getStorages(blk.getBlock().getLocalBlock()).iterator();
    List<DatanodeInfo> maintenanceNodes = new ArrayList<>();
    while (storageInfoIter.hasNext()) {
      DatanodeInfo node = storageInfoIter.next().getDatanodeDescriptor();
      if (node.isMaintenance()) {
        maintenanceNodes.add(node);
      }
    }
    if (expectedMaintenanceNode != null) {
      if (!maintenanceNodes.contains(expectedMaintenanceNode)) {
        output = "No maintenance replica on " + expectedMaintenanceNode;
        LOG.info(output);
        return output;
      }
    } else if (!maintenanceNodes.isEmpty()) {
      output = "Has maintenance replica(s)";
      LOG.info(output);
      return output;
    }
  }
  return null;
}
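A typical caller treats a null return as success. A hypothetical assertion (variable names ns, fileSys, file, and maintenanceNode are illustrative, supplied by the test's cluster setup):

// Hypothetical assertion in a maintenance-state test.
String err = checkFile(ns, fileSys, file, 3, null, maintenanceNode);
assertNull("Unexpected replica state: " + err, err);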
Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
The class TestReadWhileWriting, method checkFile.
// Check that another user can read the file and that its visible length
// and contents are as expected.
static void checkFile(Path p, int expectedsize, final Configuration conf)
    throws IOException, InterruptedException {
  // Open the file with another user account.
  final String username = UserGroupInformation.getCurrentUser().getShortUserName()
      + "_" + ++userCount;
  UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username,
      new String[] { "supergroup" });
  final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
  final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);
  // Check the visible length: at least expectedsize bytes must be readable.
  Assert.assertTrue(in.getVisibleLength() >= expectedsize);
  // Verify the visible bytes can actually be read and have the expected values.
  for (int i = 0; i < expectedsize; i++) {
    Assert.assertEquals((byte) i, (byte) in.read());
  }
  in.close();
}
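getVisibleLength() reports how many bytes a reader is allowed to see, and that count can grow while the file is still open for write. A hypothetical writer side that makes this test meaningful (names are illustrative, not from the source) might look like:

// Hypothetical writer: hflush() publishes the bytes written so far, so a
// concurrent reader's getVisibleLength() will cover them before close().
FSDataOutputStream out = fs.create(p);
for (int i = 0; i < expectedsize; i++) {
  out.write(i);
}
out.hflush();  // bytes are now visible to readers; the file remains open
checkFile(p, expectedsize, conf);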
Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
The class TestShortCircuitLocalRead, method checkUnsupportedMethod.
private boolean checkUnsupportedMethod(FileSystem fs, Path file, byte[] expected,
    int readOffset) throws IOException {
  // try-with-resources so the stream is closed even when the read throws.
  try (HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(file)) {
    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
    IOUtils.skipFully(stm, readOffset);
    try {
      stm.read(actual);
    } catch (UnsupportedOperationException unex) {
      // This reader does not implement the ByteBuffer read path.
      return true;
    }
    return false;
  }
}
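Here read(ByteBuffer) throws UnsupportedOperationException when the underlying block reader does not implement the ByteBuffer read path (as with legacy short-circuit local reads), which is exactly what this probe detects. A hypothetical caller (file1, fileData, and readOffset are illustrative names):

// Hypothetical caller: expects the ByteBuffer read to be rejected.
assertTrue("expected ByteBuffer reads to be unsupported",
    checkUnsupportedMethod(fs, file1, fileData, readOffset));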
Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
The class DFSClient, method createWrappedInputStream.
/**
* Wraps the stream in a CryptoInputStream if the underlying file is
* encrypted.
*/
public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis)
    throws IOException {
  final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
  if (feInfo != null) {
    // The file is encrypted: wrap the stream in a crypto stream.
    // Currently only one crypto protocol version exists, so no
    // version-specific logic is needed here.
    getCryptoProtocolVersion(feInfo);
    final CryptoCodec codec = getCryptoCodec(conf, feInfo);
    final KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
    final CryptoInputStream cryptoIn =
        new CryptoInputStream(dfsis, codec, decrypted.getMaterial(), feInfo.getIV());
    return new HdfsDataInputStream(cryptoIn);
  } else {
    // No FileEncryptionInfo, so no decryption is needed.
    return new HdfsDataInputStream(dfsis);
  }
}
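Callers do not deal with the crypto wrapping directly: a raw DFSInputStream is opened and then passed through this method, so the same HdfsDataInputStream type comes back whether or not the file sits in an encryption zone. A simplified, hypothetical sketch of that call path (the dfsClient variable and path string are illustrative):

// Hypothetical sketch, simplified from what DistributedFileSystem.open()
// does internally with its DFSClient.
DFSInputStream dfsis = dfsClient.open("/secure/data.bin");
HdfsDataInputStream in = dfsClient.createWrappedInputStream(dfsis);
// 'in' now transparently decrypts reads if the file is encrypted.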