
Example 6 with HdfsDataInputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.

Class TestMaintenanceState, method getFirstBlockReplicasDatanodeInfos.

private static DatanodeInfo[] getFirstBlockReplicasDatanodeInfos(FileSystem fileSys, Path name) throws IOException {
    // need a raw stream
    assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    if (dinfo.iterator().hasNext()) {
        // for the first block
        return dinfo.iterator().next().getLocations();
    } else {
        return null;
    }
}
Also used : LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
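
A minimal usage sketch (not part of the Hadoop sources) showing how block locations can be pulled from an HdfsDataInputStream the same way the helper above does. The MiniDFSCluster setup, class name and file path are illustrative assumptions.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class FirstBlockLocationsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            Path file = new Path("/sketch/firstBlock.dat");  // hypothetical test file
            DFSTestUtil.createFile(fs, file, 1024, (short) 3, 0L);
            // Open as a raw HDFS stream so getAllBlocks() is available.
            try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
                List<LocatedBlock> blocks = in.getAllBlocks();
                if (!blocks.isEmpty()) {
                    // Same information the helper above returns for the first block.
                    for (DatanodeInfo dn : blocks.get(0).getLocations()) {
                        System.out.println("First block replica on " + dn);
                    }
                }
            }
        } finally {
            cluster.shutdown();
        }
    }
}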

Example 7 with HdfsDataInputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.

Class TestMaintenanceState, method checkFile.

/**
  * Verify that the number of replicas is as expected for each block in
  * the given file.
  *
  * @return null if no failure is found, otherwise an error message string.
  */
static String checkFile(FSNamesystem ns, FileSystem fileSys, Path name, int repl, DatanodeInfo expectedExcludedNode, DatanodeInfo expectedMaintenanceNode) throws IOException {
    // need a raw stream
    assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
    BlockManager bm = ns.getBlockManager();
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    String output;
    for (LocatedBlock blk : dinfo) {
        // for each block
        DatanodeInfo[] nodes = blk.getLocations();
        for (int j = 0; j < nodes.length; j++) {
            // for each replica
            if (expectedExcludedNode != null && nodes[j].equals(expectedExcludedNode)) {
                //excluded node must not be in LocatedBlock.
                output = "For block " + blk.getBlock() + " replica on " + nodes[j] + " found in LocatedBlock.";
                LOG.info(output);
                return output;
            } else {
                if (nodes[j].isInMaintenance()) {
                    //IN_MAINTENANCE node must not be in LocatedBlock.
                    output = "For block " + blk.getBlock() + " replica on " + nodes[j] + " which is in maintenance state.";
                    LOG.info(output);
                    return output;
                }
            }
        }
        if (repl != nodes.length) {
            output = "Wrong number of replicas for block " + blk.getBlock() + ": expected " + repl + ", got " + nodes.length + " ,";
            for (int j = 0; j < nodes.length; j++) {
                // for each replica
                output += nodes[j] + ",";
            }
            output += "pending block # " + ns.getPendingReplicationBlocks() + " ,";
            output += "under replicated # " + ns.getUnderReplicatedBlocks() + " ,";
            if (expectedExcludedNode != null) {
                output += "excluded node " + expectedExcludedNode;
            }
            LOG.info(output);
            return output;
        }
        // Verify it has the expected maintenance node
        Iterator<DatanodeStorageInfo> storageInfoIter = bm.getStorages(blk.getBlock().getLocalBlock()).iterator();
        List<DatanodeInfo> maintenanceNodes = new ArrayList<>();
        while (storageInfoIter.hasNext()) {
            DatanodeInfo node = storageInfoIter.next().getDatanodeDescriptor();
            if (node.isMaintenance()) {
                maintenanceNodes.add(node);
            }
        }
        if (expectedMaintenanceNode != null) {
            if (!maintenanceNodes.contains(expectedMaintenanceNode)) {
                output = "No maintenance replica on " + expectedMaintenanceNode;
                LOG.info(output);
                return output;
            }
        } else {
            if (maintenanceNodes.size() != 0) {
                output = "Has maintenance replica(s)";
                LOG.info(output);
                return output;
            }
        }
    }
    return null;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
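
The per-replica maintenance check inside checkFile can be factored into a small standalone helper. The sketch below is not from the Hadoop sources; the class and method names are hypothetical, and it assumes the caller already has an HDFS-backed FileSystem (for example from a MiniDFSCluster).

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class MaintenanceReplicaCheckSketch {
    /**
     * Returns the datanodes that hold a replica of any block of the file and
     * are reported as IN_MAINTENANCE in the client-visible block locations.
     */
    static List<DatanodeInfo> inMaintenanceReplicas(FileSystem fs, Path file) throws IOException {
        List<DatanodeInfo> result = new ArrayList<>();
        // The cast assumes fs is backed by HDFS (DistributedFileSystem).
        try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
            for (LocatedBlock blk : in.getAllBlocks()) {
                for (DatanodeInfo dn : blk.getLocations()) {
                    if (dn.isInMaintenance()) {
                        result.add(dn);
                    }
                }
            }
        }
        return result;
    }
}

A test holding a MiniDFSCluster would call inMaintenanceReplicas(fs, file) and assert that the returned list is empty, since IN_MAINTENANCE replicas are not expected in the client-visible locations, mirroring the check performed by checkFile above.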

Example 8 with HdfsDataInputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.

Class TestReadWhileWriting, method checkFile.

//check the file
static void checkFile(Path p, int expectedsize, final Configuration conf) throws IOException, InterruptedException {
    //open the file with another user account
    final String username = UserGroupInformation.getCurrentUser().getShortUserName() + "_" + ++userCount;
    UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, new String[] { "supergroup" });
    final FileSystem fs = DFSTestUtil.getFileSystemAs(ugi, conf);
    final HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p);
    //Check visible length
    Assert.assertTrue(in.getVisibleLength() >= expectedsize);
    //Able to read?
    for (int i = 0; i < expectedsize; i++) {
        Assert.assertEquals((byte) i, (byte) in.read());
    }
    in.close();
}
Also used : FileSystem(org.apache.hadoop.fs.FileSystem) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
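
A minimal sketch (not from the Hadoop sources) of the visible-length behaviour this test relies on: bytes that have been hflush()'d become readable by a second stream even while the file is still open for writing. The cluster setup and file path are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class VisibleLengthSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            Path p = new Path("/sketch/visible.dat");  // hypothetical test file
            FSDataOutputStream out = fs.create(p);
            try {
                for (int i = 0; i < 128; i++) {
                    out.write(i);
                }
                out.hflush();  // make the first 128 bytes visible to readers
                try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(p)) {
                    // Reports at least the 128 hflush()'d bytes even though the
                    // writer still has the file open.
                    System.out.println("visible length = " + in.getVisibleLength());
                }
            } finally {
                out.close();
            }
        } finally {
            cluster.shutdown();
        }
    }
}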

Example 9 with HdfsDataInputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.

Class TestShortCircuitLocalRead, method checkUnsupportedMethod.

private boolean checkUnsupportedMethod(FileSystem fs, Path file, byte[] expected, int readOffset) throws IOException {
    HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(file);
    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
    IOUtils.skipFully(stm, readOffset);
    try {
        stm.read(actual);
    } catch (UnsupportedOperationException unex) {
        return true;
    }
    return false;
}
Also used : ByteBuffer(java.nio.ByteBuffer) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
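
For contrast, a minimal sketch (not from the Hadoop sources) of the same ByteBuffer read path under an ordinary configuration: the stream fills a direct buffer via read(ByteBuffer), and a block reader without ByteBuffer support would reject the call with the UnsupportedOperationException that checkUnsupportedMethod looks for. The cluster setup and file path are assumptions.

import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class ByteBufferReadSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            Path file = new Path("/sketch/bytebuffer.dat");  // hypothetical test file
            DFSTestUtil.createFile(fs, file, 4096, (short) 1, 0L);
            try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
                ByteBuffer buf = ByteBuffer.allocateDirect(1024);
                try {
                    int n = in.read(buf);  // ByteBufferReadable path
                    System.out.println("read " + n + " bytes into the direct buffer");
                } catch (UnsupportedOperationException e) {
                    // A block reader without ByteBuffer support rejects this call,
                    // which is exactly what checkUnsupportedMethod above detects.
                    System.out.println("ByteBuffer reads not supported by this reader");
                }
            }
        } finally {
            cluster.shutdown();
        }
    }
}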

Example 10 with HdfsDataInputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.

Class DFSClient, method createWrappedInputStream.

/**
   * Wraps the stream in a CryptoInputStream if the underlying file is
   * encrypted.
   */
public HdfsDataInputStream createWrappedInputStream(DFSInputStream dfsis) throws IOException {
    final FileEncryptionInfo feInfo = dfsis.getFileEncryptionInfo();
    if (feInfo != null) {
        // File is encrypted, wrap the stream in a crypto stream.
        // Currently only one version, so no special logic based on the version #
        getCryptoProtocolVersion(feInfo);
        final CryptoCodec codec = getCryptoCodec(conf, feInfo);
        final KeyVersion decrypted = decryptEncryptedDataEncryptionKey(feInfo);
        final CryptoInputStream cryptoIn = new CryptoInputStream(dfsis, codec, decrypted.getMaterial(), feInfo.getIV());
        return new HdfsDataInputStream(cryptoIn);
    } else {
        // No FileEncryptionInfo so no encryption.
        return new HdfsDataInputStream(dfsis);
    }
}
Also used : CryptoInputStream(org.apache.hadoop.crypto.CryptoInputStream) KeyVersion(org.apache.hadoop.crypto.key.KeyProvider.KeyVersion) EncryptedKeyVersion(org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion) CryptoCodec(org.apache.hadoop.crypto.CryptoCodec) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
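
From the caller's side, the effect of createWrappedInputStream can be observed through the wrapped stream type. The sketch below is not from the Hadoop sources and configures no encryption zone, so the wrapped stream is the plain DFSInputStream; for a file inside an encryption zone it would be a CryptoInputStream. The cluster setup and file path are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class WrappedStreamSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            Path file = new Path("/sketch/plain.dat");  // hypothetical, not in an encryption zone
            DFSTestUtil.createFile(fs, file, 512, (short) 1, 0L);
            try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
                boolean encrypted = in.getWrappedStream() instanceof CryptoInputStream;
                // Prints false here; it would print true for a file in an encryption zone.
                System.out.println("stream wraps CryptoInputStream: " + encrypted);
            }
        } finally {
            cluster.shutdown();
        }
    }
}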

Aggregations

HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream) 20
Path (org.apache.hadoop.fs.Path) 8
Test (org.junit.Test) 7
ByteBuffer (java.nio.ByteBuffer) 5
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 5
TimeoutException (java.util.concurrent.TimeoutException) 4
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 4
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 4
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 4
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 4
IOException (java.io.IOException) 3
ArrayList (java.util.ArrayList) 2
Configuration (org.apache.hadoop.conf.Configuration) 2
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 2
FileSystem (org.apache.hadoop.fs.FileSystem) 2
DefaultHttpResponse (io.netty.handler.codec.http.DefaultHttpResponse) 1
HttpHeaders (io.netty.handler.codec.http.HttpHeaders) 1
ChunkedStream (io.netty.handler.stream.ChunkedStream) 1
EOFException (java.io.EOFException) 1