Example 1 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hbase by apache.

the class FanOutOneBlockAsyncDFSOutputSaslHelper method createTransparentCryptoHelper.

private static TransparentCryptoHelper createTransparentCryptoHelper() throws NoSuchMethodException {
    // DFSClient.decryptEncryptedDataEncryptionKey is not public API, so look it
    // up reflectively (once) and force it accessible.
    Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class.getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class);
    decryptEncryptedDataEncryptionKeyMethod.setAccessible(true);
    return new TransparentCryptoHelper() {

        @Override
        public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) throws IOException {
            try {
                // Decrypt the file's encrypted data encryption key (EDEK), then
                // build an encryptor for the file's cipher suite.
                KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo);
                CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite());
                Encryptor encryptor = cryptoCodec.createEncryptor();
                encryptor.init(decryptedKey.getMaterial(), feInfo.getIV());
                return encryptor;
            } catch (InvocationTargetException e) {
                // Unwrap the reflective wrapper: rethrow IOExceptions as-is,
                // wrap everything else in a RuntimeException.
                Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
                throw new RuntimeException(e.getTargetException());
            } catch (GeneralSecurityException e) {
                throw new IOException(e);
            } catch (IllegalAccessException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Configuration(org.apache.hadoop.conf.Configuration) KeyVersion(org.apache.hadoop.crypto.key.KeyProvider.KeyVersion) GeneralSecurityException(java.security.GeneralSecurityException) Encryptor(org.apache.hadoop.crypto.Encryptor) Method(java.lang.reflect.Method) IOException(java.io.IOException) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) InvocationTargetException(java.lang.reflect.InvocationTargetException) CryptoCodec(org.apache.hadoop.crypto.CryptoCodec)
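A usage note: below is a minimal sketch of how the returned helper could be driven. The conf, feInfo and dfsClient parameters are hypothetical placeholders, and imports for java.nio.ByteBuffer and java.nio.charset.StandardCharsets are assumed; none of this wrapper is part of the HBase source. Hadoop's Encryptor contract operates on direct ByteBuffers, hence allocateDirect.

private static void encryptExample(Configuration conf, FileEncryptionInfo feInfo, DFSClient dfsClient) throws Exception {
    // Build the helper once (one-time reflection lookup), then create a
    // per-file encryptor from the file's encryption info.
    TransparentCryptoHelper helper = createTransparentCryptoHelper();
    Encryptor encryptor = helper.createEncryptor(conf, feInfo, dfsClient);
    // Encrypt the bytes remaining in 'plain' into 'cipher'; the caller is
    // responsible for flipping the output buffer before reading it.
    ByteBuffer plain = ByteBuffer.allocateDirect(16);
    plain.put("secret bytes".getBytes(StandardCharsets.UTF_8));
    plain.flip();
    ByteBuffer cipher = ByteBuffer.allocateDirect(16);
    encryptor.encrypt(plain, cipher);
    cipher.flip();
}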

Example 2 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

the class TestBlockReplacement method testDeletedBlockWhenAddBlockIsInEdit.

/**
   * The standby namenode must not queue a Delete block request while the
   * corresponding add block request is still in the edit log, yet to be read.
   * @throws Exception
   */
@Test
public void testDeletedBlockWhenAddBlockIsInEdit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    DFSClient client = null;
    try {
        cluster.waitActive();
        assertEquals("Number of namenodes is not 2", 2, cluster.getNumNameNodes());
        // Transition namenode 0 to active.
        cluster.transitionToActive(0);
        assertTrue("Namenode 0 should be in active state", cluster.getNameNode(0).isActiveState());
        assertTrue("Namenode 1 should be in standby state", cluster.getNameNode(1).isStandbyState());
        // Trigger a heartbeat to set DatanodeStorageInfo#heartbeatedSinceFailover
        // to true.
        DataNodeTestUtils.triggerHeartbeat(cluster.getDataNodes().get(0));
        FileSystem fs = cluster.getFileSystem(0);
        // Trigger a block report to set DatanodeStorageInfo#blockContentsStale
        // to false.
        cluster.getDataNodes().get(0).triggerBlockReport(new BlockReportOptions.Factory().setIncremental(false).build());
        Path fileName = new Path("/tmp.txt");
        // create a file with one block
        DFSTestUtil.createFile(fs, fileName, 10L, (short) 1, 1234L);
        DFSTestUtil.waitReplication(fs, fileName, (short) 1);
        client = new DFSClient(cluster.getFileSystem(0).getUri(), conf);
        List<LocatedBlock> locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
        assertTrue(locatedBlocks.size() == 1);
        assertTrue(locatedBlocks.get(0).getLocations().length == 1);
        // add a second datanode to the cluster
        cluster.startDataNodes(conf, 1, true, null, null, null, null);
        assertEquals("Number of datanodes should be 2", 2, cluster.getDataNodes().size());
        DataNode dn0 = cluster.getDataNodes().get(0);
        DataNode dn1 = cluster.getDataNodes().get(1);
        String activeNNBPId = cluster.getNamesystem(0).getBlockPoolId();
        DatanodeDescriptor sourceDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn0.getDNRegistrationForBP(activeNNBPId));
        DatanodeDescriptor destDnDesc = NameNodeAdapter.getDatanode(cluster.getNamesystem(0), dn1.getDNRegistrationForBP(activeNNBPId));
        ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
        LOG.info("replaceBlock:  " + replaceBlock(block, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) sourceDnDesc, (DatanodeInfo) destDnDesc));
        // Wait for the FsDatasetAsyncDiskService to delete the block
        for (int tries = 0; tries < 20; tries++) {
            Thread.sleep(1000);
            // Trigger a deletion block report so the deleted block is reported
            // to the namenode
            DataNodeTestUtils.triggerDeletionReport(cluster.getDataNodes().get(0));
            locatedBlocks = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
            // If the block was deleted and now lives on only 1 datanode, break out
            if (locatedBlocks.get(0).getLocations().length == 1) {
                break;
            }
        }
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        assertTrue("Namenode 1 should be in active state", cluster.getNameNode(1).isActiveState());
        assertTrue("Namenode 0 should be in standby state", cluster.getNameNode(0).isStandbyState());
        client.close();
        // Open a new client against the new active namenode
        client = new DFSClient(cluster.getFileSystem(1).getUri(), conf);
        List<LocatedBlock> locatedBlocks1 = client.getNamenode().getBlockLocations("/tmp.txt", 0, 10L).getLocatedBlocks();
        assertEquals(1, locatedBlocks1.size());
        assertEquals("The block should be only on 1 datanode ", 1, locatedBlocks1.get(0).getLocations().length);
    } finally {
        IOUtils.cleanup(null, client);
        cluster.shutdown();
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) BlockReportOptions(org.apache.hadoop.hdfs.client.BlockReportOptions) Test(org.junit.Test)
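A side note on the wait loop above: Hadoop's own test helper GenericTestUtils.waitFor can replace the hand-rolled sleep/retry. A minimal sketch, assuming imports of org.apache.hadoop.test.GenericTestUtils and the Supplier type its waitFor expects (Guava's com.google.common.base.Supplier on this branch; newer branches take java.util.function.Supplier):

// Equivalent wait: poll every second, fail with TimeoutException after 20s.
// A final alias is needed because 'client' is reassigned later in the test.
final DFSClient dfs = client;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        try {
            // Push the deletion report, then check whether one replica remains.
            DataNodeTestUtils.triggerDeletionReport(cluster.getDataNodes().get(0));
            return dfs.getNamenode().getBlockLocations("/tmp.txt", 0, 10L)
                .getLocatedBlocks().get(0).getLocations().length == 1;
        } catch (Exception e) {
            return false;
        }
    }
}, 1000, 20000);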

Example 3 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

the class TestRetryCacheWithHA method testDeleteSnapshot.

@Test(timeout = 60000)
public void testDeleteSnapshot() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new DeleteSnapshotOp(client, "/test", "s1");
    testClientRetryWithFailover(op);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Test(org.junit.Test)
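The DeleteSnapshotOp here, like the AddCachePoolOp and ModifyCachePoolOp in Examples 4 and 5 below, implements a small contract defined inside TestRetryCacheWithHA: prepare state, fire the at-most-once RPC through a client whose retry handler forces a failover mid-call, then verify the namenode applied it exactly once. A rough sketch of that shape, reconstructed from the call sites above (member names are an assumption; check the TestRetryCacheWithHA source in your Hadoop version):

abstract class AtMostOnceOp {
    final String name;
    final DFSClient client;

    AtMostOnceOp(String name, DFSClient client) {
        this.name = name;
        this.client = client;
    }

    // set up whatever state the RPC needs, e.g. create the snapshottable dir
    abstract void prepare() throws Exception;

    // issue the at-most-once RPC exactly once from the caller's point of view
    abstract void invoke() throws Exception;

    // poll the (new active) namenode until the op's effect is visible
    abstract boolean checkNamenodeBeforeReturn() throws Exception;

    abstract Object getResult();
}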

Example 4 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

the class TestRetryCacheWithHA method testAddCachePool.

@Test(timeout = 60000)
public void testAddCachePool() throws Exception {
    DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new AddCachePoolOp(client, "pool");
    testClientRetryWithFailover(op);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Test(org.junit.Test)

Example 5 with DFSClient

use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.

the class TestRetryCacheWithHA method testModifyCachePool.

@Test(timeout = 60000)
public void testModifyCachePool() throws Exception {
    DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new ModifyCachePoolOp(client, "pool");
    testClientRetryWithFailover(op);
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Test(org.junit.Test)

Aggregations

DFSClient (org.apache.hadoop.hdfs.DFSClient) 107
Test (org.junit.Test) 58
IOException (java.io.IOException) 39
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes) 27
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle) 26
Path (org.apache.hadoop.fs.Path) 20
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 19
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 18
Configuration (org.apache.hadoop.conf.Configuration) 17
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus) 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 14
InetSocketAddress (java.net.InetSocketAddress) 13
FileSystem (org.apache.hadoop.fs.FileSystem) 12
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration) 12
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 10
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream) 10
ArrayList (java.util.ArrayList) 9
InvocationTargetException (java.lang.reflect.InvocationTargetException) 8
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 8
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping) 8