Example 16 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.

From the class DFSAdmin, method finalizeUpgrade.

/**
   * Command to ask the namenode to finalize a previously performed upgrade.
   * Usage: hdfs dfsadmin -finalizeUpgrade
   * @exception IOException
   */
public int finalizeUpgrade() throws IOException {
    DistributedFileSystem dfs = getDFS();
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaAndLogicalUri) {
        // In the case of HA and logical URI, run finalizeUpgrade for all
        // NNs in this nameservice.
        String nsId = dfsUri.getHost();
        List<ClientProtocol> namenodes = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
        if (!HAUtil.isAtLeastOneActive(namenodes)) {
            throw new IOException("Cannot finalize with no NameNode active");
        }
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            proxy.getProxy().finalizeUpgrade();
            System.out.println("Finalize upgrade successful for " + proxy.getAddress());
        }
    } else {
        dfs.finalizeUpgrade();
        System.out.println("Finalize upgrade successful");
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)
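
The HA branch above is a pattern that recurs across the DFSAdmin subcommands: resolve the nameservice ID from the logical URI, obtain a ProxyAndInfo<ClientProtocol> for every NameNode in that nameservice, and invoke the same RPC on each proxy. Below is a minimal sketch of that pattern factored into a reusable helper; the RemoteOp interface and the runOnAllNameNodes name are illustrative, not part of DFSAdmin.

import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class HaFanOutSketch {

    /** One ClientProtocol call to run against each NameNode. */
    interface RemoteOp {
        void run(ClientProtocol namenode) throws IOException;
    }

    /** Runs the given operation on every NameNode in the logical nameservice. */
    static void runOnAllNameNodes(Configuration conf, URI logicalUri, RemoteOp op) throws IOException {
        String nsId = logicalUri.getHost();
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            op.run(proxy.getProxy());
            System.out.println("Operation successful for " + proxy.getAddress());
        }
    }
}

With such a helper, the HA branch of finalizeUpgrade reduces to runOnAllNameNodes(dfsConf, dfsUri, ClientProtocol::finalizeUpgrade).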

Example 17 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.

From the class DFSAdmin, method restoreFailedStorage.

/**
   * Command to enable/disable/check restoring of failed storage replicas in the namenode.
   * Usage: hdfs dfsadmin -restoreFailedStorage true|false|check
   * @exception IOException 
   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
   */
public int restoreFailedStorage(String arg) throws IOException {
    int exitCode = -1;
    if (!arg.equals("check") && !arg.equals("true") && !arg.equals("false")) {
        System.err.println("restoreFailedStorage valid args are true|false|check");
        return exitCode;
    }
    DistributedFileSystem dfs = getDFS();
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaEnabled) {
        String nsId = dfsUri.getHost();
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            Boolean res = proxy.getProxy().restoreFailedStorage(arg);
            System.out.println("restoreFailedStorage is set to " + res + " for " + proxy.getAddress());
        }
    } else {
        Boolean res = dfs.restoreFailedStorage(arg);
        System.out.println("restoreFailedStorage is set to " + res);
    }
    exitCode = 0;
    return exitCode;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)
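
Both subcommands above are normally reached through the hdfs dfsadmin shell entry point named in their Javadoc (hdfs dfsadmin -finalizeUpgrade and hdfs dfsadmin -restoreFailedStorage true|false|check). A minimal sketch of driving the same code path programmatically through ToolRunner follows; the driver class is illustrative and assumes an HDFS reachable via fs.defaultFS.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class DfsAdminDriverSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Equivalent to: hdfs dfsadmin -restoreFailedStorage check
        int rc = ToolRunner.run(conf, new DFSAdmin(), new String[] { "-restoreFailedStorage", "check" });
        // Equivalent to: hdfs dfsadmin -finalizeUpgrade
        if (rc == 0) {
            rc = ToolRunner.run(conf, new DFSAdmin(), new String[] { "-finalizeUpgrade" });
        }
        System.exit(rc);
    }
}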

Example 18 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.

From the class TestEncryptionZones, method testCipherSuiteNegotiation.

// This test only uses mocks. Called from the end of an existing test to
// avoid an extra mini cluster.
private static void testCipherSuiteNegotiation(DistributedFileSystem fs, Configuration conf) throws Exception {
    // Set up mock ClientProtocol to test client-side CipherSuite negotiation
    final ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
    // Try with an empty conf
    final Configuration noCodecConf = new Configuration(conf);
    final CipherSuite suite = CipherSuite.AES_CTR_NOPADDING;
    final String confKey = CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX + suite.getConfigSuffix();
    noCodecConf.set(confKey, "");
    fs.dfs = new DFSClient(null, mcp, noCodecConf, null);
    mockCreate(mcp, suite, CryptoProtocolVersion.ENCRYPTION_ZONES);
    try {
        fs.create(new Path("/mock"));
        fail("Created with no configured codecs!");
    } catch (UnknownCipherSuiteException e) {
        assertExceptionContains("No configuration found for the cipher", e);
    }
    // Try create with an UNKNOWN CipherSuite
    fs.dfs = new DFSClient(null, mcp, conf, null);
    CipherSuite unknown = CipherSuite.UNKNOWN;
    unknown.setUnknownValue(989);
    mockCreate(mcp, unknown, CryptoProtocolVersion.ENCRYPTION_ZONES);
    try {
        fs.create(new Path("/mock"));
        fail("Created with unknown cipher!");
    } catch (IOException e) {
        assertExceptionContains("unknown CipherSuite with ID 989", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) CipherSuite(org.apache.hadoop.crypto.CipherSuite) Mockito.anyString(org.mockito.Mockito.anyString) IOException(java.io.IOException) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
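
The essential technique in the test above is that fs.dfs is swapped for a DFSClient built over a Mockito mock of ClientProtocol, so every NameNode RPC is answered locally by the stub. Here is a stripped-down sketch of that stubbing pattern, using only the mkdirs RPC already shown in these examples; the stubbed return value and path are illustrative.

import static org.junit.Assert.assertTrue;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.mockito.Mockito;

public class ClientProtocolMockSketch {
    public static void main(String[] args) throws Exception {
        // No NameNode involved: the mock answers the RPC in-process.
        ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
        Mockito.when(mcp.mkdirs(Mockito.anyString(), Mockito.any(FsPermission.class), Mockito.anyBoolean()))
            .thenReturn(true);
        assertTrue(mcp.mkdirs("/mock", new FsPermission((short) 0755), true));
        Mockito.verify(mcp).mkdirs("/mock", new FsPermission((short) 0755), true);
    }
}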

Example 19 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.

From the class TestMover, method testMoverWithStripedFile.

@Test(timeout = 300000)
public void testMoverWithStripedFile() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConfWithStripe(conf);
    // start 10 datanodes
    int numOfDatanodes = 10;
    int storagesPerDatanode = 2;
    long capacity = 10 * defaultBlockSize;
    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
    for (int i = 0; i < numOfDatanodes; i++) {
        for (int j = 0; j < storagesPerDatanode; j++) {
            capacities[i][j] = capacity;
        }
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numOfDatanodes)
        .storagesPerDatanode(storagesPerDatanode)
        .storageTypes(new StorageType[][] {
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE } })
        .storageCapacities(capacities)
        .build();
    try {
        cluster.waitActive();
        // set "/bar" directory with HOT storage policy.
        ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        String barDir = "/bar";
        client.mkdirs(barDir, new FsPermission((short) 777), true);
        client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
        // set an EC policy on "/bar" directory
        client.setErasureCodingPolicy(barDir, StripedFileTestUtil.getDefaultECPolicy().getName());
        // write file to barDir
        final String fooFile = "/bar/foo";
        long fileLen = 20 * defaultBlockSize;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile), fileLen, (short) 3, 0);
        // verify storage types and locations
        LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.DISK, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5, new StorageType[][] {
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE },
            { StorageType.ARCHIVE, StorageType.ARCHIVE } },
            true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file to ARCHIVE
        client.setStoragePolicy(barDir, "COLD");
        // run Mover
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // verify storage types and locations
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5, new StorageType[][] {
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK } },
            true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file blocks to ONE_SSD policy
        client.setStoragePolicy(barDir, "ONE_SSD");
        // run Mover
        rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        // verify storage types and locations
        // Movements should have been ignored for the unsupported policy on
        // striped file
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsPermission(org.apache.hadoop.fs.permission.FsPermission) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) Test(org.junit.Test)
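
Outside a mini cluster test, the same recipe used above — NameNodeProxies.createProxy(...).getProxy() followed by getBlockLocations — is how the placement of a file's blocks can be inspected directly over ClientProtocol. A minimal sketch, assuming a reachable NameNode; the URI and file path are placeholders.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class BlockLocationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        URI nnUri = URI.create("hdfs://namenode.example.com:8020"); // placeholder
        ClientProtocol client = NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class).getProxy();
        String src = "/bar/foo"; // placeholder path
        // Ask the NameNode for every block of the file and print its storage types.
        LocatedBlocks blocks = client.getBlockLocations(src, 0, Long.MAX_VALUE);
        for (LocatedBlock lb : blocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                System.out.println(lb.getBlock() + " -> " + type);
            }
        }
    }
}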

Example 20 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.

From the class TestStripedINodeFile, method testUnsuitableStoragePoliciesWithECStripedMode.

/**
   * Tests that block placement on file creation in EC striped mode ignores the
   * storage policy when it is not suitable. The storage policies supported for
   * EC striped mode are HOT, COLD and ALL_SSD; any other policy that is set is
   * ignored and the default policy is used instead.
   */
@Test(timeout = 60000)
public void testUnsuitableStoragePoliciesWithECStripedMode() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1L);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    // start 10 datanodes
    int numOfDatanodes = 10;
    int storagesPerDatanode = 2;
    long capacity = 10 * defaultStripedBlockSize;
    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
    for (int i = 0; i < numOfDatanodes; i++) {
        for (int j = 0; j < storagesPerDatanode; j++) {
            capacities[i][j] = capacity;
        }
    }
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numOfDatanodes)
        .storagesPerDatanode(storagesPerDatanode)
        .storageTypes(new StorageType[][] {
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.SSD, StorageType.DISK },
            { StorageType.DISK, StorageType.SSD },
            { StorageType.DISK, StorageType.SSD },
            { StorageType.DISK, StorageType.SSD },
            { StorageType.DISK, StorageType.SSD },
            { StorageType.DISK, StorageType.SSD } })
        .storageCapacities(capacities)
        .build();
    try {
        cluster.waitActive();
        // set "/foo" directory with ONE_SSD storage policy.
        ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        String fooDir = "/foo";
        client.mkdirs(fooDir, new FsPermission((short) 777), true);
        client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
        // set an EC policy on "/foo" directory
        client.setErasureCodingPolicy(fooDir, StripedFileTestUtil.getDefaultECPolicy().getName());
        // write file to fooDir
        final String barFile = "/foo/bar";
        long fileLen = 20 * defaultStripedBlockSize;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path(barFile), fileLen, (short) 3, 0);
        // verify storage types and locations
        LocatedBlocks locatedBlocks = client.getBlockLocations(barFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.DISK, type);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsPermission(org.apache.hadoop.fs.permission.FsPermission) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) Test(org.junit.Test)
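
The HOT, COLD, ALL_SSD and ONE_SSD names used in the two tests above are built-in block storage policies that the NameNode exposes over ClientProtocol. A short sketch that lists them, assuming a reachable NameNode; the URI is a placeholder and the driver class is illustrative.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class StoragePolicyListSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        URI nnUri = URI.create("hdfs://namenode.example.com:8020"); // placeholder
        ClientProtocol client = NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class).getProxy();
        // Print the id and name of every storage policy defined on the NameNode.
        for (BlockStoragePolicy policy : client.getStoragePolicies()) {
            System.out.println(policy.getId() + "\t" + policy.getName());
        }
    }
}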

Aggregations

ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol)21 Configuration (org.apache.hadoop.conf.Configuration)14 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)10 URI (java.net.URI)8 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)8 ProxyAndInfo (org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo)7 IOException (java.io.IOException)5 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)5 Path (org.apache.hadoop.fs.Path)4 Test (org.junit.Test)4 DFSClient (org.apache.hadoop.hdfs.DFSClient)3 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)3 Field (java.lang.reflect.Field)2 InvocationTargetException (java.lang.reflect.InvocationTargetException)2 InetSocketAddress (java.net.InetSocketAddress)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 StorageType (org.apache.hadoop.fs.StorageType)2 FsPermission (org.apache.hadoop.fs.permission.FsPermission)2 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)2 ClientNamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB)2