Example usage of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project: class DFSAdmin, method finalizeUpgrade.
/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 *
 * @return 0 on success
 * @exception IOException if a finalize RPC fails or, in the HA case, no
 *            NameNode is currently active
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    // Build the per-NameNode proxies once and reuse them for both the
    // active-NN precondition check and the finalize calls. (The previous
    // code created a second, redundant set of RPC proxies just for the
    // check.)
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
    List<ClientProtocol> namenodes = new java.util.ArrayList<>(proxies.size());
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      namenodes.add(proxy.getProxy());
    }
    // Refuse to finalize unless at least one NN is active, to avoid
    // finalizing a nameservice that cannot currently serve clients.
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for " + proxy.getAddress());
    }
  } else {
    // Non-HA (or non-logical URI): finalize on the single NameNode.
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }
  return 0;
}
Example usage of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project: class DFSAdmin, method restoreFailedStorage.
/**
 * Command to enable/disable/check restoring of failed storage replicas in the namenode.
 * Usage: hdfs dfsadmin -restoreFailedStorage true|false|check
 *
 * @return 0 on success, -1 if the argument is not one of true|false|check
 * @exception IOException
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
 */
public int restoreFailedStorage(String arg) throws IOException {
  // Only the three recognized argument values are accepted.
  boolean validArg =
      arg.equals("check") || arg.equals("true") || arg.equals("false");
  if (!validArg) {
    System.err.println("restoreFailedStorage valid args are true|false|check");
    return -1;
  }
  DistributedFileSystem fileSystem = getDFS();
  Configuration clientConf = fileSystem.getConf();
  URI fsUri = fileSystem.getUri();
  if (HAUtilClient.isLogicalUri(clientConf, fsUri)) {
    // HA with a logical URI: apply the setting to every NameNode in the
    // nameservice so they all agree.
    String nsId = fsUri.getHost();
    for (ProxyAndInfo<ClientProtocol> proxy :
        HAUtil.getProxiesForAllNameNodesInNameservice(clientConf, nsId, ClientProtocol.class)) {
      Boolean res = proxy.getProxy().restoreFailedStorage(arg);
      System.out.println("restoreFailedStorage is set to " + res + " for " + proxy.getAddress());
    }
  } else {
    // Single NameNode: issue the call directly through the filesystem.
    Boolean res = fileSystem.restoreFailedStorage(arg);
    System.out.println("restoreFailedStorage is set to " + res);
  }
  return 0;
}
Example usage of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project: class TestEncryptionZones, method testCipherSuiteNegotiation.
// This test only uses mocks. Called from the end of an existing test to
// avoid an extra mini cluster.
/**
 * Exercises client-side CipherSuite negotiation failure paths against a
 * mocked {@link ClientProtocol}:
 * <ol>
 *   <li>with the codec class list for AES/CTR blanked out, create() must
 *       fail with {@link UnknownCipherSuiteException};</li>
 *   <li>when the (mocked) NameNode reports an UNKNOWN CipherSuite with an
 *       unrecognized id, create() must fail with an IOException naming
 *       that id.</li>
 * </ol>
 */
private static void testCipherSuiteNegotiation(DistributedFileSystem fs, Configuration conf) throws Exception {
  // Set up mock ClientProtocol to test client-side CipherSuite negotiation
  final ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
  // Try with an empty conf
  final Configuration noCodecConf = new Configuration(conf);
  final CipherSuite suite = CipherSuite.AES_CTR_NOPADDING;
  final String confKey = CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX + suite.getConfigSuffix();
  // Blank the codec class list for this suite so no codec can be resolved.
  noCodecConf.set(confKey, "");
  // Inject a DFSClient wired to the mock so no real NameNode is contacted.
  fs.dfs = new DFSClient(null, mcp, noCodecConf, null);
  mockCreate(mcp, suite, CryptoProtocolVersion.ENCRYPTION_ZONES);
  try {
    fs.create(new Path("/mock"));
    fail("Created with no configured codecs!");
  } catch (UnknownCipherSuiteException e) {
    assertExceptionContains("No configuration found for the cipher", e);
  }
  // Try create with an UNKNOWN CipherSuite
  fs.dfs = new DFSClient(null, mcp, conf, null);
  CipherSuite unknown = CipherSuite.UNKNOWN;
  // NOTE(review): this mutates the shared UNKNOWN enum constant's unknown
  // value; other tests in the same JVM would observe 989 afterwards —
  // confirm this is acceptable for the surrounding suite.
  unknown.setUnknownValue(989);
  mockCreate(mcp, unknown, CryptoProtocolVersion.ENCRYPTION_ZONES);
  try {
    fs.create(new Path("/mock"));
    fail("Created with unknown cipher!");
  } catch (IOException e) {
    assertExceptionContains("unknown CipherSuite with ID 989", e);
  }
}
Example usage of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project: class TestMover, method testMoverWithStripedFile.
/**
 * End-to-end Mover test on an erasure-coded (striped) file:
 * <ol>
 *   <li>write a striped file on DISK under a HOT policy and verify all
 *       block storages are DISK;</li>
 *   <li>switch the directory to COLD, run the Mover, and verify all block
 *       storages moved to ARCHIVE;</li>
 *   <li>switch to ONE_SSD (unsupported for striped files), run the Mover
 *       again, and verify the blocks stay on ARCHIVE (movement ignored).</li>
 * </ol>
 */
@Test(timeout = 300000)
public void testMoverWithStripedFile() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConfWithStripe(conf);
  // start 10 datanodes
  int numOfDatanodes = 10;
  int storagesPerDatanode = 2;
  long capacity = 10 * defaultBlockSize;
  long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
  for (int i = 0; i < numOfDatanodes; i++) {
    for (int j = 0; j < storagesPerDatanode; j++) {
      capacities[i][j] = capacity;
    }
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
  // 5 DISK/DISK nodes plus 5 DISK/ARCHIVE nodes, so the initial write lands
  // on DISK while ARCHIVE capacity already exists for the later COLD move.
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numOfDatanodes).storagesPerDatanode(storagesPerDatanode).storageTypes(new StorageType[][] { { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.DISK }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE } }).storageCapacities(capacities).build();
  try {
    cluster.waitActive();
    // set "/bar" directory with HOT storage policy.
    ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
    String barDir = "/bar";
    // NOTE(review): (short) 777 is decimal, not octal 0777, so the
    // permission bits are unusual; harmless here but confirm 0777 was not
    // intended.
    client.mkdirs(barDir, new FsPermission((short) 777), true);
    client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    // set an EC policy on "/bar" directory
    client.setErasureCodingPolicy(barDir, StripedFileTestUtil.getDefaultECPolicy().getName());
    // write file to barDir
    final String fooFile = "/bar/foo";
    long fileLen = 20 * defaultBlockSize;
    DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile), fileLen, (short) 3, 0);
    // verify storage types and locations: everything should start on DISK.
    LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (StorageType type : lb.getStorageTypes()) {
        Assert.assertEquals(StorageType.DISK, type);
      }
    }
    StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
    // start 5 more datanodes (all-ARCHIVE) to give the COLD move targets.
    numOfDatanodes += 5;
    capacities = new long[5][storagesPerDatanode];
    for (int i = 0; i < 5; i++) {
      for (int j = 0; j < storagesPerDatanode; j++) {
        capacities[i][j] = capacity;
      }
    }
    cluster.startDataNodes(conf, 5, new StorageType[][] { { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE }, { StorageType.ARCHIVE, StorageType.ARCHIVE } }, true, null, null, null, capacities, null, false, false, false, null);
    cluster.triggerHeartbeats();
    // move file to ARCHIVE
    client.setStoragePolicy(barDir, "COLD");
    // run Mover
    int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
    Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
    // verify storage types and locations: all replicas now on ARCHIVE.
    locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (StorageType type : lb.getStorageTypes()) {
        Assert.assertEquals(StorageType.ARCHIVE, type);
      }
    }
    StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
    // start 5 more datanodes (SSD/DISK) so ONE_SSD would have targets if it
    // were honored.
    numOfDatanodes += 5;
    capacities = new long[5][storagesPerDatanode];
    for (int i = 0; i < 5; i++) {
      for (int j = 0; j < storagesPerDatanode; j++) {
        capacities[i][j] = capacity;
      }
    }
    cluster.startDataNodes(conf, 5, new StorageType[][] { { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK } }, true, null, null, null, capacities, null, false, false, false, null);
    cluster.triggerHeartbeats();
    // move file blocks to ONE_SSD policy
    client.setStoragePolicy(barDir, "ONE_SSD");
    // run Mover
    // NOTE(review): rc of this run is deliberately not asserted — only the
    // resulting storage types below are checked; confirm that is intended.
    rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
    // verify storage types and locations
    // Movements should have been ignored for the unsupported policy on
    // striped file
    locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (StorageType type : lb.getStorageTypes()) {
        Assert.assertEquals(StorageType.ARCHIVE, type);
      }
    }
  } finally {
    cluster.shutdown();
  }
}
Example usage of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project: class TestStripedINodeFile, method testUnsuitableStoragePoliciesWithECStripedMode.
/**
 * Tests when choosing blocks on file creation of EC striped mode should
 * ignore storage policy if that is not suitable. Supported storage policies
 * for EC Striped mode are HOT, COLD and ALL_SSD. For all other policies set
 * will be ignored and considered default policy.
 */
@Test(timeout = 60000)
public void testUnsuitableStoragePoliciesWithECStripedMode() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Block size = 4 cells so each striped block group spans several blocks.
  int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1L);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
  // start 10 datanodes
  int numOfDatanodes = 10;
  int storagesPerDatanode = 2;
  long capacity = 10 * defaultStripedBlockSize;
  long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
  for (int i = 0; i < numOfDatanodes; i++) {
    for (int j = 0; j < storagesPerDatanode; j++) {
      capacities[i][j] = capacity;
    }
  }
  // Every node has both SSD and DISK, so ONE_SSD could be satisfied if the
  // policy were honored — the test asserts it is NOT for striped files.
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numOfDatanodes).storagesPerDatanode(storagesPerDatanode).storageTypes(new StorageType[][] { { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.SSD, StorageType.DISK }, { StorageType.DISK, StorageType.SSD }, { StorageType.DISK, StorageType.SSD }, { StorageType.DISK, StorageType.SSD }, { StorageType.DISK, StorageType.SSD }, { StorageType.DISK, StorageType.SSD } }).storageCapacities(capacities).build();
  try {
    cluster.waitActive();
    // set "/foo" directory with ONE_SSD storage policy.
    ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
    String fooDir = "/foo";
    // NOTE(review): (short) 777 is decimal, not octal 0777, so the
    // permission bits are unusual; harmless here but confirm 0777 was not
    // intended.
    client.mkdirs(fooDir, new FsPermission((short) 777), true);
    client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // set an EC policy on "/foo" directory
    client.setErasureCodingPolicy(fooDir, StripedFileTestUtil.getDefaultECPolicy().getName());
    // write file to fooDir
    final String barFile = "/foo/bar";
    long fileLen = 20 * defaultStripedBlockSize;
    DFSTestUtil.createFile(cluster.getFileSystem(), new Path(barFile), fileLen, (short) 3, 0);
    // verify storage types and locations: ONE_SSD must be ignored for the
    // striped file, so every block storage should be DISK (the default).
    LocatedBlocks locatedBlocks = client.getBlockLocations(barFile, 0, fileLen);
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
      for (StorageType type : lb.getStorageTypes()) {
        Assert.assertEquals(StorageType.DISK, type);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
End of aggregated ClientProtocol usage examples.