Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class DFSAdmin, method run().
/**
* @param argv The parameters passed to this program.
* @exception Exception if the filesystem does not exist.
* @return 0 on success, non zero on error.
*/
@Override
public int run(String[] argv) throws Exception {
if (argv.length < 1) {
printUsage("");
return -1;
}
int exitCode = -1;
int i = 0;
String cmd = argv[i++];
// verify that we have enough command line parameters
if ("-safemode".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-report".equals(cmd)) {
if (argv.length < 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-saveNamespace".equals(cmd)) {
if (argv.length != 1 && argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-rollEdits".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-restoreFailedStorage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNodes".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-finalizeUpgrade".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if (RollingUpgradeCommand.matches(cmd)) {
if (argv.length < 1 || argv.length > 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-metasave".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshServiceAcl".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refresh".equals(cmd)) {
if (argv.length < 3) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-printTopology".equals(cmd)) {
if (argv.length != 1) {
printUsage(cmd);
return exitCode;
}
} else if ("-refreshNamenodes".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-reconfig".equals(cmd)) {
if (argv.length != 4) {
printUsage(cmd);
return exitCode;
}
} else if ("-deleteBlockPool".equals(cmd)) {
if ((argv.length != 3) && (argv.length != 4)) {
printUsage(cmd);
return exitCode;
}
} else if ("-setBalancerBandwidth".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-getBalancerBandwidth".equalsIgnoreCase(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-fetchImage".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-shutdownDatanode".equals(cmd)) {
if ((argv.length != 2) && (argv.length != 3)) {
printUsage(cmd);
return exitCode;
}
} else if ("-getDatanodeInfo".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-triggerBlockReport".equals(cmd)) {
if (argv.length < 1) {
printUsage(cmd);
return exitCode;
}
}
// initialize DFSAdmin
try {
init();
} catch (RPC.VersionMismatch v) {
System.err.println("Version Mismatch between client and server" + "... command aborted.");
return exitCode;
} catch (IOException e) {
System.err.println("Bad connection to DFS... command aborted.");
return exitCode;
}
Exception debugException = null;
exitCode = 0;
try {
if ("-report".equals(cmd)) {
report(argv, i);
} else if ("-safemode".equals(cmd)) {
setSafeMode(argv, i);
} else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
allowSnapshot(argv);
} else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
disallowSnapshot(argv);
} else if ("-saveNamespace".equals(cmd)) {
exitCode = saveNamespace(argv);
} else if ("-rollEdits".equals(cmd)) {
exitCode = rollEdits();
} else if ("-restoreFailedStorage".equals(cmd)) {
exitCode = restoreFailedStorage(argv[i]);
} else if ("-refreshNodes".equals(cmd)) {
exitCode = refreshNodes();
} else if ("-finalizeUpgrade".equals(cmd)) {
exitCode = finalizeUpgrade();
} else if (RollingUpgradeCommand.matches(cmd)) {
exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {
exitCode = new ClearQuotaCommand(argv, i, getConf()).runAll();
} else if (SetQuotaCommand.matches(cmd)) {
exitCode = new SetQuotaCommand(argv, i, getConf()).runAll();
} else if (ClearSpaceQuotaCommand.matches(cmd)) {
exitCode = new ClearSpaceQuotaCommand(argv, i, getConf()).runAll();
} else if (SetSpaceQuotaCommand.matches(cmd)) {
exitCode = new SetSpaceQuotaCommand(argv, i, getConf()).runAll();
} else if ("-refreshServiceAcl".equals(cmd)) {
exitCode = refreshServiceAcl();
} else if ("-refreshUserToGroupsMappings".equals(cmd)) {
exitCode = refreshUserToGroupsMappings();
} else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
exitCode = refreshSuperUserGroupsConfiguration();
} else if ("-refreshCallQueue".equals(cmd)) {
exitCode = refreshCallQueue();
} else if ("-refresh".equals(cmd)) {
exitCode = genericRefresh(argv, i);
} else if ("-printTopology".equals(cmd)) {
exitCode = printTopology();
} else if ("-refreshNamenodes".equals(cmd)) {
exitCode = refreshNamenodes(argv, i);
} else if ("-deleteBlockPool".equals(cmd)) {
exitCode = deleteBlockPool(argv, i);
} else if ("-setBalancerBandwidth".equals(cmd)) {
exitCode = setBalancerBandwidth(argv, i);
} else if ("-getBalancerBandwidth".equals(cmd)) {
exitCode = getBalancerBandwidth(argv, i);
} else if ("-fetchImage".equals(cmd)) {
exitCode = fetchImage(argv, i);
} else if ("-shutdownDatanode".equals(cmd)) {
exitCode = shutdownDatanode(argv, i);
} else if ("-evictWriters".equals(cmd)) {
exitCode = evictWriters(argv, i);
} else if ("-getDatanodeInfo".equals(cmd)) {
exitCode = getDatanodeInfo(argv, i);
} else if ("-reconfig".equals(cmd)) {
exitCode = reconfig(argv, i);
} else if ("-triggerBlockReport".equals(cmd)) {
exitCode = triggerBlockReport(argv);
} else if ("-help".equals(cmd)) {
if (i < argv.length) {
printHelp(argv[i]);
} else {
printHelp("");
}
} else {
exitCode = -1;
System.err.println(cmd.substring(1) + ": Unknown command");
printUsage("");
}
} catch (IllegalArgumentException arge) {
debugException = arge;
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd);
} catch (RemoteException e) {
// This is an error returned by the hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
debugException = e;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
System.err.println(cmd.substring(1) + ": " + content[0]);
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": " + ex.getLocalizedMessage());
debugException = ex;
}
} catch (Exception e) {
exitCode = -1;
debugException = e;
System.err.println(cmd.substring(1) + ": " + e.getLocalizedMessage());
}
if (LOG.isDebugEnabled()) {
LOG.debug("Exception encountered:", debugException);
}
return exitCode;
}
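A minimal, hypothetical sketch (class and method names are not part of DFSAdmin) of the message-trimming idea used in the RemoteException catch block above: the server-side stack trace travels inside the message text, so printing only the first line keeps the CLI output readable.
import org.apache.hadoop.ipc.RemoteException;

public class FirstLineDemo {
  // Returns only the first line of a RemoteException's message, falling back
  // to the remote class name when no message text is available.
  static String firstLine(RemoteException e) {
    String msg = e.getLocalizedMessage();
    return (msg == null || msg.isEmpty()) ? e.getClassName() : msg.split("\n")[0];
  }

  public static void main(String[] args) {
    RemoteException re = new RemoteException(
        "java.io.IOException", "boom\nat Foo.bar(Foo.java:1)");
    System.out.println(firstLine(re)); // prints: boom
  }
}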
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class ExceptionHandler, method toResponse().
@Override
public Response toResponse(Exception e) {
if (LOG.isTraceEnabled()) {
LOG.trace("GOT EXCEPTION", e);
}
//clear content type
response.setContentType(null);
//Convert exception
if (e instanceof ParamException) {
final ParamException paramexception = (ParamException) e;
e = new IllegalArgumentException("Invalid value for webhdfs parameter \"" + paramexception.getParameterName() + "\": " + e.getCause().getMessage(), e);
}
if (e instanceof ContainerException) {
e = toCause(e);
}
if (e instanceof RemoteException) {
e = ((RemoteException) e).unwrapRemoteException();
}
if (e instanceof SecurityException) {
e = toCause(e);
}
//Map response status
final Response.Status s;
if (e instanceof SecurityException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof AuthorizationException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof FileNotFoundException) {
s = Response.Status.NOT_FOUND;
} else if (e instanceof IOException) {
s = Response.Status.FORBIDDEN;
} else if (e instanceof UnsupportedOperationException) {
s = Response.Status.BAD_REQUEST;
} else if (e instanceof IllegalArgumentException) {
s = Response.Status.BAD_REQUEST;
} else {
LOG.warn("INTERNAL_SERVER_ERROR", e);
s = Response.Status.INTERNAL_SERVER_ERROR;
}
final String js = JsonUtil.toJsonString(e);
return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js).build();
}
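A short sketch (hypothetical class, not part of the handler above) of what unwrapRemoteException() does for the status mapping: it re-instantiates the server-side exception class locally when that class is an IOException with a String constructor, so the instanceof checks see the real type, such as FileNotFoundException mapping to NOT_FOUND.
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class UnwrapDemo {
  public static void main(String[] args) {
    // A RemoteException as the IPC layer would deliver it: the server-side
    // exception class is carried by name, not by type.
    RemoteException re = new RemoteException(
        FileNotFoundException.class.getName(), "File does not exist: /foo");
    // unwrapRemoteException() re-creates that class locally when possible;
    // otherwise it returns the RemoteException itself.
    IOException unwrapped = re.unwrapRemoteException();
    System.out.println(unwrapped instanceof FileNotFoundException); // true
  }
}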
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestDFSClientRetries, method testNotYetReplicatedErrors().
// more tests related to different failure cases can be added here.
/**
* Verify that client will correctly give up after the specified number
* of times trying to add a block
*/
@SuppressWarnings({ "serial", "unchecked" })
@Test
public void testNotYetReplicatedErrors() throws IOException {
final String exceptionMsg = "Nope, not replicated yet...";
// Allow one retry (total of two calls)
final int maxRetries = 1;
conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, maxRetries);
NamenodeProtocols mockNN = mock(NamenodeProtocols.class);
Answer<Object> answer = new ThrowsException(new IOException()) {
int retryCount = 0;
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
retryCount++;
System.out.println("addBlock has been called " + retryCount + " times");
// The first call is not a retry, so allow maxRetries + 1 calls in total
if (retryCount > maxRetries + 1) {
throw new IOException("Retried too many times: " + retryCount);
} else {
throw new RemoteException(NotReplicatedYetException.class.getName(), exceptionMsg);
}
}
};
when(mockNN.addBlock(anyString(), anyString(), any(ExtendedBlock.class), any(DatanodeInfo[].class), anyLong(), any(String[].class), Matchers.<EnumSet<AddBlockFlag>>any())).thenAnswer(answer);
Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0], 1010, 0, null, (byte) 0, null)).when(mockNN).create(anyString(), (FsPermission) anyObject(), anyString(), (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(), anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
// write one random byte
os.write(20);
try {
os.close();
} catch (Exception e) {
assertTrue("Retries are not being stopped correctly: " + e.getMessage(), e.getMessage().equals(exceptionMsg));
}
}
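A self-contained sketch of the stubbing pattern the test relies on, using a hypothetical protocol interface in place of NamenodeProtocols: a mocked RPC proxy throws a RemoteException that names the server-side exception class, just as the IPC layer would.
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class RemoteExceptionStubSketch {
  // Hypothetical protocol interface standing in for NamenodeProtocols.
  interface FakeProtocol {
    String addBlock(String src) throws IOException;
  }

  public static void main(String[] args) throws IOException {
    FakeProtocol proxy = mock(FakeProtocol.class);
    // The class name string stands in for NotReplicatedYetException.class.getName().
    when(proxy.addBlock("f")).thenThrow(new RemoteException(
        "org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException",
        "Nope, not replicated yet..."));
    try {
      proxy.addBlock("f");
    } catch (RemoteException re) {
      System.out.println(re.getClassName()); // the server-side class name
    }
  }
}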
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestDFSUpgrade, method testUpgrade().
/**
* This test attempts to upgrade the NameNode and DataNode under
* a number of valid and invalid conditions.
*/
@Test(timeout = 60000)
public void testUpgrade() throws Exception {
File[] baseDirs;
StorageInfo storageInfo = null;
for (int numDirs = 1; numDirs <= 2; numDirs++) {
conf = new HdfsConfiguration();
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
log("Normal NameNode upgrade", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
// make sure that rolling upgrade cannot be started
try {
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
fail();
} catch (RemoteException re) {
assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
LOG.info("The exception is expected.", re);
}
checkNameNode(nameNodeDirs, EXPECTED_TXID);
if (numDirs > 1)
TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("Normal DataNode upgrade", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode upgrade with existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("DataNode upgrade with existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode upgrade with future stored layout version in current", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
storageInfo = new StorageInfo(Integer.MIN_VALUE, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("DataNode upgrade with newer fsscTime in current", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION, UpgradeUtilities.getCurrentNamespaceID(cluster), UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE, NodeType.DATA_NODE);
UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
// Ensure the corresponding block pool fails to initialize
startBlockPoolShouldFail(StartupOption.REGULAR, UpgradeUtilities.getCurrentBlockPoolID(null));
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
UpgradeUtilities.createEmptyDirs(dataNodeDirs);
log("NameNode upgrade with no edits file", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
deleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with no image file", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
deleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with corrupt version file", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
for (File f : baseDirs) {
UpgradeUtilities.corruptFile(new File(f, "VERSION"), "layoutVersion".getBytes(Charsets.UTF_8), "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with old layout version in current", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1, UpgradeUtilities.getCurrentNamespaceID(null), UpgradeUtilities.getCurrentClusterID(null), UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode upgrade with future layout version in current", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
storageInfo = new StorageInfo(Integer.MIN_VALUE, UpgradeUtilities.getCurrentNamespaceID(null), UpgradeUtilities.getCurrentClusterID(null), UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
// end numDir loop
// One more check: normal NN upgrade with 4 directories, concurrent write
int numDirs = 4;
{
conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
log("Normal NameNode upgrade", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
cluster = createCluster();
// make sure that rolling upgrade cannot be started
try {
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
fail();
} catch (RemoteException re) {
assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
LOG.info("The exception is expected.", re);
}
checkNameNode(nameNodeDirs, EXPECTED_TXID);
TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
cluster.shutdown();
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
}
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestFileAppend, method testAppend2Twice().
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppend2Twice() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
final DistributedFileSystem fs1 = cluster.getFileSystem();
final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
try {
final Path p = new Path("/testAppendTwice/foo");
final int len = 1 << 16;
final byte[] fileContents = AppendTestUtil.initBuffer(len);
{
// create a new file with a full block.
FSDataOutputStream out = fs2.create(p, true, 4096, (short) 1, len);
out.write(fileContents, 0, len);
out.close();
}
//1st append does not add any data so that the last block remains full
//and the last block in INodeFileUnderConstruction is a BlockInfo
//but does not have a BlockUnderConstructionFeature.
((DistributedFileSystem) fs2).append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
// 2nd append should get AlreadyBeingCreatedException
fs1.append(p);
Assert.fail();
} catch (RemoteException re) {
AppendTestUtil.LOG.info("Got an exception:", re);
Assert.assertEquals(AlreadyBeingCreatedException.class.getName(), re.getClassName());
} finally {
fs2.close();
fs1.close();
cluster.shutdown();
}
}
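A hedged follow-up sketch (hypothetical class name, hand-built exception): instead of comparing getClassName() strings as the test above does, a caller can ask the lookup-type variant of unwrapRemoteException to re-create the specific exception, which only succeeds when the remote class name matches one of the supplied types.
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

public class UnwrapAssertionSketch {
  public static void main(String[] args) {
    RemoteException re = new RemoteException(
        AlreadyBeingCreatedException.class.getName(), "already being created");
    // Unwraps only when the remote class name matches a supplied lookup type;
    // otherwise the RemoteException itself is returned.
    IOException local = re.unwrapRemoteException(AlreadyBeingCreatedException.class);
    System.out.println(local instanceof AlreadyBeingCreatedException); // true
  }
}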