use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class DFSClient method getXAttr.
public byte[] getXAttr(String src, String name) throws IOException {
  checkOpen();
  try (TraceScope ignored = newPathTraceScope("getXAttr", src)) {
    final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
    final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
    return XAttrHelper.getFirstXAttrValue(result);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        FileNotFoundException.class, UnresolvedPathException.class);
  }
}
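A minimal caller-side sketch of how the unwrapped exceptions surface to application code. The dfsClient variable, the path, and the xattr name below are illustrative assumptions, not part of the snippet above.

import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.security.AccessControlException;

// Hypothetical caller; dfsClient, the path, and the xattr name are assumptions.
void printXAttr(DFSClient dfsClient) throws IOException {
  try {
    byte[] value = dfsClient.getXAttr("/user/alice/data.txt", "user.checksum");
    System.out.println(new String(value, StandardCharsets.UTF_8));
  } catch (AccessControlException ace) {
    // Already unwrapped from the RemoteException inside getXAttr.
    System.err.println("Permission denied: " + ace.getMessage());
  } catch (FileNotFoundException fnfe) {
    System.err.println("No such file: " + fnfe.getMessage());
  }
}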
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class DFSInputStream method readBlockLength.
/** Read the block length from one of the datanodes. */
private long readBlockLength(LocatedBlock locatedblock) throws IOException {
  assert locatedblock != null : "LocatedBlock cannot be null";
  int replicaNotFoundCount = locatedblock.getLocations().length;
  final DfsClientConf conf = dfsClient.getConf();
  final int timeout = conf.getSocketTimeout();
  LinkedList<DatanodeInfo> nodeList =
      new LinkedList<DatanodeInfo>(Arrays.asList(locatedblock.getLocations()));
  LinkedList<DatanodeInfo> retryList = new LinkedList<DatanodeInfo>();
  boolean isRetry = false;
  StopWatch sw = new StopWatch();
  while (nodeList.size() > 0) {
    DatanodeInfo datanode = nodeList.pop();
    ClientDatanodeProtocol cdp = null;
    try {
      cdp = DFSUtilClient.createClientDatanodeProtocolProxy(datanode,
          dfsClient.getConfiguration(), timeout,
          conf.isConnectToDnViaHostname(), locatedblock);
      final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
      if (n >= 0) {
        return n;
      }
    } catch (IOException ioe) {
      checkInterrupted(ioe);
      if (ioe instanceof RemoteException) {
        if (((RemoteException) ioe).unwrapRemoteException()
            instanceof ReplicaNotFoundException) {
          // The replica is not on this DN. We will treat it as 0 length
          // if no one actually has a replica.
          replicaNotFoundCount--;
        } else if (((RemoteException) ioe).unwrapRemoteException()
            instanceof RetriableException) {
          // Add to the list to be retried if necessary.
          retryList.add(datanode);
        }
      }
      DFSClient.LOG.debug("Failed to getReplicaVisibleLength from datanode {}"
          + " for block {}", datanode, locatedblock.getBlock(), ioe);
    } finally {
      if (cdp != null) {
        RPC.stopProxy(cdp);
      }
    }
    // Ran out of nodes, but there are retriable nodes.
    if (nodeList.size() == 0 && retryList.size() > 0) {
      nodeList.addAll(retryList);
      retryList.clear();
      isRetry = true;
    }
    if (isRetry) {
      // Start the stopwatch if it is not already running.
      if (!sw.isRunning()) {
        sw.start();
      }
      try {
        // Delay between retries.
        Thread.sleep(500);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new InterruptedIOException("Interrupted while getting the length.");
      }
    }
    // See if we ran out of retry time.
    if (sw.isRunning() && sw.now(TimeUnit.MILLISECONDS) > timeout) {
      break;
    }
  }
  // Every datanode reported ReplicaNotFoundException: treat the block as
  // zero length. Otherwise some other error occurred on a datanode that
  // holds the replica, and that error should be reported instead.
  if (replicaNotFoundCount == 0) {
    return 0;
  }
  throw new IOException("Cannot obtain block length for " + locatedblock);
}
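The unwrap-and-classify pattern in the catch block above can be isolated into a small helper. This is a hedged sketch: the class, enum, and method names are illustrative assumptions, not Hadoop API, and the imports assume the usual Hadoop package layout.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;

// Hypothetical helper; all names here are assumptions for illustration.
class RpcFailureClassifier {
  enum RpcFailure { REPLICA_NOT_FOUND, RETRIABLE, OTHER }

  static RpcFailure classify(IOException ioe) {
    if (ioe instanceof RemoteException) {
      IOException unwrapped = ((RemoteException) ioe).unwrapRemoteException();
      if (unwrapped instanceof ReplicaNotFoundException) {
        return RpcFailure.REPLICA_NOT_FOUND; // candidate for zero-length handling
      }
      if (unwrapped instanceof RetriableException) {
        return RpcFailure.RETRIABLE; // the datanode asked the client to retry
      }
    }
    return RpcFailure.OTHER;
  }
}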
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class NameNodeConnector method checkAndMarkRunning.
/**
 * To ensure that no more than one instance is running in an HDFS cluster,
 * the instance creates a file in HDFS, writes the hostname of the machine
 * it is running on into that file, and keeps the file open until it exits.
 *
 * This prevents a second instance from running, because it cannot create
 * the file while the first instance holds it open.
 *
 * This method checks whether an instance is already running and, if not,
 * marks this one as running. Note that this is an atomic operation.
 *
 * @return null if there is a running instance;
 *         otherwise, the output stream to the newly created file.
 */
private OutputStream checkAndMarkRunning() throws IOException {
  try {
    if (fs.exists(idPath)) {
      // Try appending to it so that it will fail fast if another balancer
      // is running.
      IOUtils.closeStream(fs.append(idPath));
      fs.delete(idPath, true);
    }
    final FSDataOutputStream fsout = fs.create(idPath, false);
    // Mark the balancer idPath to be deleted during filesystem closure.
    fs.deleteOnExit(idPath);
    if (write2IdFile) {
      fsout.writeBytes(InetAddress.getLocalHost().getHostName());
      fsout.hflush();
    }
    return fsout;
  } catch (RemoteException e) {
    if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
      return null;
    } else {
      throw e;
    }
  }
}
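A usage sketch of the lock-file idea described in the javadoc. Everything outside checkAndMarkRunning(), including the surrounding method and runBalancingWork(), is an illustrative assumption rather than NameNodeConnector code.

import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.io.IOUtils;

// Hypothetical caller inside the same class; runBalancingWork() is an assumption.
void runExclusively() throws IOException {
  OutputStream lock = checkAndMarkRunning();
  if (lock == null) {
    // Another instance already holds the id file open.
    throw new IOException("Another instance is already running");
  }
  try {
    runBalancingWork();
  } finally {
    // Closing the stream releases the lease; the id file itself is removed
    // by fs.deleteOnExit(idPath) when the FileSystem is closed.
    IOUtils.closeStream(lock);
  }
}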
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class DFSAdmin method allowSnapshot.
/**
 * Allow snapshot on a directory.
 * Usage: hdfs dfsadmin -allowSnapshot snapshotDir
 * @param argv List of command line parameters.
 * @exception IOException
 */
public void allowSnapshot(String[] argv) throws IOException {
  Path p = new Path(argv[1]);
  final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
  try {
    dfs.allowSnapshot(p);
  } catch (SnapshotException e) {
    throw new RemoteException(e.getClass().getName(), e.getMessage());
  }
  System.out.println("Allowing snapshot on " + argv[1] + " succeeded");
}
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class DFSAdmin method disallowSnapshot.
/**
 * Disallow snapshot on a directory.
 * Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
 * @param argv List of command line parameters.
 * @exception IOException
 */
public void disallowSnapshot(String[] argv) throws IOException {
  Path p = new Path(argv[1]);
  final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
  try {
    dfs.disallowSnapshot(p);
  } catch (SnapshotException e) {
    throw new RemoteException(e.getClass().getName(), e.getMessage());
  }
  System.out.println("Disallowing snapshot on " + argv[1] + " succeeded");
}
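Because both commands rethrow the SnapshotException wrapped in a RemoteException, a caller can recover the original type with unwrapRemoteException(). A hedged sketch; the admin variable and the directory path are assumptions for illustration.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.ipc.RemoteException;

// Hypothetical caller; "admin" and the directory path are assumptions.
void enableSnapshots(DFSAdmin admin) throws IOException {
  try {
    admin.allowSnapshot(new String[] { "-allowSnapshot", "/data/projects" });
  } catch (RemoteException re) {
    // Recover the wrapped SnapshotException if that is what was thrown;
    // otherwise the RemoteException itself is returned.
    IOException unwrapped = re.unwrapRemoteException(SnapshotException.class);
    System.err.println("allowSnapshot failed: " + unwrapped.getMessage());
  }
}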