Use of org.apache.hadoop.ipc.RemoteException in project hive by apache.
The class ClearDanglingScratchDir, method run().
@Override
public void run() {
  try {
    Path rootHDFSDirPath = new Path(rootHDFSDir);
    FileSystem fs = FileSystem.get(rootHDFSDirPath.toUri(), conf);
    FileStatus[] userHDFSDirList = fs.listStatus(rootHDFSDirPath);
    List<Path> scratchDirToRemove = new ArrayList<Path>();
    for (FileStatus userHDFSDir : userHDFSDirList) {
      FileStatus[] scratchDirList = fs.listStatus(userHDFSDir.getPath());
      for (FileStatus scratchDir : scratchDirList) {
        Path lockFilePath = new Path(scratchDir.getPath(), SessionState.LOCK_FILE_NAME);
        if (!fs.exists(lockFilePath)) {
          String message = "Skipping " + scratchDir.getPath() + " since it does not contain "
              + SessionState.LOCK_FILE_NAME;
          if (verbose) {
            consoleMessage(message);
          }
          continue;
        }
        boolean removable = false;
        boolean inuse = false;
        try {
          IOUtils.closeStream(fs.append(lockFilePath));
          removable = true;
        } catch (RemoteException eAppend) {
          // The append fails if the lock file is currently held open by a writer.
          if (AlreadyBeingCreatedException.class.getName().equals(eAppend.getClassName())) {
            inuse = true;
          } else if (UnsupportedOperationException.class.getName().equals(eAppend.getClassName())) {
            // Append is not supported on this cluster; fall back to create.
            try {
              IOUtils.closeStream(fs.create(lockFilePath, false));
            } catch (RemoteException eCreate) {
              if (AlreadyBeingCreatedException.class.getName().equals(eCreate.getClassName())) {
                // If the file is held by a live writer, create throws AlreadyBeingCreatedException.
                inuse = true;
              } else {
                consoleMessage("Unexpected error: " + eCreate.getMessage());
              }
            } catch (FileAlreadyExistsException eCreateNormal) {
              // Otherwise create throws FileAlreadyExistsException: the lock file
              // exists but nobody holds it, so the file owner is dead.
              removable = true;
            }
          } else {
            consoleMessage("Unexpected error: " + eAppend.getMessage());
          }
        }
        if (inuse) {
          // Cannot open the lock file for writing; it must be held by a live process.
          String message = scratchDir.getPath() + " is being used by live process";
          if (verbose) {
            consoleMessage(message);
          }
        }
        if (removable) {
          scratchDirToRemove.add(scratchDir.getPath());
        }
      }
    }
    if (scratchDirToRemove.size() == 0) {
      consoleMessage("Cannot find any scratch directory to clear");
      return;
    }
    consoleMessage("Removing " + scratchDirToRemove.size() + " scratch directories");
    for (Path scratchDir : scratchDirToRemove) {
      if (dryRun) {
        System.out.println(scratchDir);
      } else {
        boolean succ = fs.delete(scratchDir, true);
        if (!succ) {
          consoleMessage("Cannot remove " + scratchDir);
        } else {
          String message = scratchDir + " removed";
          if (verbose) {
            consoleMessage(message);
          }
        }
      }
    }
  } catch (IOException e) {
    consoleMessage("Unexpected exception " + e.getMessage());
  }
}
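The essence of the probe above is that HDFS refuses to open a file for writing while another client holds its lease, and surfaces the refusal as a RemoteException whose getClassName() names the server-side AlreadyBeingCreatedException. A minimal standalone sketch of that liveness check follows; the LockFileProbe class and method name are invented for illustration.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;

public class LockFileProbe {
  /** Returns true if the lock file is still held open by a live writer. */
  public static boolean isHeldByLiveWriter(FileSystem fs, Path lockFile) throws IOException {
    try {
      // Appending succeeds only when no writer holds the file's lease.
      IOUtils.closeStream(fs.append(lockFile));
      return false;
    } catch (RemoteException e) {
      // A RemoteException carries the server-side class name as a string,
      // so compare names instead of using instanceof.
      if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
        return true;
      }
      throw e;
    }
  }
}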
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class TestDefaultRetryPolicy, method testWithWrappedRetriable().
/**
* Verify that the default retry policy correctly retries
* a RetriableException wrapped in a RemoteException when
* defaultRetryPolicyEnabled is enabled.
*
* @throws IOException
*/
@Test
public void testWithWrappedRetriable() throws Exception {
  Configuration conf = new Configuration();
  RetryPolicy policy = RetryUtils.getDefaultRetryPolicy(
      conf,
      "Test.No.Such.Key",
      // defaultRetryPolicyEnabled = true
      true,
      "Test.No.Such.Key",
      "10000,6",
      null);
  RetryPolicy.RetryAction action = policy.shouldRetry(
      new RemoteException(RetriableException.class.getName(), "Dummy exception"),
      0, 0, true);
  assertThat(action.action, is(RetryPolicy.RetryAction.RetryDecision.RETRY));
}
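For contrast, the same policy should give up on a RemoteException wrapping a class it does not recognize as retriable. The hedged companion test below is not part of the original suite; it assumes the default policy falls back to try-once-then-fail for unlisted remote class names, and that org.apache.hadoop.security.AccessControlException is imported alongside the existing test imports.

@Test
public void testWithWrappedNonRetriable() throws Exception {
  Configuration conf = new Configuration();
  RetryPolicy policy = RetryUtils.getDefaultRetryPolicy(
      conf,
      "Test.No.Such.Key",
      // defaultRetryPolicyEnabled = true
      true,
      "Test.No.Such.Key",
      "10000,6",
      null);
  // AccessControlException is neither a RetriableException nor a configured
  // retriable remote class name, so the policy is expected to fail immediately.
  RetryPolicy.RetryAction action = policy.shouldRetry(
      new RemoteException(AccessControlException.class.getName(), "Dummy exception"),
      0, 0, true);
  assertThat(action.action, is(RetryPolicy.RetryAction.RetryDecision.FAIL));
}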
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class BlockReaderFactory, method getLegacyBlockReaderLocal().
/**
* Get {@link BlockReaderLocalLegacy} for short circuited local reads.
* This block reader implements the path-based style of local reads
* first introduced in HDFS-2246.
*/
private BlockReader getLegacyBlockReaderLocal() throws IOException {
  LOG.trace("{}: trying to construct BlockReaderLocalLegacy", this);
  if (!DFSUtilClient.isLocalAddress(inetSocketAddress)) {
    LOG.trace("{}: can't construct BlockReaderLocalLegacy because the address {} is not local",
        this, inetSocketAddress);
    return null;
  }
  if (clientContext.getDisableLegacyBlockReaderLocal()) {
    PerformanceAdvisory.LOG.debug(
        "{}: can't construct BlockReaderLocalLegacy because disableLegacyBlockReaderLocal is set.",
        this);
    return null;
  }
  IOException ioe;
  try {
    return BlockReaderLocalLegacy.newBlockReader(conf, userGroupInformation, configuration,
        fileName, block, token, datanode, startOffset, length, storageType, tracer);
  } catch (RemoteException remoteException) {
    ioe = remoteException.unwrapRemoteException(InvalidToken.class, AccessControlException.class);
  } catch (IOException e) {
    ioe = e;
  }
  if ((!(ioe instanceof AccessControlException)) && isSecurityException(ioe)) {
    // Security exceptions other than AccessControlException are rethrown as-is;
    // an AccessControlException falls through to the code below,
    // which requires us to disable legacy SCR.
    throw ioe;
  }
  LOG.warn(this + ": error creating legacy BlockReaderLocal. Disabling legacy local reads.", ioe);
  clientContext.setDisableLegacyBlockReaderLocal();
  return null;
}
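unwrapRemoteException reconstructs a typed client-side exception when the server-side class name matches one of the lookup classes, and returns the original RemoteException untouched otherwise, which is why the code above can test the result with instanceof. A small self-contained sketch of that behavior; the UnwrapDemo class name is invented.

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;

public class UnwrapDemo {
  public static void main(String[] args) {
    RemoteException re =
        new RemoteException(AccessControlException.class.getName(), "permission denied");
    // Unwrapping succeeds because the wrapped class name is in the lookup list.
    IOException unwrapped =
        re.unwrapRemoteException(InvalidToken.class, AccessControlException.class);
    // Prints "true": the typed exception was reconstructed on the client side.
    System.out.println(unwrapped instanceof AccessControlException);
  }
}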
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class WebHdfsFileSystem, method validateResponse().
private static Map<?, ?> validateResponse(final HttpOpParam.Op op, final HttpURLConnection conn,
    boolean unwrapException) throws IOException {
  final int code = conn.getResponseCode();
  // The server is demanding an authentication scheme we don't support.
  if (code == HttpURLConnection.HTTP_UNAUTHORIZED) {
    // Match the hdfs/rpc exception type.
    throw new AccessControlException(conn.getResponseMessage());
  }
  if (code != op.getExpectedHttpResponseCode()) {
    final Map<?, ?> m;
    try {
      m = jsonParse(conn, true);
    } catch (Exception e) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage(), e);
    }
    if (m == null) {
      throw new IOException("Unexpected HTTP response: code=" + code + " != "
          + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
          + ", message=" + conn.getResponseMessage());
    } else if (m.get(RemoteException.class.getSimpleName()) == null) {
      return m;
    }
    IOException re = JsonUtilClient.toRemoteException(m);
    // Check if the exception is due to communication with a standby NameNode.
    if (re.getMessage() != null
        && re.getMessage().endsWith(StandbyException.class.getSimpleName())) {
      LOG.trace("Detected StandbyException", re);
      throw new IOException(re);
    }
    // Extract UGI-related failures and unwrap InvalidToken, so the client is able
    // to re-fetch a token if either end reports the token as expired.
    if (re.getMessage() != null
        && re.getMessage().startsWith(SecurityUtil.FAILED_TO_GET_UGI_MSG_HEADER)) {
      String[] parts = re.getMessage().split(":\\s+", 3);
      re = new RemoteException(parts[1], parts[2]);
      re = ((RemoteException) re).unwrapRemoteException(InvalidToken.class);
    }
    throw unwrapException ? toIOException(re) : re;
  }
  return null;
}
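The token-refresh branch performs string surgery on a flattened message of the form "header: ClassName: message", rebuilding a nested RemoteException and unwrapping it to a typed InvalidToken. A hedged standalone sketch of the same steps; the header text is simulated rather than read from SecurityUtil, and the RewrapDemo class name is invented.

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;

public class RewrapDemo {
  public static void main(String[] args) {
    // Simulated mangled server message: "<header>: <class>: <message>".
    String msg = "Failed to obtain user group information: "
        + InvalidToken.class.getName() + ": token is expired";
    String[] parts = msg.split(":\\s+", 3);
    // parts[1] is the real exception class, parts[2] the real message.
    IOException re = new RemoteException(parts[1], parts[2]);
    re = ((RemoteException) re).unwrapRemoteException(InvalidToken.class);
    // Prints "true": the InvalidToken was recovered from the flattened message.
    System.out.println(re instanceof InvalidToken);
  }
}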
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
The class DFSOutputStream, method addBlock().
static LocatedBlock addBlock(DatanodeInfo[] excludedNodes, DFSClient dfsClient, String src,
    ExtendedBlock prevBlock, long fileId, String[] favoredNodes,
    EnumSet<AddBlockFlag> allocFlags) throws IOException {
  final DfsClientConf conf = dfsClient.getConf();
  int retries = conf.getNumBlockWriteLocateFollowingRetry();
  long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
  long localstart = Time.monotonicNow();
  while (true) {
    try {
      return dfsClient.namenode.addBlock(src, dfsClient.clientName, prevBlock, excludedNodes,
          fileId, favoredNodes, allocFlags);
    } catch (RemoteException e) {
      IOException ue = e.unwrapRemoteException(FileNotFoundException.class,
          AccessControlException.class, NSQuotaExceededException.class,
          DSQuotaExceededException.class, QuotaByStorageTypeExceededException.class,
          UnresolvedPathException.class);
      if (ue != e) {
        // no need to retry these exceptions
        throw ue;
      }
      if (NotReplicatedYetException.class.getName().equals(e.getClassName())) {
        if (retries == 0) {
          throw e;
        } else {
          --retries;
          LOG.info("Exception while adding a block", e);
          long elapsed = Time.monotonicNow() - localstart;
          if (elapsed > 5000) {
            LOG.info("Waiting for replication for " + (elapsed / 1000) + " seconds");
          }
          try {
            LOG.warn("NotReplicatedYetException sleeping " + src + " retries left " + retries);
            Thread.sleep(sleeptime);
            sleeptime *= 2;
          } catch (InterruptedException ie) {
            LOG.warn("Caught exception", ie);
          }
        }
      } else {
        throw e;
      }
    }
  }
}
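Stripped of the HDFS specifics, the loop implements bounded retry with exponential backoff, keyed on the server-side class name carried by the RemoteException. A generic sketch of that pattern follows; the RpcCall interface and all names are invented for illustration, standing in for calls like dfsClient.namenode.addBlock(...).

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

public class RemoteRetry {
  /** Hypothetical call abstraction for any IPC operation that may need retrying. */
  public interface RpcCall<T> {
    T run() throws IOException;
  }

  public static <T> T retryOn(String retriableClassName, int retries, long sleepMs,
      RpcCall<T> call) throws IOException {
    while (true) {
      try {
        return call.run();
      } catch (RemoteException e) {
        // Only the named server-side exception is worth retrying; everything
        // else, and the final attempt, propagates to the caller.
        if (!retriableClassName.equals(e.getClassName()) || retries-- == 0) {
          throw e;
        }
        try {
          Thread.sleep(sleepMs);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw e;
        }
        // Exponential backoff, mirroring the sleeptime *= 2 in addBlock above.
        sleepMs *= 2;
      }
    }
  }
}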