use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class TestApplicationPriorityACLs method submitAppToRMWithInValidAcl.
private void submitAppToRMWithInValidAcl(String submitter,
    ApplicationSubmissionContext appSubmissionContext)
    throws YarnException, IOException, InterruptedException {
  ApplicationClientProtocol submitterClient = getRMClientForUser(submitter);
  SubmitApplicationRequest submitRequest =
      SubmitApplicationRequest.newInstance(appSubmissionContext);
  try {
    submitterClient.submitApplication(submitRequest);
  } catch (YarnException ex) {
    // The RM rejects the submission; the server-side error crosses the
    // RPC boundary wrapped in a RemoteException.
    Assert.assertTrue(ex.getCause() instanceof RemoteException);
  }
}
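A stricter variant of this test would also fail when no exception is thrown at all, and pin down the server-side class. A minimal sketch; the expected AccessControlException is our assumption about what the RM raises for an ACL violation, not something the test above asserts:

try {
  submitterClient.submitApplication(submitRequest);
  Assert.fail("Submission by " + submitter + " should have been rejected");
} catch (YarnException ex) {
  RemoteException re = (RemoteException) ex.getCause();
  // RemoteException.getClassName() reports the exception class that was
  // raised on the server side.
  Assert.assertEquals("org.apache.hadoop.security.AccessControlException",
      re.getClassName());
}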
use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.
the class RpcRetryingCallerImpl method translateException.
/**
 * Translate a throwable into the underlying exception, unwrapping
 * UndeclaredThrowableException, RemoteException and ServiceException
 * wrappers along the way.
 * @param t the throwable to analyze
 * @return the translated exception, if it is not a DoNotRetryIOException
 * @throws DoNotRetryIOException if one is found, it is thrown instead of translated
 */
static Throwable translateException(Throwable t) throws DoNotRetryIOException {
  if (t instanceof UndeclaredThrowableException) {
    if (t.getCause() != null) {
      t = t.getCause();
    }
  }
  if (t instanceof RemoteException) {
    t = ((RemoteException) t).unwrapRemoteException();
  }
  if (t instanceof LinkageError) {
    throw new DoNotRetryIOException(t);
  }
  if (t instanceof ServiceException) {
    ServiceException se = (ServiceException) t;
    Throwable cause = se.getCause();
    if (cause instanceof DoNotRetryIOException) {
      throw (DoNotRetryIOException) cause;
    }
    // Don't let ServiceException out; it's RPC-specific.
    t = cause;
    // t could be a RemoteException, so go around again.
    t = translateException(t);
  } else if (t instanceof DoNotRetryIOException) {
    throw (DoNotRetryIOException) t;
  }
  return t;
}
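For context, here is how a retry loop would typically use translateException. This is an illustrative sketch, not the actual RpcRetryingCallerImpl API: the names callWithRetries and maxAttempts are ours, and backoff and exception bookkeeping are omitted.

import java.io.IOException;
import java.util.concurrent.Callable;

// Illustrative retry loop (simplified): translateException strips the RPC
// wrappers and throws DoNotRetryIOException straight through, so only
// retryable failures reach the next iteration.
static <T> T callWithRetries(Callable<T> call, int maxAttempts) throws IOException {
  Throwable last = null;
  for (int attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return call.call();
    } catch (Exception e) {
      last = translateException(e);
    }
  }
  throw new IOException("Failed after " + maxAttempts + " attempts", last);
}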
use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.
the class HRegionServer method createRegionServerStatusStub.
/**
* Get the current master from ZooKeeper and open the RPC connection to it. To get a fresh
* connection, the current rssStub must be null. Method will block until a master is available.
* You can break from this block by requesting the server stop.
* @param refresh If true then master address will be read from ZK, otherwise use cached data
* @return master + port, or null if server has been stopped
*/
@VisibleForTesting
protected synchronized ServerName createRegionServerStatusStub(boolean refresh) {
  if (rssStub != null) {
    return masterAddressTracker.getMasterAddress();
  }
  ServerName sn = null;
  long previousLogTime = 0;
  RegionServerStatusService.BlockingInterface intRssStub = null;
  LockService.BlockingInterface intLockStub = null;
  boolean interrupted = false;
  try {
    while (keepLooping()) {
      sn = this.masterAddressTracker.getMasterAddress(refresh);
      if (sn == null) {
        if (!keepLooping()) {
          // Give up with no connection.
          LOG.debug("No master found and cluster is stopped; bailing out");
          return null;
        }
        if (System.currentTimeMillis() > (previousLogTime + 1000)) {
          LOG.debug("No master found; retry");
          previousLogTime = System.currentTimeMillis();
        }
        // Let's try to pull it from ZK directly.
        refresh = true;
        if (sleep(200)) {
          interrupted = true;
        }
        continue;
      }
      // If we are on the active master, use the shortcut.
      if (this instanceof HMaster && sn.equals(getServerName())) {
        intRssStub = ((HMaster) this).getMasterRpcServices();
        intLockStub = ((HMaster) this).getMasterRpcServices();
        break;
      }
      try {
        BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn,
            userProvider.getCurrent(), shortOperationTimeout);
        intRssStub = RegionServerStatusService.newBlockingStub(channel);
        intLockStub = LockService.newBlockingStub(channel);
        break;
      } catch (IOException e) {
        if (System.currentTimeMillis() > (previousLogTime + 1000)) {
          e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
          if (e instanceof ServerNotRunningYetException) {
            LOG.info("Master isn't available yet, retrying");
          } else {
            LOG.warn("Unable to connect to master. Retrying. Error was:", e);
          }
          previousLogTime = System.currentTimeMillis();
        }
        if (sleep(200)) {
          interrupted = true;
        }
      }
    }
  } finally {
    if (interrupted) {
      Thread.currentThread().interrupt();
    }
  }
  this.rssStub = intRssStub;
  this.lockStub = intLockStub;
  return sn;
}
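The catch block above shows a common Hadoop IPC idiom: unwrap the RemoteException before classifying the failure. Extracted as a standalone helper for illustration (the name isMasterStillStarting is ours, not HBase's):

import java.io.IOException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.ipc.RemoteException;

// Sketch: decide whether a connection failure is the benign "master is
// still starting" case, after stripping the RemoteException wrapper.
static boolean isMasterStillStarting(IOException e) {
  IOException unwrapped = e instanceof RemoteException
      ? ((RemoteException) e).unwrapRemoteException()
      : e;
  return unwrapped instanceof ServerNotRunningYetException;
}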
use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.
the class MemStoreFlusher method flushRegion.
/**
 * Flush a region.
 * @param region Region to flush.
 * @param emergencyFlush Set if we are being force flushed. If true the region
 * needs to be removed from the flush queue; if false we were called from the
 * main flusher run loop, which already removed the entry by polling the
 * flush queue.
 * @param forceFlushAllStores whether we want to flush all stores.
 * @return true if the region was successfully flushed, false otherwise. If
 * false, there will be accompanying log messages explaining why the region
 * was not flushed.
 */
private boolean flushRegion(final Region region, final boolean emergencyFlush,
    boolean forceFlushAllStores) {
  synchronized (this.regionsInQueue) {
    FlushRegionEntry fqe = this.regionsInQueue.remove(region);
    if (fqe != null && emergencyFlush) {
      // Need to remove the region from the delay queue. When NOT an
      // emergencyFlush, the item was already removed via flushQueue.poll.
      flushQueue.remove(fqe);
    }
  }
  lock.readLock().lock();
  try {
    notifyFlushRequest(region, emergencyFlush);
    FlushResult flushResult = region.flush(forceFlushAllStores);
    boolean shouldCompact = flushResult.isCompactionNeeded();
    // We just want to check the size.
    boolean shouldSplit = ((HRegion) region).checkSplit() != null;
    if (shouldSplit) {
      this.server.compactSplitThread.requestSplit(region);
    } else if (shouldCompact) {
      server.compactSplitThread.requestSystemCompaction(region,
          Thread.currentThread().getName());
    }
  } catch (DroppedSnapshotException ex) {
    // Cache flush can fail in a few places. If it fails in a critical
    // section, we get a DroppedSnapshotException and a replay of the WAL
    // is required. Currently the only way to do this is a restart of
    // the server. Abort because hdfs is probably bad (HBASE-644 is a case
    // where hdfs was bad but passed the hdfs check).
    server.abort("Replay of WAL required. Forcing server shutdown", ex);
    return false;
  } catch (IOException ex) {
    // Unwrap so the log shows the real server-side exception class.
    ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
    LOG.error("Cache flush failed" + (region != null
        ? (" for region " + Bytes.toStringBinary(region.getRegionInfo().getRegionName()))
        : ""), ex);
    if (!server.checkFileSystem()) {
      return false;
    }
  } finally {
    lock.readLock().unlock();
    wakeUpIfBlocking();
  }
  return true;
}
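The two catch blocks encode a small recovery policy: a DroppedSnapshotException is unrecoverable in-process (only a WAL replay, i.e. a server restart, can help), while any other IOException is survivable as long as the filesystem still checks out. A hedged sketch of that policy as a standalone decision, with names of our own invention (FlushFailureAction, classifyFlushFailure); note the real code catches DroppedSnapshotException directly, since region.flush throws it locally rather than over RPC:

import java.io.IOException;
import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.ipc.RemoteException;

// Sketch of the recovery policy in the catch blocks above.
enum FlushFailureAction { ABORT_SERVER, CHECK_FILESYSTEM }

static FlushFailureAction classifyFlushFailure(IOException ex) {
  // Unwrap first so the decision (and any logging) sees the real
  // server-side exception class, not the RemoteException wrapper.
  IOException cause = ex instanceof RemoteException
      ? ((RemoteException) ex).unwrapRemoteException()
      : ex;
  return cause instanceof DroppedSnapshotException
      ? FlushFailureAction.ABORT_SERVER
      : FlushFailureAction.CHECK_FILESYSTEM;
}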
use of org.apache.hadoop.ipc.RemoteException in project storm by apache.
the class TestHdfsSemantics method testAppendSemantics.
@Test
public void testAppendSemantics() throws Exception {
  // 1 - try to append to an open file
  Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
  FSDataOutputStream os1 = fs.create(file1, false);
  try {
    // should fail
    fs.append(file1);
    Assert.fail("Append did not throw an exception");
  } catch (RemoteException e) {
    // expecting AlreadyBeingCreatedException inside RemoteException
    Assert.assertEquals(AlreadyBeingCreatedException.class,
        e.unwrapRemoteException().getClass());
  }
  // 2 - try to append to a closed file
  os1.close();
  // should pass
  FSDataOutputStream os2 = fs.append(file1);
  os2.close();
}
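The AlreadyBeingCreatedException above means another writer still holds the file's lease. When that writer is genuinely gone (a crashed client, rather than the open stream in this test), HDFS lease recovery can release it. A minimal sketch, assuming the FileSystem is actually a DistributedFileSystem:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch: ask the NameNode to recover the lease on a file left open by a
// dead writer. Returns true once the lease is released and the file is
// closed; callers typically poll until it does.
static boolean tryRecoverLease(DistributedFileSystem dfs, Path file) throws IOException {
  return dfs.recoverLease(file);
}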