
Example 41 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

the class TestApplicationPriorityACLs method submitAppToRMWithInValidAcl.

private void submitAppToRMWithInValidAcl(String submitter, ApplicationSubmissionContext appSubmissionContext) throws YarnException, IOException, InterruptedException {
    ApplicationClientProtocol submitterClient = getRMClientForUser(submitter);
    SubmitApplicationRequest submitRequest = SubmitApplicationRequest.newInstance(appSubmissionContext);
    try {
        submitterClient.submitApplication(submitRequest);
        // Without an explicit fail(), the test would pass silently when the
        // submission is not rejected.
        Assert.fail("Application submission should fail for an invalid ACL");
    } catch (YarnException ex) {
        // The RM rejects the request server-side, so the client sees the
        // failure wrapped in a RemoteException.
        Assert.assertTrue(ex.getCause() instanceof RemoteException);
    }
}
Also used : RemoteException(org.apache.hadoop.ipc.RemoteException) ApplicationClientProtocol(org.apache.hadoop.yarn.api.ApplicationClientProtocol) SubmitApplicationRequest(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) YarnException(org.apache.hadoop.yarn.exceptions.YarnException)
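
For context: a RemoteException records the fully qualified class name of the server-side exception it wraps, which is what makes assertions like the one above possible. Below is a minimal sketch of probing that class name; the AccessControlException check and the helper name are assumptions for illustration, not part of the original test.

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

// Hypothetical helper, not part of TestApplicationPriorityACLs: check
// whether a RemoteException wraps a server-side AccessControlException.
static boolean wrapsAccessControl(RemoteException re) {
    // getClassName() returns the fully qualified name of the exception
    // that was thrown on the server side.
    return AccessControlException.class.getName().equals(re.getClassName());
}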

Example 42 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

the class RpcRetryingCallerImpl method translateException.

/**
   * Return the underlying exception, unwrapping RemoteException and
   * ServiceException wrappers; if a DoNotRetryIOException is found, it is
   * thrown instead of being returned.
   * @param t the throwable to analyze
   * @return the translated exception, if it is not a DoNotRetryIOException
   * @throws DoNotRetryIOException if we find one, we throw it instead of translating
   */
static Throwable translateException(Throwable t) throws DoNotRetryIOException {
    if (t instanceof UndeclaredThrowableException) {
        if (t.getCause() != null) {
            t = t.getCause();
        }
    }
    if (t instanceof RemoteException) {
        t = ((RemoteException) t).unwrapRemoteException();
    }
    if (t instanceof LinkageError) {
        throw new DoNotRetryIOException(t);
    }
    if (t instanceof ServiceException) {
        ServiceException se = (ServiceException) t;
        Throwable cause = se.getCause();
        if (cause instanceof DoNotRetryIOException) {
            throw (DoNotRetryIOException) cause;
        }
        // Don't let ServiceException out; it's RPC specific.
        t = cause;
        // t could be a RemoteException, so go around again; keep the result,
        // otherwise the recursive translation is discarded.
        t = translateException(t);
    } else if (t instanceof DoNotRetryIOException) {
        throw (DoNotRetryIOException) t;
    }
    return t;
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) UndeclaredThrowableException(java.lang.reflect.UndeclaredThrowableException) RemoteException(org.apache.hadoop.ipc.RemoteException)
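
As a hedged illustration of how a caller might drive this translation, here is a sketch of a retry loop around translateException; the loop, maxAttempts, and callable are illustrative names and do not reproduce the actual RpcRetryingCallerImpl retry logic.

import java.io.IOException;
import java.util.concurrent.Callable;

// Sketch only: retry until translateException throws DoNotRetryIOException
// or the attempt budget is exhausted.
static <T> T callWithRetries(Callable<T> callable, int maxAttempts) throws Exception {
    Throwable last = null;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        try {
            return callable.call();
        } catch (Exception e) {
            // translateException either throws DoNotRetryIOException,
            // aborting the retries, or returns the unwrapped cause.
            last = translateException(e);
        }
    }
    throw new IOException("Exhausted " + maxAttempts + " attempts", last);
}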

Example 43 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

the class HRegionServer method createRegionServerStatusStub.

/**
   * Get the current master from ZooKeeper and open an RPC connection to it. To get a fresh
   * connection, the current rssStub must be null. This method blocks until a master is
   * available; you can break out of the wait by requesting that the server stop.
   * @param refresh If true, the master address is re-read from ZK; otherwise cached data is used
   * @return master + port, or null if the server has been stopped
   */
@VisibleForTesting
protected synchronized ServerName createRegionServerStatusStub(boolean refresh) {
    if (rssStub != null) {
        return masterAddressTracker.getMasterAddress();
    }
    ServerName sn = null;
    long previousLogTime = 0;
    RegionServerStatusService.BlockingInterface intRssStub = null;
    LockService.BlockingInterface intLockStub = null;
    boolean interrupted = false;
    try {
        while (keepLooping()) {
            sn = this.masterAddressTracker.getMasterAddress(refresh);
            if (sn == null) {
                if (!keepLooping()) {
                    // give up with no connection.
                    LOG.debug("No master found and cluster is stopped; bailing out");
                    return null;
                }
                if (System.currentTimeMillis() > (previousLogTime + 1000)) {
                    LOG.debug("No master found; retry");
                    previousLogTime = System.currentTimeMillis();
                }
                // let's try to pull it from ZK directly
                refresh = true;
                if (sleep(200)) {
                    interrupted = true;
                }
                continue;
            }
            // If we are on the active master, use the shortcut
            if (this instanceof HMaster && sn.equals(getServerName())) {
                intRssStub = ((HMaster) this).getMasterRpcServices();
                intLockStub = ((HMaster) this).getMasterRpcServices();
                break;
            }
            try {
                BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn, userProvider.getCurrent(), shortOperationTimeout);
                intRssStub = RegionServerStatusService.newBlockingStub(channel);
                intLockStub = LockService.newBlockingStub(channel);
                break;
            } catch (IOException e) {
                if (System.currentTimeMillis() > (previousLogTime + 1000)) {
                    e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
                    if (e instanceof ServerNotRunningYetException) {
                        LOG.info("Master isn't available yet, retrying");
                    } else {
                        LOG.warn("Unable to connect to master. Retrying. Error was:", e);
                    }
                    previousLogTime = System.currentTimeMillis();
                }
                if (sleep(200)) {
                    interrupted = true;
                }
            }
        }
    } finally {
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
    this.rssStub = intRssStub;
    this.lockStub = intLockStub;
    return sn;
}
Also used : LockService(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService) ServerName(org.apache.hadoop.hbase.ServerName) HMaster(org.apache.hadoop.hbase.master.HMaster) RegionServerStatusService(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService) BlockingRpcChannel(org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
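
The loop above depends on a sleep helper that reports whether the wait was interrupted, so the finally block can restore the thread's interrupt status. A minimal sketch of such a helper, assuming that contract (the real HRegionServer method may differ):

// Sleep for the given number of milliseconds; return true if the sleep
// was cut short by an interrupt. The caller re-asserts the interrupt
// status later via Thread.currentThread().interrupt().
private static boolean sleep(long millis) {
    try {
        Thread.sleep(millis);
        return false;
    } catch (InterruptedException e) {
        return true;
    }
}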

Example 44 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project hbase by apache.

the class MemStoreFlusher method flushRegion.

/**
   * Flush a region.
   * @param region Region to flush.
   * @param emergencyFlush Set if we are being force flushed. If true, the region
   * needs to be removed from the flush queue. If false, we were called from the
   * main flusher run loop and got the entry to flush by polling the flush queue
   * (which removed it).
   * @param forceFlushAllStores whether we want to flush all stores.
   * @return true if the region was successfully flushed, false otherwise. If
   * false, there will be accompanying log messages explaining why the region was
   * not flushed.
   */
private boolean flushRegion(final Region region, final boolean emergencyFlush, boolean forceFlushAllStores) {
    synchronized (this.regionsInQueue) {
        FlushRegionEntry fqe = this.regionsInQueue.remove(region);
        // Use the start time of the FlushRegionEntry if available
        if (fqe != null && emergencyFlush) {
            // Need to remove region from the delay queue. When NOT an
            // emergencyFlush, the item was already removed via flushQueue.poll.
            flushQueue.remove(fqe);
        }
    }
    lock.readLock().lock();
    try {
        notifyFlushRequest(region, emergencyFlush);
        FlushResult flushResult = region.flush(forceFlushAllStores);
        boolean shouldCompact = flushResult.isCompactionNeeded();
        // We just want to check the size
        boolean shouldSplit = ((HRegion) region).checkSplit() != null;
        if (shouldSplit) {
            this.server.compactSplitThread.requestSplit(region);
        } else if (shouldCompact) {
            server.compactSplitThread.requestSystemCompaction(region, Thread.currentThread().getName());
        }
    } catch (DroppedSnapshotException ex) {
        // Cache flush can fail in a few places. If it fails in a critical
        // section, we get a DroppedSnapshotException and a replay of wal
        // is required. Currently the only way to do this is a restart of
        // the server. Abort because hdfs is probably bad (HBASE-644 is a case
        // where hdfs was bad but passed the hdfs check).
        server.abort("Replay of WAL required. Forcing server shutdown", ex);
        return false;
    } catch (IOException ex) {
        ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
        LOG.error("Cache flush failed" + (region != null ? (" for region " + Bytes.toStringBinary(region.getRegionInfo().getRegionName())) : ""), ex);
        if (!server.checkFileSystem()) {
            return false;
        }
    } finally {
        lock.readLock().unlock();
        wakeUpIfBlocking();
    }
    return true;
}
Also used : DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) IOException(java.io.IOException) FlushResult(org.apache.hadoop.hbase.regionserver.Region.FlushResult) RemoteException(org.apache.hadoop.ipc.RemoteException)
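
The unwrap idiom on the IOException path here is the same one used in createRegionServerStatusStub above. Pulling it into a small helper makes the intent explicit; this helper is an illustration, not part of MemStoreFlusher.

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

// Replace a RemoteException with the server-side IOException it wraps,
// leaving any other IOException untouched.
static IOException unwrapIfRemote(IOException e) {
    return e instanceof RemoteException
        ? ((RemoteException) e).unwrapRemoteException()
        : e;
}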

Example 45 with RemoteException

use of org.apache.hadoop.ipc.RemoteException in project storm by apache.

the class TestHdfsSemantics method testAppendSemantics.

@Test
public void testAppendSemantics() throws Exception {
    // 1. Try to append to an open file
    Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
    FSDataOutputStream os1 = fs.create(file1, false);
    try {
        // should fail
        fs.append(file1);
        Assert.assertTrue("Append did not throw an exception", false);
    } catch (RemoteException e) {
        // expecting AlreadyBeingCreatedException inside RemoteException
        Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
    }
    // 2. Try to append to a closed file
    os1.close();
    // should pass
    FSDataOutputStream os2 = fs.append(file1);
    os2.close();
}
Also used : Path(org.apache.hadoop.fs.Path) AlreadyBeingCreatedException(org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
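
RemoteException also offers a typed overload, unwrapRemoteException(Class...), which unwraps only when the server-side class matches one of the given types. Below is a sketch of the same check written with it; the helper name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

// Returns true only when the RemoteException wraps an
// AlreadyBeingCreatedException; other classes are left wrapped.
static boolean isAlreadyBeingCreated(RemoteException e) {
    IOException unwrapped = e.unwrapRemoteException(AlreadyBeingCreatedException.class);
    return unwrapped instanceof AlreadyBeingCreatedException;
}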

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6