Example 71 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class DFSOutputStream, method newStreamForCreate:

static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
        FsPermission masked, EnumSet<CreateFlag> flag, boolean createParent,
        short replication, long blockSize, Progressable progress,
        DataChecksum checksum, String[] favoredNodes) throws IOException {
    try (TraceScope ignored = dfsClient.newPathTraceScope("newStreamForCreate", src)) {
        HdfsFileStatus stat = null;
        // Retry the create if we get a RetryStartFileException up to a maximum
        // number of times
        boolean shouldRetry = true;
        int retryCount = CREATE_RETRY_COUNT;
        while (shouldRetry) {
            shouldRetry = false;
            try {
                stat = dfsClient.namenode.create(src, masked,
                        dfsClient.clientName, new EnumSetWritable<>(flag),
                        createParent, replication, blockSize,
                        SUPPORTED_CRYPTO_VERSIONS);
                break;
            } catch (RemoteException re) {
                IOException e = re.unwrapRemoteException(
                        AccessControlException.class,
                        DSQuotaExceededException.class,
                        QuotaByStorageTypeExceededException.class,
                        FileAlreadyExistsException.class,
                        FileNotFoundException.class,
                        ParentNotDirectoryException.class,
                        NSQuotaExceededException.class,
                        RetryStartFileException.class,
                        SafeModeException.class,
                        UnresolvedPathException.class,
                        SnapshotAccessControlException.class,
                        UnknownCryptoProtocolVersionException.class);
                if (e instanceof RetryStartFileException) {
                    if (retryCount > 0) {
                        shouldRetry = true;
                        retryCount--;
                    } else {
                        throw new IOException("Too many retries because of encryption" + " zone operations", e);
                    }
                } else {
                    throw e;
                }
            }
        }
        Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
        final DFSOutputStream out;
        if (stat.getErasureCodingPolicy() != null) {
            out = new DFSStripedOutputStream(dfsClient, src, stat, flag, progress, checksum, favoredNodes);
        } else {
            out = new DFSOutputStream(dfsClient, src, stat, flag, progress, checksum, favoredNodes, true);
        }
        out.start();
        return out;
    }
}
Also used: EnumSetWritable (org.apache.hadoop.io.EnumSetWritable), QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), TraceScope (org.apache.htrace.core.TraceScope), FileNotFoundException (java.io.FileNotFoundException), AccessControlException (org.apache.hadoop.security.AccessControlException), SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), RetryStartFileException (org.apache.hadoop.hdfs.server.namenode.RetryStartFileException), ParentNotDirectoryException (org.apache.hadoop.fs.ParentNotDirectoryException), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException), NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException), SafeModeException (org.apache.hadoop.hdfs.server.namenode.SafeModeException), RemoteException (org.apache.hadoop.ipc.RemoteException), UnresolvedPathException (org.apache.hadoop.hdfs.protocol.UnresolvedPathException)
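
The idiom this example turns on is RemoteException.unwrapRemoteException(Class...): only the listed exception types are converted back to their original classes, so the retry logic can use a plain instanceof check for RetryStartFileException. Below is a minimal, self-contained sketch of that unwrap-and-retry pattern; the callRemoteCreate() helper and the RETRIES constant are hypothetical stand-ins, while unwrapRemoteException is the real Hadoop API.

import java.io.IOException;

import org.apache.hadoop.hdfs.server.namenode.RetryStartFileException;
import org.apache.hadoop.ipc.RemoteException;

class UnwrapAndRetrySketch {

    // Hypothetical retry budget, standing in for CREATE_RETRY_COUNT above.
    private static final int RETRIES = 10;

    static void createWithRetry() throws IOException {
        for (int attempt = 0; attempt <= RETRIES; attempt++) {
            try {
                // Hypothetical stand-in for dfsClient.namenode.create(...).
                callRemoteCreate();
                return;
            } catch (RemoteException re) {
                // Only the listed classes are unwrapped back to their
                // original type; anything else stays a RemoteException.
                IOException e = re.unwrapRemoteException(RetryStartFileException.class);
                if (!(e instanceof RetryStartFileException) || attempt == RETRIES) {
                    throw e;
                }
            }
        }
    }

    private static void callRemoteCreate() throws IOException {
        // No-op placeholder; a real caller would issue the namenode RPC here.
    }
}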

Example 72 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class RpcProgramNfs3, method access:

@VisibleForTesting
ACCESS3Response access(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    ACCESS3Request request;
    try {
        request = ACCESS3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid ACCESS request");
        return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    Nfs3FileAttributes attrs;
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    try {
        attrs = writeManager.getFileAttr(dfsClient, handle, iug);
        if (attrs == null) {
            LOG.error("Can't get path for fileId: " + handle.getFileId());
            return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
        }
        if (iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) {
            int access = Nfs3Constant.ACCESS3_LOOKUP | Nfs3Constant.ACCESS3_DELETE
                | Nfs3Constant.ACCESS3_EXECUTE | Nfs3Constant.ACCESS3_EXTEND
                | Nfs3Constant.ACCESS3_MODIFY | Nfs3Constant.ACCESS3_READ;
            return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
        }
        int access = Nfs3Utils.getAccessRightsForUserGroup(securityHandler.getUid(),
            securityHandler.getGid(), securityHandler.getAuxGids(), attrs);
        return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
    } catch (RemoteException r) {
        LOG.warn("Exception ", r);
        IOException io = r.unwrapRemoteException();
        /*
         * AuthorizationException can be thrown if the user can't be proxied.
         */
        if (io instanceof AuthorizationException) {
            return new ACCESS3Response(Nfs3Status.NFS3ERR_ACCES);
        } else {
            return new ACCESS3Response(Nfs3Status.NFS3ERR_IO);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new ACCESS3Response(status);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), ACCESS3Request (org.apache.hadoop.nfs.nfs3.request.ACCESS3Request), AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), ACCESS3Response (org.apache.hadoop.nfs.nfs3.response.ACCESS3Response), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
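
The recurring idiom in these NFS handlers is the no-argument unwrapRemoteException(), which reconstructs whatever exception class the server reported, followed by a type test that maps the result to an NFS status code. A minimal sketch of that mapping, assuming hypothetical integer constants in place of Nfs3Status (the values shown follow the NFSv3 spec):

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.authorize.AuthorizationException;

class NfsStatusMappingSketch {

    // Hypothetical constants mirroring Nfs3Status.NFS3ERR_ACCES and NFS3ERR_IO.
    static final int NFS3ERR_ACCES = 13;
    static final int NFS3ERR_IO = 5;

    static int mapRemoteException(RemoteException r) {
        // With no arguments, unwrapRemoteException() tries to rebuild the
        // original exception from the class name carried in the RPC response.
        IOException io = r.unwrapRemoteException();
        // AuthorizationException means the user could not be proxied, which
        // is a permission failure rather than an I/O failure.
        return (io instanceof AuthorizationException) ? NFS3ERR_ACCES : NFS3ERR_IO;
    }
}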

Example 73 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class RpcProgramNfs3, method fsstat:

@VisibleForTesting
FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
    FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
        response.setStatus(Nfs3Status.NFS3ERR_ACCES);
        return response;
    }
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
        response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
        return response;
    }
    FSSTAT3Request request;
    try {
        request = FSSTAT3Request.deserialize(xdr);
    } catch (IOException e) {
        LOG.error("Invalid FSSTAT request");
        return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
    }
    FileHandle handle = request.getHandle();
    if (LOG.isDebugEnabled()) {
        LOG.debug("NFS FSSTAT fileId: " + handle.getFileId() + " client: " + remoteAddress);
    }
    try {
        FsStatus fsStatus = dfsClient.getDiskStatus();
        long totalBytes = fsStatus.getCapacity();
        long freeBytes = fsStatus.getRemaining();
        Nfs3FileAttributes attrs = writeManager.getFileAttr(dfsClient, handle, iug);
        if (attrs == null) {
            LOG.info("Can't get path for fileId: " + handle.getFileId());
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_STALE);
        }
        long maxFsObjects = config.getLong("dfs.max.objects", 0);
        if (maxFsObjects == 0) {
            // A value of zero in HDFS indicates no limit to the number
            // of objects that dfs supports. Using Integer.MAX_VALUE instead of
            // Long.MAX_VALUE so a 32-bit client won't complain.
            maxFsObjects = Integer.MAX_VALUE;
        }
        return new FSSTAT3Response(Nfs3Status.NFS3_OK, attrs, totalBytes,
            freeBytes, freeBytes, maxFsObjects, maxFsObjects, maxFsObjects, 0);
    } catch (RemoteException r) {
        LOG.warn("Exception ", r);
        IOException io = r.unwrapRemoteException();
        /*
         * AuthorizationException can be thrown if the user can't be proxied.
         */
        if (io instanceof AuthorizationException) {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_ACCES);
        } else {
            return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
        }
    } catch (IOException e) {
        LOG.warn("Exception ", e);
        int status = mapErrorStatus(e);
        return new FSSTAT3Response(status);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle), Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes), FSSTAT3Response (org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response), IOException (java.io.IOException), FSSTAT3Request (org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request), RemoteException (org.apache.hadoop.ipc.RemoteException), FsStatus (org.apache.hadoop.fs.FsStatus), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
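
Beyond the same RemoteException mapping as the previous example, the detail worth isolating is the clamp on dfs.max.objects: HDFS uses 0 to mean unlimited, but a 32-bit NFS client cannot represent Long.MAX_VALUE, so the handler substitutes Integer.MAX_VALUE. A one-method sketch of that translation (the helper name is hypothetical):

class FsObjectLimitSketch {

    // HDFS reports 0 for "no object limit"; NFSv3 needs a concrete number,
    // and Integer.MAX_VALUE keeps 32-bit clients from overflowing.
    static long clampFsObjectLimit(long configured) {
        return configured == 0 ? Integer.MAX_VALUE : configured;
    }
}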

Example 74 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class BPServiceActor, method offerService:

/**
 * Main loop for each BP thread. Run until shutdown,
 * forever calling remote NameNode functions.
 */
private void offerService() throws Exception {
    LOG.info("For namenode " + nnAddr + " using" + " BLOCKREPORT_INTERVAL of " + dnConf.blockReportInterval + "msec" + " CACHEREPORT_INTERVAL of " + dnConf.cacheReportInterval + "msec" + " Initial delay: " + dnConf.initialBlockReportDelayMs + "msec" + "; heartBeatInterval=" + dnConf.heartBeatInterval + (lifelineSender != null ? "; lifelineIntervalMs=" + dnConf.getLifelineIntervalMs() : ""));
    long fullBlockReportLeaseId = 0;
    while (shouldRun()) {
        try {
            final long startTime = scheduler.monotonicNow();
            //
            // Every so often, send heartbeat or block-report
            //
            final boolean sendHeartbeat = scheduler.isHeartbeatDue(startTime);
            HeartbeatResponse resp = null;
            if (sendHeartbeat) {
                //
                // All heartbeat messages include following info:
                // -- Datanode name
                // -- data transfer port
                // -- Total capacity
                // -- Bytes remaining
                //
                boolean requestBlockReportLease = (fullBlockReportLeaseId == 0) && scheduler.isBlockReportDue(startTime);
                if (!dn.areHeartbeatsDisabledForTests()) {
                    resp = sendHeartBeat(requestBlockReportLease);
                    assert resp != null;
                    if (resp.getFullBlockReportLeaseId() != 0) {
                        if (fullBlockReportLeaseId != 0) {
                            LOG.warn(nnAddr + " sent back a full block report lease "
                                + "ID of 0x" + Long.toHexString(resp.getFullBlockReportLeaseId())
                                + ", but we already have a lease ID of 0x"
                                + Long.toHexString(fullBlockReportLeaseId) + ". "
                                + "Overwriting old lease ID.");
                        }
                        fullBlockReportLeaseId = resp.getFullBlockReportLeaseId();
                    }
                    dn.getMetrics().addHeartbeat(scheduler.monotonicNow() - startTime);
                    // If the state of this NN has changed (eg STANDBY->ACTIVE)
                    // then let the BPOfferService update itself.
                    //
                    // Important that this happens before processCommand below,
                    // since the first heartbeat to a new active might have commands
                    // that we should actually process.
                    bpos.updateActorStatesFromHeartbeat(this, resp.getNameNodeHaState());
                    state = resp.getNameNodeHaState().getState();
                    if (state == HAServiceState.ACTIVE) {
                        handleRollingUpgradeStatus(resp);
                    }
                    long startProcessCommands = monotonicNow();
                    if (!processCommand(resp.getCommands()))
                        continue;
                    long endProcessCommands = monotonicNow();
                    if (endProcessCommands - startProcessCommands > 2000) {
                        LOG.info("Took " + (endProcessCommands - startProcessCommands) + "ms to process " + resp.getCommands().length + " commands from NN");
                    }
                }
            }
            if (ibrManager.sendImmediately() || sendHeartbeat) {
                ibrManager.sendIBRs(bpNamenode, bpRegistration, bpos.getBlockPoolId(), dn.getMetrics());
            }
            List<DatanodeCommand> cmds = null;
            boolean forceFullBr = scheduler.forceFullBlockReport.getAndSet(false);
            if (forceFullBr) {
                LOG.info("Forcing a full block report to " + nnAddr);
            }
            if ((fullBlockReportLeaseId != 0) || forceFullBr) {
                cmds = blockReport(fullBlockReportLeaseId);
                fullBlockReportLeaseId = 0;
            }
            processCommand(cmds == null ? null : cmds.toArray(new DatanodeCommand[cmds.size()]));
            if (!dn.areCacheReportsDisabledForTests()) {
                DatanodeCommand cmd = cacheReport();
                processCommand(new DatanodeCommand[] { cmd });
            }
            if (sendHeartbeat) {
                dn.getMetrics().addHeartbeatTotal(scheduler.monotonicNow() - startTime);
            }
            // There is no work to do; sleep until the heartbeat timer elapses,
            // or work arrives, and then iterate again.
            ibrManager.waitTillNextIBR(scheduler.getHeartbeatWaitTime());
        } catch (RemoteException re) {
            String reClass = re.getClassName();
            if (UnregisteredNodeException.class.getName().equals(reClass)
                || DisallowedDatanodeException.class.getName().equals(reClass)
                || IncorrectVersionException.class.getName().equals(reClass)) {
                LOG.warn(this + " is shutting down", re);
                shouldServiceRun = false;
                return;
            }
            LOG.warn("RemoteException in offerService", re);
            sleepAfterException();
        } catch (IOException e) {
            LOG.warn("IOException in offerService", e);
            sleepAfterException();
        }
        processQueueMessages();
    }
// while (shouldRun())
}
Also used: HeartbeatResponse (org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse), IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), IOException (java.io.IOException), RemoteException (org.apache.hadoop.ipc.RemoteException)
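
Unlike the earlier examples, this catch block never unwraps the exception; it compares RemoteException.getClassName(), the fully qualified name of the server-side exception, against the handful of classes that mean the actor should shut down rather than sleep and retry. A minimal sketch of that classification (the wrapper class is hypothetical; the three exception types and getClassName() are real Hadoop API):

import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
import org.apache.hadoop.ipc.RemoteException;

class FatalRemoteExceptionSketch {

    // These server-side exceptions mean the datanode should stop serving
    // this namenode; any other RemoteException is treated as transient.
    static boolean isFatal(RemoteException re) {
        String cls = re.getClassName();
        return UnregisteredNodeException.class.getName().equals(cls)
            || DisallowedDatanodeException.class.getName().equals(cls)
            || IncorrectVersionException.class.getName().equals(cls);
    }
}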

Example 75 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

Class ExceptionHandler, method exceptionCaught:

static DefaultFullHttpResponse exceptionCaught(Throwable cause) {
    Exception e = cause instanceof Exception ? (Exception) cause : new Exception(cause);
    if (LOG.isTraceEnabled()) {
        LOG.trace("GOT EXCEPTION", e);
    }
    // Convert exception
    if (e instanceof ParamException) {
        final ParamException paramexception = (ParamException) e;
        e = new IllegalArgumentException("Invalid value for webhdfs parameter \"" + paramexception.getParameterName() + "\": " + e.getCause().getMessage(), e);
    } else if (e instanceof ContainerException || e instanceof SecurityException) {
        e = toCause(e);
    } else if (e instanceof RemoteException) {
        e = ((RemoteException) e).unwrapRemoteException();
    }
    // Map response status
    final HttpResponseStatus s;
    if (e instanceof SecurityException) {
        s = FORBIDDEN;
    } else if (e instanceof AuthorizationException) {
        s = FORBIDDEN;
    } else if (e instanceof FileNotFoundException) {
        s = NOT_FOUND;
    } else if (e instanceof IOException) {
        s = FORBIDDEN;
    } else if (e instanceof UnsupportedOperationException) {
        s = BAD_REQUEST;
    } else if (e instanceof IllegalArgumentException) {
        s = BAD_REQUEST;
    } else {
        LOG.warn("INTERNAL_SERVER_ERROR", e);
        s = INTERNAL_SERVER_ERROR;
    }
    final byte[] js = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8);
    DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1, s, Unpooled.wrappedBuffer(js));
    resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8);
    resp.headers().set(CONTENT_LENGTH, js.length);
    return resp;
}
Also used: DefaultFullHttpResponse (io.netty.handler.codec.http.DefaultFullHttpResponse), AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), HttpResponseStatus (io.netty.handler.codec.http.HttpResponseStatus), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), StandbyException (org.apache.hadoop.ipc.StandbyException), ContainerException (com.sun.jersey.api.container.ContainerException), RemoteException (org.apache.hadoop.ipc.RemoteException), ParamException (com.sun.jersey.api.ParamException)
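
One subtlety in the status mapping above is ordering: FileNotFoundException extends IOException, so the subclass must be tested first or every missing file would surface as FORBIDDEN. A minimal sketch of that ordering constraint, with plain integers standing in for Netty's HttpResponseStatus constants:

import java.io.FileNotFoundException;
import java.io.IOException;

class HttpStatusMappingSketch {

    // Hypothetical numeric stand-ins for NOT_FOUND, FORBIDDEN,
    // and INTERNAL_SERVER_ERROR.
    static final int NOT_FOUND = 404;
    static final int FORBIDDEN = 403;
    static final int INTERNAL_SERVER_ERROR = 500;

    static int map(Exception e) {
        // Subclass before superclass: swapping these two tests would
        // shadow the NOT_FOUND case entirely.
        if (e instanceof FileNotFoundException) {
            return NOT_FOUND;
        }
        if (e instanceof IOException) {
            return FORBIDDEN;
        }
        return INTERNAL_SERVER_ERROR;
    }
}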

Aggregations

Classes most frequently used together with RemoteException, with usage counts:

RemoteException (org.apache.hadoop.ipc.RemoteException): 99
IOException (java.io.IOException): 53
Test (org.junit.Test): 39
Path (org.apache.hadoop.fs.Path): 36
Configuration (org.apache.hadoop.conf.Configuration): 20
FileNotFoundException (java.io.FileNotFoundException): 19
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 12
InterruptedIOException (java.io.InterruptedIOException): 10
AccessControlException (org.apache.hadoop.security.AccessControlException): 10
ServerName (org.apache.hadoop.hbase.ServerName): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 8
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 7
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 7
EOFException (java.io.EOFException): 6
ArrayList (java.util.ArrayList): 6
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 6
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6