Search in sources:

Example 1 with NewShmInfo

Use of org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo in project hadoop by apache.

From the class DataXceiver, method requestShortCircuitShm:

@Override
public void requestShortCircuitShm(String clientName) throws IOException {
    NewShmInfo shmInfo = null;
    boolean success = false;
    DomainSocket sock = peer.getDomainSocket();
    try {
        if (sock == null) {
            sendShmErrorResponse(ERROR_INVALID, "Bad request from " + peer + ": must request a shared " + "memory segment over a UNIX domain socket.");
            return;
        }
        try {
            shmInfo = datanode.shortCircuitRegistry.createNewMemorySegment(clientName, sock);
            // After calling #{ShortCircuitRegistry#createNewMemorySegment}, the
            // socket is managed by the DomainSocketWatcher, not the DataXceiver.
            releaseSocket();
        } catch (UnsupportedOperationException e) {
            sendShmErrorResponse(ERROR_UNSUPPORTED, "This datanode has not been configured to support " + "short-circuit shared memory segments.");
            return;
        } catch (IOException e) {
            sendShmErrorResponse(ERROR, "Failed to create shared file descriptor: " + e.getMessage());
            return;
        }
        sendShmSuccessResponse(sock, shmInfo);
        success = true;
    } finally {
        if (ClientTraceLog.isInfoEnabled()) {
            if (success) {
                BlockSender.ClientTraceLog.info(String.format("cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "op: REQUEST_SHORT_CIRCUIT_SHM," + " shmId: %016x%016x, srvID: %s, success: true", clientName, shmInfo.getShmId().getHi(), shmInfo.getShmId().getLo(), datanode.getDatanodeUuid()));
            } else {
                BlockSender.ClientTraceLog.info(String.format("cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " + "op: REQUEST_SHORT_CIRCUIT_SHM, " + "shmId: n/a, srvID: %s, success: false", clientName, datanode.getDatanodeUuid()));
            }
        }
        if ((!success) && (peer == null)) {
            // bad behavior inside the poll() call.  See HADOOP-11802 for details.
            try {
                LOG.warn("Failed to send success response back to the client.  " + "Shutting down socket for " + shmInfo.getShmId() + ".");
                sock.shutdown();
            } catch (IOException e) {
                LOG.warn("Failed to shut down socket in error handler", e);
            }
        }
        IOUtils.cleanup(null, shmInfo);
    }
}
Also used : DomainSocket(org.apache.hadoop.net.unix.DomainSocket) NewShmInfo(org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException)

Aggregations

IOException (java.io.IOException)1 InterruptedIOException (java.io.InterruptedIOException)1 NewShmInfo (org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo)1 DomainSocket (org.apache.hadoop.net.unix.DomainSocket)1