
Example 21 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

The class DatanodeProtocolServerSideTranslatorPB, method registerDatanode.

@Override
public RegisterDatanodeResponseProto registerDatanode(RpcController controller, RegisterDatanodeRequestProto request) throws ServiceException {
    DatanodeRegistration registration = PBHelper.convert(request.getRegistration());
    DatanodeRegistration registrationResp;
    try {
        registrationResp = impl.registerDatanode(registration);
    } catch (IOException e) {
        throw new ServiceException(e);
    }
    return RegisterDatanodeResponseProto.newBuilder().setRegistration(PBHelper.convert(registrationResp)).build();
}
Also used: DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), ServiceException (com.google.protobuf.ServiceException), IOException (java.io.IOException)
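
The method above is the canonical protobuf-translator shape used throughout Hadoop's RPC layer: unwrap the request with PBHelper.convert, delegate to the underlying implementation, and rewrap any checked IOException in a ServiceException for the RPC machinery. A minimal sketch of the same shape follows; every type in it (GreetRequest, GreeterService, and so on) is a hypothetical stand-in invented for illustration, not a Hadoop or protobuf class.

// Minimal sketch of the server-side translator pattern shown above.
// All types here are hypothetical stand-ins, not Hadoop or protobuf types.
import java.io.IOException;

final class GreetRequest {
    final String name;
    GreetRequest(String name) { this.name = name; }
}

final class GreetResponse {
    final String message;
    GreetResponse(String message) { this.message = message; }
}

interface GreeterService {
    // The "impl" side of the translator; throws a checked IOException.
    String greet(String name) throws IOException;
}

final class ServiceException extends Exception {
    ServiceException(Throwable cause) { super(cause); }
}

final class GreeterServerSideTranslator {
    private final GreeterService impl;

    GreeterServerSideTranslator(GreeterService impl) { this.impl = impl; }

    // Same shape as registerDatanode above: convert the wire type,
    // delegate, and rewrap IOException as ServiceException.
    GreetResponse greet(GreetRequest request) throws ServiceException {
        try {
            return new GreetResponse(impl.greet(request.name));
        } catch (IOException e) {
            throw new ServiceException(e);
        }
    }
}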

Example 22 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

The class DataXceiver, method readBlock.

@Override
public void readBlock(final ExtendedBlock block, final Token<BlockTokenIdentifier> blockToken, final String clientName, final long blockOffset, final long length, final boolean sendChecksum, final CachingStrategy cachingStrategy) throws IOException {
    previousOpClientName = clientName;
    long read = 0;
    updateCurrentThreadName("Sending block " + block);
    OutputStream baseStream = getOutputStream();
    DataOutputStream out = getBufferedOutputStream();
    checkAccess(out, true, block, blockToken, Op.READ_BLOCK, BlockTokenIdentifier.AccessMode.READ);
    // send the block
    BlockSender blockSender = null;
    DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block.getBlockPoolId());
    final String clientTraceFmt = clientName.length() > 0 && ClientTraceLog.isInfoEnabled() ? String.format(DN_CLIENTTRACE_FORMAT, localAddress, remoteAddress, "%d", "HDFS_READ", clientName, "%d", dnR.getDatanodeUuid(), block, "%d") : dnR + " Served block " + block + " to " + remoteAddress;
    try {
        try {
            blockSender = new BlockSender(block, blockOffset, length, true, false, sendChecksum, datanode, clientTraceFmt, cachingStrategy);
        } catch (IOException e) {
            String msg = "opReadBlock " + block + " received exception " + e;
            LOG.info(msg);
            sendResponse(ERROR, msg);
            throw e;
        }
        // send op status
        writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream()));
        long beginRead = Time.monotonicNow();
        // send data
        read = blockSender.sendBlock(out, baseStream, null);
        long duration = Time.monotonicNow() - beginRead;
        if (blockSender.didSendEntireByteRange()) {
            // If we sent the entire range, then we should expect the client
            // to respond with a Status enum.
            try {
                ClientReadStatusProto stat = ClientReadStatusProto.parseFrom(PBHelperClient.vintPrefixed(in));
                if (!stat.hasStatus()) {
                    LOG.warn("Client " + peer.getRemoteAddressString() + " did not send a valid status code after reading. " + "Will close connection.");
                    IOUtils.closeStream(out);
                }
            } catch (IOException ioe) {
                LOG.debug("Error reading client status response. Will close connection.", ioe);
                IOUtils.closeStream(out);
                incrDatanodeNetworkErrors();
            }
        } else {
            IOUtils.closeStream(out);
        }
        datanode.metrics.incrBytesRead((int) read);
        datanode.metrics.incrBlocksRead();
        datanode.metrics.incrTotalReadTime(duration);
    } catch (SocketException ignored) {
        if (LOG.isTraceEnabled()) {
            LOG.trace(dnR + ":Ignoring exception while serving " + block + " to " + remoteAddress, ignored);
        }
        // It's OK for the remote side to close the connection at any time.
        datanode.metrics.incrBlocksRead();
        IOUtils.closeStream(out);
    } catch (IOException ioe) {
        /* What exactly should we do here?
         * Earlier versions called shutdown() on the datanode on disk errors.
         */
        if (!(ioe instanceof SocketTimeoutException)) {
            LOG.warn(dnR + ":Got exception while serving " + block + " to " + remoteAddress, ioe);
            incrDatanodeNetworkErrors();
        }
        throw ioe;
    } finally {
        IOUtils.closeStream(blockSender);
    }
    // update metrics
    datanode.metrics.addReadBlockOp(elapsed());
    datanode.metrics.incrReadsFromClient(peer.isLocal(), read);
}
Also used: SocketException (java.net.SocketException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), SocketTimeoutException (java.net.SocketTimeoutException), DataOutputStream (java.io.DataOutputStream), BufferedOutputStream (java.io.BufferedOutputStream), OutputStream (java.io.OutputStream), ByteString (com.google.protobuf.ByteString), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ClientReadStatusProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto)
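
The ClientReadStatusProto read near the end of readBlock relies on varint-prefixed framing: PBHelperClient.vintPrefixed limits the stream to one length-prefixed message so parseFrom consumes exactly one status reply. The sketch below shows what such framing looks like when decoded by hand; it is an illustration of the wire format, not Hadoop's implementation.

// Illustrative decoder for varint-prefixed frames like the
// ClientReadStatusProto read above. This is a hand-rolled sketch of the
// framing, not the PBHelperClient.vintPrefixed implementation.
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

final class VarintFraming {
    // Decode a protobuf base-128 varint length prefix (at most 5 bytes).
    static int readRawVarint32(InputStream in) throws IOException {
        int result = 0;
        for (int shift = 0; shift < 32; shift += 7) {
            int b = in.read();
            if (b < 0) {
                throw new EOFException("stream ended inside varint prefix");
            }
            result |= (b & 0x7f) << shift;
            if ((b & 0x80) == 0) {
                return result;
            }
        }
        throw new IOException("malformed varint prefix");
    }

    // Read exactly one length-prefixed message body off the stream.
    static byte[] readDelimitedFrame(InputStream in) throws IOException {
        int len = readRawVarint32(in);
        byte[] body = new byte[len];
        int off = 0;
        while (off < len) {
            int n = in.read(body, off, len - off);
            if (n < 0) {
                throw new EOFException("stream ended inside message body");
            }
            off += n;
        }
        return body;
    }
}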

Example 23 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

The class DataXceiver, method requestShortCircuitFds.

@Override
public void requestShortCircuitFds(final ExtendedBlock blk, final Token<BlockTokenIdentifier> token, SlotId slotId, int maxVersion, boolean supportsReceiptVerification) throws IOException {
    updateCurrentThreadName("Passing file descriptors for block " + blk);
    DataOutputStream out = getBufferedOutputStream();
    checkAccess(out, true, blk, token, Op.REQUEST_SHORT_CIRCUIT_FDS, BlockTokenIdentifier.AccessMode.READ);
    BlockOpResponseProto.Builder bld = BlockOpResponseProto.newBuilder();
    FileInputStream[] fis = null;
    SlotId registeredSlotId = null;
    boolean success = false;
    try {
        try {
            if (peer.getDomainSocket() == null) {
                throw new IOException("You cannot pass file descriptors over " + "anything but a UNIX domain socket.");
            }
            if (slotId != null) {
                boolean isCached = datanode.data.isCached(blk.getBlockPoolId(), blk.getBlockId());
                datanode.shortCircuitRegistry.registerSlot(ExtendedBlockId.fromExtendedBlock(blk), slotId, isCached);
                registeredSlotId = slotId;
            }
            fis = datanode.requestShortCircuitFdsForRead(blk, token, maxVersion);
            Preconditions.checkState(fis != null);
            bld.setStatus(SUCCESS);
            bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
        } catch (ShortCircuitFdsVersionException e) {
            bld.setStatus(ERROR_UNSUPPORTED);
            bld.setShortCircuitAccessVersion(DataNode.CURRENT_BLOCK_FORMAT_VERSION);
            bld.setMessage(e.getMessage());
        } catch (ShortCircuitFdsUnsupportedException e) {
            bld.setStatus(ERROR_UNSUPPORTED);
            bld.setMessage(e.getMessage());
        } catch (IOException e) {
            bld.setStatus(ERROR);
            bld.setMessage(e.getMessage());
        }
        bld.build().writeDelimitedTo(socketOut);
        if (fis != null) {
            FileDescriptor[] fds = new FileDescriptor[fis.length];
            for (int i = 0; i < fds.length; i++) {
                fds[i] = fis[i].getFD();
            }
            byte[] buf = new byte[1];
            if (supportsReceiptVerification) {
                buf[0] = (byte) USE_RECEIPT_VERIFICATION.getNumber();
            } else {
                buf[0] = (byte) DO_NOT_USE_RECEIPT_VERIFICATION.getNumber();
            }
            DomainSocket sock = peer.getDomainSocket();
            sock.sendFileDescriptors(fds, buf, 0, buf.length);
            if (supportsReceiptVerification) {
                LOG.trace("Reading receipt verification byte for " + slotId);
                int val = sock.getInputStream().read();
                if (val < 0) {
                    throw new EOFException();
                }
            } else {
                LOG.trace("Receipt verification is not enabled on the DataNode.  " + "Not verifying " + slotId);
            }
            success = true;
        }
    } finally {
        if ((!success) && (registeredSlotId != null)) {
            LOG.info("Unregistering " + registeredSlotId + " because the " + "requestShortCircuitFdsForRead operation failed.");
            datanode.shortCircuitRegistry.unregisterSlot(registeredSlotId);
        }
        if (ClientTraceLog.isInfoEnabled()) {
            DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk.getBlockPoolId());
            BlockSender.ClientTraceLog.info(String.format("src: 127.0.0.1, dest: 127.0.0.1, op: REQUEST_SHORT_CIRCUIT_FDS," + " blockid: %s, srvID: %s, success: %b", blk.getBlockId(), dnR.getDatanodeUuid(), success));
        }
        if (fis != null) {
            IOUtils.cleanup(null, fis);
        }
    }
}
Also used: ShortCircuitFdsVersionException (org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException), DataOutputStream (java.io.DataOutputStream), BlockOpResponseProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto), ShortCircuitFdsUnsupportedException (org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), FileInputStream (java.io.FileInputStream), FileDescriptor (java.io.FileDescriptor), SlotId (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DomainSocket (org.apache.hadoop.net.unix.DomainSocket), EOFException (java.io.EOFException)
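
The interesting part of this example is the handshake: the descriptors and a one-byte flag go out together via DomainSocket.sendFileDescriptors, and when receipt verification is on, the server blocks on a single acknowledgement byte from the client. A hedged sketch of the client-side counterpart is below; it assumes DomainSocket exposes recvFileInputStreams with the signature shown and simplifies away the real BlockReaderFactory logic.

// Hedged sketch of the client side of the FD-passing handshake above.
// Assumes org.apache.hadoop.net.unix.DomainSocket provides
// recvFileInputStreams(FileInputStream[], byte[], int, int); the control
// flow is illustrative, not the actual BlockReaderFactory code.
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.net.unix.DomainSocket;

final class ShortCircuitFdReceiver {
    /**
     * Receives the block and meta file descriptors plus the one-byte
     * verification flag. receiptFlagValue is whichever byte the protocol
     * defines for USE_RECEIPT_VERIFICATION; it is a parameter here because
     * the concrete enum number is elided in this sketch.
     */
    static FileInputStream[] receiveFds(DomainSocket sock, byte receiptFlagValue)
            throws IOException {
        FileInputStream[] fis = new FileInputStream[2]; // block file, meta file
        byte[] buf = new byte[1];                       // server's flag byte
        sock.recvFileInputStreams(fis, buf, 0, buf.length);
        if (buf[0] == receiptFlagValue) {
            // Acknowledge receipt so the server's getInputStream().read()
            // in requestShortCircuitFds unblocks.
            sock.getOutputStream().write(0);
        }
        return fis;
    }
}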

Example 24 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

The class TestDatanodeRegistration, method testChangeStorageID.

@Test
public void testChangeStorageID() throws Exception {
    final String DN_IP_ADDR = "127.0.0.1";
    final String DN_HOSTNAME = "localhost";
    final int DN_XFER_PORT = 12345;
    final int DN_INFO_PORT = 12346;
    final int DN_INFO_SECURE_PORT = 12347;
    final int DN_IPC_PORT = 12348;
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        DFSClient client = new DFSClient(addr, conf);
        NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
        // register a datanode
        DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME, "fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT, DN_IPC_PORT);
        long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
        StorageInfo mockStorageInfo = mock(StorageInfo.class);
        doReturn(nnCTime).when(mockStorageInfo).getCTime();
        doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockStorageInfo).getLayoutVersion();
        DatanodeRegistration dnReg = new DatanodeRegistration(dnId, mockStorageInfo, null, VersionInfo.getVersion());
        rpcServer.registerDatanode(dnReg);
        DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals("Expected a registered datanode", 1, report.length);
        // register the same datanode again with a different storage ID
        dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME, "changed-fake-datanode-id", DN_XFER_PORT, DN_INFO_PORT, DN_INFO_SECURE_PORT, DN_IPC_PORT);
        dnReg = new DatanodeRegistration(dnId, mockStorageInfo, null, VersionInfo.getVersion());
        rpcServer.registerDatanode(dnReg);
        report = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals("Datanode with changed storage ID not recognized", 1, report.length);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), Test (org.junit.Test)
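
Both registrations in the test differ only in the datanode UUID, so the setup lends itself to a small helper. A sketch follows, using only the constructors and Mockito stubbing already present in the test; the helper class and its parameter list are invented for illustration.

// Sketch of a test helper factoring out the registration boilerplate used
// twice in testChangeStorageID. Only the constructors and stubbing shown
// above are used; the helper itself is hypothetical.
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.util.VersionInfo;

final class RegistrationTestUtil {
    static DatanodeRegistration newRegistration(String datanodeUuid, long nnCTime,
            String ip, String host, int xferPort, int infoPort,
            int infoSecurePort, int ipcPort) {
        DatanodeID dnId = new DatanodeID(ip, host, datanodeUuid,
                xferPort, infoPort, infoSecurePort, ipcPort);
        // Mocked storage info matching the NameNode's CTime and layout
        // version, exactly as in the test body above.
        StorageInfo storage = mock(StorageInfo.class);
        doReturn(nnCTime).when(storage).getCTime();
        doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION)
                .when(storage).getLayoutVersion();
        return new DatanodeRegistration(dnId, storage, null,
                VersionInfo.getVersion());
    }
}

With that in place, the two registrations in the test reduce to two calls differing only in the UUID argument.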

Example 25 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

The class TestDatanodeRegistration, method testRegistrationWithDifferentSoftwareVersionsDuringUpgrade.

@Test
public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
        long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
        StorageInfo mockStorageInfo = mock(StorageInfo.class);
        doReturn(nnCTime).when(mockStorageInfo).getCTime();
        DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
        doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
        doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
        doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
        // Should succeed when software versions are the same and CTimes are the
        // same.
        doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
        doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
        doReturn(123).when(mockDnReg).getXferPort();
        rpcServer.registerDatanode(mockDnReg);
        // Should succeed when software versions are the same and CTimes are
        // different.
        doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
        rpcServer.registerDatanode(mockDnReg);
        // Should fail when software version of DN is different from NN and CTimes
        // are different.
        doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
        try {
            rpcServer.registerDatanode(mockDnReg);
            fail("Should not have been able to register DN with different software" + " versions and CTimes");
        } catch (IncorrectVersionException ive) {
            GenericTestUtils.assertExceptionContains("does not match CTime of NN", ive);
            LOG.info("Got expected exception", ive);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), Configuration (org.apache.hadoop.conf.Configuration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), Test (org.junit.Test)
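
Stripped of the mocks, the rule this test exercises is: a registering DataNode with the same software version is always accepted regardless of CTime, while a different software version is rejected once the storage CTime no longer matches the NameNode's. A simplified restatement of that decision follows; it is a sketch of the observed behavior only, not the actual DatanodeManager code, and it throws a plain IOException where the real path throws IncorrectVersionException.

// Simplified restatement of the version/CTime acceptance rule exercised by
// the test above. This mirrors the observed behavior only; the real
// NameNode check lives elsewhere and throws IncorrectVersionException.
import java.io.IOException;

final class VersionCheckSketch {
    static void checkRegistration(String dnSoftwareVersion, String nnSoftwareVersion,
            long dnCTime, long nnCTime) throws IOException {
        if (dnSoftwareVersion.equals(nnSoftwareVersion)) {
            return; // same software version: a CTime mismatch is tolerated
        }
        if (dnCTime != nnCTime) {
            // Mirrors the "does not match CTime of NN" failure in the test.
            throw new IOException("Datanode CTime " + dnCTime
                    + " does not match CTime of NN " + nnCTime);
        }
        // Different software versions with matching CTime are accepted,
        // which is what permits mixed versions during a rolling upgrade.
    }
}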

Aggregations

DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 48
Test (org.junit.Test): 36
Configuration (org.apache.hadoop.conf.Configuration): 19
Path (org.apache.hadoop.fs.Path): 16
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 12
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 12
ArrayList (java.util.ArrayList): 10
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 10
Block (org.apache.hadoop.hdfs.protocol.Block): 9
IOException (java.io.IOException): 8
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 8
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 7
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 6
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 5