
Example 6 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache hadoop project.

From class TestPBHelper, method testDataNodeInfoPBHelper.

@Test
public void testDataNodeInfoPBHelper() {
    DatanodeID id = DFSTestUtil.getLocalDatanodeID();
    DatanodeInfo dnInfos0 = new DatanodeInfoBuilder().setNodeID(id).build();
    dnInfos0.setCapacity(3500L);
    dnInfos0.setDfsUsed(1000L);
    dnInfos0.setNonDfsUsed(2000L);
    dnInfos0.setRemaining(500L);
    HdfsProtos.DatanodeInfoProto dnproto = PBHelperClient.convert(dnInfos0);
    DatanodeInfo dnInfos1 = PBHelperClient.convert(dnproto);
    compare(dnInfos0, dnInfos1);
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos1.getNonDfsUsed());
    // Test conversion when the proto omits the nonDfsUsed field;
    // the decoder derives it from the other counters (see Example 10).
    HdfsProtos.DatanodeInfoProto.Builder b = HdfsProtos.DatanodeInfoProto.newBuilder();
    b.setId(PBHelperClient.convert(id)).setCapacity(3500L).setDfsUsed(1000L).setRemaining(500L);
    DatanodeInfo dnInfos3 = PBHelperClient.convert(b.build());
    assertEquals(dnInfos0.getNonDfsUsed(), dnInfos3.getNonDfsUsed());
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HdfsProtos(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) Test(org.junit.Test)
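
A note on the builder: the same values can be supplied through DatanodeInfoBuilder directly rather than via setters on the built object. A minimal equivalent sketch, using only setter names that appear elsewhere on this page:

DatanodeInfo dnInfos0 = new DatanodeInfoBuilder()
    .setNodeID(id)
    .setCapacity(3500L)
    .setDfsUsed(1000L)
    .setNonDfsUsed(2000L)
    .setRemaining(500L)
    .build();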

Example 7 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache hadoop project.

From class StripedBlockWriter, method init.

/**
   * Initialize output/input streams for transferring data to the target
   * and send the create-block request.
   */
private void init() throws IOException {
    Socket socket = null;
    DataOutputStream out = null;
    DataInputStream in = null;
    boolean success = false;
    try {
        InetSocketAddress targetAddr = stripedWriter.getSocketAddress4Transfer(target);
        socket = datanode.newSocket();
        NetUtils.connect(socket, targetAddr, datanode.getDnConf().getSocketTimeout());
        socket.setTcpNoDelay(datanode.getDnConf().getDataTransferServerTcpNoDelay());
        socket.setSoTimeout(datanode.getDnConf().getSocketTimeout());
        Token<BlockTokenIdentifier> blockToken = datanode.getBlockAccessToken(block, EnumSet.of(BlockTokenIdentifier.AccessMode.WRITE));
        long writeTimeout = datanode.getDnConf().getSocketWriteTimeout();
        OutputStream unbufOut = NetUtils.getOutputStream(socket, writeTimeout);
        InputStream unbufIn = NetUtils.getInputStream(socket);
        DataEncryptionKeyFactory keyFactory = datanode.getDataEncryptionKeyFactoryForBlock(block);
        IOStreamPair saslStreams = datanode.getSaslClient().socketSend(socket, unbufOut, unbufIn, keyFactory, blockToken, target);
        unbufOut = saslStreams.out;
        unbufIn = saslStreams.in;
        out = new DataOutputStream(new BufferedOutputStream(unbufOut, DFSUtilClient.getSmallBufferSize(conf)));
        in = new DataInputStream(unbufIn);
        DatanodeInfo source = new DatanodeInfoBuilder().setNodeID(datanode.getDatanodeId()).build();
        // clientName is empty for datanode-internal transfers; the four zeros are
        // pipelineSize, minBytesRcvd, maxBytesRcvd and latestGenerationStamp.
        new Sender(out).writeBlock(block, storageType, blockToken, "",
            new DatanodeInfo[] { target }, new StorageType[] { storageType },
            source, BlockConstructionStage.PIPELINE_SETUP_CREATE,
            0, 0, 0, 0,
            stripedWriter.getChecksum(), stripedWriter.getCachingStrategy(),
            false, false, null);
        targetSocket = socket;
        targetOutputStream = out;
        targetInputStream = in;
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeStream(out);
            IOUtils.closeStream(in);
            IOUtils.closeStream(socket);
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) DataOutputStream(java.io.DataOutputStream) InetSocketAddress(java.net.InetSocketAddress) DataInputStream(java.io.DataInputStream) InputStream(java.io.InputStream) BufferedOutputStream(java.io.BufferedOutputStream) DataOutputStream(java.io.DataOutputStream) OutputStream(java.io.OutputStream) DataInputStream(java.io.DataInputStream) DataEncryptionKeyFactory(org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory) Sender(org.apache.hadoop.hdfs.protocol.datatransfer.Sender) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) IOStreamPair(org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair) BufferedOutputStream(java.io.BufferedOutputStream) Socket(java.net.Socket)

Example 8 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache hadoop project.

From class ReportBadBlockAction, method reportTo.

@Override
public void reportTo(DatanodeProtocolClientSideTranslatorPB bpNamenode, DatanodeRegistration bpRegistration) throws BPServiceActorActionException {
    if (bpRegistration == null) {
        return;
    }
    DatanodeInfo[] dnArr = { new DatanodeInfoBuilder().setNodeID(bpRegistration).build() };
    String[] uuids = { storageUuid };
    StorageType[] types = { storageType };
    LocatedBlock[] locatedBlock = { new LocatedBlock(block, dnArr, uuids, types) };
    try {
        bpNamenode.reportBadBlocks(locatedBlock);
    } catch (RemoteException re) {
        // The NameNode explicitly rejected the report: log it and drop the action.
        DataNode.LOG.info("reportBadBlock encountered RemoteException for block: " + block, re);
    } catch (IOException e) {
        // Any other I/O failure is likely transient: wrap it so the
        // service actor can retry the report later.
        throw new BPServiceActorActionException("Failed to report bad block " + block + " to namenode.", e);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException)
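
Note the asymmetric error handling: a RemoteException means the NameNode actively rejected the report, so it is logged and dropped, while any other IOException is wrapped so the caller can retry the action later. A hypothetical Mockito sketch of that contract (construction of action and bpRegistration omitted):

DatanodeProtocolClientSideTranslatorPB nn =
    Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);

// NameNode-side rejection: logged, not rethrown.
Mockito.doThrow(new RemoteException("java.io.IOException", "rejected"))
    .when(nn).reportBadBlocks(Mockito.any(LocatedBlock[].class));
action.reportTo(nn, bpRegistration);  // returns normally

// Transport-level failure: wrapped and rethrown.
Mockito.doThrow(new IOException("connection reset"))
    .when(nn).reportBadBlocks(Mockito.any(LocatedBlock[].class));
action.reportTo(nn, bpRegistration);  // throws BPServiceActorActionException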

Example 9 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache hadoop project.

From class TestBlockReaderFactory, method testShortCircuitReadFromServerWithoutShm.

/**
   * Test that a client which supports short-circuit reads using
   * shared memory can fall back to not using shared memory when
   * the server doesn't support it.
   */
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration clientConf = createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
    Configuration serverConf = new Configuration(clientConf);
    serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromServerWithoutShm_clientContext");
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    final DatanodeInfo datanode = new DatanodeInfoBuilder().setNodeID(cluster.getDataNodes().get(0).getDatanodeId()).build();
    cache.getDfsClientShmManager().visit(new Visitor() {

        @Override
        public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info) throws IOException {
            Assert.assertEquals(1, info.size());
            PerDatanodeVisitorInfo vinfo = info.get(datanode);
            Assert.assertTrue(vinfo.disabled);
            Assert.assertEquals(0, vinfo.full.size());
            Assert.assertEquals(0, vinfo.notFull.size());
        }
    });
    cluster.shutdown();
    sockDir.close();
}
Also used : Path (org.apache.hadoop.fs.Path) DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) Configuration (org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) Visitor (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor) IOException (java.io.IOException) DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory) PerDatanodeVisitorInfo (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo) Test (org.junit.Test)
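
createShortCircuitConf is a helper defined elsewhere in TestBlockReaderFactory. Roughly, a short-circuit-read client configuration needs the standard keys sketched below; this is an assumption about the helper's contents, not its exact code. The serverConf override (watcher interrupt check interval of 0) is what disables shared-memory support on the DataNode side.

// Sketch only: assumed approximation of createShortCircuitConf.
Configuration conf = new Configuration();
conf.setBoolean("dfs.client.read.shortcircuit", true);
conf.set("dfs.domain.socket.path",
    new File(sockDir.getDir(), "scr._PORT.sock").getAbsolutePath());
conf.set("dfs.client.context", "testShortCircuitReadFromServerWithoutShm");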

Example 10 with DatanodeInfoBuilder

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder in the Apache hadoop project.

From class PBHelperClient, method convert.

public static DatanodeInfo convert(DatanodeInfoProto di) {
    if (di == null) {
        return null;
    }
    DatanodeInfoBuilder dinfo = new DatanodeInfoBuilder()
        .setNodeID(convert(di.getId()))
        .setNetworkLocation(di.hasLocation() ? di.getLocation() : null)
        .setCapacity(di.getCapacity())
        .setDfsUsed(di.getDfsUsed())
        .setRemaining(di.getRemaining())
        .setBlockPoolUsed(di.getBlockPoolUsed())
        .setCacheCapacity(di.getCacheCapacity())
        .setCacheUsed(di.getCacheUsed())
        .setLastUpdate(di.getLastUpdate())
        .setLastUpdateMonotonic(di.getLastUpdateMonotonic())
        .setXceiverCount(di.getXceiverCount())
        .setAdminState(convert(di.getAdminState()))
        .setUpgradeDomain(di.hasUpgradeDomain() ? di.getUpgradeDomain() : null)
        .setLastBlockReportTime(di.hasLastBlockReportTime() ? di.getLastBlockReportTime() : 0)
        .setLastBlockReportMonotonic(di.hasLastBlockReportMonotonic() ? di.getLastBlockReportMonotonic() : 0);
    if (di.hasNonDfsUsed()) {
        dinfo.setNonDfsUsed(di.getNonDfsUsed());
    } else {
        // use the legacy way for older datanodes
        long nonDFSUsed = di.getCapacity() - di.getDfsUsed() - di.getRemaining();
        dinfo.setNonDfsUsed(nonDFSUsed < 0 ? 0 : nonDFSUsed);
    }
    return dinfo.build();
}
Also used : DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder)
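
The else branch explains the final assertion in Example 6: when the proto omits nonDfsUsed, the decoder derives it from the other counters and clamps it at zero. Worked through with Example 6's values:

// capacity - dfsUsed - remaining
long nonDFSUsed = 3500L - 1000L - 500L;  // = 2000L, matching dnInfos0.getNonDfsUsed()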

Aggregations

DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 15
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 11
Test (org.junit.Test): 6
IOException (java.io.IOException): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
Path (org.apache.hadoop.fs.Path): 3
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
PerDatanodeVisitorInfo (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo): 3
Visitor (org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor): 3
TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory): 3
InetSocketAddress (java.net.InetSocketAddress): 2
Socket (java.net.Socket): 2
HashMap (java.util.HashMap): 2
MutableBoolean (org.apache.commons.lang.mutable.MutableBoolean): 2
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 2
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 2
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 2