
Example 11 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

From the class TestReplaceDatanodeOnFailure, method testDefaultPolicy.

/** Test DEFAULT ReplaceDatanodeOnFailure policy. */
@Test
public void testDefaultPolicy() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.get(conf);
    final DatanodeInfo[] infos = new DatanodeInfo[5];
    final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
    datanodes[0] = new DatanodeInfo[0];
    for (int i = 0; i < infos.length; ) {
        infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
        i++;
        datanodes[i] = new DatanodeInfo[i];
        System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
    }
    final boolean[] isAppend = { true, true, false, false };
    final boolean[] isHflushed = { true, false, true, false };
    for (short replication = 1; replication <= infos.length; replication++) {
        for (int nExistings = 0; nExistings < datanodes.length; nExistings++) {
            final DatanodeInfo[] existings = datanodes[nExistings];
            Assert.assertEquals(nExistings, existings.length);
            for (int i = 0; i < isAppend.length; i++) {
                for (int j = 0; j < isHflushed.length; j++) {
                    final int half = replication / 2;
                    final boolean enoughReplica = replication <= nExistings;
                    final boolean noReplica = nExistings == 0;
                    final boolean replicationL3 = replication < 3;
                    final boolean existingsLEhalf = nExistings <= half;
                    final boolean isAH = isAppend[i] || isHflushed[j];
                    final boolean expected;
                    if (enoughReplica || noReplica || replicationL3) {
                        expected = false;
                    } else {
                        expected = isAH || existingsLEhalf;
                    }
                    final boolean computed = p.satisfy(replication, existings, isAppend[i], isHflushed[j]);
                    try {
                        Assert.assertEquals(expected, computed);
                    } catch (AssertionError e) {
                        final String s = "replication=" + replication + "\nnExistings =" + nExistings + "\nisAppend   =" + isAppend[i] + "\nisHflushed =" + isHflushed[j];
                        throw new RuntimeException(s, e);
                    }
                }
            }
        }
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) ReplaceDatanodeOnFailure(org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure) Test(org.junit.Test)
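
For reference, the DEFAULT policy that the test's expected value encodes only asks for a replacement datanode when replication is at least 3, there are still some (but fewer than replication) datanodes in the pipeline, and either that count is at most replication/2 or the stream is an append or has been hflushed. Below is a minimal, hedged sketch of selecting and querying that policy from client code; the string configuration keys are the standard replace-datanode-on-failure keys (recent Hadoop versions also expose them as constants under HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure), and the port number is arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;

public class ReplaceDatanodePolicySketch {
    public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Enable the feature and pick the DEFAULT policy (alternatives: NEVER, ALWAYS).
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Resolve the policy the same way the test does, then query it for a sample pipeline:
        // replication 3, one datanode left, not an append, already hflushed.
        ReplaceDatanodeOnFailure policy = ReplaceDatanodeOnFailure.get(conf);
        DatanodeInfo[] existing = { DFSTestUtil.getLocalDatanodeInfo(9867) };
        boolean replace = policy.satisfy((short) 3, existing, false, true);
        System.out.println("replace failed datanode? " + replace);
    }
}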

Example 12 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

From the class TestReplication, method checkFile.

/* Check that block replicas are spread across racks: at least two replicas on the same rack and at least two on different racks. */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(), ClientProtocol.class).getProxy();
    waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
    // verify that rack locations match
    assertTrue(blockLocations.length == locations.locatedBlockCount());
    for (int i = 0; i < blockLocations.length; i++) {
        LocatedBlock blk = locations.get(i);
        DatanodeInfo[] datanodes = blk.getLocations();
        String[] topologyPaths = blockLocations[i].getTopologyPaths();
        assertTrue(topologyPaths.length == datanodes.length);
        for (int j = 0; j < topologyPaths.length; j++) {
            boolean found = false;
            for (int k = 0; k < racks.length; k++) {
                if (topologyPaths[j].startsWith(racks[k])) {
                    found = true;
                    break;
                }
            }
            assertTrue(found);
        }
    }
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
        DatanodeInfo[] datanodes = blk.getLocations();
        if (datanodes.length <= 1)
            break;
        if (datanodes.length == 2) {
            isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        for (int i = 0; i < datanodes.length - 1; i++) {
            LOG.info("datanode " + i + ": " + datanodes[i]);
            boolean onRack = false;
            for (int j = i + 1; j < datanodes.length; j++) {
                if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
                    onRack = true;
                }
            }
            if (onRack) {
                isOnSameRack = true;
            }
            if (!onRack) {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
                break;
        }
        if (!isOnSameRack || !isNotOnSameRack)
            break;
    }
    assertTrue(isOnSameRack);
    assertTrue(isNotOnSameRack);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockLocation(org.apache.hadoop.fs.BlockLocation) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
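
The same rack information is also available through the public FileSystem API alone, without going through ClientProtocol. A short sketch, assuming an already-configured client and a hypothetical replicated file path; BlockLocation.getTopologyPaths() returns one /rack/host:port style string per replica, parallel to getHosts().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TopologyPathsSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Hypothetical path to a file with replication >= 2.
        Path file = new Path("/some/replicated/file");
        FileStatus stat = fs.getFileStatus(file);
        BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0L, stat.getLen());
        for (BlockLocation block : blocks) {
            // One topology path per replica of this block.
            for (String topologyPath : block.getTopologyPaths()) {
                System.out.println("offset " + block.getOffset() + " -> " + topologyPath);
            }
        }
        fs.close();
    }
}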

Example 13 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

From the class BlockReaderTestUtil, method getBlockReader.

/**
   * Get a BlockReader for the given block.
   */
public static BlockReader getBlockReader(final DistributedFileSystem fs, LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
    InetSocketAddress targetAddr = null;
    ExtendedBlock block = testBlock.getBlock();
    DatanodeInfo[] nodes = testBlock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    return new BlockReaderFactory(fs.getClient().getConf())
        .setInetSocketAddress(targetAddr)
        .setBlock(block)
        .setFileName(targetAddr.toString() + ":" + block.getBlockId())
        .setBlockToken(testBlock.getBlockToken())
        .setStartOffset(offset)
        .setLength(lenToRead)
        .setVerifyChecksum(true)
        .setClientName("BlockReaderTestUtil")
        .setDatanodeInfo(nodes[0])
        .setClientCacheContext(ClientContext.getFromConf(fs.getConf()))
        .setCachingStrategy(CachingStrategy.newDefaultStrategy())
        .setConfiguration(fs.getConf())
        .setAllowShortCircuitLocalReads(true)
        .setTracer(FsTracer.get(fs.getConf()))
        .setRemotePeerFactory(new RemotePeerFactory() {

        @Override
        public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(fs.getConf()).createSocket();
            try {
                sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
                if (peer == null) {
                    IOUtils.closeQuietly(sock);
                }
            }
            return peer;
        }
    }).build();
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) InetSocketAddress(java.net.InetSocketAddress) Peer(org.apache.hadoop.hdfs.net.Peer) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Token(org.apache.hadoop.security.token.Token) RemotePeerFactory(org.apache.hadoop.hdfs.RemotePeerFactory) Socket(java.net.Socket)
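
A hedged usage sketch for the helper above: start a single-datanode cluster, write a small file, open a BlockReader on its first block, and read a little data. The file path and seed are made up for illustration; DFSTestUtil.createFile() and DFSTestUtil.getAllBlocks() are the test helpers used elsewhere in these examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderTestUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class BlockReaderUsageSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem fs = cluster.getFileSystem();
            Path file = new Path("/test_file");  // hypothetical test file
            DFSTestUtil.createFile(fs, file, 4096, (short) 1, 0xCAFE);
            // Open a reader on the first block and pull out the first kilobyte.
            LocatedBlock firstBlock = DFSTestUtil.getAllBlocks(fs, file).get(0);
            BlockReader reader = BlockReaderTestUtil.getBlockReader(fs, firstBlock, 0, 1024);
            byte[] buf = new byte[1024];
            int n = reader.read(buf, 0, buf.length);
            System.out.println("read " + n + " bytes from " + firstBlock.getBlock());
            reader.close();
        } finally {
            cluster.shutdown();
        }
    }
}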

Example 14 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

From the class TestBlockReaderFactory, method testShortCircuitReadFromServerWithoutShm.

/**
   * Test that a client which supports short-circuit reads using
   * shared memory can fall back to not using shared memory when
   * the server doesn't support it.
   */
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration clientConf = createShortCircuitConf("testShortCircuitReadFromServerWithoutShm", sockDir);
    Configuration serverConf = new Configuration(clientConf);
    serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
    cluster.waitActive();
    clientConf.set(DFS_CLIENT_CONTEXT, "testShortCircuitReadFromServerWithoutShm_clientContext");
    final DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADEC;
    DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
    Assert.assertTrue(Arrays.equals(contents, expected));
    final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
    final DatanodeInfo datanode = new DatanodeInfoBuilder().setNodeID(cluster.getDataNodes().get(0).getDatanodeId()).build();
    cache.getDfsClientShmManager().visit(new Visitor() {

        @Override
        public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info) throws IOException {
            Assert.assertEquals(1, info.size());
            PerDatanodeVisitorInfo vinfo = info.get(datanode);
            Assert.assertTrue(vinfo.disabled);
            Assert.assertEquals(0, vinfo.full.size());
            Assert.assertEquals(0, vinfo.notFull.size());
        }
    });
    cluster.shutdown();
    sockDir.close();
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) Visitor(org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.Visitor) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ShortCircuitCache(org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache) TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) PerDatanodeVisitorInfo(org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager.PerDatanodeVisitorInfo) Test(org.junit.Test)
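
The createShortCircuitConf() helper is not shown here; the following is only a sketch of the kind of client configuration it is expected to build, assuming the standard short-circuit read keys (dfs.client.read.shortcircuit, dfs.domain.socket.path, dfs.client.context). The _PORT token in the socket path is expanded per datanode.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ShortCircuitConfSketch {
    public static Configuration createConf(String testName, File sockDir) {
        Configuration conf = new HdfsConfiguration();
        // Let the client read local replicas directly from disk (short-circuit reads).
        conf.setBoolean("dfs.client.read.shortcircuit", true);
        // UNIX domain socket shared by client and datanode; _PORT is substituted per datanode.
        conf.set("dfs.domain.socket.path",
            new File(sockDir, testName + "._PORT").getAbsolutePath());
        // Separate client context so short-circuit caches are not shared across tests.
        conf.set("dfs.client.context", testName + "_clientContext");
        return conf;
    }
}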

Example 15 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

From the class TestSetTimes, method testTimesAtClose.

/**
   * Tests mod time change at close in DFS.
   */
@Test
public void testTimesAtClose() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // 2s
    final int MAX_IDLE_TIME = 2000;
    int replicas = 1;
    // parameter initialization
    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", numDatanodes, info.length);
    FileSystem fileSys = cluster.getFileSystem();
    assertTrue(fileSys instanceof DistributedFileSystem);
    try {
        // create a new file and write to it
        Path file1 = new Path("/simple.dat");
        FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
        System.out.println("Created and wrote file simple.dat");
        FileStatus statBeforeClose = fileSys.getFileStatus(file1);
        long mtimeBeforeClose = statBeforeClose.getModificationTime();
        String mdateBeforeClose = dateForm.format(new Date(mtimeBeforeClose));
        System.out.println("mtime on " + file1 + " before close is " + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
        assertTrue(mtimeBeforeClose != 0);
        //close file after writing
        stm.close();
        System.out.println("Closed file.");
        FileStatus statAfterClose = fileSys.getFileStatus(file1);
        long mtimeAfterClose = statAfterClose.getModificationTime();
        String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
        System.out.println("mtime on " + file1 + " after close is " + mdateAfterClose + " (" + mtimeAfterClose + ")");
        assertTrue(mtimeAfterClose != 0);
        assertTrue(mtimeBeforeClose != mtimeAfterClose);
        cleanupFile(fileSys, file1);
    } catch (IOException e) {
        info = client.datanodeReport(DatanodeReportType.ALL);
        printDatanodeReport(info);
        throw e;
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) IOException(java.io.IOException) Date(java.util.Date) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
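
Modification and access times can also be read and set directly through the public API. A short sketch, assuming a file already exists at the hypothetical path /simple.dat; FileSystem.setTimes() takes the new mtime and atime in milliseconds, with -1 meaning leave that field unchanged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetTimesSketch {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/simple.dat");  // hypothetical existing file
        FileStatus before = fs.getFileStatus(file);
        System.out.println("mtime=" + before.getModificationTime()
            + " atime=" + before.getAccessTime());
        // Bump the modification time; -1 keeps the current access time.
        fs.setTimes(file, System.currentTimeMillis(), -1);
        FileStatus after = fs.getFileStatus(file);
        System.out.println("new mtime=" + after.getModificationTime());
        fs.close();
    }
}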

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214
Test (org.junit.Test): 103
Path (org.apache.hadoop.fs.Path): 91
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73
IOException (java.io.IOException): 47
FileSystem (org.apache.hadoop.fs.FileSystem): 44
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43
ArrayList (java.util.ArrayList): 39
Configuration (org.apache.hadoop.conf.Configuration): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25
InetSocketAddress (java.net.InetSocketAddress): 20
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20
StorageType (org.apache.hadoop.fs.StorageType): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14