
Example 96 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class TestDataNodeErasureCodingMetrics, method doTest.

private void doTest(String fileName, int fileLen, int deadNodeIndex) throws Exception {
    assertTrue(fileLen > 0);
    assertTrue(deadNodeIndex >= 0 && deadNodeIndex < numDNs);
    Path file = new Path(fileName);
    final byte[] data = StripedFileTestUtil.generateBytes(fileLen);
    DFSTestUtil.writeFile(fs, file, data);
    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);
    final LocatedBlocks locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs);
    final LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    assertTrue(lastBlock.getLocations().length > deadNodeIndex);
    final DataNode toCorruptDn = cluster.getDataNode(lastBlock.getLocations()[deadNodeIndex].getIpcPort());
    LOG.info("Datanode to be corrupted: " + toCorruptDn);
    assertNotNull("Failed to find a datanode to be corrupted", toCorruptDn);
    toCorruptDn.shutdown();
    setDataNodeDead(toCorruptDn.getDatanodeId());
    DFSTestUtil.waitForDatanodeState(cluster, toCorruptDn.getDatanodeUuid(), false, 10000);
    final int workCount = getComputedDatanodeWork();
    assertTrue("Wrongly computed block reconstruction work", workCount > 0);
    cluster.triggerHeartbeats();
    // Expected total blocks: each full block group contributes
    // groupSize (data + parity) blocks.
    int totalBlocks = (fileLen / blockGroupSize) * groupSize;
    final int remainder = fileLen % blockGroupSize;
    // A partial trailing group still carries a full set of parity blocks.
    totalBlocks += (remainder == 0) ? 0
        : (remainder % blockSize == 0)
            ? remainder / blockSize + parityBlocks
            : remainder / blockSize + 1 + parityBlocks;
    StripedFileTestUtil.waitForAllReconstructionFinished(file, fs, totalBlocks);
}
Also used: Path (org.apache.hadoop.fs.Path), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)
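
The expected-block arithmetic above is the subtle part of this test: every full block group contributes data plus parity blocks, and a partial trailing group still carries a full set of parity blocks. A minimal standalone sketch of the same computation follows; the class name and the sample sizes in main are illustrative, not taken from the test, and an RS-6-3 layout is assumed.

// Hedged sketch of the expected-block computation from doTest above.
// The policy parameters mirror the RS-6-3 default; the sizes are examples.
public class ExpectedBlockCount {

    static int expectedBlocks(int fileLen, int blockSize,
                              int dataBlocks, int parityBlocks) {
        final int blockGroupSize = blockSize * dataBlocks;
        final int groupSize = dataBlocks + parityBlocks;
        // Full block groups: dataBlocks + parityBlocks blocks apiece.
        int totalBlocks = (fileLen / blockGroupSize) * groupSize;
        final int remainder = fileLen % blockGroupSize;
        if (remainder != 0) {
            // Round the leftover bytes up to whole data blocks, then add
            // the full parity set that even a partial group carries.
            int partialDataBlocks = (remainder + blockSize - 1) / blockSize;
            totalBlocks += partialDataBlocks + parityBlocks;
        }
        return totalBlocks;
    }

    public static void main(String[] args) {
        // One full group plus half a block: 9 + (1 + 3) = 13 blocks.
        System.out.println(expectedBlocks(6 * 1024 + 512, 1024, 6, 3));
    }
}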

Example 97 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class TestBlockHasMultipleReplicasOnSameDN, method testBlockHasMultipleReplicasOnSameDN.

/**
   * Verify NameNode behavior when a given DN reports multiple replicas
   * of a given block.
   */
@Test
public void testBlockHasMultipleReplicasOnSameDN() throws IOException {
    String filename = makeFileName(GenericTestUtils.getMethodName());
    Path filePath = new Path(filename);
    // Write out a file with a few blocks.
    DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS, BLOCK_SIZE, NUM_DATANODES, seed);
    // Get the block list for the file with the block locations.
    LocatedBlocks locatedBlocks = client.getLocatedBlocks(filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
    // Generate a fake block report from one of the DataNodes, such
    // that it reports one copy of each block on either storage.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration dnReg = dn.getDNRegistrationForBP(bpid);
    StorageBlockReport[] reports = new StorageBlockReport[cluster.getStoragesPerDatanode()];
    ArrayList<ReplicaInfo> blocks = new ArrayList<>();
    for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
        Block localBlock = locatedBlock.getBlock().getLocalBlock();
        blocks.add(new FinalizedReplica(localBlock, null, null));
    }
    Collections.sort(blocks);
    try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset().getFsVolumeReferences()) {
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        for (int i = 0; i < cluster.getStoragesPerDatanode(); ++i) {
            DatanodeStorage dns = new DatanodeStorage(volumes.get(i).getStorageID());
            reports[i] = new StorageBlockReport(dns, bll);
        }
    }
    // Should not assert!
    cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    // Get the block locations once again.
    locatedBlocks = client.getLocatedBlocks(filename, 0, BLOCK_SIZE * NUM_BLOCKS);
    // Make sure that each block has two replicas, one on each DataNode.
    for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
        DatanodeInfo[] locations = locatedBlock.getLocations();
        assertThat(locations.length, is((int) NUM_DATANODES));
        assertThat(locations[0].getDatanodeUuid(), not(locations[1].getDatanodeUuid()));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
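
Outside a MiniDFSCluster fixture, the same location data can be pulled straight from a live cluster. A minimal sketch follows, assuming fs.defaultFS points at an HDFS cluster; the class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class PrintBlockLocations {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes the default filesystem is HDFS; the cast fails otherwise.
        try (DistributedFileSystem dfs =
                (DistributedFileSystem) new Path("/").getFileSystem(conf)) {
            LocatedBlocks lbs = dfs.getClient()
                .getLocatedBlocks(args[0], 0, Long.MAX_VALUE);
            for (LocatedBlock lb : lbs.getLocatedBlocks()) {
                System.out.println("block " + lb.getBlock().getBlockName());
                for (DatanodeInfo dn : lb.getLocations()) {
                    // One line per replica, naming the DataNode holding it.
                    System.out.println("  replica on " + dn.getXferAddr());
                }
            }
        }
    }
}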

Example 98 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class TestDataNodeRollingUpgrade, method getBlockForFile.

/** Test assumes that the file has a single block */
private File getBlockForFile(Path path, boolean exists) throws IOException {
    LocatedBlocks blocks = nn.getRpcServer().getBlockLocations(path.toString(), 0, Long.MAX_VALUE);
    assertEquals("The test helper functions assume that each file has a single block", 1, blocks.getLocatedBlocks().size());
    ExtendedBlock block = blocks.getLocatedBlocks().get(0).getBlock();
    BlockLocalPathInfo bInfo = dn0.getFSDataset().getBlockLocalPathInfo(block);
    File blockFile = new File(bInfo.getBlockPath());
    assertEquals(exists, blockFile.exists());
    return blockFile;
}
Also used: LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), File (java.io.File)
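
Since the helper deliberately assumes a single block, a multi-block variant under the same fixture assumptions (the nn and dn0 fields of the test class) is a natural extension. The method name below is hypothetical:

// Hedged sketch: collect the local replica file for every block of the
// file, rather than asserting a single block as getBlockForFile does.
private List<File> getBlockFilesForFile(Path path) throws IOException {
    LocatedBlocks blocks =
        nn.getRpcServer().getBlockLocations(path.toString(), 0, Long.MAX_VALUE);
    List<File> blockFiles = new ArrayList<>();
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
        // Ask the DataNode for the local path of its replica of this block.
        BlockLocalPathInfo bInfo =
            dn0.getFSDataset().getBlockLocalPathInfo(lb.getBlock());
        blockFiles.add(new File(bInfo.getBlockPath()));
    }
    return blockFiles;
}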

Example 99 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class TestMover, method testMoverWithStripedFile.

@Test(timeout = 300000)
public void testMoverWithStripedFile() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConfWithStripe(conf);
    // start 10 datanodes
    int numOfDatanodes = 10;
    int storagesPerDatanode = 2;
    long capacity = 10 * defaultBlockSize;
    long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
    for (int i = 0; i < numOfDatanodes; i++) {
        for (int j = 0; j < storagesPerDatanode; j++) {
            capacities[i][j] = capacity;
        }
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, StripedFileTestUtil.getDefaultECPolicy().getName());
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numOfDatanodes)
        .storagesPerDatanode(storagesPerDatanode)
        .storageTypes(new StorageType[][] {
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.DISK },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE },
            { StorageType.DISK, StorageType.ARCHIVE } })
        .storageCapacities(capacities)
        .build();
    try {
        cluster.waitActive();
        // set "/bar" directory with HOT storage policy.
        ClientProtocol client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
        String barDir = "/bar";
        // 0777 is octal for rwxrwxrwx.
        client.mkdirs(barDir, new FsPermission((short) 0777), true);
        client.setStoragePolicy(barDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
        // set an EC policy on "/bar" directory
        client.setErasureCodingPolicy(barDir, StripedFileTestUtil.getDefaultECPolicy().getName());
        // write file to barDir
        final String fooFile = "/bar/foo";
        long fileLen = 20 * defaultBlockSize;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile), fileLen, (short) 3, 0);
        // verify storage types and locations
        LocatedBlocks locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.DISK, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5,
            new StorageType[][] {
                { StorageType.ARCHIVE, StorageType.ARCHIVE },
                { StorageType.ARCHIVE, StorageType.ARCHIVE },
                { StorageType.ARCHIVE, StorageType.ARCHIVE },
                { StorageType.ARCHIVE, StorageType.ARCHIVE },
                { StorageType.ARCHIVE, StorageType.ARCHIVE } },
            true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file to ARCHIVE
        client.setStoragePolicy(barDir, "COLD");
        // run Mover
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
        // verify storage types and locations
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
        StripedFileTestUtil.verifyLocatedStripedBlocks(locatedBlocks, dataBlocks + parityBlocks);
        // start 5 more datanodes
        numOfDatanodes += 5;
        capacities = new long[5][storagesPerDatanode];
        for (int i = 0; i < 5; i++) {
            for (int j = 0; j < storagesPerDatanode; j++) {
                capacities[i][j] = capacity;
            }
        }
        cluster.startDataNodes(conf, 5,
            new StorageType[][] {
                { StorageType.SSD, StorageType.DISK },
                { StorageType.SSD, StorageType.DISK },
                { StorageType.SSD, StorageType.DISK },
                { StorageType.SSD, StorageType.DISK },
                { StorageType.SSD, StorageType.DISK } },
            true, null, null, null, capacities, null, false, false, false, null);
        cluster.triggerHeartbeats();
        // move file blocks to ONE_SSD policy
        client.setStoragePolicy(barDir, "ONE_SSD");
        // run Mover
        rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", barDir });
        // verify storage types and locations
        // Movements should have been ignored: the ONE_SSD policy is not
        // supported for striped files, so the blocks remain on ARCHIVE.
        locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            for (StorageType type : lb.getStorageTypes()) {
                Assert.assertEquals(StorageType.ARCHIVE, type);
            }
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FsPermission (org.apache.hadoop.fs.permission.FsPermission), ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol), Test (org.junit.Test)
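
The storage-type verification loop appears three times in this test. A small helper like the following sketch (not part of the original test) would factor it out:

// Hedged sketch: assert that every replica of every block in the file
// sits on the expected storage tier.
private static void assertAllStorageTypes(LocatedBlocks locatedBlocks,
                                          StorageType expected) {
    for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        for (StorageType type : lb.getStorageTypes()) {
            Assert.assertEquals(expected, type);
        }
    }
}

Each verification step then collapses to a single call, for example assertAllStorageTypes(client.getBlockLocations(fooFile, 0, fileLen), StorageType.ARCHIVE).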

Example 100 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class TestStorageMover, method testMigrateOpenFileToArchival.

/**
   * Move an open file into archival storage
   */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
    LOG.info("testMigrateOpenFileToArchival");
    final Path fooDir = new Path("/foo");
    Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
    policyMap.put(fooDir, COLD);
    NamespaceScheme nsScheme = new NamespaceScheme(Arrays.asList(fooDir), null, BLOCK_SIZE, null, policyMap);
    ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF, NUM_DATANODES, REPL, genStorageTypes(NUM_DATANODES), null);
    MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
    test.setupCluster();
    // create an open file
    banner("writing to file /foo/bar");
    final Path barFile = new Path(fooDir, "bar");
    DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
    FSDataOutputStream out = test.dfs.append(barFile);
    out.writeBytes("hello, ");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    try {
        banner("start data migration");
        // set /foo to COLD
        test.setStoragePolicy();
        test.migrate(ExitStatus.SUCCESS);
        // make sure the under construction block has not been migrated
        LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        List<LocatedBlock> blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish the migration, continue writing");
        // make sure the writing can continue
        out.writeBytes("world!");
        ((DFSOutputStream) out.getWrappedStream()).hsync();
        IOUtils.cleanup(LOG, out);
        lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
        LOG.info("Locations: " + lbs);
        blks = lbs.getLocatedBlocks();
        Assert.assertEquals(1, blks.size());
        Assert.assertEquals(1, blks.get(0).getLocations().length);
        banner("finish writing, starting reading");
        // check the content of /foo/bar
        FSDataInputStream in = test.dfs.open(barFile);
        byte[] buf = new byte[13];
        // read back the 13 appended bytes ("hello, world!") at offset BLOCK_SIZE
        in.readFully(BLOCK_SIZE, buf, 0, buf.length);
        IOUtils.cleanup(LOG, in);
        Assert.assertEquals("hello, world!", new String(buf));
    } finally {
        test.shutdownCluster();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DFSOutputStream (org.apache.hadoop.hdfs.DFSOutputStream), Test (org.junit.Test)
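
LocatedBlocks also carries the open-file state directly, so the "not yet migrated" assertions above could be paired with an explicit under-construction check. A short sketch under the same fixture assumptions:

// Hedged sketch: confirm the appended file is still open and its last
// block incomplete, using the flags exposed by LocatedBlocks itself.
LocatedBlocks lbs = test.dfs.getClient()
        .getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
Assert.assertTrue("file should still be under construction",
        lbs.isUnderConstruction());
Assert.assertFalse("last block should not be complete yet",
        lbs.isLastBlockComplete());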

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 118 usages
Test (org.junit.Test): 67 usages
Path (org.apache.hadoop.fs.Path): 65 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 52 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 33 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 32 usages
Configuration (org.apache.hadoop.conf.Configuration): 29 usages
IOException (java.io.IOException): 20 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 20 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 20 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 18 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 17 usages
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 17 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 13 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 11 usages
InetSocketAddress (java.net.InetSocketAddress): 10 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 9 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 7 usages
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7 usages