
Example 11 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestDiskError method testReplicationError.

/**
   * Test that when there is a failure replicating a block the temporary
   * and meta files are cleaned up and subsequent replication succeeds.
   */
@Test
public void testReplicationError() throws Exception {
    // create a file with a replication factor of 1
    final Path fileName = new Path("/test.txt");
    final int fileLen = 1;
    DFSTestUtil.createFile(fs, fileName, 1, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 1);
    // get the block belonging to the created file
    LocatedBlocks blocks = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), fileName.toString(), 0, (long) fileLen);
    assertEquals("Should only find 1 block", blocks.locatedBlockCount(), 1);
    LocatedBlock block = blocks.get(0);
    // bring up a second datanode
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive();
    final int sndNode = 1;
    DataNode datanode = cluster.getDataNodes().get(sndNode);
    FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(datanode);
    // replicate the block to the second datanode
    InetSocketAddress target = datanode.getXferAddress();
    Socket s = new Socket(target.getAddress(), target.getPort());
    // write the header.
    DataOutputStream out = new DataOutputStream(s.getOutputStream());
    DataChecksum checksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
    new Sender(out).writeBlock(block.getBlock(), StorageType.DEFAULT, BlockTokenSecretManager.DUMMY_TOKEN, "", new DatanodeInfo[0], new StorageType[0], null, BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L, checksum, CachingStrategy.newDefaultStrategy(), false, false, null);
    out.flush();
    // close the connection before sending the content of the block
    out.close();
    // the temporary block & meta files should be deleted
    String bpid = cluster.getNamesystem().getBlockPoolId();
    while (utils.getStoredReplicas(bpid).hasNext()) {
        Thread.sleep(100);
    }
    // then increase the file's replication factor
    fs.setReplication(fileName, (short) 2);
    // replication should succeed
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);
    // clean up the file
    fs.delete(fileName, false);
}
Also used : Path(org.apache.hadoop.fs.Path) Sender(org.apache.hadoop.hdfs.protocol.datatransfer.Sender) InetSocketAddress(java.net.InetSocketAddress) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Socket(java.net.Socket) DataChecksum(org.apache.hadoop.util.DataChecksum) Test(org.junit.Test)
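
The test body above relies on cluster, fs, and conf fields that TestDiskError initializes elsewhere. A minimal setup sketch, assuming a single-datanode MiniDFSCluster; the field names and configuration here are an assumption, not copied from the original class:

@Before
public void setUp() throws Exception {
    // Hypothetical setup; the real TestDiskError may tune additional config keys.
    conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
}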

Example 12 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestReadOnlySharedStorage method getLocatedBlock.

private LocatedBlock getLocatedBlock() throws IOException {
    LocatedBlocks locatedBlocks = client.getLocatedBlocks(PATH.toString(), 0, BLOCK_SIZE);
    assertThat(locatedBlocks.getLocatedBlocks().size(), is(1));
    return Iterables.getOnlyElement(locatedBlocks.getLocatedBlocks());
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks)
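
Callers of this helper typically inspect where the single replica of PATH lives; a hedged usage sketch (the surrounding test state is assumed):

// Sketch: print the datanodes reported for the only block of PATH.
LocatedBlock block = getLocatedBlock();
for (DatanodeInfo location : block.getLocations()) {
    System.out.println("Replica reported on " + location.getXferAddr());
}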

Example 13 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class FSNamesystem method getBlockLocations.

/**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   */
LocatedBlocks getBlockLocations(String clientMachine, String srcArg, long offset, long length) throws IOException {
    final String operationName = "open";
    checkOperation(OperationCategory.READ);
    GetBlockLocationsResult res = null;
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        res = FSDirStatAndListingOp.getBlockLocations(dir, pc, srcArg, offset, length, true);
        if (isInSafeMode()) {
            for (LocatedBlock b : res.blocks.getLocatedBlocks()) {
                // if safemode & no block locations yet then throw safemodeException
                if ((b.getLocations() == null) || (b.getLocations().length == 0)) {
                    SafeModeException se = newSafemodeException("Zero blocklocations for " + srcArg);
                    if (haEnabled && haContext != null && haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
                        throw new RetriableException(se);
                    } else {
                        throw se;
                    }
                }
            }
        }
    } catch (AccessControlException e) {
        logAuditEvent(false, operationName, srcArg);
        throw e;
    } finally {
        readUnlock(operationName);
    }
    logAuditEvent(true, operationName, srcArg);
    if (!isInSafeMode() && res.updateAccessTime()) {
        String src = srcArg;
        writeLock();
        final long now = now();
        try {
            checkOperation(OperationCategory.WRITE);
            /**
         * Resolve the path again and update the atime only when the file
         * exists.
         *
         * XXX: Races can still occur even after resolving the path again.
         * For example:
         *
         * <ul>
         *   <li>Get the block location for "/a/b"</li>
         *   <li>Rename "/a/b" to "/c/b"</li>
         *   <li>The second resolution still points to "/a/b", which is
         *   wrong.</li>
         * </ul>
         *
         * The behavior is incorrect but consistent with the one before
         * HDFS-7463. A better fix is to change the edit log of SetTime to
         * use inode id instead of a path.
         */
            final INodesInPath iip = dir.resolvePath(pc, srcArg, DirOp.READ);
            src = iip.getPath();
            INode inode = iip.getLastINode();
            boolean updateAccessTime = inode != null && now > inode.getAccessTime() + dir.getAccessTimePrecision();
            if (!isInSafeMode() && updateAccessTime) {
                boolean changed = FSDirAttrOp.setTimes(dir, iip, -1, now, false);
                if (changed) {
                    getEditLog().logTimes(src, -1, now);
                }
            }
        } catch (Throwable e) {
            LOG.warn("Failed to update the access time of " + src, e);
        } finally {
            writeUnlock(operationName);
        }
    }
    LocatedBlocks blocks = res.blocks;
    sortLocatedBlocks(clientMachine, blocks);
    return blocks;
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) AccessControlException(org.apache.hadoop.security.AccessControlException) SnapshotAccessControlException(org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException) RetriableException(org.apache.hadoop.ipc.RetriableException)
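
On the client side this method is reached through ClientProtocol#getBlockLocations, which DFSClient wraps; a hedged sketch of a typical caller, assuming an already-initialized DFSClient named dfsClient and a hypothetical file path:

// Sketch: the client-side call that eventually lands in FSNamesystem.getBlockLocations.
LocatedBlocks blocks = dfsClient.getLocatedBlocks("/some/file", 0L, Long.MAX_VALUE);
for (LocatedBlock b : blocks.getLocatedBlocks()) {
    System.out.println(b.getBlock() + " -> " + java.util.Arrays.toString(b.getLocations()));
}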

Example 14 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestFileAppend4 method testAppendInsufficientLocations.

/**
   * Test that an append with no locations fails with an exception
   * showing insufficient locations.
   */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();
    // lower heartbeat interval for fast recognition of DN
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
        // create a file with replication 2
        fileSystem = cluster.getFileSystem();
        Path f = new Path("/testAppend");
        FSDataOutputStream create = fileSystem.create(f, (short) 2);
        create.write("/testAppend".getBytes());
        create.close();
        // Check for replications
        DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
        // Shut down all DNs that have the last block location for the file
        LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
        List<DataNode> dnsOfCluster = cluster.getDataNodes();
        DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
        for (DataNode dn : dnsOfCluster) {
            for (DatanodeInfo loc : dnsWithLocations) {
                if (dn.getDatanodeId().equals(loc)) {
                    dn.shutdown();
                    DFSTestUtil.waitForDatanodeDeath(dn);
                }
            }
        }
        // Wait till 0 replication is recognized
        DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
        // Append to the file; the remaining live DNs do not have the block.
        try {
            fileSystem.append(f);
            fail("Append should fail because insufficient locations");
        } catch (IOException e) {
            LOG.info("Expected exception: ", e);
        }
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
        assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDirectory(org.apache.hadoop.hdfs.server.namenode.FSDirectory) IOException(java.io.IOException) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
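
The shutdown loop above matches cluster DataNodes to the reported locations by comparing DatanodeIDs; when debugging that matching, a hedged sketch like the following (variable names taken from the test) can log what the NameNode reported for the last block:

// Sketch: log which datanodes hold the file's last block before shutting them down.
LocatedBlock last = lbs.getLastLocatedBlock();
for (DatanodeInfo info : last.getLocations()) {
    LOG.info("Last block replica on " + info.getXferAddr());
}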

Example 15 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestFileAppend method testAppend2AfterSoftLimit.

/** Tests appending after soft-limit expires. */
@Test
public void testAppend2AfterSoftLimit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    //Set small soft-limit for lease
    final long softLimit = 1L;
    final long hardLimit = 9999999L;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.setLeasePeriod(softLimit, hardLimit);
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    DistributedFileSystem fs2 = new DistributedFileSystem();
    fs2.initialize(fs.getUri(), conf);
    final Path testPath = new Path("/testAppendAfterSoftLimit");
    final byte[] fileContents = AppendTestUtil.initBuffer(32);
    // create a new file without closing
    FSDataOutputStream out = fs.create(testPath);
    out.write(fileContents);
    //Wait for > soft-limit
    Thread.sleep(250);
    try {
        FSDataOutputStream appendStream2 = fs2.append(testPath, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
        appendStream2.write(fileContents);
        appendStream2.close();
        assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
        // make sure we now have 1 block since the first writer was revoked
        LocatedBlocks blks = fs.getClient().getLocatedBlocks(testPath.toString(), 0L);
        assertEquals(1, blks.getLocatedBlocks().size());
        for (LocatedBlock blk : blks.getLocatedBlocks()) {
            assertEquals(fileContents.length, blk.getBlockSize());
        }
    } finally {
        fs.close();
        fs2.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
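
Besides iterating over the individual blocks, LocatedBlocks also exposes the aggregate file length, so the length assertion could be written against the block report directly; a hedged alternative sketch:

// Sketch: the same length check via LocatedBlocks#getFileLength().
LocatedBlocks blks = fs.getClient().getLocatedBlocks(testPath.toString(), 0L);
assertEquals(fileContents.length, blks.getFileLength());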

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 118 uses
Test (org.junit.Test): 67 uses
Path (org.apache.hadoop.fs.Path): 65 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 52 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 33 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 32 uses
Configuration (org.apache.hadoop.conf.Configuration): 29 uses
IOException (java.io.IOException): 20 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 20 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 20 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 18 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 17 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 17 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 13 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 11 uses
InetSocketAddress (java.net.InetSocketAddress): 10 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 9 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 7 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7 uses