
Example 31 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class NamenodeWebHdfsMethods, method chooseDatanode.

@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode, final String path,
        final HttpOpParam.Op op, final long openOffset, final long blocksize,
        final String excludeDatanodes, final String remoteAddr) throws IOException {
    FSNamesystem fsn = namenode.getNamesystem();
    if (fsn == null) {
        throw new IOException("Namesystem has not been initialized yet.");
    }
    final BlockManager bm = fsn.getBlockManager();
    HashSet<Node> excludes = new HashSet<Node>();
    if (excludeDatanodes != null) {
        for (String host : StringUtils.getTrimmedStringCollection(excludeDatanodes)) {
            int idx = host.indexOf(":");
            if (idx != -1) {
                excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
            } else {
                excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
            }
        }
    }
    if (op == PutOpParam.Op.CREATE) {
        // choose a datanode near the client
        final DatanodeDescriptor clientNode = bm.getDatanodeManager().getDatanodeByHost(remoteAddr);
        if (clientNode != null) {
            final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(path, clientNode, excludes, blocksize);
            if (storages.length > 0) {
                return storages[0].getDatanodeDescriptor();
            }
        }
    } else if (op == GetOpParam.Op.OPEN || op == GetOpParam.Op.GETFILECHECKSUM || op == PostOpParam.Op.APPEND) {
        // choose a datanode containing a replica
        final NamenodeProtocols np = getRPCServer(namenode);
        final HdfsFileStatus status = np.getFileInfo(path);
        if (status == null) {
            throw new FileNotFoundException("File " + path + " not found.");
        }
        final long len = status.getLen();
        if (op == GetOpParam.Op.OPEN) {
            if (openOffset < 0L || (openOffset >= len && len > 0)) {
                throw new IOException("Offset=" + openOffset + " out of the range [0, " + len + "); " + op + ", path=" + path);
            }
        }
        if (len > 0) {
            final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
            final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
            final int count = locations.locatedBlockCount();
            if (count > 0) {
                return bestNode(locations.get(0).getLocations(), excludes);
            }
        }
    }
    return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology().chooseRandom(NodeBase.ROOT, excludes);
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Node(org.apache.hadoop.net.Node) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) HashSet(java.util.HashSet) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
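
A hedged sketch of driving chooseDatanode directly from a test: the method is package-private and @VisibleForTesting, so the sketch assumes it lives in the same package as NamenodeWebHdfsMethods. The cluster setup, file path, block size, and client address below are illustrative assumptions, not taken from the example above.

package org.apache.hadoop.hdfs.server.namenode.web.resources;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;

public class ChooseDatanodeSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            NameNode nn = cluster.getNameNode();
            // CREATE has no read offset, so openOffset is passed as -1;
            // for CREATE, chooseDatanode favors a datanode near the client address.
            DatanodeInfo dn = NamenodeWebHdfsMethods.chooseDatanode(
                    nn, "/sketch-file", PutOpParam.Op.CREATE,
                    -1L, 128 * 1024 * 1024L, null, "127.0.0.1");
            System.out.println("chosen datanode: " + dn);
        } finally {
            cluster.shutdown();
        }
    }
}

For GetOpParam.Op.OPEN, the same call would instead require a valid openOffset and would return a datanode holding a replica, as the branch in the method above shows.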

Example 32 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class UpgradeUtilities, method initialize.

/**
   * Initialize the data structures used by this class.  
   * IMPORTANT NOTE: This method must be called once before calling 
   *                 any other public method on this class.  
   * <p>
   * Creates a singleton master populated storage
   * directory for a Namenode (contains edits, fsimage,
   * version, and time files) and a Datanode (contains version and
   * block files).  This can be a lengthy operation.
   */
public static void initialize() throws Exception {
    createEmptyDirs(new String[] { TEST_ROOT_DIR.toString() });
    Configuration config = new HdfsConfiguration();
    config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
    config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
    config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
    MiniDFSCluster cluster = null;
    String bpid = null;
    try {
        // format data-node
        createEmptyDirs(new String[] { datanodeStorage.toString() });
        // format and start NameNode and start DataNode
        DFSTestUtil.formatNameNode(config);
        cluster = new MiniDFSCluster.Builder(config)
            .numDataNodes(1)
            .startupOption(StartupOption.REGULAR)
            .format(false)
            .manageDataDfsDirs(false)
            .manageNameDfsDirs(false)
            .build();
        NamenodeProtocols namenode = cluster.getNameNodeRpc();
        namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
        namenodeStorageFsscTime = namenode.versionRequest().getCTime();
        namenodeStorageClusterID = namenode.versionRequest().getClusterID();
        namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();
        FileSystem fs = FileSystem.get(config);
        Path baseDir = new Path("/TestUpgrade");
        fs.mkdirs(baseDir);
        // write some files
        int bufferSize = 4096;
        byte[] buffer = new byte[bufferSize];
        for (int i = 0; i < bufferSize; i++) buffer[i] = (byte) ('0' + i % 50);
        writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
        writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
        // save image
        namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        namenode.saveNamespace(0, 0);
        namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        // write more files
        writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
        writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
        bpid = cluster.getNamesystem(0).getBlockPoolId();
    } finally {
        // shutdown
        if (cluster != null)
            cluster.shutdown();
        FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
        FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
    }
    namenodeStorageChecksum = checksumContents(NAME_NODE, new File(namenodeStorage, "current"), false);
    File dnCurDir = new File(datanodeStorage, "current");
    datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);
    File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current");
    blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);
    File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current/" + DataStorage.STORAGE_DIR_FINALIZED);
    blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, bpCurFinalizeDir, true);
    File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current/" + DataStorage.STORAGE_DIR_RBW);
    blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir, false);
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) File(java.io.File)
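
A small design note on initialize() above: it issues four separate versionRequest() RPCs to read four fields of what is logically one reply. A single call would do, since versionRequest() returns a NamespaceInfo carrying all four values. A minimal sketch of that variant, as a drop-in for the four lines above (needs an import of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo):

// One RPC instead of four; NamespaceInfo holds all four fields.
NamespaceInfo nsInfo = namenode.versionRequest();
namenodeStorageNamespaceID = nsInfo.getNamespaceID();
namenodeStorageFsscTime = nsInfo.getCTime();
namenodeStorageClusterID = nsInfo.getClusterID();
namenodeStorageBlockPoolID = nsInfo.getBlockPoolID();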

Example 33 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestBlockManager, method testNeededReconstructionWhileAppending.

@Test(timeout = 60000)
public void testNeededReconstructionWhileAppending() throws IOException {
    Configuration conf = new HdfsConfiguration();
    String src = "/test-file";
    Path file = new Path(src);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    try {
        BlockManager bm = cluster.getNamesystem().getBlockManager();
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols namenode = cluster.getNameNodeRpc();
        DFSOutputStream out = null;
        try {
            out = (DFSOutputStream) (fs.create(file).getWrappedStream());
            out.write(1);
            out.hflush();
            out.close();
            FSDataInputStream in = null;
            ExtendedBlock oldBlock = null;
            try {
                in = fs.open(file);
                oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
            } finally {
                IOUtils.closeStream(in);
            }
            String clientName = ((DistributedFileSystem) fs).getClient().getClientName();
            namenode.append(src, clientName, new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
            LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(oldBlock, clientName);
            ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(), oldBlock.getNumBytes(), newLocatedBlock.getBlock().getGenerationStamp());
            namenode.updatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
            BlockInfo bi = bm.getStoredBlock(newBlock.getLocalBlock());
            assertFalse(bm.isNeededReconstruction(bi, bm.countNodes(bi, cluster.getNamesystem().isInStartupSafeMode())));
        } finally {
            IOUtils.closeStream(out);
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ReceivedDeletedBlockInfo(org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) Test(org.junit.Test)
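
The heart of the test above is the three-step append sequence on NamenodeProtocols. A hedged recap of just that sequence, with the setup elided (src, clientName, oldBlock, and namenode as in the test):

// 1. Reopen the file for append; the NameNode converts the last block
//    to under-construction state.
namenode.append(src, clientName, new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
// 2. Obtain a new generation stamp (and access token) for that block.
LocatedBlock lb = namenode.updateBlockForPipeline(oldBlock, clientName);
// 3. Commit the new pipeline under the bumped generation stamp.
ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
        oldBlock.getBlockId(), oldBlock.getNumBytes(),
        lb.getBlock().getGenerationStamp());
namenode.updatePipeline(clientName, oldBlock, newBlock,
        lb.getLocations(), lb.getStorageIDs());

The assertion then checks that a block in this state is not queued for reconstruction.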

Example 34 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestDataNodeVolumeFailure, method countNNBlocks.

/**
   * Count the datanodes holding replicas of the blocks of a file and record
   * the per-block replica counts in the given map.
   * @param map map from block ID to its BlockLocs entry
   * @param path path of the file to inspect
   * @param size length in bytes over which to request block locations
   * @return total number of replica locations reported by the NameNode
   * @throws IOException if the NameNode RPC fails
private int countNNBlocks(Map<String, BlockLocs> map, String path, long size) throws IOException {
    int total = 0;
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    List<LocatedBlock> locatedBlocks = nn.getBlockLocations(path, 0, size).getLocatedBlocks();
    for (LocatedBlock lb : locatedBlocks) {
        String blockId = String.valueOf(lb.getBlock().getBlockId());
        DatanodeInfo[] dn_locs = lb.getLocations();
        BlockLocs bl = map.get(blockId);
        if (bl == null) {
            bl = new BlockLocs();
        }
        total += dn_locs.length;
        bl.num_locs += dn_locs.length;
        map.put(blockId, bl);
    }
    return total;
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
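
A hedged usage sketch of countNNBlocks (BlockLocs is an inner class of TestDataNodeVolumeFailure; the path and size here are illustrative):

Map<String, BlockLocs> blockMap = new HashMap<String, BlockLocs>();
int before = countNNBlocks(blockMap, "/test-file", 1024L);
// ... fail a datanode volume here ...
int after = countNNBlocks(new HashMap<String, BlockLocs>(), "/test-file", 1024L);
// "after" should drop below "before" until re-replication restores the
// missing replicas.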

Example 35 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

The class TestCheckpoint, method testNamespaceVerifiedOnFileTransfer.

/**
   * Test that the primary NN will not serve any files to a 2NN who doesn't
   * share its namespace ID, and also will not accept any files from one.
   */
@Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
        NamenodeProtocols nn = cluster.getNameNodeRpc();
        URL fsName = DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(), conf, DFSUtil.getHttpClientScheme(conf)).toURL();
        // Make a finalized log on the server side. 
        nn.rollEditLog();
        RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
        RemoteEditLog log = manifest.getLogs().get(0);
        NNStorage dstImage = Mockito.mock(NNStorage.class);
        Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written"))).when(dstImage).getFiles(Mockito.<NameNodeDirType>anyObject(), Mockito.anyString());
        File mockImageFile = File.createTempFile("image", "");
        FileOutputStream imageFile = new FileOutputStream(mockImageFile);
        imageFile.write("data".getBytes());
        imageFile.close();
        Mockito.doReturn(mockImageFile).when(dstImage).findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
        Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE).toColonSeparatedString()).when(dstImage).toColonSeparatedString();
        try {
            TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false, false);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
        try {
            TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
        try {
            TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage, NameNodeFile.IMAGE, 0);
            fail("Storage info was not verified");
        } catch (IOException ioe) {
            String msg = StringUtils.stringifyException(ioe);
            assertTrue(msg, msg.contains("but the secondary expected"));
        }
    } finally {
        cleanup(cluster);
        cluster = null;
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) MetricsRecordBuilder(org.apache.hadoop.metrics2.MetricsRecordBuilder) IOException(java.io.IOException) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) URL(java.net.URL) RemoteEditLogManifest(org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest) FileOutputStream(java.io.FileOutputStream) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) RemoteEditLog(org.apache.hadoop.hdfs.server.protocol.RemoteEditLog) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) Test(org.junit.Test)
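
All three failures above are driven by the deliberately mismatched StorageInfo the mock returns. As a hedged counter-sketch, if the mock instead echoed the live NameNode's storage info (NNStorage extends StorageInfo), the transfer-time verification would pass; same cluster and dstImage variables as in the test:

// The live NameNode's NNStorage supplies a matching signature.
StorageInfo live = cluster.getNameNode().getFSImage().getStorage();
Mockito.doReturn(live.toColonSeparatedString())
        .when(dstImage).toColonSeparatedString();
// With layoutVersion, namespaceID, cTime, and clusterID all matching,
// the "but the secondary expected" IOException is no longer thrown.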

Aggregations

NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 54 uses
Test (org.junit.Test): 45 uses
IOException (java.io.IOException): 24 uses
Configuration (org.apache.hadoop.conf.Configuration): 21 uses
Path (org.apache.hadoop.fs.Path): 19 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 16 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 15 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 12 uses
RemoteException (org.apache.hadoop.ipc.RemoteException): 10 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9 uses
File (java.io.File): 8 uses
FileNotFoundException (java.io.FileNotFoundException): 8 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 7 uses
StandbyException (org.apache.hadoop.ipc.StandbyException): 7 uses
EOFException (java.io.EOFException): 6 uses
ConnectException (java.net.ConnectException): 6 uses
URISyntaxException (java.net.URISyntaxException): 6 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses