Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class NamenodeWebHdfsMethods: method chooseDatanode.
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode, final String path,
    final HttpOpParam.Op op, final long openOffset, final long blocksize,
    final String excludeDatanodes, final String remoteAddr) throws IOException {
  FSNamesystem fsn = namenode.getNamesystem();
  if (fsn == null) {
    throw new IOException("Namesystem has not been intialized yet.");
  }
  final BlockManager bm = fsn.getBlockManager();

  HashSet<Node> excludes = new HashSet<Node>();
  if (excludeDatanodes != null) {
    for (String host : StringUtils.getTrimmedStringCollection(excludeDatanodes)) {
      int idx = host.indexOf(":");
      if (idx != -1) {
        excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
            host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
      } else {
        excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
      }
    }
  }

  if (op == PutOpParam.Op.CREATE) {
    // choose a datanode near to client
    final DatanodeDescriptor clientNode =
        bm.getDatanodeManager().getDatanodeByHost(remoteAddr);
    if (clientNode != null) {
      final DatanodeStorageInfo[] storages =
          bm.chooseTarget4WebHDFS(path, clientNode, excludes, blocksize);
      if (storages.length > 0) {
        return storages[0].getDatanodeDescriptor();
      }
    }
  } else if (op == GetOpParam.Op.OPEN || op == GetOpParam.Op.GETFILECHECKSUM
      || op == PostOpParam.Op.APPEND) {
    // choose a datanode containing a replica
    final NamenodeProtocols np = getRPCServer(namenode);
    final HdfsFileStatus status = np.getFileInfo(path);
    if (status == null) {
      throw new FileNotFoundException("File " + path + " not found.");
    }
    final long len = status.getLen();
    if (op == GetOpParam.Op.OPEN) {
      if (openOffset < 0L || (openOffset >= len && len > 0)) {
        throw new IOException("Offset=" + openOffset + " out of the range [0, "
            + len + "); " + op + ", path=" + path);
      }
    }
    if (len > 0) {
      final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
      final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
      final int count = locations.locatedBlockCount();
      if (count > 0) {
        return bestNode(locations.get(0).getLocations(), excludes);
      }
    }
  }

  return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology()
      .chooseRandom(NodeBase.ROOT, excludes);
}
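The exclude-list handling above splits each comma-separated entry on the first ':' and looks the node up either by transfer address (host plus port) or by hostname alone. A minimal standalone sketch of that parsing rule, with plain strings standing in for the DatanodeManager lookups (the class and helper names here are hypothetical, not part of Hadoop):

import java.util.HashSet;
import java.util.Set;

public class ExcludeListSketch {
  // Mirrors the branch in chooseDatanode: "host:port" entries would be
  // resolved via getDatanodeByXferAddr, bare hostnames via getDatanodeByHost.
  static Set<String> parseExcludes(String excludeDatanodes) {
    Set<String> excludes = new HashSet<>();
    if (excludeDatanodes == null) {
      return excludes;
    }
    for (String host : excludeDatanodes.split(",")) {
      host = host.trim();
      if (host.isEmpty()) {
        continue;
      }
      int idx = host.indexOf(':');
      if (idx != -1) {
        String hostName = host.substring(0, idx);
        int xferPort = Integer.parseInt(host.substring(idx + 1));
        excludes.add(hostName + ":" + xferPort);  // lookup-by-transfer-address case
      } else {
        excludes.add(host);                       // lookup-by-host case
      }
    }
    return excludes;
  }

  public static void main(String[] args) {
    System.out.println(parseExcludes("dn1.example.com:50010, dn2.example.com"));
  }
}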
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class UpgradeUtilities: method initialize.
/**
* Initialize the data structures used by this class.
* IMPORTANT NOTE: This method must be called once before calling
* any other public method on this class.
* <p>
* Creates a singleton master populated storage
* directory for a Namenode (contains edits, fsimage,
* version, and time files) and a Datanode (contains version and
* block files). This can be a lengthy operation.
*/
public static void initialize() throws Exception {
  createEmptyDirs(new String[] { TEST_ROOT_DIR.toString() });
  Configuration config = new HdfsConfiguration();
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeStorage.toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, datanodeStorage.toString());
  MiniDFSCluster cluster = null;
  String bpid = null;
  try {
    // format data-node
    createEmptyDirs(new String[] { datanodeStorage.toString() });
    // format and start NameNode and start DataNode
    DFSTestUtil.formatNameNode(config);
    cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(1)
        .startupOption(StartupOption.REGULAR)
        .format(false)
        .manageDataDfsDirs(false)
        .manageNameDfsDirs(false)
        .build();
    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
    namenodeStorageFsscTime = namenode.versionRequest().getCTime();
    namenodeStorageClusterID = namenode.versionRequest().getClusterID();
    namenodeStorageBlockPoolID = namenode.versionRequest().getBlockPoolID();
    FileSystem fs = FileSystem.get(config);
    Path baseDir = new Path("/TestUpgrade");
    fs.mkdirs(baseDir);
    // write some files
    int bufferSize = 4096;
    byte[] buffer = new byte[bufferSize];
    for (int i = 0; i < bufferSize; i++) {
      buffer[i] = (byte) ('0' + i % 50);
    }
    writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
    // save image
    namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    namenode.saveNamespace(0, 0);
    namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    // write more files
    writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
    writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
    bpid = cluster.getNamesystem(0).getBlockPoolId();
  } finally {
    // shutdown
    if (cluster != null) {
      cluster.shutdown();
    }
    FileUtil.fullyDelete(new File(namenodeStorage, "in_use.lock"));
    FileUtil.fullyDelete(new File(datanodeStorage, "in_use.lock"));
  }
  namenodeStorageChecksum = checksumContents(NAME_NODE,
      new File(namenodeStorage, "current"), false);
  File dnCurDir = new File(datanodeStorage, "current");
  datanodeStorageChecksum = checksumContents(DATA_NODE, dnCurDir, false);
  File bpCurDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir), "current");
  blockPoolStorageChecksum = checksumContents(DATA_NODE, bpCurDir, false);
  File bpCurFinalizeDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current/" + DataStorage.STORAGE_DIR_FINALIZED);
  blockPoolFinalizedStorageChecksum = checksumContents(DATA_NODE, bpCurFinalizeDir, true);
  File bpCurRbwDir = new File(BlockPoolSliceStorage.getBpRoot(bpid, dnCurDir),
      "current/" + DataStorage.STORAGE_DIR_RBW);
  blockPoolRbwStorageChecksum = checksumContents(DATA_NODE, bpCurRbwDir, false);
}
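The checksums captured at the end describe the master storage directories so that later upgrade and rollback tests can verify the on-disk layout is preserved. As a rough illustration of what a directory-content checksum involves (a hypothetical helper, not the actual checksumContents implementation), here is a self-contained sketch using only the JDK:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.zip.CRC32;

public class DirChecksumSketch {
  // Folds the bytes of every regular file directly under 'dir' (in a stable
  // order) into a single CRC, so two copies of the directory can be compared.
  static long checksumDir(File dir) throws IOException {
    CRC32 crc = new CRC32();
    File[] entries = dir.listFiles();
    if (entries == null) {
      return crc.getValue();
    }
    Arrays.sort(entries);            // stable order across runs
    for (File f : entries) {
      if (f.isDirectory()) {
        continue;                    // top level only in this sketch
      }
      try (FileInputStream in = new FileInputStream(f)) {
        byte[] buf = new byte[4096];
        int n;
        while ((n = in.read(buf)) > 0) {
          crc.update(buf, 0, n);
        }
      }
    }
    return crc.getValue();
  }

  public static void main(String[] args) throws IOException {
    System.out.println(checksumDir(new File(args[0])));
  }
}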
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestBlockManager: method testNeededReconstructionWhileAppending.
@Test(timeout = 60000)
public void testNeededReconstructionWhileAppending() throws IOException {
  Configuration conf = new HdfsConfiguration();
  String src = "/test-file";
  Path file = new Path(src);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  try {
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    DFSOutputStream out = null;
    try {
      out = (DFSOutputStream) (fs.create(file).getWrappedStream());
      out.write(1);
      out.hflush();
      out.close();
      FSDataInputStream in = null;
      ExtendedBlock oldBlock = null;
      try {
        in = fs.open(file);
        oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
      } finally {
        IOUtils.closeStream(in);
      }
      String clientName = ((DistributedFileSystem) fs).getClient().getClientName();
      namenode.append(src, clientName, new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
      LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(oldBlock, clientName);
      ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
          oldBlock.getBlockId(), oldBlock.getNumBytes(),
          newLocatedBlock.getBlock().getGenerationStamp());
      namenode.updatePipeline(clientName, oldBlock, newBlock,
          newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
      BlockInfo bi = bm.getStoredBlock(newBlock.getLocalBlock());
      assertFalse(bm.isNeededReconstruction(bi,
          bm.countNodes(bi, cluster.getNamesystem().isInStartupSafeMode())));
    } finally {
      IOUtils.closeStream(out);
    }
  } finally {
    cluster.shutdown();
  }
}
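The test drives the append pipeline update through the raw NamenodeProtocols RPCs: append, then updateBlockForPipeline to obtain a new generation stamp, then updatePipeline to commit it. Ordinary client code reaches the same NameNode-side machinery through the public FileSystem API; a minimal sketch, assuming a MiniDFSCluster and the HDFS test dependencies are on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AppendSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    try {
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/append-sketch");
      // create and close the file, as the test does before re-opening it
      try (FSDataOutputStream out = fs.create(file)) {
        out.write(1);
      }
      // the public append() call triggers the NameNode-side pipeline update
      // that the test exercises directly over NamenodeProtocols
      try (FSDataOutputStream out = fs.append(file)) {
        out.write(2);
      }
      System.out.println("file length: " + fs.getFileStatus(file).getLen());
    } finally {
      cluster.shutdown();
    }
  }
}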
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestDataNodeVolumeFailure: method countNNBlocks.
/**
 * Count the datanode replicas that the NameNode reports for the blocks of a
 * file and record the per-block counts in the given map.
 * @param map block id to BlockLocs mapping to update
 * @param path file whose block locations are queried
 * @param size length of the file, in bytes
 * @return total number of replica locations across all blocks
 * @throws IOException
 */
private int countNNBlocks(Map<String, BlockLocs> map, String path, long size)
    throws IOException {
  int total = 0;
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();
  for (LocatedBlock lb : locatedBlocks) {
    String blockId = "" + lb.getBlock().getBlockId();
    DatanodeInfo[] dn_locs = lb.getLocations();
    BlockLocs bl = map.get(blockId);
    if (bl == null) {
      bl = new BlockLocs();
    }
    total += dn_locs.length;
    bl.num_locs += dn_locs.length;
    map.put(blockId, bl);
  }
  return total;
}
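The same per-block replica counting can be approximated from the client side with FileSystem.getFileBlockLocations, without reaching for the internal NamenodeProtocols handle. A small sketch, assuming fs and path refer to a running cluster (the class and method names here are illustrative only):

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicaCountSketch {
  // Count replica locations per block using only the public FileSystem API.
  static int countReplicaLocations(FileSystem fs, Path path,
      Map<Long, Integer> perBlock) throws IOException {
    FileStatus status = fs.getFileStatus(path);
    BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
    int total = 0;
    for (BlockLocation loc : blocks) {
      int hosts = loc.getHosts().length;        // datanodes holding this block
      perBlock.merge(loc.getOffset(), hosts, Integer::sum);
      total += hosts;
    }
    return total;
  }
}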
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestCheckpoint: method testNamespaceVerifiedOnFileTransfer.
/**
* Test that the primary NN will not serve any files to a 2NN who doesn't
* share its namespace ID, and also will not accept any files from one.
*/
@Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    URL fsName = DFSUtil.getInfoServer(cluster.getNameNode().getServiceRpcAddress(),
        conf, DFSUtil.getHttpClientScheme(conf)).toURL();

    // Make a finalized log on the server side.
    nn.rollEditLog();
    RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
    RemoteEditLog log = manifest.getLogs().get(0);

    NNStorage dstImage = Mockito.mock(NNStorage.class);
    Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
        .when(dstImage).getFiles(Mockito.<NameNodeDirType>anyObject(), Mockito.anyString());
    File mockImageFile = File.createTempFile("image", "");
    FileOutputStream imageFile = new FileOutputStream(mockImageFile);
    imageFile.write("data".getBytes());
    imageFile.close();
    Mockito.doReturn(mockImageFile).when(dstImage)
        .findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
    Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE).toColonSeparatedString())
        .when(dstImage).toColonSeparatedString();

    try {
      TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false, false);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
    try {
      TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
    try {
      TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage, NameNodeFile.IMAGE, 0);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
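All three expected failures hinge on the StorageInfo advertised by the mocked destination not matching the primary's namespace information. As a rough, hypothetical illustration of that kind of comparison (not TransferFsImage's actual code, and the colon-separated field layout shown is only approximate):

import java.io.IOException;

public class StorageInfoCheckSketch {
  // Illustrates the idea the test exercises: the storage-info string offered
  // by the remote side must match what this node recorded locally, otherwise
  // the transfer is rejected.
  static void verifyStorageInfo(String remote, String localExpected) throws IOException {
    if (!remote.equals(localExpected)) {
      throw new IOException("Remote storage info " + remote
          + " but the secondary expected " + localExpected);
    }
  }

  public static void main(String[] args) {
    try {
      verifyStorageInfo("-63:12345:0:CID-test", "-63:12345:0:CID-test");   // matches, accepted
      verifyStorageInfo("-63:99999:0:CID-other", "-63:12345:0:CID-test");  // mismatch, throws
    } catch (IOException expected) {
      System.out.println(expected.getMessage());
    }
  }
}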