Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.
Class TestComputeInvalidateWork, method testDatanodeReRegistration:
@Test(timeout = 12000)
public void testDatanodeReRegistration() throws Exception {
  // Create a test file
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final Path path = new Path("/testRR");
  // Create a file and shutdown the DNs, which populates InvalidateBlocks
  DFSTestUtil.createFile(dfs, path, dfs.getDefaultBlockSize(),
      (short) NUM_OF_DATANODES, 0xED0ED0);
  DFSTestUtil.waitForReplication(dfs, path, (short) NUM_OF_DATANODES, 12000);
  for (DataNode dn : cluster.getDataNodes()) {
    dn.shutdown();
  }
  dfs.delete(path, false);
  namesystem.writeLock();
  InvalidateBlocks invalidateBlocks;
  int expected = NUM_OF_DATANODES;
  try {
    invalidateBlocks = (InvalidateBlocks) Whitebox.getInternalState(
        cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
    assertEquals("Expected invalidate blocks to be the number of DNs",
        (long) expected, invalidateBlocks.numBlocks());
  } finally {
    namesystem.writeUnlock();
  }
  // Re-register each DN and see that it wipes the invalidation work
  for (DataNode dn : cluster.getDataNodes()) {
    DatanodeID did = dn.getDatanodeId();
    DatanodeRegistration reg = new DatanodeRegistration(
        new DatanodeID(UUID.randomUUID().toString(), did),
        new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
        new ExportedBlockKeys(), VersionInfo.getVersion());
    namesystem.writeLock();
    try {
      bm.getDatanodeManager().registerDatanode(reg);
      expected--;
      assertEquals("Expected number of invalidate blocks to decrease",
          (long) expected, invalidateBlocks.numBlocks());
    } finally {
      namesystem.writeUnlock();
    }
  }
}
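A note on the registration above: the single-argument StorageInfo constructor produces a blank record for the given node type, which is enough for re-registration because the test only needs the NameNode to treat the DataNode as freshly registered. A minimal sketch of the assumed equivalent five-argument call (field defaults are an assumption, and the same imports as in the test above are reused):

// Assumed equivalent of new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE):
StorageInfo blank = new StorageInfo(
    0,      // layoutVersion: nothing recorded yet
    0,      // namespaceID: unknown until the handshake
    "",     // clusterID: empty placeholder
    0L,     // cTime: storage creation time
    HdfsServerConstants.NodeType.DATA_NODE);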
Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.
Class TestCheckpoint, method testNamespaceVerifiedOnFileTransfer:
/**
* Test that the primary NN will not serve any files to a 2NN who doesn't
* share its namespace ID, and also will not accept any files from one.
*/
@Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    URL fsName = DFSUtil.getInfoServer(
        cluster.getNameNode().getServiceRpcAddress(), conf,
        DFSUtil.getHttpClientScheme(conf)).toURL();
    // Make a finalized log on the server side.
    nn.rollEditLog();
    RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
    RemoteEditLog log = manifest.getLogs().get(0);
    NNStorage dstImage = Mockito.mock(NNStorage.class);
    Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
        .when(dstImage).getFiles(
            Mockito.<NameNodeDirType>anyObject(), Mockito.anyString());
    File mockImageFile = File.createTempFile("image", "");
    FileOutputStream imageFile = new FileOutputStream(mockImageFile);
    imageFile.write("data".getBytes());
    imageFile.close();
    Mockito.doReturn(mockImageFile).when(dstImage)
        .findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
    Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE)
        .toColonSeparatedString()).when(dstImage).toColonSeparatedString();
    try {
      TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false, false);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
    try {
      TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
    try {
      TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage,
          NameNodeFile.IMAGE, 0);
      fail("Storage info was not verified");
    } catch (IOException ioe) {
      String msg = StringUtils.stringifyException(ioe);
      assertTrue(msg, msg.contains("but the secondary expected"));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
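The key stub is toColonSeparatedString(): the mocked storage advertises a StorageInfo of layoutVersion=1, namespaceID=1, clusterID="X", cTime=1, which cannot match the running NameNode's storage info, so every transfer in the three try blocks is rejected. A small sketch of what that advertised value would look like, assuming the method joins layoutVersion, namespaceID, cTime and clusterID with colons in that order:

// Illustrative only; the exact field order is an assumption.
StorageInfo bogus = new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE);
String advertised = bogus.toColonSeparatedString();   // would read "1:1:1:X"
// The primary compares this against its own storage info and rejects the
// transfer with a message containing "but the secondary expected".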
Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.
Class PBHelper, method convert:
public static NamespaceInfoProto convert(NamespaceInfo info) {
  NamespaceInfoProto.Builder builder = NamespaceInfoProto.newBuilder();
  builder.setBlockPoolID(info.getBlockPoolID())
      .setBuildVersion(info.getBuildVersion())
      .setUnused(0)
      .setStorageInfo(PBHelper.convert((StorageInfo) info))
      .setSoftwareVersion(info.getSoftwareVersion())
      .setCapabilities(info.getCapabilities());
  HAServiceState state = info.getState();
  if (state != null) {
    builder.setState(convert(info.getState()));
  }
  return builder.build();
}
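The setStorageInfo(...) call hands the upcast NamespaceInfo to a companion convert overload that copies the four StorageInfo fields onto a StorageInfoProto. A hedged sketch of that overload, where the builder setter names are an assumption based on the usual protobuf naming:

// Sketch of the StorageInfo -> StorageInfoProto overload referenced above.
public static StorageInfoProto convert(StorageInfo info) {
  return StorageInfoProto.newBuilder()
      .setLayoutVersion(info.getLayoutVersion())
      .setNamespaceID(info.getNamespaceID())
      .setClusterID(info.getClusterID())
      .setCTime(info.getCTime())
      .build();
}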
Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.
Class FileJournalManager, method getJournalCTime:
@Override
public long getJournalCTime() throws IOException {
  StorageInfo sInfo = new StorageInfo((NodeType) null);
  sInfo.readProperties(sd);
  return sInfo.getCTime();
}
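Here readProperties(sd) parses the storage directory's current/VERSION file and the method returns the stored cTime. For orientation, an illustrative (entirely made-up) set of the key/value pairs such a file carries, expressed as a java.util.Properties object:

// Illustrative only; every value below is invented.
Properties version = new Properties();
version.setProperty("layoutVersion", "-63");
version.setProperty("namespaceID", "123456789");
version.setProperty("clusterID", "CID-00000000-0000-0000-0000-000000000000");
version.setProperty("cTime", "0");
version.setProperty("storageType", "JOURNAL_NODE");
// getJournalCTime() would then report Long.parseLong(version.getProperty("cTime")).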
Use of org.apache.hadoop.hdfs.server.common.StorageInfo in project hadoop by apache.
Class TestDFSUpgrade, method testUpgrade:
/**
* This test attempts to upgrade the NameNode and DataNode under
* a number of valid and invalid conditions.
*/
@Test(timeout = 60000)
public void testUpgrade() throws Exception {
  File[] baseDirs;
  StorageInfo storageInfo = null;
  for (int numDirs = 1; numDirs <= 2; numDirs++) {
    conf = new HdfsConfiguration();
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
    String[] dataNodeDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
    log("Normal NameNode upgrade", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    cluster = createCluster();
    // make sure that rolling upgrade cannot be started
    try {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      fail();
    } catch (RemoteException re) {
      assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
      LOG.info("The exception is expected.", re);
    }
    checkNameNode(nameNodeDirs, EXPECTED_TXID);
    if (numDirs > 1)
      TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("Normal DataNode upgrade", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    cluster = createCluster();
    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    log("NameNode upgrade with existing previous dir", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("DataNode upgrade with existing previous dir", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    cluster = createCluster();
    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "previous");
    cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
    checkDataNode(dataNodeDirs, UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    log("DataNode upgrade with future stored layout version in current", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    cluster = createCluster();
    baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
    storageInfo = new StorageInfo(Integer.MIN_VALUE,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster),
        UpgradeUtilities.getCurrentFsscTime(cluster), NodeType.DATA_NODE);
    UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
    startBlockPoolShouldFail(StartupOption.REGULAR,
        UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    log("DataNode upgrade with newer fsscTime in current", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    cluster = createCluster();
    baseDirs = UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");
    storageInfo = new StorageInfo(HdfsServerConstants.DATANODE_LAYOUT_VERSION,
        UpgradeUtilities.getCurrentNamespaceID(cluster),
        UpgradeUtilities.getCurrentClusterID(cluster), Long.MAX_VALUE,
        NodeType.DATA_NODE);
    UpgradeUtilities.createDataNodeVersionFile(baseDirs, storageInfo,
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
    // Ensure the corresponding block pool fails to initialize
    startBlockPoolShouldFail(StartupOption.REGULAR,
        UpgradeUtilities.getCurrentBlockPoolID(null));
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    UpgradeUtilities.createEmptyDirs(dataNodeDirs);
    log("NameNode upgrade with no edits file", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    deleteStorageFilesWithPrefix(nameNodeDirs, "edits_");
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("NameNode upgrade with no image file", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    deleteStorageFilesWithPrefix(nameNodeDirs, "fsimage_");
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("NameNode upgrade with corrupt version file", numDirs);
    baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    for (File f : baseDirs) {
      UpgradeUtilities.corruptFile(new File(f, "VERSION"),
          "layoutVersion".getBytes(Charsets.UTF_8),
          "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
    }
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("NameNode upgrade with old layout version in current", numDirs);
    baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    storageInfo = new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
        UpgradeUtilities.getCurrentNamespaceID(null),
        UpgradeUtilities.getCurrentClusterID(null),
        UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
    UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    log("NameNode upgrade with future layout version in current", numDirs);
    baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    storageInfo = new StorageInfo(Integer.MIN_VALUE,
        UpgradeUtilities.getCurrentNamespaceID(null),
        UpgradeUtilities.getCurrentClusterID(null),
        UpgradeUtilities.getCurrentFsscTime(null), NodeType.NAME_NODE);
    UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs, storageInfo,
        UpgradeUtilities.getCurrentBlockPoolID(cluster));
    startNameNodeShouldFail(StartupOption.UPGRADE);
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  }
  // end numDir loop
  // One more check: normal NN upgrade with 4 directories, concurrent write
  int numDirs = 4;
  {
    conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);
    conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
    String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
    log("Normal NameNode upgrade", numDirs);
    UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
    cluster = createCluster();
    // make sure that rolling upgrade cannot be started
    try {
      final DistributedFileSystem dfs = cluster.getFileSystem();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
      fail();
    } catch (RemoteException re) {
      assertEquals(InconsistentFSStateException.class.getName(), re.getClassName());
      LOG.info("The exception is expected.", re);
    }
    checkNameNode(nameNodeDirs, EXPECTED_TXID);
    TestParallelImageWrite.checkImages(cluster.getNamesystem(), numDirs);
    cluster.shutdown();
    UpgradeUtilities.createEmptyDirs(nameNodeDirs);
  }
}
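Two of the failure cases above hinge on the stored layout version alone: HDFS layout versions are negative and decrease as the on-disk format evolves, so Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1 is too old to upgrade from, while Integer.MIN_VALUE looks like it was written by a future release. A hypothetical helper (not part of Hadoop) that captures the two checks these scenarios expect the storage code to make:

// Hypothetical, for illustration only: classifies a stored layout version the
// way the two failing upgrade scenarios in the test assume.
static boolean isUpgradable(int storedLayoutVersion, int softwareLayoutVersion) {
  // numerically larger than the last upgradable version = too old to upgrade
  boolean tooOld = storedLayoutVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
  // numerically smaller than what this software writes = created by a future release
  boolean fromFuture = storedLayoutVersion < softwareLayoutVersion;
  return !tooOld && !fromFuture;
}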