Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestJournalNodeSync, method testSyncForMultipleMissingLogs.
@Test(timeout = 30000)
public void testSyncForMultipleMissingLogs() throws Exception {
  File firstJournalDir = jCluster.getJournalDir(0, jid);
  File firstJournalCurrentDir =
      new StorageDirectory(firstJournalDir).getCurrentDir();
  // Generate some edit logs and delete two of them.
  long firstTxId = generateEditLog();
  long nextTxId = generateEditLog();
  List<File> missingLogs = Lists.newArrayList();
  missingLogs.add(deleteEditLog(firstJournalCurrentDir, firstTxId));
  missingLogs.add(deleteEditLog(firstJournalCurrentDir, nextTxId));
  // Wait for the missing segments to be copied back to the first journal node.
  GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 10000);
}
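The test relies on helpers (generateEditLog, deleteEditLog, editLogExists) that are defined elsewhere in TestJournalNodeSync and are not shown here. Below is a minimal sketch of what editLogExists might look like, assuming GenericTestUtils.waitFor accepts a Guava Supplier<Boolean> as in the anonymous-class example further down this page; the real helper may differ.

// Hypothetical sketch of the editLogExists helper used in waitFor above;
// the actual implementation lives in TestJournalNodeSync and may differ.
private Supplier<Boolean> editLogExists(final List<File> missingLogs) {
  return new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      // The sync is complete once every previously deleted segment has been
      // copied back from the other journal nodes.
      for (File log : missingLogs) {
        if (!log.exists()) {
          return false;
        }
      }
      return true;
    }
  };
}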
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class UpgradeUtilities, method createNameNodeVersionFile.
/**
 * Create a <code>VERSION</code> file for the namenode inside each of the
 * specified parent directories. If such a file already exists, it is
 * overwritten. The given {@link StorageInfo} supplies the layout version and
 * the other properties written to the file. None of the parameters may be
 * null.
 *
 * @param conf configuration used to construct the temporary NNStorage
 * @param parent directories in which the namenode VERSION file is created
 * @param version StorageInfo to create the VERSION file from
 * @param bpid block pool ID
 *
 * @return the created version files
 */
public static File[] createNameNodeVersionFile(Configuration conf,
    File[] parent, StorageInfo version, String bpid) throws IOException {
  Storage storage = new NNStorage(conf, Collections.<URI>emptyList(),
      Collections.<URI>emptyList());
  storage.setStorageInfo(version);
  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    versionFiles[i] = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.writeProperties(versionFiles[i], sd);
  }
  return versionFiles;
}
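A hedged usage sketch of this helper, in the style of an upgrade test: the directory paths, layout version, and IDs below are illustrative assumptions, not values taken from the project.

// Illustrative only: lay down VERSION files with a crafted StorageInfo in two
// name directories before starting a cluster. All literal values are made up.
Configuration conf = new HdfsConfiguration();
File[] nameDirs = {
    new File("/tmp/upgrade-test/name1/current"),
    new File("/tmp/upgrade-test/name2/current")
};
StorageInfo versionInfo = new StorageInfo(
    -60,                  // layout version (assumed value)
    12345,                // namespace ID (assumed value)
    "CID-test-cluster",   // cluster ID (assumed value)
    0L,                   // cTime
    HdfsServerConstants.NodeType.NAME_NODE);
File[] versionFiles = UpgradeUtilities.createNameNodeVersionFile(
    conf, nameDirs, versionInfo, "BP-1-127.0.0.1-1");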
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class UpgradeUtilities, method createDataNodeStorageDirs.
/**
 * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
 * populated DFS filesystem.
 * For each parent directory, this method populates <code>parent/dirName</code>
 * with the contents of a datanode storage directory taken from a single
 * "master" datanode image (which contains a VERSION file and block files).
 * If the destination directory does not exist, it is created; if it already
 * exists, it is deleted first.
 *
 * @param parents parent directories under which {@code dirName} is created
 * @param dirName name of the storage directory created under each parent
 * @return the array of created directories
 */
public static File[] createDataNodeStorageDirs(String[] parents,
    String dirName) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] { newDir.toString() });
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
        new Path(newDir.toString()), false);
    // Change the storage UUID to avoid conflicts when DN starts up.
    StorageDirectory sd =
        new StorageDirectory(new File(datanodeStorage.toString()));
    sd.setStorageUuid(DatanodeStorage.generateUuid());
    Properties properties = Storage.readPropertiesFile(sd.getVersionFile());
    properties.setProperty("storageID", sd.getStorageUuid());
    Storage.writeProperties(sd.getVersionFile(), properties);
    retVal[i] = newDir;
  }
  return retVal;
}
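A hedged usage sketch: how a caller might populate two candidate data directories from the shared master image before starting a MiniDFSCluster. The paths are illustrative assumptions.

// Illustrative only: the directory names are made up for this sketch.
String[] dataNodeDirs = {
    "/tmp/upgrade-test/data1",
    "/tmp/upgrade-test/data2"
};
// The helper creates /tmp/upgrade-test/dataN/current and fills each one
// from the master datanode image.
File[] populatedDirs =
    UpgradeUtilities.createDataNodeStorageDirs(dataNodeDirs, "current");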
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestFsVolumeList, method testGetNextVolumeWithClosedVolume.
@Test(timeout = 30000)
public void testGetNextVolumeWithClosedVolume() throws IOException {
  FsVolumeList volumeList = new FsVolumeList(
      Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
  final List<FsVolumeImpl> volumes = new ArrayList<>();
  for (int i = 0; i < 3; i++) {
    File curDir = new File(baseDir, "nextvolume-" + i);
    curDir.mkdirs();
    FsVolumeImpl volume = new FsVolumeImplBuilder()
        .setConf(conf)
        .setDataset(dataset)
        .setStorageID("storage-id")
        .setStorageDirectory(
            new StorageDirectory(StorageLocation.parse(curDir.getPath())))
        .build();
    volume.setCapacityForTesting(1024 * 1024 * 1024);
    volumes.add(volume);
    volumeList.addVolume(volume.obtainReference());
  }
  // Close the second volume.
  volumes.get(1).setClosed();
  try {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return volumes.get(1).checkClosed();
      }
    }, 100, 3000);
  } catch (TimeoutException e) {
    fail("timed out while waiting for volume to be removed.");
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
  }
  for (int i = 0; i < 10; i++) {
    try (FsVolumeReference ref =
        volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
      // The second (closed) volume must never be chosen.
      assertNotEquals(ref.getVolume(), volumes.get(1));
    }
  }
}
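The test references fixtures (conf, dataset, blockScanner, blockChooser, baseDir) that are initialized in the test class's setup. A rough sketch of how such a setup might look, assuming Mockito is used for the dataset mock; the field names follow the test above, but the exact initialization is an assumption and may differ from TestFsVolumeList.

// Hypothetical setUp sketch; the real TestFsVolumeList setup may differ.
private FsDatasetImpl dataset;
private File baseDir;
private BlockScanner blockScanner;
private RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser;
private final Configuration conf = new Configuration();

@Before
public void setUp() {
  dataset = Mockito.mock(FsDatasetImpl.class);
  baseDir = new File(GenericTestUtils.getTestDir(), "fsvolumelist");
  // Use a negative scan period so the block scanner stays inactive in tests.
  Configuration scannerConf = new Configuration();
  scannerConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
  blockScanner = new BlockScanner(null, scannerConf);
  blockChooser = new RoundRobinVolumeChoosingPolicy<>();
}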
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestCheckpoint, method testEditFailureBeforeRename.
/**
* Test that a fault while downloading edits does not prevent future
* checkpointing
*/
@Test(timeout = 30000)
public void testEditFailureBeforeRename() throws IOException {
  Configuration conf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    secondary = startSecondaryNameNode(conf);
    DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short) 1, 0L);
    secondary.doCheckpoint();
    // Cause the edit rename to fail during the next checkpoint.
    Mockito.doThrow(new IOException("Injecting failure before edit rename"))
        .when(faultInjector).beforeEditsRename();
    DFSTestUtil.createFile(fs, new Path("tmpfile1"), 1024, (short) 1, 0L);
    try {
      secondary.doCheckpoint();
      fail("Fault injection failed.");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains(
          "Injecting failure before edit rename", ioe);
    }
    Mockito.reset(faultInjector);
    // Truncate the tmp edits file to simulate a partial download.
    for (StorageDirectory sd : secondary.getFSImage().getStorage()
        .dirIterable(NameNodeDirType.EDITS)) {
      File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
      assertTrue("Expected a single tmp edits file in directory " + sd.toString(),
          tmpEdits.length == 1);
      RandomAccessFile randFile = new RandomAccessFile(tmpEdits[0], "rw");
      randFile.setLength(0);
      randFile.close();
    }
    // The next checkpoint should succeed.
    secondary.doCheckpoint();
  } finally {
    if (secondary != null) {
      secondary.shutdown();
    }
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
    Mockito.reset(faultInjector);
  }
}
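The loop over storage directories uses a tmpEditsFilter field that is not shown here. Below is a rough sketch of one possible filter, assuming the temporary edit segments downloaded by the SecondaryNameNode carry an "edits_tmp"-style prefix; the real filter is defined in TestCheckpoint and may match differently.

// Hypothetical sketch of the tmpEditsFilter used above; the matching rule is
// an assumption, not the project's actual filter.
private static final FilenameFilter tmpEditsFilter = new FilenameFilter() {
  @Override
  public boolean accept(File dir, String name) {
    // Partially downloaded edit segments are written under a temporary name
    // before being renamed into place once the transfer completes.
    return name.startsWith("edits_tmp");
  }
};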