Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestFileJournalManager, method testManyLogsWithGaps.
/**
 * Test that we count the correct number of transactions around a gap.
 * Set up a single edits directory with no failures, then delete the 4th
 * log file. Verify that getNumberOfTransactions returns the correct number
 * of transactions before and after the gap, and that an exception is
 * thrown if you try to count starting from a transaction inside the gap.
 */
@Test
public void testManyLogsWithGaps() throws IOException {
  File f = new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  final long startGapTxId = 3 * TXNS_PER_ROLL + 1;
  final long endGapTxId = 4 * TXNS_PER_ROLL;
  File[] files = new File(f, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId));
    }
  });
  assertEquals(1, files.length);
  assertTrue(files[0].delete());
  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  assertEquals(startGapTxId - 1, getNumberOfTransactions(jm, 1, true, true));
  assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
  // Rolled 10 times, so there should be 11 files.
  assertEquals(11 * TXNS_PER_ROLL - endGapTxId,
      getNumberOfTransactions(jm, endGapTxId + 1, true, true));
}
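The assertions above reduce to simple transaction-ID arithmetic. Below is a minimal, self-contained sketch of that arithmetic, under the assumption that TXNS_PER_ROLL is 10 (the roll size used by the test fixture's setupEdits; that value is an assumption here, not shown in the snippet):

// Sketch of the gap arithmetic only; assumes TXNS_PER_ROLL = 10.
public class GapArithmeticSketch {
  static final long TXNS_PER_ROLL = 10; // assumption: roll size used by setupEdits

  public static void main(String[] args) {
    long startGapTxId = 3 * TXNS_PER_ROLL + 1; // 31: first txid of the deleted 4th log
    long endGapTxId = 4 * TXNS_PER_ROLL;       // 40: last txid of the deleted 4th log
    // Transactions readable before the gap: txids 1..30.
    System.out.println(startGapTxId - 1);                // 30
    // Rolling 10 times leaves 11 segments covering txids up to 11 * TXNS_PER_ROLL,
    // so after the gap txids 41..110 remain readable.
    System.out.println(11 * TXNS_PER_ROLL - endGapTxId); // 70
  }
}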
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestFileJournalManager, method testExcludeInProgressStreams.
/**
* Make sure that in-progress streams aren't counted if we don't ask for
* them.
*/
@Test
public void testExcludeInProgressStreams() throws CorruptionException, IOException {
  File f = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
  // Don't close the edit log once the files have been set up.
  NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, false);
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  // If we exclude the in-progress stream, we should only have 100 tx.
  assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
  EditLogInputStream elis = getJournalInputStream(jm, 90, false);
  try {
    FSEditLogOp lastReadOp = null;
    while ((lastReadOp = elis.readOp()) != null) {
      assertTrue(lastReadOp.getTransactionId() <= 100);
    }
  } finally {
    IOUtils.cleanup(LOG, elis);
  }
}
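For contrast, here is a hedged sketch of the complementary check, which is not part of the original test: counting with the in-progress stream included should surface more than the 100 finalized transactions. Since the exact size of the unclosed segment is not shown above, only a lower bound is asserted:

  // Hedged sketch (assumption: the third argument of getNumberOfTransactions
  // is inProgressOk, as the calls above suggest).
  long withInProgress = getNumberOfTransactions(jm, 1, true, false);
  assertTrue("expected the in-progress segment to add transactions",
      withInProgress > 100);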
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestDataStorage, method testAddStorageDirectories.
@Test
public void testAddStorageDirectories() throws IOException, URISyntaxException {
  final int numLocations = 3;
  final int numNamespace = 3;
  List<StorageLocation> locations = createStorageLocations(numLocations);
  // Add volumes for multiple namespaces.
  List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
  for (NamespaceInfo ni : namespaceInfos) {
    storage.addStorageLocations(mockDN, ni, locations, START_OPT);
    for (StorageLocation sl : locations) {
      checkDir(new File(sl.getUri()));
      checkDir(new File(sl.getUri()), ni.getBlockPoolID());
    }
  }
  assertEquals(numLocations, storage.getNumStorageDirs());
  locations = createStorageLocations(numLocations);
  List<StorageDirectory> addedLocation =
      storage.addStorageLocations(mockDN, namespaceInfos.get(0), locations, START_OPT);
  assertTrue(addedLocation.isEmpty());
  // The number of active storage dirs has not changed, because this call
  // tries to re-add storage dirs that are already in service.
  assertEquals(numLocations, storage.getNumStorageDirs());
  // Add more directories.
  locations = createStorageLocations(6);
  storage.addStorageLocations(mockDN, nsInfo, locations, START_OPT);
  assertEquals(6, storage.getNumStorageDirs());
}
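The createStorageLocations helper is defined elsewhere in TestDataStorage and is not shown above. A minimal sketch of what such a helper might look like, assuming it creates index-named directories under a test root (TEST_DIR and the naming scheme are assumptions, not the actual implementation):

  // Hypothetical sketch, not the actual TestDataStorage helper.
  private List<StorageLocation> createStorageLocations(int numLocs) throws IOException {
    List<StorageLocation> locations = new ArrayList<>();
    for (int i = 0; i < numLocs; i++) {
      File dir = new File(TEST_DIR, "data" + i); // assumed naming, reused across calls
      if (!dir.exists()) {
        assertTrue(dir.mkdirs());
      }
      locations.add(StorageLocation.parse(dir.getCanonicalPath()));
    }
    return locations;
  }

Index-based names that repeat across calls would explain why the second addStorageLocations call in the test finds every location already in service and adds nothing, while createStorageLocations(6) yields three existing plus three new directories for a total of six.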
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class OfflineEditsViewerHelper, method getEditsFilename.
/**
 * Get the edits file name for the cluster.
 *
 * @return the absolute path of the finalized edits file
 */
private String getEditsFilename(CheckpointSignature sig) throws IOException {
  FSImage image = cluster.getNameNode().getFSImage();
  // It was set up to only have ONE StorageDirectory.
  Iterator<StorageDirectory> it = image.getStorage().dirIterator(NameNodeDirType.EDITS);
  StorageDirectory sd = it.next();
  File ret = NNStorage.getFinalizedEditsFile(sd, 1, sig.curSegmentTxId - 1);
  assert ret.exists() : "expected " + ret + " to exist";
  return ret.getAbsolutePath();
}
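A hedged usage sketch, not from the original helper class: the CheckpointSignature would typically come from rolling the edit log, after which curSegmentTxId marks the first transaction of the new segment, so the finalized file covers transactions 1 through curSegmentTxId - 1.

  // Assumption: cluster is the running MiniDFSCluster used elsewhere in this class.
  CheckpointSignature sig = cluster.getNameNodeRpc().rollEditLog();
  String editsPath = getEditsFilename(sig);
  // editsPath can then be fed to the offline edits viewer under test.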
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class FSImageTestUtil, method findNewestImageFile.
/**
* @return the fsimage file with the most recent transaction ID in the
* given 'current/' directory.
*/
public static File findNewestImageFile(String currentDirPath) throws IOException {
  StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
      new File(currentDirPath), NameNodeDirType.IMAGE);
  FSImageTransactionalStorageInspector inspector =
      new FSImageTransactionalStorageInspector();
  inspector.inspectDirectory(sd);
  List<FSImageFile> latestImages = inspector.getLatestImages();
  return (latestImages.isEmpty()) ? null : latestImages.get(0).getFile();
}
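A hedged usage sketch; the path below is illustrative, not from the original:

  // Locate the most recent checkpoint image under a NameNode storage directory.
  File newest = FSImageTestUtil.findNewestImageFile("/tmp/hadoop/dfs/name/current");
  if (newest != null) {
    // fsimage files are named fsimage_<txid>, so the newest carries the highest txid.
    System.out.println("Newest fsimage: " + newest.getName());
  }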