Example usage of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project, from the class TestFileJournalManager, method testInprogressRecoveryMixed.
/**
 * Tests a mixture of in-progress and finalized edit files. Sets up three
 * edits directories and fails the second one on the last roll, then
 * verifies that reading the transactions reads the expected count from
 * each directory.
 */
@Test
public void testInprogressRecoveryMixed() throws IOException {
  File dir0 = new File(TestEditLog.TEST_DIR + "/mixtest0");
  File dir1 = new File(TestEditLog.TEST_DIR + "/mixtest1");
  File dir2 = new File(TestEditLog.TEST_DIR + "/mixtest2");
  List<URI> editUris = ImmutableList.of(dir0.toURI(), dir1.toURI(), dir2.toURI());
  // Roll 5 times; the directory at index 1 aborts on the 5th roll.
  NNStorage storage = setupEdits(editUris, 5, new AbortSpec(5, 1));
  // Expected transactions per directory, in iteration order: the aborted
  // directory holds only the transactions written before its failure.
  long[] expectedTxns = {
      6 * TXNS_PER_ROLL,
      5 * TXNS_PER_ROLL + TXNS_PER_FAIL,
      6 * TXNS_PER_ROLL
  };
  Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
  for (long expected : expectedTxns) {
    FileJournalManager journal = new FileJournalManager(conf, dirs.next(), storage);
    assertEquals(expected, getNumberOfTransactions(journal, 1, true, false));
  }
}
Example usage of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project, from the class TestFileJournalManager, method testInprogressRecovery.
/**
 * Tests that in-progress edit files are handled correctly. Sets up a single
 * edits directory, fails it after the last roll, and then verifies that the
 * logs contain the expected number of transactions.
 */
@Test
public void testInprogressRecovery() throws IOException {
  File editsDir = new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
  // Roll 5 times, aborting the only directory (index 0) on the 5th roll.
  NNStorage storage =
      setupEdits(Collections.<URI>singletonList(editsDir.toURI()), 5, new AbortSpec(5, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  FileJournalManager journal = new FileJournalManager(conf, sd, storage);
  // Five full rolls plus the partial segment written before the abort.
  long expected = 5 * TXNS_PER_ROLL + TXNS_PER_FAIL;
  assertEquals(expected, getNumberOfTransactions(journal, 1, true, false));
}
Example usage of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project, from the class TestFileJournalManager, method testManyLogsWithCorruptInprogress.
/**
 * Tests that an edits directory containing a corrupt in-progress file can
 * still be loaded. The corrupt in-progress file should be moved to the side.
 */
@Test
public void testManyLogsWithCorruptInprogress() throws IOException {
  File f = new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
  // Roll 10 times, aborting the only directory (index 0) on the 10th roll,
  // which leaves an in-progress segment behind.
  NNStorage storage =
      setupEdits(Collections.<URI>singletonList(f.toURI()), 10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  // Locate the single in-progress edits file left by the abort.
  File[] files = new File(f, "current").listFiles(new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
      return name.startsWith("edits_inprogress");
    }
  });
  // listFiles() returns null on I/O error; fail clearly rather than NPE.
  assertTrue("could not list files in " + new File(f, "current"), files != null);
  // JUnit convention: expected value first, actual second (was reversed).
  assertEquals(1, files.length);
  corruptAfterStartSegment(files[0]);
  FileJournalManager jm = new FileJournalManager(conf, sd, storage);
  assertEquals(10 * TXNS_PER_ROLL + 1, getNumberOfTransactions(jm, 1, true, false));
}
Example usage of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project, from the class TestFileJournalManager, method testReadFromStream.
/**
 * Tests that we can read from a stream created by FileJournalManager.
 * Creates a single edits directory, failing it on the final roll, then
 * loads from the point of the 3rd roll and verifies that the correct
 * number of transactions is read from that point onward.
 */
@Test
public void testReadFromStream() throws IOException {
  File editsDir = new File(TestEditLog.TEST_DIR + "/readfromstream");
  // Roll 10 times, aborting the only directory (index 0) on the 10th roll.
  NNStorage storage =
      setupEdits(Collections.<URI>singletonList(editsDir.toURI()), 10, new AbortSpec(10, 0));
  StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
  FileJournalManager journal = new FileJournalManager(conf, sd, storage);
  long totalTxns = TXNS_PER_ROLL * 10 + TXNS_PER_FAIL;
  assertEquals(totalTxns, getNumberOfTransactions(journal, 1, true, false));
  // Skip the first 3 finalized segments and count what remains loadable.
  long skippedTxns = 3 * TXNS_PER_ROLL;
  long firstTxId = skippedTxns + 1;
  long remaining = getNumberOfTransactions(journal, firstTxId, true, false);
  assertEquals(totalTxns - skippedTxns, remaining);
}
Example usage of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project, from the class TestFSImageStorageInspector, method testCurrentStorageInspector.
/**
 * Simple inspection test covering an image, finalized edits, and an
 * in-progress edits segment within one storage directory.
 */
@Test
public void testCurrentStorageInspector() throws IOException {
  FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector();
  StorageDirectory mockDir = FSImageTestUtil.mockStorageDirectory(
      NameNodeDirType.IMAGE_AND_EDITS,
      false,
      "/foo/current/" + getImageFileName(123),
      "/foo/current/" + getFinalizedEditsFileName(123, 456),
      "/foo/current/" + getImageFileName(456),
      "/foo/current/" + getInProgressEditsFileName(457));
  inspector.inspectDirectory(mockDir);
  // Both fsimage files (txid 123 and txid 456) should have been discovered.
  assertEquals(2, inspector.foundImages.size());
  FSImageFile latest = inspector.getLatestImages().get(0);
  assertEquals(456, latest.txId);
  assertSame(mockDir, latest.sd);
  assertTrue(inspector.isUpgradeFinalized());
  assertEquals(new File("/foo/current/" + getImageFileName(456)), latest.getFile());
}
Aggregations