Search in sources :

Example 1 with AbortSpec

use of org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec in project hadoop by apache.

In the class TestFileJournalManager, the method testFinalizeErrorReportedToNNStorage:

/**
 * Verify that an error while finalising a log segment is surfaced to
 * NNStorage: the storage directory is made read-only so finalisation
 * fails, an IllegalStateException is expected, and the directory must
 * appear in NNStorage's removed-directory list.
 */
@Test(expected = IllegalStateException.class)
public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
    File journalDir = new File(TestEditLog.TEST_DIR + "/filejournaltestError");
    // populate the directory, aborting after the 10th roll
    NNStorage storage = setupEdits(Collections.<URI>singletonList(journalDir.toURI()), 10, new AbortSpec(10, 0));
    StorageDirectory dir = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager journal = new FileJournalManager(conf, dir, storage);
    String rootPath = dir.getRoot().getAbsolutePath();
    // strip write permission so finalizeLogSegment cannot succeed
    FileUtil.chmod(rootPath, "-w", true);
    try {
        journal.finalizeLogSegment(0, 1);
    } finally {
        // restore permissions so the test directory can be cleaned up
        FileUtil.chmod(rootPath, "+w", true);
        // the failing directory must have been reported as removed
        assertTrue(storage.getRemovedStorageDirs().contains(dir));
    }
}
Also used : AbortSpec(org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 2 with AbortSpec

use of org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec in project hadoop by apache.

In the class TestFileJournalManager, the method testInprogressRecoveryMixed:

/**
 * Exercise recovery with a mix of finalised and in-progress segments.
 * Three edits directories are populated; the second (index 1) is
 * aborted on the final roll. Reading back must yield the full
 * transaction count from the two healthy directories and the truncated
 * count from the failed one.
 */
@Test
public void testInprogressRecoveryMixed() throws IOException {
    File dir0 = new File(TestEditLog.TEST_DIR + "/mixtest0");
    File dir1 = new File(TestEditLog.TEST_DIR + "/mixtest1");
    File dir2 = new File(TestEditLog.TEST_DIR + "/mixtest2");
    List<URI> uris = ImmutableList.of(dir0.toURI(), dir1.toURI(), dir2.toURI());
    // the directory at index 1 aborts during the segment after the 5th roll
    NNStorage storage = setupEdits(uris, 5, new AbortSpec(5, 1));
    Iterator<StorageDirectory> it = storage.dirIterator(NameNodeDirType.EDITS);
    // first directory: every roll completed
    FileJournalManager journal = new FileJournalManager(conf, it.next(), storage);
    assertEquals(6 * TXNS_PER_ROLL, getNumberOfTransactions(journal, 1, true, false));
    // second directory: final segment was aborted part-way through
    journal = new FileJournalManager(conf, it.next(), storage);
    assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(journal, 1, true, false));
    // third directory: every roll completed
    journal = new FileJournalManager(conf, it.next(), storage);
    assertEquals(6 * TXNS_PER_ROLL, getNumberOfTransactions(journal, 1, true, false));
}
Also used : AbortSpec(org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) URI(java.net.URI) Test(org.junit.Test)

Example 3 with AbortSpec

use of org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec in project hadoop by apache.

In the class TestFileJournalManager, the method testInprogressRecovery:

/**
 * Check handling of an in-progress segment in a single edits directory.
 * The writer is aborted after the 5th roll, so the directory ends with
 * five finalised segments plus a truncated in-progress one; the reader
 * must report the corresponding transaction count.
 */
@Test
public void testInprogressRecovery() throws IOException {
    File editsDir = new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
    // writer dies during the segment following the 5th roll
    NNStorage storage = setupEdits(Collections.<URI>singletonList(editsDir.toURI()), 5, new AbortSpec(5, 0));
    StorageDirectory dir = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager journal = new FileJournalManager(conf, dir, storage);
    // five full rolls plus the partial transactions of the aborted segment
    assertEquals(5 * TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(journal, 1, true, false));
}
Also used : AbortSpec(org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 4 with AbortSpec

use of org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec in project hadoop by apache.

In the class TestFileJournalManager, the method testManyLogsWithCorruptInprogress:

/** 
 * Test that we can load an edits directory with a corrupt inprogress file.
 * The corrupt inprogress file should be moved to the side.
 */
@Test
public void testManyLogsWithCorruptInprogress() throws IOException {
    File f = new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
    // abort after the 10th roll so one inprogress segment is left behind
    NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, new AbortSpec(10, 0));
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    // locate the inprogress edits file left by the aborted roll
    File[] files = new File(f, "current").listFiles(new FilenameFilter() {

        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith("edits_inprogress");
        }
    });
    // FIX: JUnit assertEquals takes (expected, actual); the arguments
    // were swapped, which would produce a misleading failure message.
    assertEquals(1, files.length);
    corruptAfterStartSegment(files[0]);
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    // the corrupt segment contributes only its header transaction (+1)
    assertEquals(10 * TXNS_PER_ROLL + 1, getNumberOfTransactions(jm, 1, true, false));
}
Also used : FilenameFilter(java.io.FilenameFilter) AbortSpec(org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 5 with AbortSpec

use of org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec in project hadoop by apache.

In the class TestFileJournalManager, the method testReadFromStream:

/**
 * Read transactions through streams created by FileJournalManager.
 * A single directory is populated, aborting on the final (10th) roll;
 * reading is then started from just past the 3rd roll and the number
 * of loadable transactions from that point is checked.
 */
@Test
public void testReadFromStream() throws IOException {
    File editsDir = new File(TestEditLog.TEST_DIR + "/readfromstream");
    // writer aborts during the segment following the 10th roll
    NNStorage storage = setupEdits(Collections.<URI>singletonList(editsDir.toURI()), 10, new AbortSpec(10, 0));
    StorageDirectory dir = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager journal = new FileJournalManager(conf, dir, storage);
    long totalTxns = TXNS_PER_ROLL * 10 + TXNS_PER_FAIL;
    assertEquals(totalTxns, getNumberOfTransactions(journal, 1, true, false));
    // now read starting after the first three finalised segments
    long skippedTxns = (3 * TXNS_PER_ROLL);
    long firstTxId = skippedTxns + 1;
    long loadable = getNumberOfTransactions(journal, firstTxId, true, false);
    assertEquals(totalTxns - skippedTxns, loadable);
}
Also used : AbortSpec(org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Aggregations

File (java.io.File)6 RandomAccessFile (java.io.RandomAccessFile)6 StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)6 AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec)6 Test (org.junit.Test)6 URI (java.net.URI)2 FilenameFilter (java.io.FilenameFilter)1