Search in sources :

Example 11 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class TestFileJournalManager defines the method testManyLogsWithGaps.

/**
 * Verifies transaction counting around a gap in the edit log.
 *
 * Sets up a single edits directory with ten rolls and no failures, then
 * deletes the fourth finalized log file. Checks that
 * getNumberOfTransactions reports the correct count both before and after
 * the resulting gap, and that counting from within the gap finds nothing.
 */
@Test
public void testManyLogsWithGaps() throws IOException {
    File editsDir = new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
    NNStorage storage = setupEdits(Collections.<URI>singletonList(editsDir.toURI()), 10);
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    // Transaction ids covered by the 4th log file, which is removed below.
    final long startGapTxId = 3 * TXNS_PER_ROLL + 1;
    final long endGapTxId = 4 * TXNS_PER_ROLL;
    File[] gapFiles = new File(editsDir, "current").listFiles(new FilenameFilter() {

        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId));
        }
    });
    assertEquals(1, gapFiles.length);
    assertTrue(gapFiles[0].delete());
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    // Everything up to (but not including) the gap is countable.
    assertEquals(startGapTxId - 1, getNumberOfTransactions(jm, 1, true, true));
    // Counting from inside the gap yields zero transactions.
    assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
    // rolled 10 times so there should be 11 files.
    assertEquals(11 * TXNS_PER_ROLL - endGapTxId, getNumberOfTransactions(jm, endGapTxId + 1, true, true));
}
Also used : FilenameFilter(java.io.FilenameFilter) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 12 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class TestFileJournalManager defines the method testExcludeInProgressStreams.

/**
 * Checks that in-progress edit log streams are skipped when the caller
 * does not ask for them.
 */
@Test
public void testExcludeInProgressStreams() throws CorruptionException, IOException {
    File editsDir = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
    // Leave the edit log open so an in-progress segment exists on disk.
    NNStorage storage = setupEdits(Collections.<URI>singletonList(editsDir.toURI()), 10, false);
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    // With the in-progress segment excluded, only 100 transactions remain.
    assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
    EditLogInputStream stream = getJournalInputStream(jm, 90, false);
    try {
        FSEditLogOp op;
        // No op past txid 100 should be readable from the stream.
        while ((op = stream.readOp()) != null) {
            assertTrue(op.getTransactionId() <= 100);
        }
    } finally {
        IOUtils.cleanup(LOG, stream);
    }
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)

Example 13 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class TestDataStorage defines the method testAddStorageDirectories.

/**
 * Exercises DataStorage.addStorageLocations: registering the same
 * locations under several namespaces, re-adding in-service locations
 * (which must be a no-op), and then adding a fresh batch of directories.
 */
@Test
public void testAddStorageDirectories() throws IOException, URISyntaxException {
    final int locationCount = 3;
    final int namespaceCount = 3;
    List<StorageLocation> locations = createStorageLocations(locationCount);
    // Register the same volumes under multiple namespaces.
    List<NamespaceInfo> namespaceInfos = createNamespaceInfos(namespaceCount);
    for (NamespaceInfo nsi : namespaceInfos) {
        storage.addStorageLocations(mockDN, nsi, locations, START_OPT);
        for (StorageLocation location : locations) {
            File volumeRoot = new File(location.getUri());
            checkDir(volumeRoot);
            checkDir(volumeRoot, nsi.getBlockPoolID());
        }
    }
    assertEquals(locationCount, storage.getNumStorageDirs());
    locations = createStorageLocations(locationCount);
    List<StorageDirectory> added = storage.addStorageLocations(mockDN, namespaceInfos.get(0), locations, START_OPT);
    assertTrue(added.isEmpty());
    // The number of active storage dirs is unchanged: the dirs being
    // re-added are already under service.
    assertEquals(locationCount, storage.getNumStorageDirs());
    // A larger batch of new directories is accepted.
    locations = createStorageLocations(6);
    storage.addStorageLocations(mockDN, nsInfo, locations, START_OPT);
    assertEquals(6, storage.getNumStorageDirs());
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) File(java.io.File) Test(org.junit.Test)

Example 14 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class OfflineEditsViewerHelper defines the method getEditsFilename.

/**
 * Resolves the finalized edits file for the cluster's single edits
 * storage directory.
 *
 * @param sig checkpoint signature whose current segment txid bounds the file
 * @return absolute path of the finalized edits file
 * @throws IOException if name node storage cannot be accessed
 */
private String getEditsFilename(CheckpointSignature sig) throws IOException {
    FSImage image = cluster.getNameNode().getFSImage();
    // The cluster was set up with exactly ONE edits StorageDirectory,
    // so taking the first iterator element is sufficient.
    StorageDirectory sd = image.getStorage().dirIterator(NameNodeDirType.EDITS).next();
    File editsFile = NNStorage.getFinalizedEditsFile(sd, 1, sig.curSegmentTxId - 1);
    assert editsFile.exists() : "expected " + editsFile + " exists";
    return editsFile.getAbsolutePath();
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File)

Example 15 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class FSImageTestUtil defines the method findNewestImageFile.

/**
 * Locates the fsimage file with the most recent transaction ID in the
 * given 'current/' directory.
 *
 * @param currentDirPath path to a name node 'current/' directory
 * @return the newest fsimage file, or null if none is present
 * @throws IOException if the directory cannot be inspected
 */
public static File findNewestImageFile(String currentDirPath) throws IOException {
    StorageDirectory imageDir = FSImageTestUtil.mockStorageDirectory(new File(currentDirPath), NameNodeDirType.IMAGE);
    FSImageTransactionalStorageInspector inspector = new FSImageTransactionalStorageInspector();
    inspector.inspectDirectory(imageDir);
    List<FSImageFile> images = inspector.getLatestImages();
    if (images.isEmpty()) {
        return null;
    }
    return images.get(0).getFile();
}
Also used : FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) File(java.io.File)

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)83 File (java.io.File)59 Test (org.junit.Test)45 RandomAccessFile (java.io.RandomAccessFile)29 IOException (java.io.IOException)24 Configuration (org.apache.hadoop.conf.Configuration)22 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)21 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)20 EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile)19 NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile)15 URI (java.net.URI)11 FileSystem (org.apache.hadoop.fs.FileSystem)11 Path (org.apache.hadoop.fs.Path)10 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)9 FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile)7 FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile)6 InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)5 AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec)5 ArrayList (java.util.ArrayList)4 StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState)4