Search in sources:

Example 81 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class TestSecurityTokenEditLog, method testEditLog.

/**
 * Tests transaction logging in dfs.
 *
 * <p>Starts a MiniDFSCluster, runs {@code NUM_THREADS} concurrent
 * transaction threads against the edit log with a deliberately small
 * flush buffer, then replays every finalized edits file and asserts
 * that the expected number of transactions was recorded.
 */
@Test
public void testEditLog() throws IOException {
    // start a cluster 
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    try {
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        final FSNamesystem namesystem = cluster.getNamesystem();
        // Log the name directories for diagnostics.
        for (URI nameDir : cluster.getNameDirs(0)) {
            System.out.println(new File(nameDir.getPath()));
        }
        FSImage fsimage = namesystem.getFSImage();
        FSEditLog editLog = fsimage.getEditLog();
        // Set a small flush buffer so the log is flushed frequently,
        // exercising concurrent flush paths.
        editLog.setOutputBufferCapacity(2048);
        // Create threads and make them run transactions concurrently.
        Thread[] threadId = new Thread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++) {
            Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
            threadId[i] = new Thread(trans, "TransactionThread-" + i);
            threadId[i].start();
        }
        // Wait for all transactions to get over. Retry join() if we are
        // interrupted, but remember the interrupt so we can restore the
        // thread's interrupted status afterwards instead of swallowing it.
        boolean interrupted = false;
        for (int i = 0; i < NUM_THREADS; i++) {
            try {
                threadId[i].join();
            } catch (InterruptedException e) {
                interrupted = true;
                // retry joining the same thread
                i--;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        editLog.close();
        // Verify that we can read in all the transactions that we have written.
        // If there were any corruptions, it is likely that the reading in
        // of these transactions will throw an exception.
        //
        namesystem.getDelegationTokenSecretManager().stopThreads();
        int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
        // + 2 for the BEGIN and END txns
        int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys + 2;
        for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
            File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
            System.out.println("Verifying file: " + editFile);
            FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
            long numEdits = loader.loadFSEdits(new EditLogFileInputStream(editFile), 1);
            assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
        }
    } finally {
        if (fileSys != null)
            fileSys.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) URI(java.net.URI) FileSystem(org.apache.hadoop.fs.FileSystem) File(java.io.File) Test(org.junit.Test)

Example 82 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class TestStorageRestore, method invalidateStorage.

/**
 * Invalidates storage by marking the given directories as failed and
 * injecting a write fault into the journal streams that back them.
 *
 * @param fi                the FSImage whose storage/journals are targeted
 * @param filesToInvalidate storage roots that should start failing
 * @throws IOException if reporting the directory errors fails
 */
public void invalidateStorage(FSImage fi, Set<File> filesToInvalidate) throws IOException {
    // Collect the storage directories whose roots were requested.
    ArrayList<StorageDirectory> errorDirs = new ArrayList<StorageDirectory>(2);
    for (Iterator<StorageDirectory> dirs = fi.getStorage().dirIterator(); dirs.hasNext(); ) {
        StorageDirectory dir = dirs.next();
        if (!filesToInvalidate.contains(dir.getRoot())) {
            continue;
        }
        LOG.info("causing IO error on " + dir.getRoot());
        errorDirs.add(dir);
    }
    // simulate an error
    fi.getStorage().reportErrorsOnDirectories(errorDirs);
    // Wrap the matching journals' output streams in spies that throw on write.
    for (JournalAndStream jas : fi.getEditLog().getJournals()) {
        if (!(jas.getManager() instanceof FileJournalManager)) {
            continue;
        }
        FileJournalManager journal = (FileJournalManager) jas.getManager();
        File root = journal.getStorageDirectory().getRoot();
        if (root.equals(path2) || root.equals(path3)) {
            EditLogOutputStream spyStream = spy(jas.getCurrentStream());
            jas.setCurrentStreamForTests(spyStream);
            doThrow(new IOException("Injected fault: write")).when(spyStream).write(Mockito.<FSEditLogOp>anyObject());
        }
    }
}
Also used : JournalAndStream(org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream) ArrayList(java.util.ArrayList) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException)

Example 83 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

The class TestHAStateTransitions, method createEmptyInProgressEditLog.

/**
 * Creates an empty in-progress edits file in the shared edits directory,
 * named for the transaction after the NN's last written txid, optionally
 * writing a valid edit-log header into it.
 *
 * @param cluster     cluster providing the shared edits directory
 * @param nn          name node whose last written txid names the file
 * @param writeHeader whether to write the current layout-version header
 * @throws IOException if the file cannot be created or written
 */
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster, NameNode nn, boolean writeHeader) throws IOException {
    long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
    URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
    File sharedEditsDir = new File(sharedEditsUri.getPath());
    StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
    File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir, txid + 1);
    assertTrue("Failed to create in-progress edits file", inProgressFile.createNewFile());
    if (writeHeader) {
        // try-with-resources guarantees the stream is closed even if
        // writeHeader throws, avoiding a file-handle leak.
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream(inProgressFile))) {
            EditLogFileOutputStream.writeHeader(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
        }
    }
}
Also used : FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) EditLogFileOutputStream(org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream) FileOutputStream(java.io.FileOutputStream) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) URI(java.net.URI) File(java.io.File)

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)83 File (java.io.File)59 Test (org.junit.Test)45 RandomAccessFile (java.io.RandomAccessFile)29 IOException (java.io.IOException)24 Configuration (org.apache.hadoop.conf.Configuration)22 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)21 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)20 EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile)19 NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile)15 URI (java.net.URI)11 FileSystem (org.apache.hadoop.fs.FileSystem)11 Path (org.apache.hadoop.fs.Path)10 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)9 FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile)7 FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile)6 InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException)5 AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec)5 ArrayList (java.util.ArrayList)4 StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState)4