
Example 1 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From class StorageAdapter, method spyOnStorageDirectory.

/**
   * Inject and return a spy on a storage directory.
   */
public static StorageDirectory spyOnStorageDirectory(Storage s, int idx) {
    StorageDirectory dir = Mockito.spy(s.getStorageDir(idx));
    s.getStorageDirs().set(idx, dir);
    return dir;
}
Also used: StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)
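
A hedged usage sketch of the helper above: once the spy replaces the real directory, Mockito can stub StorageDirectory methods to inject faults into code that walks the storage directories. The storage fixture, the injected exception, and the choice of clearDirectory() as the stubbed method are illustrative assumptions, not part of the original helper.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.mockito.Mockito;

public class SpyOnStorageSketch {
    // 'storage' is any Storage instance from a test fixture (an assumption);
    // StorageAdapter is the Hadoop test utility shown above.
    static void injectFaultyDirectory(Storage storage) throws IOException {
        StorageDirectory spyDir = StorageAdapter.spyOnStorageDirectory(storage, 0);
        // Stub a method on the spy so callers see a simulated disk fault.
        Mockito.doThrow(new IOException("injected fault"))
               .when(spyDir).clearDirectory();
    }
}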

Example 2 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From class TestEditLog, method testReadActivelyUpdatedLog.

/**
   * Test that reading an actively updated in-progress edit log does not read
   * past the last synced transaction, even when garbage bytes follow it.
   */
@Test
public void testReadActivelyUpdatedLog() throws Exception {
    final TestAppender appender = new TestAppender();
    LogManager.getRootLogger().addAppender(appender);
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // Set single handler thread, so all transactions hit same thread-local ops.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        FSImage fsimage = cluster.getNamesystem().getFSImage();
        StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
        final DistributedFileSystem fileSys = cluster.getFileSystem();
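        // Opening an inotify stream gives the test a tailing reader over the
        // edit log; polling it later forces a read of the in-progress segment.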
        DFSInotifyEventInputStream events = fileSys.getInotifyEventStream();
        fileSys.mkdirs(new Path("/test"));
        fileSys.mkdirs(new Path("/test/dir1"));
        fileSys.delete(new Path("/test/dir1"), true);
        fsimage.getEditLog().logSync();
        fileSys.mkdirs(new Path("/test/dir2"));
        final File inProgressEdit = NNStorage.getInProgressEditsFile(sd, 1);
        assertTrue(inProgressEdit.exists());
        EditLogFileInputStream elis = new EditLogFileInputStream(inProgressEdit);
        FSEditLogOp op;
        long pos = 0;
        while (true) {
            op = elis.readOp();
            if (op != null && op.opCode != FSEditLogOpCodes.OP_INVALID) {
                pos = elis.getPosition();
            } else {
                break;
            }
        }
        elis.close();
        assertTrue(pos > 0);
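        // pos is now the offset just past the last valid op; the byte there
        // should be the OP_INVALID (-1) end-of-log marker. Write garbage one
        // byte further to simulate unsynced data beyond the last valid op.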
        RandomAccessFile rwf = new RandomAccessFile(inProgressEdit, "rw");
        rwf.seek(pos);
        assertEquals((byte) -1, rwf.readByte());
        rwf.seek(pos + 1);
        rwf.writeByte(2);
        rwf.close();
        events.poll();
        String pattern = "Caught exception after reading (.*) ops";
        Pattern r = Pattern.compile(pattern);
        final List<LoggingEvent> log = appender.getLog();
        for (LoggingEvent event : log) {
            Matcher m = r.matcher(event.getRenderedMessage());
            if (m.find()) {
                fail("Should not try to read past latest syned edit log op");
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        LogManager.getRootLogger().removeAppender(appender);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) Pattern(java.util.regex.Pattern) Matcher(java.util.regex.Matcher) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DFSInotifyEventInputStream(org.apache.hadoop.hdfs.DFSInotifyEventInputStream) LoggingEvent(org.apache.log4j.spi.LoggingEvent) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
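
The TestAppender used above is a small capture helper from the Hadoop test sources. A minimal sketch of what such an appender might look like, assuming the log4j 1.x AppenderSkeleton API; the real helper may differ:

import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;

class TestAppender extends AppenderSkeleton {
    private final List<LoggingEvent> log = new ArrayList<>();

    @Override
    protected void append(LoggingEvent event) {
        // Record every event so the test can scan rendered messages later.
        log.add(event);
    }

    public List<LoggingEvent> getLog() {
        // Hand back a copy so callers cannot mutate the captured list.
        return new ArrayList<>(log);
    }

    @Override
    public void close() {
        // No resources to release.
    }

    @Override
    public boolean requiresLayout() {
        // Raw LoggingEvents are kept, so no layout is needed.
        return false;
    }
}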

Example 3 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From class TestEditLogRace, method testEditLogRolling.

/**
   * Tests rolling edit logs while transactions are ongoing.
   */
@Test
public void testEditLogRolling() throws Exception {
    // start a cluster 
    Configuration conf = getConf();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    FileSystem fileSys = null;
    AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
    try {
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        final NamenodeProtocols nn = cluster.getNameNode().getRpcServer();
        FSImage fsimage = cluster.getNamesystem().getFSImage();
        StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
        startTransactionWorkers(cluster, caughtErr);
        long previousLogTxId = 1;
        for (int i = 0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
            try {
                Thread.sleep(20);
            } catch (InterruptedException e) {
            }
            LOG.info("Starting roll " + i + ".");
            CheckpointSignature sig = nn.rollEditLog();
            long nextLog = sig.curSegmentTxId;
            String logFileName = NNStorage.getFinalizedEditsFileName(previousLogTxId, nextLog - 1);
            previousLogTxId += verifyEditLogs(cluster.getNamesystem(), fsimage, logFileName, previousLogTxId);
            assertEquals(previousLogTxId, nextLog);
            File expectedLog = NNStorage.getInProgressEditsFile(sd, previousLogTxId);
            assertTrue("Expect " + expectedLog + " to exist", expectedLog.exists());
        }
    } finally {
        stopTransactionWorkers();
        if (caughtErr.get() != null) {
            throw new RuntimeException(caughtErr.get());
        }
        if (fileSys != null)
            fileSys.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) AtomicReference(java.util.concurrent.atomic.AtomicReference) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) FileSystem(org.apache.hadoop.fs.FileSystem) File(java.io.File) Test(org.junit.Test)
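
The transaction-ID arithmetic in the loop leans on NNStorage's segment naming: a finalized segment is named by its first and last txid, while the segment still being written carries an "inprogress" marker and its first txid only. A hedged sketch; the zero-padded formats in the comments, and the existence of getInProgressEditsFileName alongside the getInProgressEditsFile helper used above, are assumptions about NNStorage internals:

// After rolling at txid 100, the old segment is finalized and a new
// in-progress segment starts at txid 101.
long firstTxId = 1;
long lastTxId = 100;
// Typically a name like "edits_0000000000000000001-0000000000000000100".
String finalized = NNStorage.getFinalizedEditsFileName(firstTxId, lastTxId);
// Typically a name like "edits_inprogress_0000000000000000101".
String inProgress = NNStorage.getInProgressEditsFileName(lastTxId + 1);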

Example 4 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From class TestFSEditLogLoader, method testDisplayRecentEditLogOpCodes.

@Test
public void testDisplayRecentEditLogOpCodes() throws IOException {
    // start a cluster
    Configuration conf = getConf();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    for (int i = 0; i < 20; i++) {
        fileSys.mkdirs(new Path("/tmp/tmp" + i));
    }
    StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
    cluster.shutdown();
    File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
    assertTrue("Should exist: " + editFile, editFile.exists());
    // Corrupt the edits file.
    long fileLen = editFile.length();
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    rwf.seek(fileLen - 40);
    for (int i = 0; i < 20; i++) {
        rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
    }
    rwf.close();
    StringBuilder bld = new StringBuilder();
    bld.append("^Error replaying edit log at offset \\d+.  ");
    bld.append("Expected transaction ID was \\d+\n");
    bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).enableManagedDfsDirsRedundancy(false).format(false).build();
        fail("should not be able to start");
    } catch (IOException e) {
        assertTrue("error message contains opcodes message", e.getMessage().matches(bld.toString()));
    }
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
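
The corruption step above (seek near the end of the file, stomp opcode bytes) is a reusable pattern for provoking replay failures. A hedged sketch of a stand-alone helper; the name stompTail and its parameters are hypothetical:

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class EditLogCorruptor {
    // Overwrite 'count' bytes, starting 'tailOffset' bytes before the end
    // of the file, with the given byte value.
    static void stompTail(File f, long tailOffset, byte value, int count)
            throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
            raf.seek(f.length() - tailOffset);
            for (int i = 0; i < count; i++) {
                raf.write(value);
            }
        }
    }
}

With such a helper, the corruption in the test reduces to stompTail(editFile, 40, FSEditLogOpCodes.OP_DELETE.getOpCode(), 20).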

Example 5 with StorageDirectory

Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in the Apache Hadoop project.

From class TestFileJournalManager, method testFinalizeErrorReportedToNNStorage.

@Test(expected = IllegalStateException.class)
public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
    File f = new File(TestEditLog.TEST_DIR + "/filejournaltestError");
    // abort after 10th roll
    NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, new AbortSpec(10, 0));
    StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
    String sdRootPath = sd.getRoot().getAbsolutePath();
    FileUtil.chmod(sdRootPath, "-w", true);
    try {
        jm.finalizeLogSegment(0, 1);
    } finally {
        FileUtil.chmod(sdRootPath, "+w", true);
        assertTrue(storage.getRemovedStorageDirs().contains(sd));
    }
}
Also used: AbortSpec(org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
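
The test relies on a common fault-injection pattern: recursively revoke write permission on the storage directory, exercise the failing path, and restore the permission in a finally block so later tests are unaffected. A minimal sketch of the pattern in isolation; withReadOnlyDir and runFailingOperation are hypothetical names:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;

public class ReadOnlyDirSketch {
    static void withReadOnlyDir(File dir, Runnable runFailingOperation)
            throws IOException, InterruptedException {
        String path = dir.getAbsolutePath();
        // Recursively remove write permission to provoke the failure.
        FileUtil.chmod(path, "-w", true);
        try {
            runFailingOperation.run();
        } finally {
            // Always restore write permission, even if the operation threw.
            FileUtil.chmod(path, "+w", true);
        }
    }
}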

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 83 uses
File (java.io.File): 59 uses
Test (org.junit.Test): 45 uses
RandomAccessFile (java.io.RandomAccessFile): 29 uses
IOException (java.io.IOException): 24 uses
Configuration (org.apache.hadoop.conf.Configuration): 22 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 21 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 20 uses
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile): 19 uses
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 15 uses
URI (java.net.URI): 11 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 11 uses
Path (org.apache.hadoop.fs.Path): 10 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9 uses
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile): 7 uses
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile): 6 uses
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException): 5 uses
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec): 5 uses
ArrayList (java.util.ArrayList): 4 uses
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState): 4 uses