Example 36 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestJournalNodeSync method testSyncForMultipleMissingLogs.

@Test(timeout = 30000)
public void testSyncForMultipleMissingLogs() throws Exception {
    File firstJournalDir = jCluster.getJournalDir(0, jid);
    File firstJournalCurrentDir = new StorageDirectory(firstJournalDir).getCurrentDir();
    // Generate some edit logs and delete two.
    long firstTxId = generateEditLog();
    long nextTxId = generateEditLog();
    List<File> missingLogs = Lists.newArrayList();
    missingLogs.add(deleteEditLog(firstJournalCurrentDir, firstTxId));
    missingLogs.add(deleteEditLog(firstJournalCurrentDir, nextTxId));
    // Wait for the JournalNode syncer to copy the deleted segments back from the other JournalNodes.
    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 10000);
}
Also used : StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File) FileJournalManager.getLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) Test(org.junit.Test)
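The helpers generateEditLog, deleteEditLog, and editLogExists are defined elsewhere in TestJournalNodeSync and are not part of this excerpt. A minimal sketch of what deleteEditLog and editLogExists might look like, assuming the statically imported FileJournalManager.getLogFile resolves a finalized segment by its first transaction id and that Supplier is the type expected by GenericTestUtils.waitFor (names and details are illustrative, not the upstream code):

// Hypothetical sketch of the helpers referenced above; the real implementations may differ.
private File deleteEditLog(File currentDir, long startTxId) throws IOException {
    // Resolve the finalized segment starting at startTxId and remove it from disk.
    EditLogFile logFile = getLogFile(currentDir, startTxId);
    File deleteFile = logFile.getFile();
    assertTrue("Could not delete edit log file", deleteFile.delete());
    return deleteFile;
}

private Supplier<Boolean> editLogExists(final List<File> editLogs) {
    // True only once every previously deleted log has been restored by the syncer.
    return new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            for (File editLog : editLogs) {
                if (!editLog.exists()) {
                    return false;
                }
            }
            return true;
        }
    };
}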

Example 37 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class UpgradeUtilities method createNameNodeVersionFile.

/**
   * Create a <code>VERSION</code> file for a namenode inside each of the specified
   * parent directories.  If such a file already exists, it is overwritten.
   * The layout version from the given {@link StorageInfo} is written to the file.
   * None of the parameters may be null.
   *
   * @param conf configuration used to construct the backing NNStorage
   * @param parent parent directories in which the namenode VERSION files are created
   * @param version StorageInfo to create the VERSION files from
   * @param bpid block pool id
   *
   * @return the created version files
   */
public static File[] createNameNodeVersionFile(Configuration conf, File[] parent, StorageInfo version, String bpid) throws IOException {
    Storage storage = new NNStorage(conf, Collections.<URI>emptyList(), Collections.<URI>emptyList());
    storage.setStorageInfo(version);
    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
        versionFiles[i] = new File(parent[i], "VERSION");
        StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
        storage.writeProperties(versionFiles[i], sd);
    }
    return versionFiles;
}
Also used : BlockPoolSliceStorage(org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) NNStorage(org.apache.hadoop.hdfs.server.namenode.NNStorage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) NNStorage(org.apache.hadoop.hdfs.server.namenode.NNStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File)
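A quick sanity check on the result could read one of the written files back with Storage.readPropertiesFile (used the same way in Example 38 below); the "layoutVersion" key is assumed to be the standard property name, and the snippet is illustrative rather than part of the source:

// Hypothetical follow-up check: the layout version should round-trip through the file.
Properties props = Storage.readPropertiesFile(versionFiles[0]);
assertEquals(String.valueOf(version.getLayoutVersion()),
    props.getProperty("layoutVersion"));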

Example 38 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class UpgradeUtilities method createDataNodeStorageDirs.

/**
   * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
   * populated DFS filesystem.
   * For each parent directory, this method populates <code>parent/dirName</code>
   * with the contents of a master datanode storage directory (containing the
   * VERSION file and block files). If the destination directory does not exist,
   * it is created; if it already exists, it is first deleted.
   *
   * @param parents parent directories under which {@code dirName} is created
   * @param dirName name of the storage directory created under each parent
   * @return the array of created directories
   */
public static File[] createDataNodeStorageDirs(String[] parents, String dirName) throws Exception {
    File[] retVal = new File[parents.length];
    for (int i = 0; i < parents.length; i++) {
        File newDir = new File(parents[i], dirName);
        createEmptyDirs(new String[] { newDir.toString() });
        LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
        localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"), new Path(newDir.toString()), false);
        // Change the storage UUID to avoid conflicts when DN starts up.
        StorageDirectory sd = new StorageDirectory(new File(datanodeStorage.toString()));
        sd.setStorageUuid(DatanodeStorage.generateUuid());
        Properties properties = Storage.readPropertiesFile(sd.getVersionFile());
        properties.setProperty("storageID", sd.getStorageUuid());
        Storage.writeProperties(sd.getVersionFile(), properties);
        retVal[i] = newDir;
    }
    return retVal;
}
Also used : Path(org.apache.hadoop.fs.Path) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) Properties(java.util.Properties) File(java.io.File)
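In the upgrade tests this helper is typically driven from the configured data directories. A hedged usage sketch, assuming the data directories have already been set on the configuration (illustrative only):

// Illustrative usage, not taken verbatim from the source: clone the master datanode
// storage into each configured data dir before starting a cluster in upgrade mode.
Configuration conf = new HdfsConfiguration();
String[] dataDirs = conf.getStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
File[] populated = UpgradeUtilities.createDataNodeStorageDirs(dataDirs, "current");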

Example 39 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestFsVolumeList method testGetNextVolumeWithClosedVolume.

@Test(timeout = 30000)
public void testGetNextVolumeWithClosedVolume() throws IOException {
    FsVolumeList volumeList = new FsVolumeList(Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);
    final List<FsVolumeImpl> volumes = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        File curDir = new File(baseDir, "nextvolume-" + i);
        curDir.mkdirs();
        FsVolumeImpl volume = new FsVolumeImplBuilder()
            .setConf(conf)
            .setDataset(dataset)
            .setStorageID("storage-id")
            .setStorageDirectory(new StorageDirectory(StorageLocation.parse(curDir.getPath())))
            .build();
        volume.setCapacityForTesting(1024 * 1024 * 1024);
        volumes.add(volume);
        volumeList.addVolume(volume.obtainReference());
    }
    // Close the second volume.
    volumes.get(1).setClosed();
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return volumes.get(1).checkClosed();
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for volume to be removed.");
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
    }
    for (int i = 0; i < 10; i++) {
        try (FsVolumeReference ref = volumeList.getNextVolume(StorageType.DEFAULT, 128)) {
            // volume No.2 will not be chosen.
            assertNotEquals(ref.getVolume(), volumes.get(1));
        }
    }
}
Also used : ArrayList(java.util.ArrayList) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) File(java.io.File) TimeoutException(java.util.concurrent.TimeoutException) Test(org.junit.Test)
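StorageLocation.parse accepts either a bare filesystem path (as above) or a URI prefixed with a storage type. A small illustrative sketch, with values that are assumptions rather than taken from the source:

// Illustrative only: both forms resolve to a location on the same directory; the second
// additionally carries an explicit storage type instead of the DISK default.
StorageLocation plain = StorageLocation.parse(curDir.getPath());
StorageLocation typed = StorageLocation.parse("[DISK]file:" + curDir.getPath());
StorageDirectory sd = new StorageDirectory(typed);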

Example 40 with StorageDirectory

use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.

the class TestCheckpoint method testEditFailureBeforeRename.

/**
   * Test that a fault while downloading edits does not prevent future
   * checkpointing
   */
@Test(timeout = 30000)
public void testEditFailureBeforeRename() throws IOException {
    Configuration conf = new HdfsConfiguration();
    SecondaryNameNode secondary = null;
    MiniDFSCluster cluster = null;
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        secondary = startSecondaryNameNode(conf);
        DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short) 1, 0l);
        secondary.doCheckpoint();
        // Cause edit rename to fail during next checkpoint
        Mockito.doThrow(new IOException("Injecting failure before edit rename"))
            .when(faultInjector).beforeEditsRename();
        DFSTestUtil.createFile(fs, new Path("tmpfile1"), 1024, (short) 1, 0l);
        try {
            secondary.doCheckpoint();
            fail("Fault injection failed.");
        } catch (IOException ioe) {
            GenericTestUtils.assertExceptionContains("Injecting failure before edit rename", ioe);
        }
        Mockito.reset(faultInjector);
        // truncate the tmp edits file to simulate a partial download
        for (StorageDirectory sd : secondary.getFSImage().getStorage().dirIterable(NameNodeDirType.EDITS)) {
            File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
            assertTrue("Expected a single tmp edits file in directory " + sd.toString(), tmpEdits.length == 1);
            RandomAccessFile randFile = new RandomAccessFile(tmpEdits[0], "rw");
            randFile.setLength(0);
            randFile.close();
        }
        // Next checkpoint should succeed
        secondary.doCheckpoint();
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
        if (fs != null) {
            fs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
        Mockito.reset(faultInjector);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) RandomAccessFile(java.io.RandomAccessFile) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) IOException(java.io.IOException) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) NameNodeFile(org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) File(java.io.File) Test(org.junit.Test)
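The tmpEditsFilter field used in the loop above is defined elsewhere in TestCheckpoint. A plausible sketch, assuming the temporary edits files downloaded by the SecondaryNameNode carry the NameNodeFile.EDITS_TMP prefix (the exact constant and naming may differ):

// Hypothetical sketch of the filter referenced above, not the exact upstream definition.
static final FilenameFilter tmpEditsFilter = new FilenameFilter() {
    @Override
    public boolean accept(File dir, String name) {
        // Match only the temporary edits files staged before the rename step.
        return name.startsWith(NameNodeFile.EDITS_TMP.getName());
    }
};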

Aggregations

StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) 83
File (java.io.File) 59
Test (org.junit.Test) 45
RandomAccessFile (java.io.RandomAccessFile) 29
IOException (java.io.IOException) 24
Configuration (org.apache.hadoop.conf.Configuration) 22
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 21
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 20
EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) 19
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) 15
URI (java.net.URI) 11
FileSystem (org.apache.hadoop.fs.FileSystem) 11
Path (org.apache.hadoop.fs.Path) 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 9
FSImageFile (org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) 7
FileJournalManager.getLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile) 6
InconsistentFSStateException (org.apache.hadoop.hdfs.server.common.InconsistentFSStateException) 5
AbortSpec (org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec) 5
ArrayList (java.util.ArrayList) 4
StorageState (org.apache.hadoop.hdfs.server.common.Storage.StorageState) 4