Example 6 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

From the class UpgradeUtilities, method createNameNodeVersionFile.

/**
   * Create a <code>version</code> file for namenode inside the specified parent
   * directory.  If such a file already exists, it will be overwritten.
   * The given version string will be written to the file as the layout
   * version. None of the parameters may be null.
   *
   * @param parent directory where namenode VERSION file is stored
   * @param version StorageInfo to create VERSION file from
   * @param bpid Block pool Id
   *
   * @return the created version file
   */
public static File[] createNameNodeVersionFile(Configuration conf, File[] parent, StorageInfo version, String bpid) throws IOException {
    // Build an NNStorage with no real storage directories; it is used only to
    // carry the given StorageInfo and write it out as VERSION properties.
    Storage storage = new NNStorage(conf, Collections.<URI>emptyList(), Collections.<URI>emptyList());
    storage.setStorageInfo(version);
    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
        versionFiles[i] = new File(parent[i], "VERSION");
        StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
        storage.writeProperties(versionFiles[i], sd);
    }
    return versionFiles;
}
Also used : BlockPoolSliceStorage(org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) NNStorage(org.apache.hadoop.hdfs.server.namenode.NNStorage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) NNStorage(org.apache.hadoop.hdfs.server.namenode.NNStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File)
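
A rough usage sketch for this helper follows; the directory paths, namespace ID, cluster ID, and block pool id are illustrative assumptions, not values taken from the Hadoop test suite.

// Hypothetical usage sketch: write a namenode VERSION file into two existing
// "current" directories. All concrete values below are made up.
Configuration conf = new HdfsConfiguration();
File[] parents = {
    new File("/tmp/upgrade-test/name1/current"),   // assumed to exist already
    new File("/tmp/upgrade-test/name2/current")
};
StorageInfo version = new StorageInfo(
    HdfsServerConstants.NAMENODE_LAYOUT_VERSION,   // layout version to record
    12345,                                         // namespace ID (made up)
    "CID-test",                                    // cluster ID (made up)
    0L,                                            // creation time
    HdfsServerConstants.NodeType.NAME_NODE);
File[] versionFiles = UpgradeUtilities.createNameNodeVersionFile(
    conf, parents, version, "BP-1-127.0.0.1-0");   // block pool id (made up)
// Each returned file is <parent>/VERSION with the given StorageInfo written into it.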

Example 7 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

From the class TestFsDatasetImpl, method getDfsUsedValueOfNewVolume.

private long getDfsUsedValueOfNewVolume(long cacheDfsUsed, long waitIntervalTime) throws IOException, InterruptedException {
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, BLOCK_POOL_IDS[0], 1));
    String CURRENT_DIR = "current";
    String DU_CACHE_FILE = BlockPoolSlice.DU_CACHE_FILE;
    String path = BASE_DIR + "/newData0";
    String pathUri = new Path(path).toUri().toString();
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
    String cacheFilePath = String.format("%s/%s/%s/%s/%s", path, CURRENT_DIR, BLOCK_POOL_IDS[0], CURRENT_DIR, DU_CACHE_FILE);
    File outFile = new File(cacheFilePath);
    if (!outFile.getParentFile().exists()) {
        outFile.getParentFile().mkdirs();
    }
    if (outFile.exists()) {
        outFile.delete();
    }
    FakeTimer timer = new FakeTimer();
    try {
        try (Writer out = new OutputStreamWriter(new FileOutputStream(outFile), StandardCharsets.UTF_8)) {
            // Write the dfsUsed value and the current time to the cache file
            out.write(Long.toString(cacheDfsUsed) + " " + Long.toString(timer.now()));
            out.flush();
        }
    } catch (IOException ioe) {
        // Ignore a failed cache-file write; the test then proceeds without a cached value.
    }
    dataset.setTimer(timer);
    timer.advance(waitIntervalTime);
    dataset.addVolume(loc, nsInfos);
    // Get the last volume, which is the one that was just added above
    FsVolumeImpl newVolume;
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        newVolume = (FsVolumeImpl) volumes.get(volumes.size() - 1);
    }
    long dfsUsed = newVolume.getDfsUsed();
    return dfsUsed;
}
Also used : Path(org.apache.hadoop.fs.Path) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) FileOutputStream(java.io.FileOutputStream) OutputStreamWriter(java.io.OutputStreamWriter) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) FakeTimer(org.apache.hadoop.util.FakeTimer) Writer(java.io.Writer) OutputStreamWriter(java.io.OutputStreamWriter)
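
The helper above lets the surrounding tests assert that a cached dfsUsed value is reused only while it is still fresh relative to dfs.datanode.cached-dfsused.check.interval.ms. A sketch of such a test follows; the method name and the concrete numbers are assumptions, not the actual TestFsDatasetImpl tests.

// Hypothetical test sketch built on getDfsUsedValueOfNewVolume; name and numbers are assumptions.
@Test
public void testCachedDfsUsedIsReusedWhenFresh() throws Exception {
    long cachedDfsUsed = 1024L * 1024L;   // pretend 1 MB was cached at shutdown
    long shortWait = 1000L;               // well inside the freshness window
    // The freshly added volume should report the cached value instead of rescanning the disk.
    assertEquals(cachedDfsUsed, getDfsUsedValueOfNewVolume(cachedDfsUsed, shortWait));
}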

Example 8 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

From the class TestFsDatasetImpl, method createStorageDirs.

private static void createStorageDirs(DataStorage storage, Configuration conf, int numDirs) throws IOException {
    List<Storage.StorageDirectory> dirs = new ArrayList<Storage.StorageDirectory>();
    List<String> dirStrings = new ArrayList<String>();
    FileUtils.deleteDirectory(new File(BASE_DIR));
    for (int i = 0; i < numDirs; i++) {
        File loc = new File(BASE_DIR + "/data" + i);
        dirStrings.add(new Path(loc.toString()).toUri().toString());
        loc.mkdirs();
        dirs.add(createStorageDirectory(loc));
        when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
    }
    String dataDir = StringUtils.join(",", dirStrings);
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
    when(storage.dirIterator()).thenReturn(dirs.iterator());
    when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
Also used : Path(org.apache.hadoop.fs.Path) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) ArrayList(java.util.ArrayList) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) Matchers.anyString(org.mockito.Matchers.anyString) File(java.io.File)
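
This helper only wires up Mockito stubs; a sketch of how the mocked DataStorage is then handed to the dataset under test is shown below. The constructor call is an assumption based on the usual TestFsDatasetImpl setup.

// Hypothetical setUp-style sketch; the FsDatasetImpl constructor call and the
// directory count are assumptions for illustration.
DataStorage storage = mock(DataStorage.class);
Configuration conf = new Configuration();
createStorageDirs(storage, conf, 2);   // two mocked storage dirs under BASE_DIR
// The dataset then discovers the mocked directories via storage.dirIterator()
// and storage.getStorageDir(i) instead of scanning real DataNode storage:
// dataset = new FsDatasetImpl(datanode, storage, conf);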

Example 9 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

From the class DataNode, method parseChangedVolumes.

/**
   * Parse the new DFS_DATANODE_DATA_DIR value in the configuration to detect
   * changed volumes.
   * @param newVolumes a comma separated string that specifies the data volumes.
   * @return changed volumes.
   * @throws IOException if none of the directories are specified in the
   * configuration, or the storage type of a directory is changed.
   */
@VisibleForTesting
ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
    Configuration conf = new Configuration();
    conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
    List<StorageLocation> newStorageLocations = getStorageLocations(conf);
    if (newStorageLocations.isEmpty()) {
        throw new IOException("No directory is specified.");
    }
    // Use the existing storage locations from the current conf
    // to detect new storage additions or removals.
    Map<String, StorageLocation> existingStorageLocations = new HashMap<>();
    for (StorageLocation loc : getStorageLocations(getConf())) {
        existingStorageLocations.put(loc.getNormalizedUri().toString(), loc);
    }
    ChangedVolumes results = new ChangedVolumes();
    results.newLocations.addAll(newStorageLocations);
    for (Iterator<Storage.StorageDirectory> it = storage.dirIterator(); it.hasNext(); ) {
        Storage.StorageDirectory dir = it.next();
        boolean found = false;
        for (Iterator<StorageLocation> newLocationItr = results.newLocations.iterator(); newLocationItr.hasNext(); ) {
            StorageLocation newLocation = newLocationItr.next();
            if (newLocation.matchesStorageDirectory(dir)) {
                StorageLocation oldLocation = existingStorageLocations.get(newLocation.getNormalizedUri().toString());
                if (oldLocation != null && oldLocation.getStorageType() != newLocation.getStorageType()) {
                    throw new IOException("Changing storage type is not allowed.");
                }
                // Update the unchanged locations as this location
                // from the new conf is really not a new one.
                newLocationItr.remove();
                results.unchangedLocations.add(newLocation);
                found = true;
                break;
            }
        }
        // This storage directory is not present in the new conf.
        // Add it to the deactivateLocations list.
        if (!found) {
            LOG.info("Deactivation request received for active volume: " + dir.getRoot().toString());
            results.deactivateLocations.add(StorageLocation.parse(dir.getRoot().toString()));
        }
    }
    // Also check the failed storage locations against the new conf to detect removals.
    if (getFSDataset().getNumFailedVolumes() > 0) {
        for (String failedStorageLocation : getFSDataset().getVolumeFailureSummary().getFailedStorageLocations()) {
            boolean found = false;
            for (Iterator<StorageLocation> newLocationItr = results.newLocations.iterator(); newLocationItr.hasNext(); ) {
                StorageLocation newLocation = newLocationItr.next();
                if (newLocation.getNormalizedUri().toString().equals(failedStorageLocation)) {
                    // The failed storage is being re-added. DataNode#refreshVolumes()
                    // will take care of re-assessing it.
                    found = true;
                    break;
                }
            }
            // The failed volume is not being re-added; add it to the deactivateLocations list.
            if (!found) {
                LOG.info("Deactivation request received for failed volume: " + failedStorageLocation);
                results.deactivateLocations.add(StorageLocation.parse(failedStorageLocation));
            }
        }
    }
    return results;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HashMap(java.util.HashMap) IOException(java.io.IOException) Storage(org.apache.hadoop.hdfs.server.common.Storage) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
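
The returned ChangedVolumes splits the configured locations into three buckets: newLocations, unchangedLocations, and deactivateLocations. A sketch of how a caller such as refreshVolumes might walk the result is shown below; the log wording is illustrative, not the DataNode's actual output.

// Hypothetical consumer sketch for ChangedVolumes; log messages are assumptions.
ChangedVolumes changed = parseChangedVolumes(conf.get(DFS_DATANODE_DATA_DIR_KEY));
for (StorageLocation location : changed.newLocations) {
    LOG.info("Volume to be added: " + location);        // present only in the new conf
}
for (StorageLocation location : changed.deactivateLocations) {
    LOG.info("Volume to be removed: " + location);      // active or failed, but absent from the new conf
}
// changed.unchangedLocations need no action.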

Example 10 with Storage

use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.

From the class TestCheckpoint, method testStorageAlreadyLockedErrorMessage.

/**
   * Test that an attempt to lock a storage directory that is already locked
   * logs an error message that includes the JVM name of the namenode holding the lock.
   */
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    StorageDirectory savedSd = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
        for (StorageDirectory sd : storage.dirIterable(null)) {
            assertLockFails(sd);
            savedSd = sd;
        }
        LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(Storage.class));
        try {
            // try to lock the storage that's already locked
            savedSd.lock();
            fail("Namenode should not be able to lock a storage" + " that is already locked");
        } catch (IOException ioe) {
            // cannot read lock file on Windows, so message cannot get JVM name
            String lockingJvmName = Path.WINDOWS ? "" : " " + ManagementFactory.getRuntimeMXBean().getName();
            String expectedLogMessage = "It appears that another node " + lockingJvmName + " has already locked the storage directory";
            assertTrue("Log output does not contain expected log message: " + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
        }
    } finally {
        cleanup(cluster);
        cluster = null;
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Storage(org.apache.hadoop.hdfs.server.common.Storage) CheckpointStorage(org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.CheckpointStorage) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LogCapturer(org.apache.hadoop.test.GenericTestUtils.LogCapturer) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)
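
The assertLockFails helper used above is defined elsewhere in TestCheckpoint. A minimal sketch of such a helper is shown below; it is simplified and omits the handling the real helper has for filesystems that do not support file locking.

// Minimal sketch of an assertLockFails-style helper (simplified; treat as an assumption).
static void assertLockFails(StorageDirectory sd) {
    try {
        sd.lock();
        fail("Expected locking " + sd.getRoot() + " to fail: it is already held by the NameNode");
    } catch (IOException ioe) {
        // expected: the running NameNode already holds in_use.lock for this directory
    }
}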

Aggregations

Storage (org.apache.hadoop.hdfs.server.common.Storage): 14
File (java.io.File): 11
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 9
Test (org.junit.Test): 8
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage): 7
Matchers.anyString (org.mockito.Matchers.anyString): 7
Path (org.apache.hadoop.fs.Path): 5
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 5
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 5
IOException (java.io.IOException): 4
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 4
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 4
Configuration (org.apache.hadoop.conf.Configuration): 3
ArrayList (java.util.ArrayList): 2
HashSet (java.util.HashSet): 2
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 2
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 2
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 2
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 2