Search in sources:

Example 1 with DataStorage

Use of org.apache.hadoop.hdfs.server.datanode.DataStorage in project hadoop by apache.

From the class FsDatasetImpl, the method addVolume.

@Override
public void addVolume(final StorageLocation location, final List<NamespaceInfo> nsInfos) throws IOException {
    // Prepare volume in DataStorage
    final DataStorage.VolumeBuilder builder;
    try {
        builder = dataStorage.prepareVolume(datanode, location, nsInfos);
    } catch (IOException e) {
        volumes.addVolumeFailureInfo(new VolumeFailureInfo(location, Time.now()));
        throw e;
    }
    final Storage.StorageDirectory sd = builder.getStorageDirectory();
    StorageType storageType = location.getStorageType();
    final FsVolumeImpl fsVolume = createFsVolume(sd.getStorageUuid(), sd, location);
    final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
    ArrayList<IOException> exceptions = Lists.newArrayList();
    // Scan each block pool on the new volume and stage its replicas into the
    // temporary map; collect failures so the storage lock can still be released.
    for (final NamespaceInfo nsInfo : nsInfos) {
        String bpid = nsInfo.getBlockPoolID();
        try {
            fsVolume.addBlockPool(bpid, this.conf, this.timer);
            fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
        } catch (IOException e) {
            LOG.warn("Caught exception when adding " + fsVolume + ". Will throw later.", e);
            exceptions.add(e);
        }
    }
    if (!exceptions.isEmpty()) {
        try {
            sd.unlock();
        } catch (IOException e) {
            exceptions.add(e);
        }
        throw MultipleIOException.createIOException(exceptions);
    }
    final FsVolumeReference ref = fsVolume.obtainReference();
    setupAsyncLazyPersistThread(fsVolume);
    // Commit the prepared volume in DataStorage, then publish the staged replicas.
    builder.build();
    activateVolume(tempVolumeMap, sd, storageType, ref);
    LOG.info("Added volume - " + location + ", StorageType: " + storageType);
}
Also used: DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageType(org.apache.hadoop.fs.StorageType) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) Storage(org.apache.hadoop.hdfs.server.common.Storage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) FsVolumeReference(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)
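
For context, addVolume is the dataset half of the DataNode's hot-swap path for bringing a new disk online. Below is a minimal invocation sketch, assuming a dataset reference and a namespace-info list supplied by the enclosing DataNode; neither appears in the snippet above, and the path is made up.

// Hypothetical caller: hot-swap a new disk into the running dataset.
// "dataset" and "namespaceInfos" are assumed to be provided by the DataNode.
StorageLocation newLocation = StorageLocation.parse("[DISK]file:///data/disk3/dfs/data");
try {
    dataset.addVolume(newLocation, namespaceInfos);
} catch (IOException e) {
    // addVolume has already recorded the failure via addVolumeFailureInfo
    LOG.error("Failed to add volume " + newLocation, e);
}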

Example 2 with DataStorage

Use of org.apache.hadoop.hdfs.server.datanode.DataStorage in project hadoop by apache.

From the class FsDatasetImpl, the method getInitialVolumeFailureInfos.

/**
   * Gets initial volume failure information for all volumes that failed
   * immediately at startup.  The method works by determining the set difference
   * between all configured storage locations and the actual storage locations in
   * use after attempting to put all of them into service.
   *
   * @return each storage location that has failed
   */
private static List<VolumeFailureInfo> getInitialVolumeFailureInfos(Collection<StorageLocation> dataLocations, DataStorage storage) {
    // Start by assuming every configured location has failed...
    Set<StorageLocation> failedLocationSet = Sets.newHashSetWithExpectedSize(dataLocations.size());
    for (StorageLocation sl : dataLocations) {
        failedLocationSet.add(sl);
    }
    // ...then clear each location that DataStorage actually brought into service.
    for (Iterator<Storage.StorageDirectory> it = storage.dirIterator(); it.hasNext(); ) {
        Storage.StorageDirectory sd = it.next();
        failedLocationSet.remove(sd.getStorageLocation());
    }
    List<VolumeFailureInfo> volumeFailureInfos = Lists.newArrayListWithCapacity(failedLocationSet.size());
    long failureDate = Time.now();
    for (StorageLocation failedStorageLocation : failedLocationSet) {
        volumeFailureInfos.add(new VolumeFailureInfo(failedStorageLocation, failureDate));
    }
    return volumeFailureInfos;
}
Also used: DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)
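
The set-difference approach described in the Javadoc is independent of the HDFS types. A self-contained sketch of the same pattern in plain Java (hypothetical names, not taken from the Hadoop sources):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Same pattern in miniature: assume everything configured has failed, then
// clear whatever actually entered service; the remainder is the failure set.
static Set<String> initialFailures(List<String> configured, List<String> inService) {
    Set<String> failed = new HashSet<>(configured);
    inService.forEach(failed::remove);
    return failed;
}
// initialFailures(["/d1","/d2","/d3"], ["/d1","/d3"]) yields {"/d2"}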

Example 3 with DataStorage

Use of org.apache.hadoop.hdfs.server.datanode.DataStorage in project hadoop by apache.

From the class TestFsDatasetImpl, the method createStorageDirs.

private static void createStorageDirs(DataStorage storage, Configuration conf, int numDirs) throws IOException {
    List<Storage.StorageDirectory> dirs = new ArrayList<Storage.StorageDirectory>();
    List<String> dirStrings = new ArrayList<String>();
    // Start from a clean base directory, then create one data dir per volume.
    FileUtils.deleteDirectory(new File(BASE_DIR));
    for (int i = 0; i < numDirs; i++) {
        File loc = new File(BASE_DIR + "/data" + i);
        dirStrings.add(new Path(loc.toString()).toUri().toString());
        loc.mkdirs();
        dirs.add(createStorageDirectory(loc));
        when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
    }
    // Point dfs.datanode.data.dir at the created directories and stub the
    // mocked DataStorage so the dataset under test sees them as its own.
    String dataDir = StringUtils.join(",", dirStrings);
    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
    when(storage.dirIterator()).thenReturn(dirs.iterator());
    when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
Also used: Path(org.apache.hadoop.fs.Path) DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) Storage(org.apache.hadoop.hdfs.server.common.Storage) ArrayList(java.util.ArrayList) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) Matchers.anyString(org.mockito.Matchers.anyString) File(java.io.File)
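
A hedged sketch of how a test might use this helper; the dataset construction is elided, and the constant name is illustrative rather than copied from TestFsDatasetImpl:

// Hypothetical test setup: mock DataStorage, then let the helper create the
// on-disk directories and wire the stubs before building the dataset under test.
DataStorage storage = mock(DataStorage.class);
Configuration conf = new Configuration();
createStorageDirs(storage, conf, NUM_INIT_VOLUMES);  // NUM_INIT_VOLUMES assumed
// Caveat: dirIterator() is stubbed with a single concrete iterator, so it can
// be consumed only once; re-stub it if the code under test iterates again.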

Example 4 with DataStorage

Use of org.apache.hadoop.hdfs.server.datanode.DataStorage in project hadoop by apache.

From the class UpgradeUtilities, the method createDataNodeVersionFile.

/**
   * Create a <code>VERSION</code> file for a datanode inside each of the
   * specified parent directories.  If such a file already exists, it will be
   * overwritten.  The given version info will be written to the file as the
   * layout version.  None of the parameters may be null.
   *
   * @param parent directories where the datanode VERSION files are stored
   * @param version StorageInfo to create the VERSION files from
   * @param bpid block pool ID
   * @param bpidToWrite block pool ID to write into the version file
   */
public static void createDataNodeVersionFile(File[] parent, StorageInfo version, String bpid, String bpidToWrite) throws IOException {
    DataStorage storage = new DataStorage(version);
    storage.setDatanodeUuid("FixedDatanodeUuid");
    File[] versionFiles = new File[parent.length];
    for (int i = 0; i < parent.length; i++) {
        File versionFile = new File(parent[i], "VERSION");
        StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
        DataStorage.createStorageID(sd, false);
        storage.writeProperties(versionFile, sd);
        versionFiles[i] = versionFile;
        File bpDir = BlockPoolSliceStorage.getBpRoot(bpid, parent[i]);
        createBlockPoolVersionFile(bpDir, version, bpidToWrite);
    }
}
Also used: DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File)
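
A hedged sketch of calling this helper from an upgrade test. The StorageInfo constructor and the DATANODE_LAYOUT_VERSION constant reflect my reading of the HDFS server APIs, and all IDs below are made up:

// Hypothetical: write datanode VERSION files for two storage directories.
File[] currentDirs = {
    new File(BASE_DIR + "/data1/current"),  // BASE_DIR is assumed
    new File(BASE_DIR + "/data2/current")
};
StorageInfo version = new StorageInfo(
    HdfsServerConstants.DATANODE_LAYOUT_VERSION,  // current layout version
    12345,                                        // namespace ID (made up)
    "test-cluster-id",                            // cluster ID (made up)
    0L,                                           // cTime
    HdfsServerConstants.NodeType.DATA_NODE);
String bpid = "BP-1-127.0.0.1-1";                 // block pool ID (made up)
UpgradeUtilities.createDataNodeVersionFile(currentDirs, version, bpid, bpid);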

Aggregations

DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage): 4 uses
Storage (org.apache.hadoop.hdfs.server.common.Storage): 3 uses
File (java.io.File): 2 uses
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 2 uses
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 2 uses
IOException (java.io.IOException): 1 use
ArrayList (java.util.ArrayList): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
StorageType (org.apache.hadoop.fs.StorageType): 1 use
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 1 use
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 1 use
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 1 use
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 1 use
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 1 use
Matchers.anyString (org.mockito.Matchers.anyString): 1 use