Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
The class UpgradeUtilities, method createNameNodeVersionFile.
/**
* Create a <code>version</code> file for namenode inside the specified parent
* directory. If such a file already exists, it will be overwritten.
* The given version string will be written to the file as the layout
* version. None of the parameters may be null.
*
* @param parent directory where namenode VERSION file is stored
* @param version StorageInfo to create VERSION file from
* @param bpid Block pool Id
*
* @return the created version file
*/
public static File[] createNameNodeVersionFile(Configuration conf,
    File[] parent, StorageInfo version, String bpid) throws IOException {
  Storage storage = new NNStorage(conf, Collections.<URI>emptyList(),
      Collections.<URI>emptyList());
  storage.setStorageInfo(version);
  File[] versionFiles = new File[parent.length];
  for (int i = 0; i < parent.length; i++) {
    versionFiles[i] = new File(parent[i], "VERSION");
    StorageDirectory sd = new StorageDirectory(parent[i].getParentFile());
    storage.writeProperties(versionFiles[i], sd);
  }
  return versionFiles;
}
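As a rough usage sketch (not quoted from the Hadoop test suite; the scratch directory, the StorageInfo values and the block pool id below are illustrative assumptions), the helper could be driven like this:

// Hedged sketch: write a VERSION file into a throw-away namenode "current"
// directory and check that it was created.
Configuration conf = new HdfsConfiguration();
File nnCurrent = new File("/tmp/nn-upgrade-test/current");  // assumed scratch path
assertTrue(nnCurrent.mkdirs() || nnCurrent.isDirectory());
StorageInfo versionInfo = new StorageInfo(
    HdfsServerConstants.NAMENODE_LAYOUT_VERSION,  // layout version to record
    /* namespaceID */ 12345,
    /* clusterID   */ "CID-test",
    /* cTime       */ 0L,
    HdfsServerConstants.NodeType.NAME_NODE);
File[] versionFiles = UpgradeUtilities.createNameNodeVersionFile(
    conf, new File[] { nnCurrent }, versionInfo, "BP-1-127.0.0.1-1");
assertTrue(versionFiles[0].exists());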
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
The class TestFsDatasetImpl, method getDfsUsedValueOfNewVolume.
private long getDfsUsedValueOfNewVolume(long cacheDfsUsed,
    long waitIntervalTime) throws IOException, InterruptedException {
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, BLOCK_POOL_IDS[0], 1));
  String CURRENT_DIR = "current";
  String DU_CACHE_FILE = BlockPoolSlice.DU_CACHE_FILE;
  String path = BASE_DIR + "/newData0";
  String pathUri = new Path(path).toUri().toString();
  StorageLocation loc = StorageLocation.parse(pathUri);
  Storage.StorageDirectory sd = createStorageDirectory(new File(path));
  DataStorage.VolumeBuilder builder =
      new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(loc),
      anyListOf(NamespaceInfo.class))).thenReturn(builder);
  String cacheFilePath = String.format("%s/%s/%s/%s/%s", path, CURRENT_DIR,
      BLOCK_POOL_IDS[0], CURRENT_DIR, DU_CACHE_FILE);
  File outFile = new File(cacheFilePath);
  if (!outFile.getParentFile().exists()) {
    outFile.getParentFile().mkdirs();
  }
  if (outFile.exists()) {
    outFile.delete();
  }
  FakeTimer timer = new FakeTimer();
  try {
    try (Writer out = new OutputStreamWriter(new FileOutputStream(outFile),
        StandardCharsets.UTF_8)) {
      // Write the dfsUsed value and the current time to the cache file
      out.write(Long.toString(cacheDfsUsed) + " " + Long.toString(timer.now()));
      out.flush();
    }
  } catch (IOException ioe) {
  }
  dataset.setTimer(timer);
  timer.advance(waitIntervalTime);
  dataset.addVolume(loc, nsInfos);
  // Get the last volume, which is the one just added
  FsVolumeImpl newVolume;
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    newVolume = (FsVolumeImpl) volumes.get(volumes.size() - 1);
  }
  long dfsUsed = newVolume.getDfsUsed();
  return dfsUsed;
}
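A test built on this helper would typically pin the cached-dfsUsed refresh interval above the wait time and assert that the cached value is what the new volume reports. The sketch below is not quoted from TestFsDatasetImpl; the configuration key name and the surrounding test fields (conf, the JUnit asserts) are assumptions.

// Hedged sketch of a test that exercises getDfsUsedValueOfNewVolume.
@Test(timeout = 30000)
public void testLoadingDfsUsedForVolumes() throws IOException,
    InterruptedException {
  long waitIntervalTime = 5000;
  // Keep the cached-dfsUsed check interval larger than the wait time so the
  // cached value has not expired when the new volume is added.
  conf.setLong(DFSConfigKeys.DFS_DN_CACHED_DFSUSED_CHECK_INTERVAL_MS,
      waitIntervalTime + 1000);
  long cacheDfsUsed = 1024;
  long dfsUsed = getDfsUsedValueOfNewVolume(cacheDfsUsed, waitIntervalTime);
  // The value should come from the du cache file written by the helper.
  assertEquals(cacheDfsUsed, dfsUsed);
}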
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
The class TestFsDatasetImpl, method createStorageDirs.
private static void createStorageDirs(DataStorage storage, Configuration conf,
    int numDirs) throws IOException {
  List<Storage.StorageDirectory> dirs =
      new ArrayList<Storage.StorageDirectory>();
  List<String> dirStrings = new ArrayList<String>();
  FileUtils.deleteDirectory(new File(BASE_DIR));
  for (int i = 0; i < numDirs; i++) {
    File loc = new File(BASE_DIR + "/data" + i);
    dirStrings.add(new Path(loc.toString()).toUri().toString());
    loc.mkdirs();
    dirs.add(createStorageDirectory(loc));
    when(storage.getStorageDir(i)).thenReturn(dirs.get(i));
  }
  String dataDir = StringUtils.join(",", dirStrings);
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir);
  when(storage.dirIterator()).thenReturn(dirs.iterator());
  when(storage.getNumStorageDirs()).thenReturn(numDirs);
}
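For context, a setUp method along the following lines would call this helper before constructing the dataset under test. The exact mock wiring here is an assumption for illustration, not code quoted from the test class.

// Hedged sketch of a test fixture that uses createStorageDirs.
@Before
public void setUp() throws IOException {
  datanode = mock(DataNode.class);
  storage = mock(DataStorage.class);
  conf = new Configuration();
  // Build NUM_INIT_VOLUMES mocked storage directories and register them in
  // both the mocked DataStorage and DFS_DATANODE_DATA_DIR_KEY.
  createStorageDirs(storage, conf, NUM_INIT_VOLUMES);
  dataset = new FsDatasetImpl(datanode, storage, conf);
}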
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
The class DataNode, method parseChangedVolumes.
/**
* Parse the new DFS_DATANODE_DATA_DIR value in the configuration to detect
* changed volumes.
* @param newVolumes a comma-separated string that specifies the data volumes.
* @return changed volumes.
* @throws IOException if none of the directories are specified in the
* configuration, or the storage type of a directory is changed.
*/
@VisibleForTesting
ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
  Configuration conf = new Configuration();
  conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
  List<StorageLocation> newStorageLocations = getStorageLocations(conf);
  if (newStorageLocations.isEmpty()) {
    throw new IOException("No directory is specified.");
  }
  // Use the existing storage locations from the current conf
  // to detect new storage additions or removals.
  Map<String, StorageLocation> existingStorageLocations = new HashMap<>();
  for (StorageLocation loc : getStorageLocations(getConf())) {
    existingStorageLocations.put(loc.getNormalizedUri().toString(), loc);
  }
  ChangedVolumes results = new ChangedVolumes();
  results.newLocations.addAll(newStorageLocations);
  for (Iterator<Storage.StorageDirectory> it = storage.dirIterator();
      it.hasNext(); ) {
    Storage.StorageDirectory dir = it.next();
    boolean found = false;
    for (Iterator<StorageLocation> newLocationItr =
        results.newLocations.iterator(); newLocationItr.hasNext(); ) {
      StorageLocation newLocation = newLocationItr.next();
      if (newLocation.matchesStorageDirectory(dir)) {
        StorageLocation oldLocation = existingStorageLocations.get(
            newLocation.getNormalizedUri().toString());
        if (oldLocation != null &&
            oldLocation.getStorageType() != newLocation.getStorageType()) {
          throw new IOException("Changing storage type is not allowed.");
        }
        // Update the unchanged locations as this location
        // from the new conf is really not a new one.
        newLocationItr.remove();
        results.unchangedLocations.add(newLocation);
        found = true;
        break;
      }
    }
    // The new conf no longer lists this storage location, so remove it from
    // the current storage locations. Add to the deactivateLocations list.
    if (!found) {
      LOG.info("Deactivation request received for active volume: "
          + dir.getRoot().toString());
      results.deactivateLocations.add(
          StorageLocation.parse(dir.getRoot().toString()));
    }
  }
  // Use the failed storage locations from the current conf
  // to detect removals in the new conf.
  if (getFSDataset().getNumFailedVolumes() > 0) {
    for (String failedStorageLocation : getFSDataset()
        .getVolumeFailureSummary().getFailedStorageLocations()) {
      boolean found = false;
      for (Iterator<StorageLocation> newLocationItr =
          results.newLocations.iterator(); newLocationItr.hasNext(); ) {
        StorageLocation newLocation = newLocationItr.next();
        if (newLocation.getNormalizedUri().toString().equals(
            failedStorageLocation)) {
          // The failed storage is being re-added. DataNode#refreshVolumes()
          // will take care of re-assessing it.
          found = true;
          break;
        }
      }
      // The failed storage location is absent from the new conf.
      // Add to the deactivateLocations list.
      if (!found) {
        LOG.info("Deactivation request received for failed volume: "
            + failedStorageLocation);
        results.deactivateLocations.add(
            StorageLocation.parse(failedStorageLocation));
      }
    }
  }
  return results;
}
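To illustrate how the result is consumed (the directory paths and the dn handle below are assumptions used only for illustration, not code from DataNode or its tests):

// Suppose the running conf lists /data/disk1 and /data/disk2, and the new
// value drops disk2 and adds disk3. Called from a test in the same package.
String newDataDirs = "/data/disk1,/data/disk3";
DataNode.ChangedVolumes changed = dn.parseChangedVolumes(newDataDirs);
// Expected outcome: disk1 ends up in unchangedLocations, disk3 remains in
// newLocations (to be added), and disk2 lands in deactivateLocations.
assert changed.newLocations.size() == 1;
assert changed.deactivateLocations.size() == 1;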
Use of org.apache.hadoop.hdfs.server.common.Storage in project hadoop by apache.
The class TestCheckpoint, method testStorageAlreadyLockedErrorMessage.
/**
* Test that an attempt to lock a storage directory that is already locked
* logs an error message that includes the JVM name of the namenode holding
* the lock.
*/
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(null)) {
      assertLockFails(sd);
      savedSd = sd;
    }
    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
        LogFactory.getLog(Storage.class));
    try {
      // try to lock the storage that's already locked
      savedSd.lock();
      fail("Namenode should not be able to lock a storage"
          + " that is already locked");
    } catch (IOException ioe) {
      // cannot read lock file on Windows, so message cannot get JVM name
      String lockingJvmName = Path.WINDOWS ? ""
          : " " + ManagementFactory.getRuntimeMXBean().getName();
      String expectedLogMessage = "It appears that another node "
          + lockingJvmName + " has already locked the storage directory";
      assertTrue("Log output does not contain expected log message: "
          + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
}
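The test leans on an assertLockFails helper. A plausible sketch of such a helper is shown below; it assumes StorageDirectory's lock/unlock/isLockSupported API, and the exact implementation in TestCheckpoint may differ.

/**
 * Assert that the given storage directory cannot be locked because another
 * process (the running namenode) already holds the lock.
 */
private static void assertLockFails(StorageDirectory sd) {
  try {
    sd.lock();
    // If locking did not throw, the underlying filesystem must not support
    // file locking at all; otherwise the expectation is violated.
    assertFalse(sd.isLockSupported());
    sd.unlock();
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  }
}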