Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class StorageLocationChecker, method check().
/**
 * Initiate a check of the supplied storage volumes and return
 * the list of healthy volumes.
 *
 * StorageLocations are returned in the same order as the input
 * for compatibility with existing unit tests.
 *
 * @param conf HDFS configuration.
 * @param dataDirs list of volumes to check.
 * @return the list of healthy volumes; volumes that failed the check
 *         are excluded.
 *
 * @throws InterruptedException if the check was interrupted.
 * @throws IOException if the number of failed volumes exceeds the
 *                     maximum allowed or if there are no good
 *                     volumes.
 */
public List<StorageLocation> check(final Configuration conf,
    final Collection<StorageLocation> dataDirs)
    throws InterruptedException, IOException {
  final HashMap<StorageLocation, Boolean> goodLocations = new LinkedHashMap<>();
  final Set<StorageLocation> failedLocations = new HashSet<>();
  final Map<StorageLocation, ListenableFuture<VolumeCheckResult>> futures =
      Maps.newHashMap();
  final LocalFileSystem localFS = FileSystem.getLocal(conf);
  final CheckContext context = new CheckContext(localFS, expectedPermission);

  // Start parallel disk check operations on all StorageLocations.
  for (StorageLocation location : dataDirs) {
    goodLocations.put(location, true);
    Optional<ListenableFuture<VolumeCheckResult>> olf =
        delegateChecker.schedule(location, context);
    if (olf.isPresent()) {
      futures.put(location, olf.get());
    }
  }

  if (maxVolumeFailuresTolerated >= dataDirs.size()) {
    throw new DiskErrorException("Invalid value configured for "
        + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
        + maxVolumeFailuresTolerated + ". Value configured is >= "
        + "to the number of configured volumes (" + dataDirs.size() + ").");
  }

  final long checkStartTimeMs = timer.monotonicNow();

  // Retrieve the results of the disk checks.
  for (Map.Entry<StorageLocation, ListenableFuture<VolumeCheckResult>> entry
      : futures.entrySet()) {
    // Determine how much time we can allow for this check to complete.
    // The cumulative wait time cannot exceed maxAllowedTimeForCheck.
    final long waitSoFarMs = (timer.monotonicNow() - checkStartTimeMs);
    final long timeLeftMs = Math.max(0, maxAllowedTimeForCheckMs - waitSoFarMs);
    final StorageLocation location = entry.getKey();

    try {
      final VolumeCheckResult result =
          entry.getValue().get(timeLeftMs, TimeUnit.MILLISECONDS);
      switch (result) {
      case HEALTHY:
        break;
      case DEGRADED:
        LOG.warn("StorageLocation {} appears to be degraded.", location);
        break;
      case FAILED:
        LOG.warn("StorageLocation {} detected as failed.", location);
        failedLocations.add(location);
        goodLocations.remove(location);
        break;
      default:
        LOG.error("Unexpected health check result {} for StorageLocation {}",
            result, location);
      }
    } catch (ExecutionException | TimeoutException e) {
      LOG.warn("Exception checking StorageLocation " + location, e.getCause());
      failedLocations.add(location);
      goodLocations.remove(location);
    }
  }

  if (failedLocations.size() > maxVolumeFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + goodLocations.size()
        + ", volumes configured: " + dataDirs.size()
        + ", volumes failed: " + failedLocations.size()
        + ", volume failures tolerated: " + maxVolumeFailuresTolerated);
  }

  if (goodLocations.size() == 0) {
    throw new DiskErrorException("All directories in "
        + DFS_DATANODE_DATA_DIR_KEY + " are invalid: " + failedLocations);
  }

  return new ArrayList<>(goodLocations.keySet());
}
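As a point of reference, here is a minimal, self-contained sketch of the LocalFileSystem call that check() starts from: FileSystem.getLocal(conf) returns the checksummed local file system for the given configuration, which the CheckContext then uses to probe directory permissions. The class name and directory below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalDirProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same call as in check(): obtain the LocalFileSystem for this configuration.
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    // Hypothetical directory; a DataNode would probe each configured data dir.
    Path dir = new Path(args.length > 0 ? args[0] : "/tmp/data/dn");
    FileStatus status = localFS.getFileStatus(dir);
    System.out.println(dir + ": owner=" + status.getOwner()
        + ", permission=" + status.getPermission());
  }
}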
Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class UpgradeUtilities, method createDataNodeStorageDirs().
/**
 * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
 * populated DFS filesystem.
 * For each parent directory, this method populates <code>parent/dirName</code>
 * with the contents of the datanode storage directory taken from a singleton
 * datanode master (which contains the version and block files). If the
 * destination directory already exists, it is deleted first; it is then
 * created and populated.
 *
 * @param parents parent directories under which {@code dirName} is created
 * @param dirName name of the storage directory created under each parent
 * @return the array of created directories
 */
public static File[] createDataNodeStorageDirs(String[] parents,
    String dirName) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] { newDir.toString() });
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
        new Path(newDir.toString()), false);
    // Change the storage UUID to avoid conflicts when DN starts up.
    StorageDirectory sd = new StorageDirectory(
        new File(datanodeStorage.toString()));
    sd.setStorageUuid(DatanodeStorage.generateUuid());
    Properties properties = Storage.readPropertiesFile(sd.getVersionFile());
    properties.setProperty("storageID", sd.getStorageUuid());
    Storage.writeProperties(sd.getVersionFile(), properties);
    retVal[i] = newDir;
  }
  return retVal;
}
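The storageID rewrite above goes through Hadoop's Storage helpers. As a rough illustration only (not the Hadoop Storage implementation, which manages the VERSION file more carefully), the same idea can be sketched with plain java.util.Properties; the file path and the generated ID below are hypothetical.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.Properties;
import java.util.UUID;

public class RewriteStorageId {
  public static void main(String[] args) throws Exception {
    // Hypothetical VERSION file path for a copied storage directory.
    File versionFile = new File(args.length > 0 ? args[0] : "current/VERSION");

    Properties props = new Properties();
    try (FileInputStream in = new FileInputStream(versionFile)) {
      props.load(in);
    }

    // Give the copied storage directory a fresh, UUID-based identity,
    // mirroring what the test helper does before a DataNode starts up.
    props.setProperty("storageID", "DS-" + UUID.randomUUID());

    try (FileOutputStream out = new FileOutputStream(versionFile)) {
      props.store(out, null);
    }
  }
}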
Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class UpgradeUtilities, method createNameNodeStorageDirs().
/**
 * Simulate the {@link DFSConfigKeys#DFS_NAMENODE_NAME_DIR_KEY} of a
 * populated DFS filesystem.
 * For each parent directory, this method populates <code>parent/dirName</code>
 * with the contents of the namenode storage directory taken from a singleton
 * namenode master (which contains the edits, fsimage, version and time
 * files). If the destination directory already exists, it is deleted first;
 * it is then created and populated.
 *
 * @param parents parent directories under which {@code dirName} is created
 * @param dirName name of the storage directory created under each parent
 * @return the array of created directories
 */
public static File[] createNameNodeStorageDirs(String[] parents,
    String dirName) throws Exception {
  File[] retVal = new File[parents.length];
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i], dirName);
    createEmptyDirs(new String[] { newDir.toString() });
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
        new Path(newDir.toString()), false);
    retVal[i] = newDir;
  }
  return retVal;
}
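Both test helpers rely on the three-argument copyToLocalFile overload that LocalFileSystem inherits from ChecksumFileSystem, where the boolean appears to control whether the sibling .crc checksum files are copied as well. A minimal sketch with hypothetical source and destination paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class CopyStorageDir {
  public static void main(String[] args) throws Exception {
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    // Hypothetical paths; the helpers copy the "current" directory of a
    // prepared storage master into each newly created storage directory.
    Path src = new Path("/tmp/namenode-master/current");
    Path dst = new Path("/tmp/name-dir-0");
    // Third argument: whether to copy the sibling .crc checksum files too.
    localFS.copyToLocalFile(src, dst, false);
  }
}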
Use of org.apache.hadoop.fs.LocalFileSystem in project ignite by apache.
The class HadoopV2TaskContext, method prepareTaskEnvironment().
/**
* {@inheritDoc}
*/
@Override
public void prepareTaskEnvironment() throws IgniteCheckedException {
    File locDir;

    switch (taskInfo().type()) {
        case MAP:
        case REDUCE:
            job().prepareTaskEnvironment(taskInfo());

            locDir = taskLocalDir(job.igniteWorkDirectory(), locNodeId, taskInfo());

            break;

        default:
            locDir = jobLocalDir(job.igniteWorkDirectory(), locNodeId, taskInfo().jobId());
    }

    ClassLoader oldLdr = HadoopCommonUtils.setContextClassLoader(jobConf().getClassLoader());

    try {
        // Resolve (and cache) the default file system while the job's class
        // loader is set as the thread context class loader.
        FileSystem.get(jobConf());

        LocalFileSystem locFs = FileSystem.getLocal(jobConf());

        locFs.setWorkingDirectory(new Path(locDir.getAbsolutePath()));
    }
    catch (Throwable e) {
        if (e instanceof Error)
            throw (Error)e;

        throw transformException(e);
    }
    finally {
        HadoopCommonUtils.restoreContextClassLoader(oldLdr);
    }
}
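A small, self-contained sketch (with a hypothetical task directory) of what the two LocalFileSystem calls in the try block accomplish: once setWorkingDirectory points at the task-local directory, relative Paths used by task code resolve under it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirDemo {
  public static void main(String[] args) throws Exception {
    LocalFileSystem locFs = FileSystem.getLocal(new Configuration());
    // Hypothetical per-task directory; Ignite points this at the task's local dir.
    Path taskDir = new Path("/tmp/ignite-task-0");
    locFs.mkdirs(taskDir);
    locFs.setWorkingDirectory(taskDir);
    // Relative paths created by task code now resolve under the task directory.
    System.out.println(locFs.getWorkingDirectory());
    System.out.println(locFs.makeQualified(new Path("side-output.txt")));
  }
}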
Use of org.apache.hadoop.fs.LocalFileSystem in project incubator-systemml by apache.
The class VariableCPInstruction, method writeScalarToHDFS().
/**
 * Helper function to write a scalar to HDFS based on its value type.
 *
 * @param ec execution context
 * @param fname file name
 */
private void writeScalarToHDFS(ExecutionContext ec, String fname) {
  try {
    ScalarObject scalar = ec.getScalarInput(getInput1().getName(),
        getInput1().getValueType(), getInput1().isLiteral());
    MapReduceTool.writeObjectToHDFS(scalar.getValue(), fname);
    MapReduceTool.writeScalarMetaDataFile(fname + ".mtd", getInput1().getValueType());

    // Writing through the checksummed local file system leaves .crc files
    // next to the output; remove them so only the data and metadata remain.
    FileSystem fs = IOUtilFunctions.getFileSystem(fname);
    if (fs instanceof LocalFileSystem) {
      Path path = new Path(fname);
      IOUtilFunctions.deleteCrcFilesFromLocalFileSystem(fs, path);
    }
  }
  catch (IOException e) {
    throw new DMLRuntimeException(e);
  }
}
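For illustration, a self-contained sketch of the kind of cleanup deleteCrcFilesFromLocalFileSystem performs (the file name is hypothetical, and this is not the SystemML implementation): LocalFileSystem extends ChecksumFileSystem, so the checksum file for a given path can be located with getChecksumFile and deleted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class CrcCleanup {
  public static void main(String[] args) throws Exception {
    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
    // Hypothetical output file written through the checksummed local FS.
    Path data = new Path("/tmp/scalar.out");
    // ChecksumFileSystem knows the ".<name>.crc" naming for checksum files.
    Path crc = localFS.getChecksumFile(data);
    if (localFS.exists(crc)) {
      localFS.delete(crc, false); // non-recursive delete of the checksum file
    }
  }
}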