Use of alluxio.underfs.UnderFileSystemConfiguration in project alluxio by Alluxio.
The class MountInfo, method toDisplayMountPointInfo:
/**
 * @return the {@link MountPointInfo} for the mount point. Some information is formatted
 *         for display purposes.
 */
public MountPointInfo toDisplayMountPointInfo() {
  MountPointInfo info = toMountPointInfo();
  UnderFileSystemConfiguration conf =
      UnderFileSystemConfiguration.defaults(new InstancedConfiguration(new AlluxioProperties()))
          .createMountSpecificConf(info.getProperties());
  Map<String, String> displayConf =
      conf.toUserPropertyMap(ConfigurationValueOptions.defaults().useDisplayValue(true));
  info.setProperties(displayConf);
  return info;
}
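The point of the display conversion is that credential-like properties come back in their redacted display form rather than raw. Below is a minimal standalone sketch of the same conversion; the property name aws.secretKey and its value are illustrative assumptions, and the alluxio.conf package names match Alluxio 2.x:

import alluxio.conf.AlluxioProperties;
import alluxio.conf.ConfigurationValueOptions;
import alluxio.conf.InstancedConfiguration;
import alluxio.underfs.UnderFileSystemConfiguration;

import java.util.HashMap;
import java.util.Map;

public class DisplayConfSketch {
  public static void main(String[] args) {
    // Hypothetical mount-specific properties, e.g. credentials supplied at mount time.
    Map<String, String> mountProps = new HashMap<>();
    mountProps.put("aws.secretKey", "not-a-real-secret"); // illustrative key and value

    UnderFileSystemConfiguration conf = UnderFileSystemConfiguration
        .defaults(new InstancedConfiguration(new AlluxioProperties()))
        .createMountSpecificConf(mountProps);

    // useDisplayValue(true) requests the display form of each value, so keys
    // marked as credentials are expected to come back redacted.
    Map<String, String> displayConf =
        conf.toUserPropertyMap(ConfigurationValueOptions.defaults().useDisplayValue(true));
    displayConf.forEach((key, value) -> System.out.println(key + " = " + value));
  }
}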
Use of alluxio.underfs.UnderFileSystemConfiguration in project alluxio by Alluxio.
The class HdfsValidationToolFactory, method create:
@Override
public ValidationTool create(Map<Object, Object> configMap) {
  String ufsPath = (String) configMap.get(ValidationConfig.UFS_PATH);
  UnderFileSystemConfiguration ufsConf =
      (UnderFileSystemConfiguration) configMap.get(ValidationConfig.UFS_CONFIG);
  return new HdfsValidationTool(ufsPath, ufsConf);
}
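A sketch of how a caller might drive this factory, assuming the ValidationConfig key constants shown in create() above, Alluxio 2.x package names, and that the sketch lives in the same package as HdfsValidationToolFactory; the HDFS URI is a placeholder, and executing the returned tool is left to the ValidationTool interface of your Alluxio version:

import java.util.HashMap;
import java.util.Map;

import alluxio.cli.ValidationConfig;
import alluxio.cli.ValidationTool;
import alluxio.conf.AlluxioProperties;
import alluxio.conf.InstancedConfiguration;
import alluxio.underfs.UnderFileSystemConfiguration;

public class HdfsValidationSketch {
  public static void main(String[] args) {
    UnderFileSystemConfiguration ufsConf = UnderFileSystemConfiguration
        .defaults(new InstancedConfiguration(new AlluxioProperties()));

    // The factory reads the path and UFS configuration out of an untyped map,
    // keyed by the ValidationConfig constants used in create() above.
    Map<Object, Object> configMap = new HashMap<>();
    configMap.put(ValidationConfig.UFS_PATH, "hdfs://namenode:9000/alluxio"); // placeholder URI
    configMap.put(ValidationConfig.UFS_CONFIG, ufsConf);

    ValidationTool tool = new HdfsValidationToolFactory().create(configMap);
    // How to run the checks depends on the ValidationTool interface in your
    // Alluxio version, so no run call is shown here.
    System.out.println("created: " + tool.getClass().getSimpleName());
  }
}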
Use of alluxio.underfs.UnderFileSystemConfiguration in project alluxio by Alluxio.
The class UfsIOBench, method read:
private IOTaskResult read(ExecutorService pool) throws InterruptedException, ExecutionException {
  UnderFileSystemConfiguration ufsConf;
  UnderFileSystem ufs;
  int numThreads;
  long ioSizeBytes;
  try {
    // Use multiple threads to saturate the bandwidth of this worker
    numThreads = mParameters.mThreads;
    ioSizeBytes = FormatUtils.parseSpaceSize(mParameters.mDataSize);
    ufsConf = UnderFileSystemConfiguration.defaults(mConf)
        .createMountSpecificConf(mParameters.mConf);
    ufs = UnderFileSystem.Factory.create(mDataDir, ufsConf);
    if (!ufs.exists(mDataDir)) {
      // If the directory does not exist, there is no point in proceeding
      throw new IOException(String.format("The target directory %s does not exist!", mDataDir));
    }
  } catch (Exception e) {
    LOG.error("Failed to access UFS path {}", mDataDir, e);
    // If the UFS path is not valid, abort the test
    IOTaskResult result = new IOTaskResult();
    result.setParameters(mParameters);
    result.setBaseParameters(mBaseParameters);
    result.addError(ValidationUtils.getErrorInfo(e));
    return result;
  }
  List<CompletableFuture<IOTaskResult>> futures = new ArrayList<>();
  for (int i = 0; i < numThreads; i++) {
    final int idx = i;
    CompletableFuture<IOTaskResult> future = CompletableFuture.supplyAsync(() -> {
      IOTaskResult result = new IOTaskResult();
      result.setBaseParameters(mBaseParameters);
      result.setParameters(mParameters);
      long startTime = CommonUtils.getCurrentMs();
      String filePath = getFilePath(idx);
      LOG.debug("Reading filePath={}", filePath);
      long readBytes = 0;
      InputStream inStream = null;
      try {
        inStream = ufs.open(filePath);
        byte[] buf = new byte[BUFFER_SIZE];
        int readBufBytes;
        while (readBytes < ioSizeBytes && (readBufBytes = inStream.read(buf)) > 0) {
          readBytes += readBufBytes;
        }
        long endTime = CommonUtils.getCurrentMs();
        // Convert milliseconds to seconds
        double duration = (endTime - startTime) / 1000.0;
        IOTaskResult.Point p =
            new IOTaskResult.Point(IOTaskResult.IOMode.READ, duration, readBytes);
        result.addPoint(p);
        LOG.debug("Read task finished {}", p);
      } catch (Exception e) {
        LOG.error("Failed to read {}", filePath, e);
        result.addError(ValidationUtils.getErrorInfo(e));
      } finally {
        if (inStream != null) {
          try {
            inStream.close();
          } catch (IOException e) {
            LOG.warn("Failed to close read stream {}", filePath, e);
            result.addError(e.getMessage());
          }
        }
      }
      return result;
    }, pool);
    futures.add(future);
  }
  // Collect the results once all tasks have completed
  CompletableFuture[] cfs = futures.toArray(new CompletableFuture[0]);
  List<IOTaskResult> results = CompletableFuture.allOf(cfs)
      .thenApply(f -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList()))
      .get();
  return IOTaskResult.reduceList(results);
}
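The collection step at the end is a standard fan-out/collect idiom: allOf completes only when every task has, and only then does thenApply join each (already completed) future, so join() cannot block on an unfinished task. A self-contained, JDK-only sketch of the pattern, with the per-thread read replaced by a stand-in:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

public class FanOutCollectSketch {
  public static void main(String[] args) throws InterruptedException, ExecutionException {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<CompletableFuture<Long>> futures = new ArrayList<>();
    for (int i = 0; i < 4; i++) {
      final int idx = i;
      // Each task returns its own result; errors are captured per task rather
      // than failing the whole batch, mirroring IOTaskResult.addError above.
      futures.add(CompletableFuture.supplyAsync(() -> simulateRead(idx), pool));
    }
    // allOf waits for all tasks; by the time thenApply runs, every join()
    // returns immediately with that task's result.
    List<Long> results = CompletableFuture
        .allOf(futures.toArray(new CompletableFuture[0]))
        .thenApply(f -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList()))
        .get();
    System.out.println("bytes read per task: " + results);
    pool.shutdown();
  }

  private static long simulateRead(int idx) {
    return 1024L * (idx + 1); // stand-in for the per-thread read loop
  }
}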
Use of alluxio.underfs.UnderFileSystemConfiguration in project alluxio by Alluxio.
The class UfsIOBench, method write:
private IOTaskResult write(ExecutorService pool) throws InterruptedException, ExecutionException {
  UnderFileSystemConfiguration ufsConf;
  UnderFileSystem ufs;
  int numThreads;
  long ioSizeBytes;
  try {
    // Use multiple threads to saturate the bandwidth of this worker
    numThreads = mParameters.mThreads;
    ioSizeBytes = FormatUtils.parseSpaceSize(mParameters.mDataSize);
    ufsConf = UnderFileSystemConfiguration.defaults(mConf)
        .createMountSpecificConf(mParameters.mConf);
    // Create a subdirectory for the IO
    ufs = UnderFileSystem.Factory.create(mDataDir, ufsConf);
    if (!ufs.exists(mDataDir)) {
      LOG.debug("Prepare directory {}", mDataDir);
      ufs.mkdirs(mDataDir);
    }
  } catch (Exception e) {
    LOG.error("Failed to prepare base directory {}", mDataDir, e);
    // If the UFS path is not valid, abort the test
    IOTaskResult result = new IOTaskResult();
    result.setParameters(mParameters);
    result.setBaseParameters(mBaseParameters);
    result.addError(ValidationUtils.getErrorInfo(e));
    return result;
  }
  List<CompletableFuture<IOTaskResult>> futures = new ArrayList<>();
  final byte[] randomData = CommonUtils.randomBytes(BUFFER_SIZE);
  for (int i = 0; i < numThreads; i++) {
    final int idx = i;
    CompletableFuture<IOTaskResult> future = CompletableFuture.supplyAsync(() -> {
      IOTaskResult result = new IOTaskResult();
      result.setParameters(mParameters);
      result.setBaseParameters(mBaseParameters);
      long startTime = CommonUtils.getCurrentMs();
      String filePath = getFilePath(idx);
      LOG.debug("filePath={}, data to write={}", filePath, mParameters.mDataSize);
      long wroteBytes = 0;
      BufferedOutputStream outStream = null;
      try {
        outStream = new BufferedOutputStream(ufs.create(filePath));
        while (wroteBytes < ioSizeBytes) {
          long bytesToWrite = Math.min(ioSizeBytes - wroteBytes, BUFFER_SIZE);
          // bytesToWrite is bounded by BUFFER_SIZE, which is an integer,
          // so the narrowing cast below is safe
          outStream.write(randomData, 0, (int) bytesToWrite);
          wroteBytes += bytesToWrite;
        }
        outStream.flush();
        long endTime = CommonUtils.getCurrentMs();
        // Convert milliseconds to seconds
        double duration = (endTime - startTime) / 1000.0;
        IOTaskResult.Point p =
            new IOTaskResult.Point(IOTaskResult.IOMode.WRITE, duration, wroteBytes);
        result.addPoint(p);
        LOG.debug("Write task finished {}", p);
      } catch (Exception e) {
        LOG.error("Failed to write to UFS: ", e);
        result.addError(e.getMessage());
      } finally {
        if (outStream != null) {
          try {
            outStream.close();
          } catch (IOException e) {
            LOG.warn("Failed to close stream to UFS: ", e);
            result.addError(e.getMessage());
          }
        }
      }
      LOG.debug("Thread {} file={}, IOBench result={}",
          Thread.currentThread().getName(), filePath, result);
      return result;
    }, pool);
    futures.add(future);
  }
  // Collect the results once all tasks have completed
  CompletableFuture[] cfs = futures.toArray(new CompletableFuture[0]);
  List<IOTaskResult> results = CompletableFuture.allOf(cfs)
      .thenApply(f -> futures.stream().map(CompletableFuture::join).collect(Collectors.toList()))
      .get();
  return IOTaskResult.reduceList(results);
}
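The write loop caps each chunk at BUFFER_SIZE via Math.min, which is exactly what makes the narrowing cast to int safe. A JDK-only sketch of the same loop against a local file; the buffer size, target size, and output path are illustrative, not the bench's real defaults:

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Random;

public class ChunkedWriteSketch {
  // Illustrative buffer size; UfsIOBench defines its own BUFFER_SIZE constant.
  private static final int BUFFER_SIZE = 4 * 1024 * 1024;

  public static void main(String[] args) throws IOException {
    long ioSizeBytes = 10L * 1024 * 1024; // 10 MB target, analogous to the --io-size parameter
    byte[] randomData = new byte[BUFFER_SIZE];
    new Random().nextBytes(randomData); // stand-in for CommonUtils.randomBytes
    long wroteBytes = 0;
    try (BufferedOutputStream out =
        new BufferedOutputStream(new FileOutputStream("/tmp/ufs-io-bench-sketch"))) {
      while (wroteBytes < ioSizeBytes) {
        // Math.min bounds bytesToWrite by BUFFER_SIZE (an int), so the
        // narrowing cast cannot truncate.
        long bytesToWrite = Math.min(ioSizeBytes - wroteBytes, BUFFER_SIZE);
        out.write(randomData, 0, (int) bytesToWrite);
        wroteBytes += bytesToWrite;
      }
      out.flush();
    }
    System.out.println("wrote " + wroteBytes + " bytes");
  }
}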
Use of alluxio.underfs.UnderFileSystemConfiguration in project alluxio by Alluxio.
The class HdfsUnderFileSystem, method createConfiguration:
/**
 * Prepares the Hadoop configuration necessary to successfully obtain a {@link FileSystem}
 * instance that can access the provided path.
 * <p>
 * Derived implementations that work with specialised Hadoop {@linkplain FileSystem} API
 * compatible implementations can override this method to add implementation-specific
 * configuration necessary for obtaining a usable {@linkplain FileSystem} instance.
 * </p>
 *
 * @param conf the configuration for this UFS
 * @return the configuration for HDFS
 */
public static Configuration createConfiguration(UnderFileSystemConfiguration conf) {
  Preconditions.checkNotNull(conf, "conf");
  Configuration hdfsConf = new Configuration();
  // The paths of these files can be passed through --option as a colon-separated list
  for (String path : conf.getString(PropertyKey.UNDERFS_HDFS_CONFIGURATION).split(":")) {
    if (!path.isEmpty()) {
      hdfsConf.addResource(new Path(path));
    }
  }
  // On Hadoop 2.x this is strictly unnecessary, since Hadoop uses ServiceLoader to
  // discover available file system implementations automatically. However, the setting
  // is required for earlier Hadoop versions and is still honoured as an override even
  // in 2.x, so if present it is propagated to the Hadoop configuration.
  String ufsHdfsImpl = conf.getString(PropertyKey.UNDERFS_HDFS_IMPL);
  if (!StringUtils.isEmpty(ufsHdfsImpl)) {
    hdfsConf.set("fs.hdfs.impl", ufsHdfsImpl);
  }
  // Disable HDFS client caching so that the input configuration is respected.
  // Configurable via a system property.
  hdfsConf.set("fs.hdfs.impl.disable.cache",
      System.getProperty("fs.hdfs.impl.disable.cache", "true"));
  // Set all parameters passed through --option
  for (Map.Entry<String, Object> entry : conf.getMountSpecificConf().entrySet()) {
    hdfsConf.set(entry.getKey(),
        entry.getValue() == null ? null : String.valueOf(entry.getValue()));
  }
  return hdfsConf;
}
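A sketch of how a caller might assemble the input and invoke createConfiguration, assuming InstancedConfiguration.set(PropertyKey, Object) and Alluxio 2.x package names; the XML file paths and the Hadoop override key are placeholders:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

import alluxio.conf.AlluxioProperties;
import alluxio.conf.InstancedConfiguration;
import alluxio.conf.PropertyKey;
import alluxio.underfs.UnderFileSystemConfiguration;
import alluxio.underfs.hdfs.HdfsUnderFileSystem;

public class HdfsConfSketch {
  public static void main(String[] args) {
    InstancedConfiguration alluxioConf = new InstancedConfiguration(new AlluxioProperties());
    // Colon-separated list of Hadoop XML files, as split by createConfiguration.
    alluxioConf.set(PropertyKey.UNDERFS_HDFS_CONFIGURATION,
        "/etc/hadoop/core-site.xml:/etc/hadoop/hdfs-site.xml"); // placeholder paths

    // Mount-specific keys (the --option values) are copied verbatim into the
    // Hadoop configuration by the final loop of createConfiguration.
    Map<String, String> mountOptions = new HashMap<>();
    mountOptions.put("dfs.client.read.shortcircuit", "true"); // placeholder override

    UnderFileSystemConfiguration ufsConf = UnderFileSystemConfiguration
        .defaults(alluxioConf)
        .createMountSpecificConf(mountOptions);

    Configuration hdfsConf = HdfsUnderFileSystem.createConfiguration(ufsConf);
    // Client caching is disabled by default unless overridden via system property.
    System.out.println(hdfsConf.get("fs.hdfs.impl.disable.cache"));
  }
}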