Use of alluxio.conf.AlluxioConfiguration in project alluxio by Alluxio.
The class InstallCommand, method run:
@Override
public int run(CommandLine cl) {
  String uri = cl.getArgs()[0];
  AlluxioConfiguration conf = ServerConfiguration.global();
  String extensionsDir = conf.getString(PropertyKey.EXTENSIONS_DIR);
  File dir = new File(extensionsDir);
  if (!dir.exists() && !dir.mkdirs()) {
    System.err.println("Failed to create extensions directory " + extensionsDir);
    return -1;
  }
  List<String> failedHosts = new ArrayList<>();
  for (String host : ConfigurationUtils.getServerHostnames(conf)) {
    try {
      LOG.info("Attempting to install extension on host {}", host);
      // Parent folder on target host
      String targetUriParent =
          extensionsDir.endsWith("/") ? extensionsDir : extensionsDir.concat("/");
      String rsyncCmd = String.format("rsync -e \"ssh %s\" -az %s %s:%s",
          ShellUtils.COMMON_SSH_OPTS, uri, host, targetUriParent);
      LOG.debug("Executing: {}", rsyncCmd);
      String output = ShellUtils.execCommand("bash", "-c", rsyncCmd);
      LOG.debug("Succeeded w/ output: {}", output);
    } catch (IOException e) {
      LOG.error("Error installing extension on host {}", host, e);
      failedHosts.add(host);
    }
  }
  if (failedHosts.size() != 0) {
    System.err.println("Failed to install extension on hosts:");
    for (String failedHost : failedHosts) {
      System.err.println(failedHost);
    }
    return -1;
  }
  System.out.println("Extension installed successfully.");
  return 0;
}
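The pattern above, resolving an AlluxioConfiguration and reading a property through a typed getter, recurs in every example on this page. The following is a minimal sketch of that read in isolation; it assumes only that the Alluxio core classes shown in the command above are on the classpath, and the printed value is whatever the site configuration provides.

import alluxio.conf.AlluxioConfiguration;
import alluxio.conf.PropertyKey;
import alluxio.conf.ServerConfiguration;

public class ExtensionsDirExample {
  public static void main(String[] args) {
    // ServerConfiguration.global() resolves site properties, system properties and defaults.
    AlluxioConfiguration conf = ServerConfiguration.global();
    // Typed getter for a path-like value, as used by InstallCommand above.
    String extensionsDir = conf.getString(PropertyKey.EXTENSIONS_DIR);
    System.out.println("Extensions directory: " + extensionsDir);
  }
}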
Use of alluxio.conf.AlluxioConfiguration in project alluxio by Alluxio.
The class MigrateDefinitionRunTaskTest, method before:
@Before
public void before() throws Exception {
  AlluxioConfiguration conf = ConfigurationTestUtils.defaults();
  mMockFileSystem = Mockito.mock(FileSystem.class);
  mMockFileSystemContext = PowerMockito.mock(FileSystemContext.class);
  when(mMockFileSystemContext.getClientContext()).thenReturn(ClientContext.create(conf));
  when(mMockFileSystemContext.getClusterConf()).thenReturn(conf);
  when(mMockFileSystemContext.getPathConf(any(AlluxioURI.class))).thenReturn(conf);
  mMockInStream = new MockFileInStream(TEST_SOURCE_CONTENTS);
  when(mMockFileSystem.openFile(eq(new AlluxioURI(TEST_SOURCE)), any(OpenFilePOptions.class)))
      .thenReturn(mMockInStream);
  mMockOutStream = new MockFileOutStream(mMockFileSystemContext);
  when(mMockFileSystem.createFile(eq(new AlluxioURI(TEST_DESTINATION)), any(CreateFilePOptions.class)))
      .thenReturn(mMockOutStream);
  mMockUfsManager = Mockito.mock(UfsManager.class);
}
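When a test needs non-default behaviour, the same stubbing approach works with an adjusted configuration. This is a hedged sketch that slots into a setup method like before() above; it assumes ConfigurationTestUtils.defaults() returns a mutable InstancedConfiguration (as in the Alluxio test utilities) and the overridden key and value are purely illustrative.

  // Start from test defaults and override a single key for this test case (illustrative value).
  InstancedConfiguration conf = ConfigurationTestUtils.defaults();
  conf.set(PropertyKey.USER_FILE_BUFFER_BYTES, "4MB");
  // Hand the adjusted configuration to the mocked context, exactly as in before().
  when(mMockFileSystemContext.getClusterConf()).thenReturn(conf);
  when(mMockFileSystemContext.getPathConf(any(AlluxioURI.class))).thenReturn(conf);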
Use of alluxio.conf.AlluxioConfiguration in project alluxio by Alluxio.
The class LoadTableCommand, method run:
@Override
public int run(CommandLine cl) throws AlluxioException, IOException {
  System.out.println("***Tip: The load table command only supports Hive tables for now.***");
  String[] args = cl.getArgs();
  String dbName = args[0];
  String tableName = args[1];
  if (!tableExists(dbName, tableName)) {
    System.out.printf("Failed to load table %s.%s: the table does not exist.%n", dbName, tableName);
    return 0;
  }
  mActiveJobs = FileSystemShellUtils.getIntArg(cl, ACTIVE_JOB_COUNT_OPTION,
      AbstractDistributedJobCommand.DEFAULT_ACTIVE_JOBS);
  System.out.format("Allow up to %s active jobs%n", mActiveJobs);
  AlluxioConfiguration conf = mFsContext.getClusterConf();
  int defaultBatchSize = conf.getInt(PropertyKey.JOB_REQUEST_BATCH_SIZE);
  int defaultReplication = conf.getInt(PropertyKey.TABLE_LOAD_DEFAULT_REPLICATION);
  int replication = FileSystemShellUtils.getIntArg(cl, REPLICATION_OPTION, defaultReplication);
  int batchSize = FileSystemShellUtils.getIntArg(cl, BATCH_SIZE_OPTION, defaultBatchSize);
  boolean directCache = !cl.hasOption(PASSIVE_CACHE_OPTION.getLongOpt());
  Set<String> workerSet = new HashSet<>();
  Set<String> excludedWorkerSet = new HashSet<>();
  Set<String> localityIds = new HashSet<>();
  Set<String> excludedLocalityIds = new HashSet<>();
  if (cl.hasOption(HOST_FILE_OPTION.getLongOpt())) {
    String hostFile = cl.getOptionValue(HOST_FILE_OPTION.getLongOpt()).trim();
    readLinesToSet(workerSet, hostFile);
  } else if (cl.hasOption(HOSTS_OPTION.getLongOpt())) {
    String argOption = cl.getOptionValue(HOSTS_OPTION.getLongOpt()).trim();
    readItemsFromOptionString(workerSet, argOption);
  }
  if (cl.hasOption(EXCLUDED_HOST_FILE_OPTION.getLongOpt())) {
    String hostFile = cl.getOptionValue(EXCLUDED_HOST_FILE_OPTION.getLongOpt()).trim();
    readLinesToSet(excludedWorkerSet, hostFile);
  } else if (cl.hasOption(EXCLUDED_HOSTS_OPTION.getLongOpt())) {
    String argOption = cl.getOptionValue(EXCLUDED_HOSTS_OPTION.getLongOpt()).trim();
    readItemsFromOptionString(excludedWorkerSet, argOption);
  }
  if (cl.hasOption(LOCALITY_FILE_OPTION.getLongOpt())) {
    String localityFile = cl.getOptionValue(LOCALITY_FILE_OPTION.getLongOpt()).trim();
    readLinesToSet(localityIds, localityFile);
  } else if (cl.hasOption(LOCALITY_OPTION.getLongOpt())) {
    String argOption = cl.getOptionValue(LOCALITY_OPTION.getLongOpt()).trim();
    readItemsFromOptionString(localityIds, argOption);
  }
  if (cl.hasOption(EXCLUDED_LOCALITY_FILE_OPTION.getLongOpt())) {
    String localityFile = cl.getOptionValue(EXCLUDED_LOCALITY_FILE_OPTION.getLongOpt()).trim();
    readLinesToSet(excludedLocalityIds, localityFile);
  } else if (cl.hasOption(EXCLUDED_LOCALITY_OPTION.getLongOpt())) {
    String argOption = cl.getOptionValue(EXCLUDED_LOCALITY_OPTION.getLongOpt()).trim();
    readItemsFromOptionString(excludedLocalityIds, argOption);
  }
  List<URIStatus> filePool = new ArrayList<>(batchSize);
  // Only Hive tables are supported for now.
  String udbType = "hive";
  // To load a table into Alluxio space, we first resolve the SDS table's Alluxio parent path and
  // then load the data under that path. For now, each table has a single parent path generated by
  // CatalogPathUtils#getTablePathUdb.
  // The parent path is mounted by SDS; it is the Alluxio mapping of the UDB table's UFS path, e.g.:
  //                                                       attached
  //   [SDS]default.test                                    <=====  [hive]default.test
  //                                                       mounted
  //   [SDS]alluxio:///catalog/default/tables/test/hive/    <=====  [hive]hdfs:///.../default.db/test/
  // PLEASE NOTE: if Alluxio ever supports different parent paths, this assumption is no longer
  // guaranteed to hold.
  AlluxioURI path = CatalogPathUtils.getTablePathUdb(dbName, tableName, udbType);
  System.out.printf("Loading table %s.%s...%n", dbName, tableName);
  DistributedLoadUtils.distributedLoad(this, filePool, batchSize, path, replication, workerSet,
      excludedWorkerSet, localityIds, excludedLocalityIds, directCache, true);
  System.out.println("Done");
  return 0;
}
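To make the path mapping described in the comment concrete, the sketch below derives the Alluxio-side parent path the same way the command does. The database and table names are hypothetical, and the expected output is taken from the comment above rather than verified against a running cluster.

// Hypothetical database/table names, used only to illustrate the mapping described above.
String dbName = "default";
String tableName = "test";
String udbType = "hive"; // only Hive is supported by this command for now
AlluxioURI tableParent = CatalogPathUtils.getTablePathUdb(dbName, tableName, udbType);
// Per the comment in LoadTableCommand#run, this is expected to look like
// alluxio:///catalog/default/tables/test/hive/
System.out.println(tableParent);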
Use of alluxio.conf.AlluxioConfiguration in project alluxio by Alluxio.
The class LocalFileDataWriter, method create:
/**
 * Creates an instance of {@link LocalFileDataWriter}. This requires the block to be locked
 * beforehand.
 *
 * @param context the file system context
 * @param address the worker network address
 * @param blockId the block ID
 * @param blockSize the block size in bytes
 * @param options the output stream options
 * @return the {@link LocalFileDataWriter} created
 */
public static LocalFileDataWriter create(final FileSystemContext context,
    final WorkerNetAddress address, long blockId, long blockSize, OutStreamOptions options)
    throws IOException {
  AlluxioConfiguration conf = context.getClusterConf();
  long chunkSize = conf.getBytes(PropertyKey.USER_LOCAL_WRITER_CHUNK_SIZE_BYTES);
  Closer closer = Closer.create();
  try {
    CloseableResource<BlockWorkerClient> blockWorker = context.acquireBlockWorkerClient(address);
    closer.register(blockWorker);
    int writerBufferSizeMessages =
        conf.getInt(PropertyKey.USER_STREAMING_WRITER_BUFFER_SIZE_MESSAGES);
    long fileBufferBytes = conf.getBytes(PropertyKey.USER_FILE_BUFFER_BYTES);
    long dataTimeout = conf.getMs(PropertyKey.USER_STREAMING_DATA_WRITE_TIMEOUT);
    // In cases where the precise block size is known, make a more accurate reservation.
    long reservedBytes = Math.min(blockSize, conf.getBytes(PropertyKey.USER_FILE_RESERVED_BYTES));
    CreateLocalBlockRequest.Builder builder = CreateLocalBlockRequest.newBuilder()
        .setBlockId(blockId)
        .setTier(options.getWriteTier())
        .setSpaceToReserve(reservedBytes)
        .setMediumType(options.getMediumType())
        .setPinOnCreate(options.getWriteType() == WriteType.ASYNC_THROUGH);
    if (options.getWriteType() == WriteType.ASYNC_THROUGH
        && conf.getBoolean(PropertyKey.USER_FILE_UFS_TIER_ENABLED)) {
      builder.setCleanupOnFailure(false);
    }
    CreateLocalBlockRequest createRequest = builder.build();
    GrpcBlockingStream<CreateLocalBlockRequest, CreateLocalBlockResponse> stream =
        new GrpcBlockingStream<>(blockWorker.get()::createLocalBlock, writerBufferSizeMessages,
            MoreObjects.toStringHelper(LocalFileDataWriter.class)
                .add("request", createRequest)
                .add("address", address)
                .toString());
    stream.send(createRequest, dataTimeout);
    CreateLocalBlockResponse response = stream.receive(dataTimeout);
    Preconditions.checkState(response != null && response.hasPath());
    LocalFileBlockWriter writer = closer.register(new LocalFileBlockWriter(response.getPath()));
    return new LocalFileDataWriter(chunkSize, writer, createRequest, stream, closer,
        fileBufferBytes, dataTimeout);
  } catch (Exception e) {
    throw CommonUtils.closeAndRethrow(closer, e);
  }
}
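All of the tuning values above come from the cluster-wide configuration attached to the FileSystemContext; the Math.min with USER_FILE_RESERVED_BYTES keeps short blocks from over-reserving local storage. The sketch below shows those configuration reads in isolation; the way the context is created here is an assumption (the factory above simply receives an existing context from its caller).

// Assumed setup: build a context from client defaults; create() above is handed a context instead.
AlluxioConfiguration clientConf = InstancedConfiguration.defaults();
FileSystemContext context = FileSystemContext.create(ClientContext.create(clientConf));

AlluxioConfiguration conf = context.getClusterConf();
long chunkSize = conf.getBytes(PropertyKey.USER_LOCAL_WRITER_CHUNK_SIZE_BYTES);
long writeTimeoutMs = conf.getMs(PropertyKey.USER_STREAMING_DATA_WRITE_TIMEOUT);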
Use of alluxio.conf.AlluxioConfiguration in project alluxio by Alluxio.
The class BaseFileSystem, method openFile:
@Override
public FileInStream openFile(AlluxioURI path, OpenFilePOptions options)
    throws FileDoesNotExistException, OpenDirectoryException, FileIncompleteException,
    IOException, AlluxioException {
  checkUri(path);
  AlluxioConfiguration conf = mFsContext.getPathConf(path);
  URIStatus status = getStatus(path, FileSystemOptions.getStatusDefaults(conf).toBuilder()
      .setAccessMode(Bits.READ)
      .setUpdateTimestamps(options.getUpdateLastAccessTime())
      .build());
  return openFile(status, options);
}
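From the caller's side the path-specific configuration lookup is invisible; a client simply opens the file. A usage sketch follows, in which the Alluxio path is hypothetical and FileSystem.Factory.create is assumed to be available as in the Alluxio client API.

// Hypothetical path; buffer size chosen arbitrarily for the sketch.
AlluxioConfiguration conf = InstancedConfiguration.defaults();
try (FileSystem fs = FileSystem.Factory.create(conf);
     FileInStream in = fs.openFile(new AlluxioURI("/data/input.txt"))) {
  byte[] buf = new byte[8 * 1024];
  int read;
  while ((read = in.read(buf)) != -1) {
    // process buf[0..read)
  }
}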