Example usage of alluxio.client.file.FileInStream in the Alluxio project:
the eofSeek method of the FileInStreamIntegrationTest class.
/**
 * Tests {@link FileInStream#seek(long)} when at the end of a file at the block boundary.
 */
@Test
public void eofSeek() throws Exception {
String uniqPath = PathUtils.uniqPath();
int length = BLOCK_SIZE * 3;
// Exercise every create-file option combination against the same read/seek sequence.
for (CreateFilePOptions op : getOptionSet()) {
String filename = uniqPath + "/file_" + op.hashCode();
AlluxioURI uri = new AlluxioURI(filename);
FileSystemTestUtils.createByteFile(mFileSystem, filename, length, op);
// try-with-resources: the original leaked the stream whenever an assertion
// failed before the explicit close() was reached.
try (FileInStream is = mFileSystem.openFile(uri, FileSystemTestUtils.toOpenFileOptions(op))) {
byte[] data = new byte[length];
is.read(data, 0, length);
Assert.assertTrue(BufferUtils.equalIncreasingByteArray(length, data));
// Now at EOF on a block boundary; seek back to the start and re-read everything.
is.seek(0);
is.read(data, 0, length);
Assert.assertTrue(BufferUtils.equalIncreasingByteArray(length, data));
}
}
}
Example usage of alluxio.client.file.FileInStream in the Alluxio project:
the readRecoverFromLostWorker method of the MultiWorkerIntegrationTest class.
@Test
@LocalAlluxioClusterResource.Config(confParams = { PropertyKey.Name.USER_SHORT_CIRCUIT_ENABLED, "false", PropertyKey.Name.USER_BLOCK_SIZE_BYTES_DEFAULT, "16MB", PropertyKey.Name.USER_STREAMING_READER_CHUNK_SIZE_BYTES, "64KB", PropertyKey.Name.USER_BLOCK_READ_RETRY_MAX_DURATION, "1s", PropertyKey.Name.WORKER_RAMDISK_SIZE, "1GB" })
public void readRecoverFromLostWorker() throws Exception {
// The read is split in two: the first part completes before the worker is killed,
// the second part must recover by reading from a replica.
int firstPart = 17 * Constants.MB;
int secondPart = 33 * Constants.MB;
int fileLength = firstPart + secondPart;
// creates a test file on one worker
AlluxioURI testFile = new AlluxioURI("/test");
createFileOnWorker(fileLength, testFile, mResource.get().getWorkerAddress());
FileSystem client = mResource.get().getClient();
try (FileInStream stream = client.openFile(testFile, OpenFilePOptions.getDefaultInstance())) {
byte[] content = new byte[fileLength];
int bytesRead = stream.read(content, 0, firstPart);
// Replicate the blocks elsewhere, then stop the worker that held the original copy
// while the stream is still open.
replicateFileBlocks(testFile);
mResource.get().getWorkerProcess().stop();
bytesRead += stream.read(content, firstPart, secondPart);
Assert.assertEquals(fileLength, bytesRead);
Assert.assertTrue(BufferUtils.equalIncreasingByteArray(firstPart, bytesRead, content));
}
}
Example usage of alluxio.client.file.FileInStream in the Alluxio project:
the downloadFile method of the PathsRestServiceHandler class.
/**
 * @summary Download a file.
 * @param path the path
 * @return the response
 */
@GET
@Path(PATH_PARAM + DOWNLOAD_FILE)
@ApiOperation(value = "Download the given file at the path", response = java.io.InputStream.class)
@Produces(MediaType.APPLICATION_OCTET_STREAM)
public Response downloadFile(@PathParam("path") final String path) {
AlluxioURI uri = new AlluxioURI(path);
// Force a browser download with the file's base name.
Map<String, Object> headers = new HashMap<>();
headers.put("Content-Disposition", "attachment; filename=" + uri.getName());
// Open the Alluxio stream lazily inside the REST call wrapper so errors are
// translated into the standard error response.
return RestUtils.call((RestUtils.RestCallable<InputStream>) () -> {
FileInStream is = mFileSystem.openFile(uri);
if (is == null) {
throw new IllegalArgumentException("stream does not exist");
}
return is;
}, ServerConfiguration.global(), headers);
}
Example usage of alluxio.client.file.FileInStream in the Alluxio project:
the main method of the MultiMount example class.
/**
 * Entry point for the {@link MultiMount} program.
 *
 * <p>Mounts an S3 bucket and an HDFS cluster under /mnt, copies a file from the S3 mount
 * to the HDFS mount, and unmounts both on exit (best-effort).
 *
 * @param args command-line arguments
 */
public static void main(String[] args) {
if (args.length != 1) {
System.err.println("Usage: ./bin/alluxio runClass alluxio.examples.MultiMount <HDFS_URL>");
System.exit(-1);
}
AlluxioConfiguration alluxioConf = new InstancedConfiguration(ConfigurationUtils.defaults());
AlluxioURI mntPath = new AlluxioURI("/mnt");
AlluxioURI s3Mount = new AlluxioURI("/mnt/s3");
AlluxioURI inputPath = new AlluxioURI("/mnt/s3/hello.txt");
AlluxioURI s3Path = new AlluxioURI("s3a://alluxio-demo/");
AlluxioURI hdfsMount = new AlluxioURI("/mnt/hdfs");
AlluxioURI outputPath = new AlluxioURI("/mnt/hdfs/hello.txt");
AlluxioURI hdfsPath = new AlluxioURI(args[0]);
FileSystem fileSystem = FileSystem.Factory.create(alluxioConf);
try {
// Make sure mount directory exists.
if (!fileSystem.exists(mntPath)) {
System.out.print("creating " + mntPath + " ... ");
fileSystem.createDirectory(mntPath);
System.out.println("done");
}
// Make sure the S3 mount point does not exist.
if (fileSystem.exists(s3Mount)) {
System.out.print("unmounting " + s3Mount + " ... ");
fileSystem.unmount(s3Mount);
System.out.println("done");
}
// Make sure the HDFS mount point does not exist.
if (fileSystem.exists(hdfsMount)) {
System.out.print("unmounting " + hdfsMount + " ... ");
fileSystem.unmount(hdfsMount);
System.out.println("done");
}
// Mount S3.
System.out.print("mounting " + s3Path + " to " + s3Mount + " ... ");
fileSystem.mount(s3Mount, s3Path);
System.out.println("done");
// Mount HDFS.
System.out.print("mounting " + hdfsPath + " to " + hdfsMount + " ... ");
fileSystem.mount(hdfsMount, hdfsPath);
System.out.println("done");
// Make sure output file does not exist.
if (fileSystem.exists(outputPath)) {
System.out.print("deleting " + outputPath + " ... ");
fileSystem.delete(outputPath);
System.out.println("done");
}
// Open both streams with try-with-resources: the original used explicit close()
// calls, leaking both streams whenever the transfer threw. Note the output stream
// now closes before the input stream.
System.out.print("opening " + inputPath + " ... ");
try (FileInStream is = fileSystem.openFile(inputPath)) {
System.out.println("done");
// Open the output stream, setting the write type to make sure result is persisted.
System.out.print("opening " + outputPath + " ... ");
CreateFilePOptions options = CreateFilePOptions.newBuilder().setWriteType(WritePType.CACHE_THROUGH).setRecursive(true).build();
try (FileOutStream os = fileSystem.createFile(outputPath, options)) {
System.out.println("done");
// Copy the data
System.out.print("transferring data from " + inputPath + " to " + outputPath + " ... ");
IOUtils.copy(is, os);
System.out.println("done");
// Close the output stream (auto-closed at the end of this block).
System.out.print("closing " + outputPath + " ... ");
}
System.out.println("done");
// Close the input stream (auto-closed at the end of this block).
System.out.print("closing " + inputPath + " ... ");
}
System.out.println("done");
} catch (Exception e) {
System.out.println("fail");
e.printStackTrace();
} finally {
// Best-effort cleanup of both mount points.
unmountIfExists(fileSystem, s3Mount);
unmountIfExists(fileSystem, hdfsMount);
}
}

/**
 * Unmounts the given mount point if it exists, swallowing (but reporting) any failure
 * so that cleanup of one mount cannot prevent cleanup of the other.
 *
 * @param fileSystem the Alluxio file system
 * @param mountPoint the mount point to remove
 */
private static void unmountIfExists(FileSystem fileSystem, AlluxioURI mountPoint) {
try {
if (fileSystem.exists(mountPoint)) {
System.out.print("unmounting " + mountPoint + " ... ");
fileSystem.unmount(mountPoint);
System.out.println("done");
}
} catch (Exception e) {
System.out.println("fail");
e.printStackTrace();
}
}
Example usage of alluxio.client.file.FileInStream in the Alluxio project:
the migrate method of the MigrateDefinition class.
/**
 * Copies a single file from the source path to the destination path, retrying once
 * after deleting the destination when it already exists and {@code overwrite} is set.
 *
 * @param command the migrate command to execute
 * @param writeType the write type to use for the moved file
 * @param fileSystem the Alluxio file system
 * @param overwrite whether to overwrite destination
 */
private static void migrate(MigrateCommand command, WritePType writeType, FileSystem fileSystem, boolean overwrite) throws Exception {
String source = command.getSource();
String destination = command.getDestination();
LOG.debug("Migrating {} to {}", source, destination);
CreateFilePOptions createOptions = CreateFilePOptions.newBuilder().setWriteType(writeType).build();
// NO_CACHE: avoid polluting worker cache with data that is being moved away.
OpenFilePOptions openFileOptions = OpenFilePOptions.newBuilder().setReadType(ReadPType.NO_CACHE).build();
final AlluxioURI destinationURI = new AlluxioURI(destination);
boolean retry;
do {
retry = false;
// Both streams are closed by try-with-resources; closing `out` normally commits the file.
try (FileInStream in = fileSystem.openFile(new AlluxioURI(source), openFileOptions);
FileOutStream out = fileSystem.createFile(destinationURI, createOptions)) {
try {
// 8 MB buffer for the bulk copy.
IOUtils.copyLarge(in, out, new byte[8 * Constants.MB]);
} catch (Throwable t) {
// The copy failed: cancel the output stream so a partial file is not committed,
// and keep the original failure as the primary exception.
try {
out.cancel();
} catch (Throwable t2) {
t.addSuppressed(t2);
}
throw t;
}
} catch (FileAlreadyExistsException e) {
// Destination already exists: either delete it and retry the whole copy once more,
// or propagate the conflict to the caller.
if (overwrite) {
fileSystem.delete(destinationURI);
retry = true;
} else {
throw e;
}
}
} while (retry);
}
Aggregations