Usage of org.apache.hadoop.fs.s3a.S3AFileSystem in the Apache Hadoop project: method test_040_PositionedReadHugeFile of the class AbstractSTestS3AHugeFiles.
/**
 * Time positioned reads against the huge file: one read at the start,
 * one covering the final {@code bufferSize} bytes, then a repeat of the
 * read at the start (which may benefit from connection/data reuse).
 * If the filesystem reports a server-side encryption algorithm, the
 * log output and messages note that the file is encrypted.
 * Skipped (via {@code assumeHugeFileExists()}) when the huge file has
 * not been created by an earlier test in the sequence.
 *
 * @throws Throwable on any failure
 */
@Test
public void test_040_PositionedReadHugeFile() throws Throwable {
  assumeHugeFileExists();
  final String encryption = getConf().getTrimmed(
      SERVER_SIDE_ENCRYPTION_ALGORITHM);
  boolean encrypted = encryption != null;
  if (encrypted) {
    LOG.info("File is encrypted with algorithm {}", encryption);
  }
  String filetype = encrypted ? "encrypted file" : "file";
  describe("Positioned reads of %s %s", filetype, hugefile);
  S3AFileSystem fs = getFileSystem();
  S3AFileStatus status = fs.getFileStatus(hugefile);
  long filesize = status.getLen();
  int ops = 0;
  final int bufferSize = 8192;
  byte[] buffer = new byte[bufferSize];
  ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
  ContractTestUtils.NanoTimer readAtByte0, readAtByte0Again, readAtEOF;
  try (FSDataInputStream in = fs.open(hugefile, uploadBlockSize)) {
    readAtByte0 = new ContractTestUtils.NanoTimer();
    in.readFully(0, buffer);
    readAtByte0.end("time to read data at start of file");
    ops++;
    readAtEOF = new ContractTestUtils.NanoTimer();
    // Read the genuinely last bufferSize bytes of the file.
    // The previous origin of (filesize - 1 - bufferSize) always
    // stopped one byte short of the end of the file.
    in.readFully(filesize - bufferSize, buffer);
    readAtEOF.end("time to read data at end of file");
    ops++;
    readAtByte0Again = new ContractTestUtils.NanoTimer();
    in.readFully(0, buffer);
    readAtByte0Again.end("time to read data at start of file again");
    ops++;
    LOG.info("Final stream state: {}", in);
  }
  long mb = Math.max(filesize / _1MB, 1);
  logFSState();
  // message fixed: "time to performed" -> "time to perform"
  timer.end("time to perform positioned reads of %s of %d MB ",
      filetype, mb);
  LOG.info("Time per positioned read = {} nS",
      toHuman(timer.nanosPerOperation(ops)));
}
Usage of org.apache.hadoop.fs.s3a.S3AFileSystem in the Apache Hadoop project: method test_050_readHugeFile of the class AbstractSTestS3AHugeFiles.
/**
 * Sequentially read the entire huge file in {@code uploadBlockSize}
 * chunks, timing the operation and reporting per-MB time and overall
 * bandwidth over the full file length.
 * Skipped (via {@code assumeHugeFileExists()}) when the huge file has
 * not been created by an earlier test in the sequence.
 *
 * @throws Throwable on any failure
 */
@Test
public void test_050_readHugeFile() throws Throwable {
  assumeHugeFileExists();
  describe("Reading %s", hugefile);
  S3AFileSystem fs = getFileSystem();
  S3AFileStatus status = fs.getFileStatus(hugefile);
  long filesize = status.getLen();
  long blocks = filesize / uploadBlockSize;
  // bytes left over when the file is not a whole number of blocks
  int remainder = (int) (filesize % uploadBlockSize);
  byte[] data = new byte[uploadBlockSize];
  ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
  try (FSDataInputStream in = fs.open(hugefile, uploadBlockSize)) {
    for (long block = 0; block < blocks; block++) {
      in.readFully(data);
    }
    if (remainder > 0) {
      // Read the trailing partial block. Previously these bytes were
      // never read, although the bandwidth figure below is computed
      // over the complete file size.
      in.readFully(data, 0, remainder);
    }
    LOG.info("Final stream state: {}", in);
  }
  long mb = Math.max(filesize / _1MB, 1);
  timer.end("time to read file of %d MB ", mb);
  LOG.info("Time per MB to read = {} nS",
      toHuman(timer.nanosPerOperation(mb)));
  bandwidth(timer, filesize);
  logFSState();
}
Usage of org.apache.hadoop.fs.s3a.S3AFileSystem in the Apache Hadoop project: method test_100_renameHugeFile of the class AbstractSTestS3AHugeFiles.
/**
 * Time renaming the huge file to a second path and back again,
 * verifying after the first rename that the destination exists with
 * the expected length. Per-MB times and bandwidth are logged for both
 * directions.
 * Skipped (via {@code assumeHugeFileExists()}) when the huge file has
 * not been created by an earlier test in the sequence.
 *
 * @throws Throwable on any failure
 */
@Test
public void test_100_renameHugeFile() throws Throwable {
  assumeHugeFileExists();
  describe("renaming %s to %s", hugefile, hugefileRenamed);
  S3AFileSystem fs = getFileSystem();
  S3AFileStatus status = fs.getFileStatus(hugefile);
  long filesize = status.getLen();
  fs.delete(hugefileRenamed, false);
  ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
  // rename() reports failure through its return value, not an
  // exception: assert on it so a failed rename is caught immediately
  // rather than as a confusing getFileStatus() error below.
  assertTrue("rename of " + hugefile + " failed",
      fs.rename(hugefile, hugefileRenamed));
  long mb = Math.max(filesize / _1MB, 1);
  timer.end("time to rename file of %d MB", mb);
  LOG.info("Time per MB to rename = {} nS",
      toHuman(timer.nanosPerOperation(mb)));
  bandwidth(timer, filesize);
  logFSState();
  S3AFileStatus destFileStatus = fs.getFileStatus(hugefileRenamed);
  assertEquals(filesize, destFileStatus.getLen());
  // rename back
  ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
  assertTrue("rename back of " + hugefileRenamed + " failed",
      fs.rename(hugefileRenamed, hugefile));
  timer2.end("Renaming back");
  LOG.info("Time per MB to rename = {} nS",
      toHuman(timer2.nanosPerOperation(mb)));
  bandwidth(timer2, filesize);
}
Usage of org.apache.hadoop.fs.s3a.S3AFileSystem in the Apache Hadoop project: method getRestrictedFileSystem of the class ITestS3AConcurrentOps.
/**
 * Return a filesystem reconfigured with tight concurrency limits and
 * small multipart sizes, to force multipart/queueing behaviour early.
 * NOTE(review): this re-initializes the test's shared filesystem
 * instance rather than constructing a fresh one — confirm that no
 * other test relies on the original configuration afterwards.
 *
 * @return the restricted filesystem
 * @throws Exception on any failure to configure or initialize
 */
private S3AFileSystem getRestrictedFileSystem() throws Exception {
  final Configuration conf = getConfiguration();
  // minimal thread pool and task queue
  conf.setInt(MAX_THREADS, 2);
  conf.setInt(MAX_TOTAL_TASKS, 1);
  // small multipart threshold/size so uploads go multipart quickly
  conf.set(MIN_MULTIPART_THRESHOLD, "10M");
  conf.set(MULTIPART_SIZE, "5M");
  final S3AFileSystem restricted = getFileSystem();
  restricted.initialize(new URI(conf.get(TEST_FS_S3A_NAME)), conf);
  return restricted;
}
Usage of org.apache.hadoop.fs.s3a.S3AFileSystem in the Apache Hadoop project: method getNormalFileSystem of the class ITestS3AConcurrentOps.
/**
 * Instantiate a brand-new S3A filesystem bound to the test bucket,
 * initialized from an unmodified default configuration.
 *
 * @return a freshly initialized filesystem instance
 * @throws Exception on any failure to resolve the URI or initialize
 */
private S3AFileSystem getNormalFileSystem() throws Exception {
  final Configuration conf = new Configuration();
  final URI fsUri = new URI(conf.get(TEST_FS_S3A_NAME));
  final S3AFileSystem fs = new S3AFileSystem();
  fs.initialize(fsUri, conf);
  return fs;
}
Aggregations