Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestViewFsFileStatusHdfs, method testGetFileChecksum.
@Test
public void testGetFileChecksum() throws IOException, URISyntaxException {
  // Create two different files in HDFS.
  fileSystemTestHelper.createFile(fHdfs, someFile);
  fileSystemTestHelper.createFile(fHdfs,
      fileSystemTestHelper.getTestRootPath(fHdfs, someFile + "other"), 1, 512);
  // Get the checksum through ViewFS.
  FileChecksum viewFSCheckSum =
      vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
  // Get the checksum through HDFS.
  FileChecksum hdfsCheckSum = fHdfs.getFileChecksum(new Path(someFile));
  // Get the checksum of a different file in HDFS.
  FileChecksum otherHdfsFileCheckSum =
      fHdfs.getFileChecksum(new Path(someFile + "other"));
  // Checksums of the same file (obtained through HDFS and ViewFS) should be the same.
  assertEquals("HDFS and ViewFS checksums were not the same",
      viewFSCheckSum, hdfsCheckSum);
  // Checksums of different files should be different.
  assertFalse("Some other HDFS file which should not have had the same "
      + "checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
}
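The pattern above generalizes to any pair of paths: FileChecksum.equals() compares the algorithm name and the checksum bytes, and getFileChecksum() may return null on filesystems that cannot provide a checksum. A minimal sketch of that comparison, assuming an already-configured default FileSystem and two hypothetical paths passed on the command line:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumCompare {
  // Hypothetical helper: true only when both checksums are available and equal.
  static boolean sameChecksum(FileSystem fs, Path a, Path b) throws IOException {
    FileChecksum sumA = fs.getFileChecksum(a);
    FileChecksum sumB = fs.getFileChecksum(b);
    // A null checksum means the filesystem does not expose one; treat that as not comparable.
    return sumA != null && sumA.equals(sumB);
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(sameChecksum(fs, new Path(args[0]), new Path(args[1])));
  }
}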
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestSnapshotFileLength, method testSnapshotfileLength.
/**
* Test that we cannot read a file beyond its snapshot length
* when accessing it via a snapshot path.
*
*/
@Test(timeout = 300000)
public void testSnapshotfileLength() throws Exception {
  hdfs.mkdirs(sub);
  int bytesRead;
  byte[] buffer = new byte[BLOCKSIZE * 8];
  int origLen = BLOCKSIZE + 1;
  int toAppend = BLOCKSIZE;
  FSDataInputStream fis = null;
  FileStatus fileStatus = null;
  // Create and write a file.
  Path file1 = new Path(sub, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, SEED);
  DFSTestUtil.appendFile(hdfs, file1, origLen);
  // Create a snapshot on the parent directory.
  hdfs.allowSnapshot(sub);
  hdfs.createSnapshot(sub, snapshot1);
  Path file1snap1 = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
  final FileChecksum snapChksum1 = hdfs.getFileChecksum(file1snap1);
  assertThat("file and snapshot file checksums are not equal",
      hdfs.getFileChecksum(file1), is(snapChksum1));
  // Append to the file.
  FSDataOutputStream out = hdfs.append(file1);
  // HDFS-8150: fetching the checksum of a file under construction should fail.
  try {
    hdfs.getFileChecksum(file1);
    fail("getFileChecksum should fail for files "
        + "with blocks under construction");
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains(
        "Fail to get checksum, since file " + file1 + " is under construction."));
  }
  assertThat("snapshot checksum (post-open for append) has changed",
      hdfs.getFileChecksum(file1snap1), is(snapChksum1));
  try {
    AppendTestUtil.write(out, 0, toAppend);
    // Test reading from a snapshot of a file that is open for append.
    byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
    assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
    // Verify that the checksum didn't change.
    assertThat("snapshot checksum (post-append) has changed",
        hdfs.getFileChecksum(file1snap1), is(snapChksum1));
  } finally {
    out.close();
  }
  assertThat("file and snapshot file checksums (post-close) are equal",
      hdfs.getFileChecksum(file1), not(snapChksum1));
  assertThat("snapshot file checksum (post-close) has changed",
      hdfs.getFileChecksum(file1snap1), is(snapChksum1));
  // Make sure we can read the entire file via its non-snapshot path.
  fileStatus = hdfs.getFileStatus(file1);
  assertThat(fileStatus.getLen(), is((long) origLen + toAppend));
  fis = hdfs.open(file1);
  bytesRead = fis.read(0, buffer, 0, buffer.length);
  assertThat(bytesRead, is(origLen + toAppend));
  fis.close();
  // Try to open the file via its snapshot path.
  fis = hdfs.open(file1snap1);
  fileStatus = hdfs.getFileStatus(file1snap1);
  assertThat(fileStatus.getLen(), is((long) origLen));
  // Make sure we can only read up to the snapshot length.
  bytesRead = fis.read(0, buffer, 0, buffer.length);
  assertThat(bytesRead, is(origLen));
  fis.close();
  byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
  assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
}
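Outside the test helpers, the snapshot path used above is just the snapshottable directory with a .snapshot/<snapshot name> component inserted, and the same checksum comparison applies. A minimal sketch, assuming a hypothetical snapshottable directory /data, a snapshot named s1, and a file data.txt:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotChecksumCheck {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/data");                         // hypothetical snapshottable directory
    Path live = new Path(dir, "data.txt");                // the file's current path
    Path inSnap = new Path(dir, ".snapshot/s1/data.txt"); // the same file as captured by snapshot s1
    FileChecksum liveSum = fs.getFileChecksum(live);
    FileChecksum snapSum = fs.getFileChecksum(inSnap);
    // If the file has not changed since snapshot s1 was taken, the two checksums match.
    System.out.println("unchanged since s1: "
        + (liveSum != null && liveSum.equals(snapSum)));
  }
}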
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class DistributedFileSystem, method getFileChecksum(Path, long).
@Override
public FileChecksum getFileChecksum(Path f, final long length) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_FILE_CHECKSUM);
  Path absF = fixRelativePart(f);
  return new FileSystemLinkResolver<FileChecksum>() {

    @Override
    public FileChecksum doCall(final Path p) throws IOException {
      return dfs.getFileChecksum(getPathName(p), length);
    }

    @Override
    public FileChecksum next(final FileSystem fs, final Path p) throws IOException {
      if (fs instanceof DistributedFileSystem) {
        return fs.getFileChecksum(p, length);
      } else {
        throw new UnsupportedFileSystemException(
            "getFileChecksum(Path, long) is not supported by "
                + fs.getClass().getSimpleName());
      }
    }
  }.resolve(this, absF);
}
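The length argument restricts the checksum to the first length bytes of the file, which makes it possible to ask whether one file is a byte-for-byte prefix of another (for example, after an interrupted copy). A minimal sketch under that assumption, using hypothetical shorter/longer paths on the same filesystem; the comparison is only meaningful when both files were written with the same block size and bytes-per-checksum settings:

import java.io.IOException;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PrefixCheck {
  // Hypothetical helper: does 'longer' start with exactly the bytes of 'shorter'?
  static boolean isPrefix(FileSystem fs, Path shorter, Path longer) throws IOException {
    long len = fs.getFileStatus(shorter).getLen();
    // Checksum of the whole shorter file vs. checksum of the first 'len' bytes of the longer one.
    FileChecksum shortSum = fs.getFileChecksum(shorter);
    FileChecksum prefixSum = fs.getFileChecksum(longer, len);
    return shortSum != null && shortSum.equals(prefixSum);
  }
}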
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class DistributedFileSystem, method getFileChecksum(Path).
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
  statistics.incrementReadOps(1);
  storageStatistics.incrementOpCounter(OpType.GET_FILE_CHECKSUM);
  Path absF = fixRelativePart(f);
  return new FileSystemLinkResolver<FileChecksum>() {

    @Override
    public FileChecksum doCall(final Path p) throws IOException {
      return dfs.getFileChecksum(getPathName(p), Long.MAX_VALUE);
    }

    @Override
    public FileChecksum next(final FileSystem fs, final Path p) throws IOException {
      return fs.getFileChecksum(p);
    }
  }.resolve(this, absF);
}
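Passing Long.MAX_VALUE makes this the whole-file checksum, which is the form typically used to verify a copy between two filesystems. A minimal sketch, assuming hypothetical source and destination FileSystem handles; because HDFS's MD5-of-MD5-of-CRC checksum depends on block size and bytes-per-checksum, identical data can still produce unequal checksums across differently configured clusters:

import java.io.IOException;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CopyVerifier {
  // Returns true only when both filesystems expose a checksum and the two agree.
  public static boolean verifyCopy(FileSystem srcFs, Path src,
      FileSystem dstFs, Path dst) throws IOException {
    FileChecksum srcSum = srcFs.getFileChecksum(src);
    FileChecksum dstSum = dstFs.getFileChecksum(dst);
    if (srcSum == null || dstSum == null) {
      // A null checksum means the filesystem cannot provide one;
      // the copy is neither confirmed nor refuted here.
      return false;
    }
    return srcSum.equals(dstSum);
  }
}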
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestEncryptedTransfer, method testEncryptedRead.
private void testEncryptedRead(String algorithm, String cipherSuite,
    boolean matchLog, boolean readAfterRestart) throws IOException {
  // Set the encryption algorithm and cipher suites, but don't enable transfer
  // encryption yet.
  conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, algorithm);
  conf.set(HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuite);
  FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(SaslDataTransferServer.class));
  LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataTransferSaslUtil.class));
  try {
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  } finally {
    logs.stopCapturing();
    logs1.stopCapturing();
  }
  if (resolverClazz == null) {
    if (matchLog) {
      // Verify that client and server negotiated a cipher option.
      GenericTestUtils.assertMatches(logs.getOutput(), "Server using cipher suite");
      // Check the IOStreamPair.
      GenericTestUtils.assertMatches(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    } else {
      // Verify that client and server did not negotiate a cipher option.
      GenericTestUtils.assertDoesNotMatch(logs.getOutput(), "Server using cipher suite");
      // Check the IOStreamPair.
      GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
          "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
    }
  }
  if (readAfterRestart) {
    cluster.restartNameNode();
    fs = getFileSystem(conf);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  }
}
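The assertions on fs.getFileChecksum(TEST_PATH) show that the checksum computed before encryption was enabled still matches after the cluster restarts with encrypted transfer. For context, a minimal sketch of client-side configuration in the spirit of this test, using the same key constants plus dfs.encrypt.data.transfer; the concrete algorithm and cipher-suite values are illustrative assumptions, not recommendations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class EncryptedTransferConf {
  // Builds a Configuration with encrypted data transfer turned on.
  public static Configuration encryptedConf() {
    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
    // Example values; pick the algorithm and cipher suite appropriate for the cluster.
    conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "3des");
    conf.set(HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY,
        "AES/CTR/NoPadding");
    return conf;
  }
}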