
Example 6 with FileChecksum

Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.

The class TestViewFsFileStatusHdfs, method testGetFileChecksum.

@Test
public void testGetFileChecksum() throws IOException, URISyntaxException {
    // Create two different files in HDFS
    fileSystemTestHelper.createFile(fHdfs, someFile);
    fileSystemTestHelper.createFile(fHdfs, fileSystemTestHelper.getTestRootPath(fHdfs, someFile + "other"), 1, 512);
    // Get checksum through ViewFS
    FileChecksum viewFSCheckSum = vfs.getFileChecksum(new Path("/vfstmp/someFileForTestGetFileChecksum"));
    // Get checksum through HDFS. 
    FileChecksum hdfsCheckSum = fHdfs.getFileChecksum(new Path(someFile));
    // Get checksum of different file in HDFS
    FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(new Path(someFile + "other"));
    // Checksums of the same file (obtained through HDFS and ViewFS) should be the same.
    assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum, hdfsCheckSum);
    // Checksum of different files should be different.
    assertFalse("Some other HDFS file which should not have had the same " + "checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
}
Also used: Path (org.apache.hadoop.fs.Path), FileChecksum (org.apache.hadoop.fs.FileChecksum), Test (org.junit.Test)
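
For context, a minimal sketch (not part of the test above) of the mount-table wiring such a ViewFS test depends on: the client-side path /vfstmp must be linked to the backing HDFS directory before vfs.getFileChecksum can resolve it. The namenode URI and target paths below are placeholders, and ConfigUtil is org.apache.hadoop.fs.viewfs.ConfigUtil.

FileChecksum checksumViaViewFs(Configuration conf) throws IOException, URISyntaxException {
    // Hypothetical mount point: link the ViewFS path /vfstmp to a backing HDFS directory.
    ConfigUtil.addLink(conf, "/vfstmp", new URI("hdfs://namenode:8020/tmp"));
    // viewfs:/// resolves client paths through the mount table configured above.
    FileSystem vfs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    return vfs.getFileChecksum(new Path("/vfstmp/someFile"));
}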

Example 7 with FileChecksum

Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.

The class TestSnapshotFileLength, method testSnapshotfileLength.

/**
   * Test that we cannot read a file beyond its snapshot length
   * when accessing it via a snapshot path.
   *
   */
@Test(timeout = 300000)
public void testSnapshotfileLength() throws Exception {
    hdfs.mkdirs(sub);
    int bytesRead;
    byte[] buffer = new byte[BLOCKSIZE * 8];
    int origLen = BLOCKSIZE + 1;
    int toAppend = BLOCKSIZE;
    FSDataInputStream fis = null;
    FileStatus fileStatus = null;
    // Create and write a file.
    Path file1 = new Path(sub, file1Name);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, SEED);
    DFSTestUtil.appendFile(hdfs, file1, origLen);
    // Create a snapshot on the parent directory.
    hdfs.allowSnapshot(sub);
    hdfs.createSnapshot(sub, snapshot1);
    Path file1snap1 = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
    final FileChecksum snapChksum1 = hdfs.getFileChecksum(file1snap1);
    assertThat("file and snapshot file checksums are not equal", hdfs.getFileChecksum(file1), is(snapChksum1));
    // Append to the file.
    FSDataOutputStream out = hdfs.append(file1);
    // HDFS-8150: Fetching checksum for file under construction should fail
    try {
        hdfs.getFileChecksum(file1);
        fail("getFileChecksum should fail for files " + "with blocks under construction");
    } catch (IOException ie) {
        assertTrue(ie.getMessage().contains("Fail to get checksum, since file " + file1 + " is under construction."));
    }
    assertThat("snapshot checksum (post-open for append) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
    try {
        AppendTestUtil.write(out, 0, toAppend);
        // Test reading from snapshot of file that is open for append
        byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
        assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
        // Verify that checksum didn't change
        assertThat("snapshot checksum (post-append) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
    } finally {
        out.close();
    }
    assertThat("file and snapshot file checksums (post-close) are equal", hdfs.getFileChecksum(file1), not(snapChksum1));
    assertThat("snapshot file checksum (post-close) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
    // Make sure we can read the entire file via its non-snapshot path.
    fileStatus = hdfs.getFileStatus(file1);
    assertThat(fileStatus.getLen(), is((long) origLen + toAppend));
    fis = hdfs.open(file1);
    bytesRead = fis.read(0, buffer, 0, buffer.length);
    assertThat(bytesRead, is(origLen + toAppend));
    fis.close();
    // Try to open the file via its snapshot path.
    fis = hdfs.open(file1snap1);
    fileStatus = hdfs.getFileStatus(file1snap1);
    assertThat(fileStatus.getLen(), is((long) origLen));
    // Make sure we can only read up to the snapshot length.
    bytesRead = fis.read(0, buffer, 0, buffer.length);
    assertThat(bytesRead, is(origLen));
    fis.close();
    byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
    assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), IOException (java.io.IOException), FileChecksum (org.apache.hadoop.fs.FileChecksum), Test (org.junit.Test)
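
As an aside, SnapshotTestHelper.getSnapshotPath above simply composes HDFS's snapshot-path convention: a snapshotted file remains readable at <dir>/.snapshot/<snapshotName>/<file>. A hand-rolled sketch of the same thing, with illustrative paths and snapshot name, assuming hdfs is a DistributedFileSystem handle as in the test:

Path dir = new Path("/sub1");
// Snapshots must be allowed on the directory (an admin operation) before one is created.
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s1");
// Assuming /sub1/file1 existed when the snapshot was taken, its frozen view is:
Path snapPath = new Path(dir, ".snapshot/s1/file1");
FileChecksum frozen = hdfs.getFileChecksum(snapPath);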

Example 8 with FileChecksum

Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.

The class DistributedFileSystem, method getFileChecksum(Path, long).

@Override
public FileChecksum getFileChecksum(Path f, final long length) throws IOException {
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.GET_FILE_CHECKSUM);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FileChecksum>() {

        @Override
        public FileChecksum doCall(final Path p) throws IOException {
            return dfs.getFileChecksum(getPathName(p), length);
        }

        @Override
        public FileChecksum next(final FileSystem fs, final Path p) throws IOException {
            if (fs instanceof DistributedFileSystem) {
                return fs.getFileChecksum(p, length);
            } else {
                throw new UnsupportedFileSystemException("getFileChecksum(Path, long) is not supported by " + fs.getClass().getSimpleName());
            }
        }
    }.resolve(this, absF);
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException), IOException (java.io.IOException), FileChecksum (org.apache.hadoop.fs.FileChecksum)
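
A hedged usage sketch for this length-bounded overload: checksum only the first N bytes of each file, for example to verify the already-copied prefix of a partially transferred file. The paths are placeholders and dfs is assumed to be a DistributedFileSystem; note that HDFS's MD5-of-MD5-of-CRC checksums only compare equal when both files were written with the same block size and checksum settings.

Path src = new Path("/src/file");
Path dst = new Path("/dst/file.partial");
// Checksum only the bytes the two files are supposed to share.
long copied = dfs.getFileStatus(dst).getLen();
FileChecksum srcPrefix = dfs.getFileChecksum(src, copied);
FileChecksum dstPrefix = dfs.getFileChecksum(dst, copied);
boolean prefixIntact = srcPrefix != null && srcPrefix.equals(dstPrefix);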

Example 9 with FileChecksum

Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.

The class DistributedFileSystem, method getFileChecksum(Path).

@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.GET_FILE_CHECKSUM);
    Path absF = fixRelativePart(f);
    return new FileSystemLinkResolver<FileChecksum>() {

        @Override
        public FileChecksum doCall(final Path p) throws IOException {
            return dfs.getFileChecksum(getPathName(p), Long.MAX_VALUE);
        }

        @Override
        public FileChecksum next(final FileSystem fs, final Path p) throws IOException {
            return fs.getFileChecksum(p);
        }
    }.resolve(this, absF);
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), FileChecksum (org.apache.hadoop.fs.FileChecksum)
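
This no-length overload (internally a checksum over the whole file, hence the Long.MAX_VALUE) is what tools such as DistCp rely on when deciding whether a copy can be skipped. A minimal sketch of that comparison, assuming srcFs, dstFs, and the two paths are already set up; a null checksum means the filesystem does not expose one and should count as "unknown", not as a match:

FileChecksum srcSum = srcFs.getFileChecksum(srcPath);
FileChecksum dstSum = dstFs.getFileChecksum(dstPath);
// FileChecksum.equals compares the algorithm name and the checksum bytes.
boolean sameContent = srcSum != null && srcSum.equals(dstSum);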

Example 10 with FileChecksum

Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.

The class TestEncryptedTransfer, method testEncryptedRead.

private void testEncryptedRead(String algorithm, String cipherSuite, boolean matchLog, boolean readAfterRestart) throws IOException {
    // set encryption algorithm and cipher suites, but don't enable transfer
    // encryption yet.
    conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, algorithm);
    conf.set(HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, cipherSuite);
    FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(SaslDataTransferServer.class));
    LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(LogFactory.getLog(DataTransferSaslUtil.class));
    try {
        assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
        assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    } finally {
        logs.stopCapturing();
        logs1.stopCapturing();
    }
    if (resolverClazz == null) {
        if (matchLog) {
            // Test client and server negotiate cipher option
            GenericTestUtils.assertMatches(logs.getOutput(), "Server using cipher suite");
            // Check the IOStreamPair
            GenericTestUtils.assertMatches(logs1.getOutput(), "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
        } else {
            // Test client and server negotiate cipher option
            GenericTestUtils.assertDoesNotMatch(logs.getOutput(), "Server using cipher suite");
            // Check the IOStreamPair
            GenericTestUtils.assertDoesNotMatch(logs1.getOutput(), "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
        }
    }
    if (readAfterRestart) {
        cluster.restartNameNode();
        fs = getFileSystem(conf);
        assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
        assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
    }
}
Also used: SaslDataTransferServer (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer), LogCapturer (org.apache.hadoop.test.GenericTestUtils.LogCapturer), FileChecksum (org.apache.hadoop.fs.FileChecksum), DataTransferSaslUtil (org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil)
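
For completeness, a hedged sketch of the configuration this test exercises: the algorithm and cipher-suite keys set inside the method only take effect once transfer encryption itself is switched on, which writeUnencryptedAndThenRestartEncryptedCluster does before restarting the cluster. The values below are illustrative, taken from the same test class.

Configuration conf = new Configuration();
// Encrypt block data in transit between the client and DataNodes.
conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
conf.set(DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "3des");
conf.set(HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY, "AES/CTR/NoPadding");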

Aggregations

FileChecksum (org.apache.hadoop.fs.FileChecksum): 26
Path (org.apache.hadoop.fs.Path): 13
Test (org.junit.Test): 11
FileSystem (org.apache.hadoop.fs.FileSystem): 8
IOException (java.io.IOException): 6
ArrayList (java.util.ArrayList): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
FileStatus (org.apache.hadoop.fs.FileStatus): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2
DataInput (java.io.DataInput): 1
DataOutput (java.io.DataOutput): 1
FileNotFoundException (java.io.FileNotFoundException): 1
FileOutputStream (java.io.FileOutputStream): 1
OutputStream (java.io.OutputStream): 1
HttpURLConnection (java.net.HttpURLConnection): 1
SocketTimeoutException (java.net.SocketTimeoutException): 1
HashMap (java.util.HashMap): 1
TreeMap (java.util.TreeMap): 1