Use of org.apache.hadoop.fs.FileChecksum in project cdap by caskdata.
The class ReplicationStatusTool, method getClusterChecksumMap:
private static SortedMap<String, String> getClusterChecksumMap() throws IOException {
  FileSystem fileSystem = FileSystem.get(hConf);
  List<String> fileList = addAllFiles(fileSystem);
  SortedMap<String, String> checksumMap = new TreeMap<String, String>();
  for (String file : fileList) {
    FileChecksum fileChecksum = fileSystem.getFileChecksum(new Path(file));
    checksumMap.put(normalizedFileName(file), fileChecksum.toString());
  }
  LOG.info("Added " + checksumMap.size() + " checksums for snapshot files.");
  return checksumMap;
}
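The map above pairs each normalized snapshot file name with the string form of its checksum, which the tool later compares across clusters. As a rough illustration of that comparison step, the sketch below diffs two such maps; compareClusterChecksums is a hypothetical helper, not part of ReplicationStatusTool, and it assumes the java.util collection imports noted in the comment.

// Hypothetical helper (not part of ReplicationStatusTool): reports files whose
// checksums differ between two clusters, plus files present on only one side.
// Requires java.util.Map, java.util.SortedMap, java.util.SortedSet, java.util.TreeSet.
private static SortedSet<String> compareClusterChecksums(SortedMap<String, String> source,
                                                         SortedMap<String, String> target) {
  SortedSet<String> mismatched = new TreeSet<>();
  for (Map.Entry<String, String> entry : source.entrySet()) {
    String targetChecksum = target.get(entry.getKey());
    if (targetChecksum == null || !targetChecksum.equals(entry.getValue())) {
      mismatched.add(entry.getKey());
    }
  }
  for (String file : target.keySet()) {
    // Files that exist only on the target cluster are also flagged.
    if (!source.containsKey(file)) {
      mismatched.add(file);
    }
  }
  return mismatched;
}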
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class BaseTestHttpFSWith, method testChecksum:
private void testChecksum() throws Exception {
  if (!isLocalFS()) {
    FileSystem fs = FileSystem.get(getProxiedFSConf());
    fs.mkdirs(getProxiedFSTestDir());
    Path path = new Path(getProxiedFSTestDir(), "foo.txt");
    OutputStream os = fs.create(path);
    os.write(1);
    os.close();
    FileChecksum hdfsChecksum = fs.getFileChecksum(path);
    fs.close();
    fs = getHttpFSFileSystem();
    FileChecksum httpChecksum = fs.getFileChecksum(path);
    fs.close();
    assertEquals(httpChecksum.getAlgorithmName(), hdfsChecksum.getAlgorithmName());
    assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
    assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
  }
}
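The test compares the three pieces of state a FileChecksum exposes: algorithm name, length, and raw bytes. A minimal sketch of inspecting those fields directly, assuming an already-initialized FileSystem named fs and an existing Path named path:

// Minimal sketch: inspect the fields FileChecksum exposes. Assumes fs is an
// initialized org.apache.hadoop.fs.FileSystem and path points to an existing
// file; some FileSystem implementations return null here, so check for it.
FileChecksum checksum = fs.getFileChecksum(path);
if (checksum != null) {
  System.out.println("algorithm = " + checksum.getAlgorithmName());
  System.out.println("length    = " + checksum.getLength());
  System.out.println("bytes     = " + java.util.Arrays.toString(checksum.getBytes()));
}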
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestViewFileSystemHdfs, method testFileChecksum:
@Test
public void testFileChecksum() throws IOException {
  ViewFileSystem viewFs = (ViewFileSystem) fsView;
  Path mountDataRootPath = new Path("/data");
  String fsTargetFileName = "debug.log";
  Path fsTargetFilePath = new Path(targetTestRoot, "data/debug.log");
  Path mountDataFilePath = new Path(mountDataRootPath, fsTargetFileName);
  fileSystemTestHelper.createFile(fsTarget, fsTargetFilePath);
  FileStatus fileStatus = viewFs.getFileStatus(mountDataFilePath);
  long fileLength = fileStatus.getLen();
  FileChecksum fileChecksumViaViewFs = viewFs.getFileChecksum(mountDataFilePath);
  FileChecksum fileChecksumViaTargetFs = fsTarget.getFileChecksum(fsTargetFilePath);
  Assert.assertTrue("File checksum not matching!", fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
  fileChecksumViaViewFs = viewFs.getFileChecksum(mountDataFilePath, fileLength / 2);
  fileChecksumViaTargetFs = fsTarget.getFileChecksum(fsTargetFilePath, fileLength / 2);
  Assert.assertTrue("File checksum not matching!", fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
}
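The second half of the test uses the getFileChecksum(Path, long) overload, which checksums only the first length bytes of the file, and checks that the view delegates both variants to the target file system. As a rough sketch of the wiring such a test depends on, the following shows one way a ViewFileSystem mount could be configured so that the view path /data resolves to a directory on a target HDFS cluster; the NameNode URI is a placeholder.

// Minimal sketch, not taken from the test above: configure a ViewFileSystem
// mount so that the view path /data resolves to a directory on a target HDFS
// cluster, then fetch a checksum through the view.
Configuration conf = new Configuration();
conf.set("fs.viewfs.mounttable.default.link./data", "hdfs://namenode:8020/target/data");
FileSystem viewFs = FileSystem.get(java.net.URI.create("viewfs://default/"), conf);
FileChecksum checksum = viewFs.getFileChecksum(new Path("/data/debug.log"));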
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestDFSClientRetries, method testGetFileChecksum:
@Test
public void testGetFileChecksum() throws Exception {
  final String f = "/testGetFileChecksum";
  final Path p = new Path(f);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  try {
    cluster.waitActive();
    // create a file
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, p, 1L << 20, (short) 3, 20100402L);
    // get checksum
    final FileChecksum cs1 = fs.getFileChecksum(p);
    assertTrue(cs1 != null);
    // stop the first datanode
    final List<LocatedBlock> locatedblocks = DFSClient.callGetBlockLocations(
        cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE).getLocatedBlocks();
    final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
    cluster.stopDataNode(first.getXferAddr());
    // get checksum again
    final FileChecksum cs2 = fs.getFileChecksum(p);
    assertEquals(cs1, cs2);
  } finally {
    cluster.shutdown();
  }
}
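The two checksums stay equal after a datanode is stopped because the checksum is derived from the file's data and block layout, not from which replica serves it; with only one of three replicas gone, the client can still compute it from the remaining datanodes. The same call is commonly used to verify copies, as in the hedged sketch below; copyAndVerify is a hypothetical helper, and both file systems must report comparable checksums for the equality test to be meaningful.

// Hypothetical helper: copies a file and verifies the copy with getFileChecksum.
// Both file systems must use the same checksum algorithm and block/CRC layout
// for the comparison to be meaningful.
static boolean copyAndVerify(FileSystem srcFs, Path src, FileSystem dstFs, Path dst,
                             Configuration conf) throws IOException {
  FileUtil.copy(srcFs, src, dstFs, dst, false, conf);
  FileChecksum srcSum = srcFs.getFileChecksum(src);
  FileChecksum dstSum = dstFs.getFileChecksum(dst);
  return srcSum != null && srcSum.equals(dstSum);
}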
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestEncryptedTransfer, method testLongLivedReadClientAfterRestart:
@Test
public void testLongLivedReadClientAfterRestart() throws IOException {
  FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  // Restart the NN and DN, after which the client's encryption key will no
  // longer be valid.
  cluster.restartNameNode();
  assertTrue(cluster.restartDataNode(0));
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
}
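The checksum comparison confirms that the data read over the encrypted transfer matches what was written before encryption was enabled. The body of writeUnencryptedAndThenRestartEncryptedCluster is not shown above; as a minimal sketch of the standard configuration flag involved, enabling encrypted data transfer looks roughly like this (the cipher choice is optional and illustrative):

// Minimal sketch: turning on encrypted data transfer in HDFS configuration.
// The NameNode and DataNodes must be restarted for the flag to take effect.
Configuration conf = new Configuration();
conf.setBoolean("dfs.encrypt.data.transfer", true);
// Optional: pick the wire-encryption cipher (e.g. "3des" or "rc4").
conf.set("dfs.encrypt.data.transfer.algorithm", "3des");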