Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestEncryptedTransfer, method testLongLivedClient.
@Test
public void testLongLivedClient() throws IOException, InterruptedException {
  FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
  BlockTokenSecretManager btsm = cluster.getNamesystem().getBlockManager()
      .getBlockTokenSecretManager();
  btsm.setKeyUpdateIntervalForTesting(2 * 1000);
  btsm.setTokenLifetime(2 * 1000);
  btsm.clearAllKeysForTesting();
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  // Sleep for 15 seconds, after which the encryption key will no longer be
  // valid. It needs to be a few multiples of the block token lifetime,
  // since several block tokens are valid at any given time (the current
  // and the last two, by default).
  LOG.info("Sleeping so that encryption keys expire...");
  Thread.sleep(15 * 1000);
  LOG.info("Done sleeping.");
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
}
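
The test above relies on FileChecksum.equals() holding across repeated reads of the same data. A minimal sketch of the same idea outside a MiniDFSCluster test, for example to verify a copy, might look like the following; srcPath and dstPath are hypothetical placeholders, and the null check covers FileSystem implementations that do not support checksums.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch (not from the Hadoop sources): compare the checksums of two files,
// e.g. a source and its copy. FileChecksum.equals() compares the algorithm
// name and the raw checksum bytes.
public class ChecksumCompare {
  public static boolean sameContents(Configuration conf, Path srcPath, Path dstPath)
      throws IOException {
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem dstFs = dstPath.getFileSystem(conf);
    FileChecksum src = srcFs.getFileChecksum(srcPath);
    FileChecksum dst = dstFs.getFileChecksum(dstPath);
    // getFileChecksum may return null on file systems without checksum
    // support; treat that as "cannot verify" rather than as a match.
    if (src == null || dst == null) {
      return false;
    }
    return src.equals(dst);
  }
}
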
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestDecommissionWithStriped, method testFileChecksumAfterDecommission.
/**
 * Tests to verify that the file checksum can still be computed after the
 * decommission operation.
 *
 * Below is the block indices list after the decommission. ' represents
 * a decommissioned node index.
 *
 * 0, 2, 3, 4, 5, 6, 7, 8, 1, 1'
 *
 * Here, this list contains duplicated blocks and does not maintain any
 * order.
 */
@Test(timeout = 120000)
public void testFileChecksumAfterDecommission() throws Exception {
  LOG.info("Starting test testFileChecksumAfterDecommission");
  final Path ecFile = new Path(ecDir, "testFileChecksumAfterDecommission");
  int writeBytes = cellSize * dataBlocks;
  writeStripedFile(dfs, ecFile, writeBytes);
  Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
  FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
  final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
  LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
      .get(0);
  DatanodeInfo[] dnLocs = lb.getLocations();
  assertEquals(dataBlocks + parityBlocks, dnLocs.length);
  int decommNodeIndex = 1;
  // add the node which will be decommissioning
  decommisionNodes.add(dnLocs[decommNodeIndex]);
  decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
  assertEquals(decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
  assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
  StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
      null, blockGroupSize);
  // verify checksum
  FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
  LOG.info("fileChecksum1:" + fileChecksum1);
  LOG.info("fileChecksum2:" + fileChecksum2);
  Assert.assertTrue("Checksum mismatches!", fileChecksum1.equals(fileChecksum2));
}
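
The length-limited overload used above, FileSystem#getFileChecksum(Path, long), also lends itself to snapshotting a checksum before a maintenance step and comparing it afterwards. The sketch below generalizes that pattern; the Configuration, file path, and the Runnable standing in for the maintenance operation are assumptions for this example, not Hadoop API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch (assumed names): snapshot a ranged checksum, run a maintenance step,
// then verify the file contents are unchanged. The Runnable stands in for
// whatever operation is performed, e.g. decommissioning a DataNode.
public final class ChecksumGuard {
  public static void assertUnchangedAcross(Configuration conf, Path filePath,
      Runnable adminOperation) throws IOException {
    FileSystem fs = filePath.getFileSystem(conf);
    long len = fs.getFileStatus(filePath).getLen();
    FileChecksum before = fs.getFileChecksum(filePath, len); // first len bytes
    adminOperation.run();
    FileChecksum after = fs.getFileChecksum(filePath, len);
    if (!before.equals(after)) {
      throw new IllegalStateException(
          "Checksum changed: " + before + " vs " + after);
    }
  }
}
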
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestDistributedFileSystem, method testFileChecksum.
@Test
public void testFileChecksum() throws Exception {
  final long seed = RAN.nextLong();
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);
  final Configuration conf = getTestConfiguration();
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final FileSystem hdfs = cluster.getFileSystem();
  final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  final UserGroupInformation current = UserGroupInformation.getCurrentUser();
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      current.getShortUserName() + "x", new String[] { "user" });
  try {
    hdfs.getFileChecksum(new Path("/test/TestNonExistingFile"));
    fail("Expecting FileNotFoundException");
  } catch (FileNotFoundException e) {
    assertTrue("Not throwing the intended exception message",
        e.getMessage().contains("File does not exist: /test/TestNonExistingFile"));
  }
  try {
    Path path = new Path("/test/TestExistingDir/");
    hdfs.mkdirs(path);
    hdfs.getFileChecksum(path);
    fail("Expecting FileNotFoundException");
  } catch (FileNotFoundException e) {
    assertTrue("Not throwing the intended exception message",
        e.getMessage().contains("Path is not a file: /test/TestExistingDir"));
  }
  //webhdfs
  final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
  System.out.println("webhdfsuri=" + webhdfsuri);
  final FileSystem webhdfs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
      return new Path(webhdfsuri).getFileSystem(conf);
    }
  });
  final Path dir = new Path("/filechecksum");
  final int block_size = 1024;
  final int buffer_size =
      conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
  conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  //try different number of blocks
  for (int n = 0; n < 5; n++) {
    //generate random data
    final byte[] data =
        new byte[RAN.nextInt(block_size / 2 - 1) + n * block_size + 1];
    RAN.nextBytes(data);
    System.out.println("data.length=" + data.length);
    //write data to a file
    final Path foo = new Path(dir, "foo" + n);
    {
      final FSDataOutputStream out =
          hdfs.create(foo, false, buffer_size, (short) 2, block_size);
      out.write(data);
      out.close();
    }
    //compute checksum
    final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo);
    System.out.println("hdfsfoocs=" + hdfsfoocs);
    //webhdfs
    final FileChecksum webhdfsfoocs = webhdfs.getFileChecksum(foo);
    System.out.println("webhdfsfoocs=" + webhdfsfoocs);
    final Path webhdfsqualified = new Path(webhdfsuri + dir, "foo" + n);
    final FileChecksum webhdfs_qfoocs = webhdfs.getFileChecksum(webhdfsqualified);
    System.out.println("webhdfs_qfoocs=" + webhdfs_qfoocs);
    //create a zero byte file
    final Path zeroByteFile = new Path(dir, "zeroByteFile" + n);
    {
      final FSDataOutputStream out =
          hdfs.create(zeroByteFile, false, buffer_size, (short) 2, block_size);
      out.close();
    }
    //write another file
    final Path bar = new Path(dir, "bar" + n);
    {
      final FSDataOutputStream out =
          hdfs.create(bar, false, buffer_size, (short) 2, block_size);
      out.write(data);
      out.close();
    }
    {
      final FileChecksum zeroChecksum = hdfs.getFileChecksum(zeroByteFile);
      final String magicValue =
          "MD5-of-0MD5-of-0CRC32:70bc8f4b72a86921468bf8e8441dce51";
      // verify the magic val for zero byte files
      assertEquals(magicValue, zeroChecksum.toString());
      //verify checksums for empty file and 0 request length
      final FileChecksum checksumWith0 = hdfs.getFileChecksum(bar, 0);
      assertEquals(zeroChecksum, checksumWith0);
      //verify checksum
      final FileChecksum barcs = hdfs.getFileChecksum(bar);
      final int barhashcode = barcs.hashCode();
      assertEquals(hdfsfoocs.hashCode(), barhashcode);
      assertEquals(hdfsfoocs, barcs);
      //webhdfs
      assertEquals(webhdfsfoocs.hashCode(), barhashcode);
      assertEquals(webhdfsfoocs, barcs);
      assertEquals(webhdfs_qfoocs.hashCode(), barhashcode);
      assertEquals(webhdfs_qfoocs, barcs);
    }
    hdfs.setPermission(dir, new FsPermission((short) 0));
    {
      //test permission error on webhdfs
      try {
        webhdfs.getFileChecksum(webhdfsqualified);
        fail();
      } catch (IOException ioe) {
        FileSystem.LOG.info("GOOD: getting an exception", ioe);
      }
    }
    hdfs.setPermission(dir, new FsPermission((short) 0777));
  }
  cluster.shutdown();
}
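
The loop above also checks that the native HDFS client and WebHDFS report comparable checksums for the same file. A standalone sketch of that comparison is shown below; the NameNode addresses and the file path (nnRpcAddr, nnHttpAddr, /user/test/data) are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;

// Sketch (assumed addresses and path): fetch the same file's checksum over
// the native HDFS client and over WebHDFS and compare the results.
public class HdfsVsWebhdfsChecksum {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path viaHdfs = new Path("hdfs://nnRpcAddr/user/test/data");        // hypothetical
    Path viaWebhdfs = new Path("webhdfs://nnHttpAddr/user/test/data"); // hypothetical
    FileChecksum a = viaHdfs.getFileSystem(conf).getFileChecksum(viaHdfs);
    FileChecksum b = viaWebhdfs.getFileSystem(conf).getFileChecksum(viaWebhdfs);
    System.out.println("hdfs:    " + a);
    System.out.println("webhdfs: " + b);
    System.out.println("equal:   " + (a != null && a.equals(b)));
  }
}
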
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestFileChecksum, method testStripedFileChecksum.
private void testStripedFileChecksum(int range1, int range2) throws Exception {
  FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1, range1, false);
  FileChecksum stripedFileChecksum2 = getFileChecksum(stripedFile2, range1, false);
  FileChecksum stripedFileChecksum3 = getFileChecksum(stripedFile2, range2, false);
  LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
  LOG.info("stripedFileChecksum2:" + stripedFileChecksum2);
  LOG.info("stripedFileChecksum3:" + stripedFileChecksum3);
  Assert.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2));
  if (range1 >= 0 && range1 != range2) {
    Assert.assertFalse(stripedFileChecksum1.equals(stripedFileChecksum3));
  }
}
Use of org.apache.hadoop.fs.FileChecksum in project hadoop by apache.
The class TestFileChecksum, method testStripedFileChecksumWithMissedDataBlocks2.
@Test(timeout = 90000)
public void testStripedFileChecksumWithMissedDataBlocks2() throws Exception {
  prepareTestFiles(fileSize, new String[] { stripedFile1, stripedFile2 });
  FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1, -1, false);
  FileChecksum stripedFileChecksum2 = getFileChecksum(stripedFile2, -1, false);
  FileChecksum stripedFileChecksum2Recon = getFileChecksum(stripedFile2, -1, true);
  LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
  LOG.info("stripedFileChecksum2:" + stripedFileChecksum2);
  LOG.info("stripedFileChecksum2Recon:" + stripedFileChecksum2Recon);
  Assert.assertTrue("Checksum mismatches!",
      stripedFileChecksum1.equals(stripedFileChecksum2));
  Assert.assertTrue("Checksum mismatches!",
      stripedFileChecksum1.equals(stripedFileChecksum2Recon));
  Assert.assertTrue("Checksum mismatches!",
      stripedFileChecksum2.equals(stripedFileChecksum2Recon));
}
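
A design note when comparing FileChecksum values across clusters rather than within one: for the default MD5-of-MD5-of-CRC checksum, the algorithm name encodes the bytes-per-CRC and CRCs-per-block settings, so files with identical contents can still produce unequal checksums if the clusters use different block-size or checksum settings. A small sketch for inspecting this before comparing; fs and path are placeholders supplied by the caller.

import java.io.IOException;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch (assumed fs/path): log the checksum's algorithm name and size
// before comparing it with a checksum obtained from another cluster.
public class ChecksumInfo {
  public static void describe(FileSystem fs, Path path) throws IOException {
    FileChecksum cs = fs.getFileChecksum(path);
    if (cs == null) {
      System.out.println(path + ": checksums not supported by " + fs.getUri());
      return;
    }
    System.out.println("algorithm = " + cs.getAlgorithmName());
    System.out.println("length    = " + cs.getLength() + " bytes");
  }
}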