Use of org.apache.hadoop.tools.CopyListingFileStatus in project hadoop by apache.
The class TestCopyMapper, method testFailCopyWithAccessControlException.
@Test(timeout = 40000)
public void testFailCopyWithAccessControlException() {
  try {
    deleteState();
    createSourceData();
    UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
    final CopyMapper copyMapper = new CopyMapper();
    final StubContext stubContext = tmpUser.doAs(new PrivilegedAction<StubContext>() {
      @Override
      public StubContext run() {
        try {
          return new StubContext(getConfiguration(), null, 0);
        } catch (Exception e) {
          LOG.error("Exception encountered ", e);
          throw new RuntimeException(e);
        }
      }
    });
    EnumSet<DistCpOptions.FileAttribute> preserveStatus =
        EnumSet.allOf(DistCpOptions.FileAttribute.class);
    preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
    preserveStatus.remove(DistCpOptions.FileAttribute.XATTR);
    final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
        stubContext.getContext();
    context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
        DistCpUtils.packAttributes(preserveStatus));
    touchFile(SOURCE_PATH + "/src/file");
    OutputStream out = cluster.getFileSystem().create(new Path(TARGET_PATH + "/src/file"));
    out.write("hello world".getBytes());
    out.close();
    cluster.getFileSystem().setPermission(new Path(SOURCE_PATH + "/src/file"),
        new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
    cluster.getFileSystem().setPermission(new Path(TARGET_PATH + "/src/file"),
        new FsPermission(FsAction.READ, FsAction.READ, FsAction.READ));
    final FileSystem tmpFS = tmpUser.doAs(new PrivilegedAction<FileSystem>() {
      @Override
      public FileSystem run() {
        try {
          return FileSystem.get(configuration);
        } catch (IOException e) {
          LOG.error("Exception encountered ", e);
          Assert.fail("Test failed: " + e.getMessage());
          throw new RuntimeException("Test ought to fail here");
        }
      }
    });
    tmpUser.doAs(new PrivilegedAction<Integer>() {
      @Override
      public Integer run() {
        try {
          copyMapper.setup(context);
          copyMapper.map(new Text("/src/file"),
              new CopyListingFileStatus(tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),
              context);
          Assert.fail("Didn't expect the file to be copied");
        } catch (AccessControlException ignore) {
        } catch (Exception e) {
          // The AccessControlException may arrive wrapped by the copy machinery,
          // so walk the nested causes before treating this as a real failure.
          if (e.getCause() == null || e.getCause().getCause() == null ||
              !(e.getCause().getCause() instanceof AccessControlException)) {
            throw new RuntimeException(e);
          }
        }
        return null;
      }
    });
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Test failed: " + e.getMessage());
  }
}
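The preserve-status handling above works by packing the attribute set into a single string under DistCpConstants.CONF_LABEL_PRESERVE_STATUS, which the CopyMapper later unpacks from the job configuration. A minimal sketch of that round trip, assuming the same DistCpUtils helpers used in the test (the class name and the main-method wrapper are illustrative, not part of the Hadoop source):

import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.DistCpConstants;
import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
import org.apache.hadoop.tools.util.DistCpUtils;

public class PreserveStatusRoundTrip {
  public static void main(String[] args) {
    // Pack everything except ACL/XATTR, mirroring the test above.
    EnumSet<FileAttribute> attrs = EnumSet.allOf(FileAttribute.class);
    attrs.remove(FileAttribute.ACL);
    attrs.remove(FileAttribute.XATTR);

    Configuration conf = new Configuration();
    conf.set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
        DistCpUtils.packAttributes(attrs));

    // The mapper later reads the same label back out of the job configuration.
    EnumSet<FileAttribute> restored = DistCpUtils.unpackAttributes(
        conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS));
    System.out.println(restored);
  }
}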
Use of org.apache.hadoop.tools.CopyListingFileStatus in project hadoop by apache.
The class TestCopyMapper, method doTestIgnoreFailures.
private void doTestIgnoreFailures(boolean ignoreFailures) {
  try {
    deleteState();
    createSourceData();
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
        stubContext.getContext();
    Configuration configuration = context.getConfiguration();
    configuration.setBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), ignoreFailures);
    configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(), true);
    configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), true);
    copyMapper.setup(context);
    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      if (!fileStatus.isDirectory()) {
        fs.delete(path, true);
        copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
            new CopyListingFileStatus(fileStatus), context);
      }
    }
    if (ignoreFailures) {
      for (Text value : stubContext.getWriter().values()) {
        Assert.assertTrue(value.toString() + " is not skipped",
            value.toString().startsWith("FAIL:"));
      }
    }
    Assert.assertTrue("There should have been an exception.", ignoreFailures);
  } catch (Exception e) {
    // Log the failure before asserting so the stack trace is visible either way.
    e.printStackTrace();
    Assert.assertTrue("Unexpected exception: " + e.getMessage(), !ignoreFailures);
  }
}
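Since doTestIgnoreFailures is a private helper, both branches need @Test entry points to drive them. A hedged sketch of what such wrappers could look like (the method names are illustrative, not taken from the Hadoop source):

@Test(timeout = 40000)
public void testCopyFailureWithoutIgnore() {
  // Without -i, deleting sources mid-run should surface an exception.
  doTestIgnoreFailures(false);
}

@Test(timeout = 40000)
public void testCopyFailureWithIgnore() {
  // With -i, failed copies are only recorded as "FAIL:" records by the mapper.
  doTestIgnoreFailures(true);
}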
Use of org.apache.hadoop.tools.CopyListingFileStatus in project hadoop by apache.
The class TestCopyMapper, method testPreserveBlockSizeAndReplicationImpl.
private void testPreserveBlockSizeAndReplicationImpl(boolean preserve) {
  try {
    deleteState();
    createSourceData();
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
        stubContext.getContext();
    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes =
        EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    if (preserve) {
      fileAttributes.add(DistCpOptions.FileAttribute.BLOCKSIZE);
      fileAttributes.add(DistCpOptions.FileAttribute.REPLICATION);
    }
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));
    copyMapper.setup(context);
    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
          new CopyListingFileStatus(fileStatus), context);
    }
    // Check whether block-size/replication were carried over to the target,
    // depending on the preserve flag.
    for (Path path : pathList) {
      final Path targetPath = new Path(path.toString().replaceAll(SOURCE_PATH, TARGET_PATH));
      final FileStatus source = fs.getFileStatus(path);
      final FileStatus target = fs.getFileStatus(targetPath);
      if (!source.isDirectory()) {
        Assert.assertTrue(preserve || source.getBlockSize() != target.getBlockSize());
        Assert.assertTrue(preserve || source.getReplication() != target.getReplication());
        Assert.assertTrue(!preserve || source.getBlockSize() == target.getBlockSize());
        Assert.assertTrue(!preserve || source.getReplication() == target.getReplication());
      }
    }
  } catch (Exception e) {
    // Log the failure before failing so the stack trace is not lost.
    e.printStackTrace();
    Assert.fail("Unexpected exception: " + e.getMessage());
  }
}
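The four implication-style assertions above encode "preserve implies equal, not-preserve implies different". An equivalent but more explicit formulation of the per-file check, shown here only as a sketch (it is not the code the test actually uses, and assertNotEquals requires JUnit 4.11 or later):

if (preserve) {
  Assert.assertEquals(source.getBlockSize(), target.getBlockSize());
  Assert.assertEquals(source.getReplication(), target.getReplication());
} else {
  Assert.assertNotEquals(source.getBlockSize(), target.getBlockSize());
  Assert.assertNotEquals(source.getReplication(), target.getReplication());
}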
Use of org.apache.hadoop.tools.CopyListingFileStatus in project hadoop by apache.
The class TestCopyMapper, method testFileToDir.
@Test(timeout = 40000)
public void testFileToDir() {
  try {
    deleteState();
    createSourceData();
    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
        stubContext.getContext();
    touchFile(SOURCE_PATH + "/src/file");
    mkdirs(TARGET_PATH + "/src/file");
    try {
      copyMapper.setup(context);
      copyMapper.map(new Text("/src/file"),
          new CopyListingFileStatus(fs.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),
          context);
    } catch (IOException e) {
      Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
    }
  } catch (Exception e) {
    LOG.error("Exception encountered ", e);
    Assert.fail("Test failed: " + e.getMessage());
  }
}
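Note that the inner try/catch above passes silently if no exception is thrown at all. A stricter variant, written as a drop-in replacement for that inner block (an assumption about the intended behaviour, not the original test code), would flag the missing IOException explicitly:

boolean threw = false;
try {
  copyMapper.setup(context);
  copyMapper.map(new Text("/src/file"),
      new CopyListingFileStatus(fs.getFileStatus(new Path(SOURCE_PATH + "/src/file"))),
      context);
} catch (IOException e) {
  // Copying a file onto an existing directory is expected to be rejected.
  threw = true;
  Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
}
Assert.assertTrue("Expected copying a file onto a directory to fail", threw);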
Use of org.apache.hadoop.tools.CopyListingFileStatus in project hadoop by apache.
The class TestDistCpUtils, method testPreserveTimestampOnDirectory.
@Test
public void testPreserveTimestampOnDirectory() throws IOException {
  FileSystem fs = FileSystem.get(config);
  EnumSet<FileAttribute> attributes = EnumSet.of(FileAttribute.TIMES);
  Path dst = new Path("/tmp/abc");
  Path src = new Path("/tmp/src");
  createDirectory(fs, src);
  createDirectory(fs, dst);
  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));
  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);
  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));
  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertFalse(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertFalse(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertFalse(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
}
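For contrast with the TIMES-only case above, here is a hedged sketch of the same DistCpUtils.preserve call asked to carry permission and ownership as well, reusing fs, dst, and srcStatus from the test (the chosen attribute set is illustrative, and changing ownership relies on the same superuser rights the test already uses for setOwner):

EnumSet<FileAttribute> attrs = EnumSet.of(
    FileAttribute.TIMES, FileAttribute.PERMISSION,
    FileAttribute.USER, FileAttribute.GROUP);
DistCpUtils.preserve(fs, dst, srcStatus, attrs, false);

CopyListingFileStatus updated = new CopyListingFileStatus(fs.getFileStatus(dst));
// With these attributes requested, the target should now match the source.
Assert.assertEquals(srcStatus.getPermission(), updated.getPermission());
Assert.assertEquals(srcStatus.getOwner(), updated.getOwner());
Assert.assertEquals(srcStatus.getGroup(), updated.getGroup());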