Use of org.apache.hadoop.hive.shims.HadoopShims in project hive by apache.
From class TestJdbcWithMiniHS2ErasureCoding, method addErasurePolicy:
/**
 * Add an Erasure Coding Policy to a Path.
 */
private static void addErasurePolicy(MiniDFSShim dfs, String pathString, String policyName) throws IOException {
  HadoopShims hadoopShims = ShimLoader.getHadoopShims();
  // conf is a configuration field of the enclosing test class, not a parameter here
  HdfsErasureCodingShim erasureCodingShim = hadoopShims.createHdfsErasureCodingShim(dfs.getFileSystem(), conf);
  erasureCodingShim.enableErasureCodingPolicy(policyName);
  Path fsRoot = new Path(pathString);
  erasureCodingShim.setErasureCodingPolicy(fsRoot, policyName);
  HadoopShims.HdfsFileErasureCodingPolicy erasureCodingPolicy = erasureCodingShim.getErasureCodingPolicy(fsRoot);
  assertEquals(policyName, erasureCodingPolicy.getName());
}
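For context, a call site might look like the following minimal sketch; the cluster setup and the RS-6-3-1024k policy name are illustrative assumptions, not part of the snippet above (getMiniDfs mirrors the signature used in initEncryptionShim further down):

// Hypothetical usage: bring up a mini DFS cluster via the shims, then apply a
// built-in HDFS erasure coding policy to a scratch directory.
HadoopShims shims = ShimLoader.getHadoopShims();
MiniDFSShim dfs = shims.getMiniDfs(conf, 4, true, null);
addErasurePolicy(dfs, "/tmp/ec-test", "RS-6-3-1024k");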
Use of org.apache.hadoop.hive.shims.HadoopShims in project hive by apache.
From class FileUtils, method checkDeletePermission:
/**
 * Checks if delete can be performed on the given path by the given user.
 * If the file does not exist, it just returns without throwing an exception.
 * @param path
 * @param conf
 * @param user
 * @throws AccessControlException
 * @throws InterruptedException
 * @throws Exception
 */
public static void checkDeletePermission(Path path, Configuration conf, String user)
    throws AccessControlException, InterruptedException, Exception {
  if (path == null) {
    // no file/dir to be deleted
    return;
  }
  final FileSystem fs = path.getFileSystem(conf);
  // check user has write permissions on the parent dir
  FileStatus stat = null;
  try {
    stat = fs.getFileStatus(path);
  } catch (FileNotFoundException e) {
    // ignore
  }
  if (stat == null) {
    // no file/dir to be deleted
    return;
  }
  FileUtils.checkFileAccessWithImpersonation(fs, stat, FsAction.WRITE, user);
  HadoopShims shims = ShimLoader.getHadoopShims();
  if (!shims.supportStickyBit()) {
    // sticky bit not supported
    return;
  }
  // check if sticky bit is set on the parent dir
  FileStatus parStatus = fs.getFileStatus(path.getParent());
  if (!shims.hasStickyBit(parStatus.getPermission())) {
    // no further checks needed
    return;
  }
  // check if user is owner of parent dir
  if (parStatus.getOwner().equals(user)) {
    return;
  }
  // check if user is owner of current dir/file
  FileStatus childStatus = fs.getFileStatus(path);
  if (childStatus.getOwner().equals(user)) {
    return;
  }
  String msg = String.format("Permission Denied: User %s can't delete %s because sticky bit is"
      + " set on the parent dir and user does not own this file or its parent", user, path);
  throw new IOException(msg);
}
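As a usage illustration, a caller could gate a delete on this check; the path and user variables below are hypothetical:

// Hypothetical caller: verify the requesting user may delete the directory
// before actually deleting it; the check throws if permission is denied.
try {
  FileUtils.checkDeletePermission(targetPath, conf, requestingUser);
} catch (Exception e) {
  throw new IOException("Delete denied for " + requestingUser + " on " + targetPath, e);
}
fs.delete(targetPath, true);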
Use of org.apache.hadoop.hive.shims.HadoopShims in project hive by apache.
From class ErasureProcessor, method getErasureShim:
/**
 * Get an instance of HdfsErasureCodingShim from a config.
 */
public static HadoopShims.HdfsErasureCodingShim getErasureShim(Configuration config) throws IOException {
  HadoopShims hadoopShims = ShimLoader.getHadoopShims();
  FileSystem fileSystem = FileSystem.get(config);
  return hadoopShims.createHdfsErasureCodingShim(fileSystem, config);
}
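A minimal sketch of how this factory might be used, reusing the shim methods shown in the first snippet; the warehouse path is an assumption:

// Hypothetical usage: look up the erasure coding policy on a directory.
HadoopShims.HdfsErasureCodingShim ecShim = ErasureProcessor.getErasureShim(conf);
Path dir = new Path("/user/hive/warehouse");
HadoopShims.HdfsFileErasureCodingPolicy policy = ecShim.getErasureCodingPolicy(dir);
if (policy != null) { // assumed to be null when no policy is set
  System.out.println("EC policy on " + dir + ": " + policy.getName());
}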
Use of org.apache.hadoop.hive.shims.HadoopShims in project hive by apache.
From class TestFileUtils, method testCopyWithDistCpAs:
@Test
public void testCopyWithDistCpAs() throws IOException {
  Path copySrc = new Path("copySrc");
  Path copyDst = new Path("copyDst");
  HiveConf conf = new HiveConf(TestFileUtils.class);
  FileSystem fs = copySrc.getFileSystem(conf);
  String doAsUser = conf.getVar(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER);
  UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(doAsUser, UserGroupInformation.getLoginUser());
  HadoopShims shims = mock(HadoopShims.class);
  when(shims.runDistCpAs(Collections.singletonList(copySrc), copyDst, conf, proxyUser)).thenReturn(true);
  when(shims.runDistCp(Collections.singletonList(copySrc), copyDst, conf)).thenReturn(false);
  // doAs when asked
  Assert.assertTrue(FileUtils.distCp(fs, Collections.singletonList(copySrc), copyDst, false, proxyUser, conf, shims));
  verify(shims).runDistCpAs(Collections.singletonList(copySrc), copyDst, conf, proxyUser);
  // don't doAs when not asked
  Assert.assertFalse(FileUtils.distCp(fs, Collections.singletonList(copySrc), copyDst, true, null, conf, shims));
  verify(shims).runDistCp(Collections.singletonList(copySrc), copyDst, conf);
  // doAs combined with delete-source should eventually be supported; for now a
  // check rejects the combination, so if doAs is set, delete source must be false.
  try {
    FileUtils.distCp(fs, Collections.singletonList(copySrc), copyDst, true, proxyUser, conf, shims);
    Assert.fail("Should throw IOException as doAs is called with delete source set to true");
  } catch (IOException e) {
    Assert.assertTrue(e.getMessage().equalsIgnoreCase("Distcp is called with doAsUser and delete source set as true"));
  }
}
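The guard exercised by the final try/catch presumably looks something like the following inside FileUtils.distCp; this is a reconstruction from the test's expectations, not the actual Hive source:

// Reconstructed (hypothetical) guard: reject doAs combined with deleteSource
// until impersonated deletes are handled correctly.
if (proxyUser != null && deleteSource) {
  throw new IOException("Distcp is called with doAsUser and delete source set as true");
}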
Use of org.apache.hadoop.hive.shims.HadoopShims in project hive by apache.
From class TestHCatLoaderEncryption, method initEncryptionShim:
void initEncryptionShim(HiveConf conf) throws IOException {
  FileSystem fs;
  HadoopShims shims = ShimLoader.getHadoopShims();
  conf.set(SECURITY_KEY_PROVIDER_URI_NAME, getKeyProviderURI());
  int numberOfDataNodes = 4;
  dfs = shims.getMiniDfs(conf, numberOfDataNodes, true, null);
  fs = dfs.getFileSystem();
  // set up a java key provider for encrypted hdfs cluster
  shims.createHdfsEncryptionShim(fs, conf);
}
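After this setup, a test would typically create a key and an encryption zone through the returned shim. A sketch under stated assumptions: the method names below are recalled from the HadoopShims.HdfsEncryptionShim interface, and the key and path names are made up for illustration.

// Hypothetical follow-up: create a key, make an encryption zone over a fresh
// directory, and verify the zone is reported as encrypted.
HadoopShims.HdfsEncryptionShim encryptionShim = shims.createHdfsEncryptionShim(fs, conf);
encryptionShim.createKey("test_key", 128);
Path zone = new Path("/encrypted_zone");
fs.mkdirs(zone);
encryptionShim.createEncryptionZone(zone, "test_key");
assertTrue(encryptionShim.isPathEncrypted(zone));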