Use of org.talend.designer.hdfsbrowse.exceptions.HadoopServerException in project tbd-studio-se by Talend.
Class ExtractHDFSSchemaManager, method getHDFSFilePath:
private Object getHDFSFilePath(HDFSConnectionBean connection, ClassLoader classLoader, String path) throws HadoopServerException {
    Object pathObj = null;
    try {
        Object fileSystem = HadoopServerUtil.getDFS(connection, classLoader);
        if (fileSystem == null) {
            return null;
        }
        pathObj = ReflectionUtils.newInstance("org.apache.hadoop.fs.Path", classLoader, new Object[] { path }); //$NON-NLS-1$
    } catch (Exception e) {
        throw new HadoopServerException(e);
    }
    return pathObj;
}
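As a usage sketch, the returned path object can feed further reflective FileSystem calls. The helper below is hypothetical, not part of ExtractHDFSSchemaManager; it assumes it lives in the same class (so it can call the private method above) and that classLoader is non-null:

// Hypothetical helper following the same reflective pattern as the method above.
private boolean hdfsPathExists(HDFSConnectionBean connection, ClassLoader classLoader, String path) throws HadoopServerException {
    try {
        Object fileSystem = HadoopServerUtil.getDFS(connection, classLoader);
        Object pathObj = getHDFSFilePath(connection, classLoader, path);
        if (fileSystem == null || pathObj == null) {
            return false;
        }
        // FileSystem.exists(Path), invoked reflectively with an explicit Path parameter type,
        // mirroring the explicit-type calls used elsewhere in this listing
        Class<?> pathClass = Class.forName("org.apache.hadoop.fs.Path", true, classLoader); //$NON-NLS-1$
        return (Boolean) ReflectionUtils.invokeMethod(fileSystem, "exists", new Object[] { pathObj }, pathClass); //$NON-NLS-1$
    } catch (Exception e) {
        throw new HadoopServerException(e);
    }
}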
Use of org.talend.designer.hdfsbrowse.exceptions.HadoopServerException in project tbd-studio-se by Talend.
Class CheckedWorkUnit, method execute:
@Override
public Object execute() throws HadoopServerException {
    Object result = null;
    int to = DEFAULT_TIMEOUT;
    if (timeout != null) {
        to = timeout;
    }
    Callable<Object> callable = getCallable();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<Object> future = executor.submit(callable);
    try {
        result = future.get(to, TimeUnit.SECONDS);
    } catch (Exception e) {
        future.cancel(true);
        throw new HadoopServerException(e);
    }
    return result;
}
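For context, a sketch of how a concrete work unit might plug into this timeout harness. The subclass name and the body of getCallable are illustrative assumptions, not project code; it is also assumed that CheckedWorkUnit declares getCallable() as an overridable hook and timeout as an Integer field:

// Illustrative subclass; runs a reflective FileSystem call under the timeout above.
public class ListRootWorkUnit extends CheckedWorkUnit {

    private final Object fileSystem; // reflective org.apache.hadoop.fs.FileSystem handle

    public ListRootWorkUnit(Object fileSystem) {
        this.fileSystem = fileSystem;
    }

    @Override
    protected Callable<Object> getCallable() {
        return new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                // FileSystem.getStatus() invoked reflectively, following the pattern in this listing
                return ReflectionUtils.invokeMethod(fileSystem, "getStatus", new Object[0]); //$NON-NLS-1$
            }
        };
    }
}

A caller would then run new ListRootWorkUnit(dfs).execute() and receive either the FsStatus object or a HadoopServerException wrapping the timeout. Note that execute() never shuts the executor down, so a caller creating many work units may want to add an executor.shutdownNow() in a finally block.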
Use of org.talend.designer.hdfsbrowse.exceptions.HadoopServerException in project tbd-studio-se by Talend.
Class HadoopServerUtil, method getDFS:
/**
* DOC ycbai Comment method "getDFS".
*
* Provides access to the HDFS System.
*
* @param connection
* @return
* @throws HadoopServerException
*/
public static Object getDFS(HDFSConnectionBean connection, ClassLoader currentClassLoader) throws HadoopServerException {
    if (connection == null) {
        return null;
    }
    String nameNodeURI = connection.getNameNodeURI();
    if (nameNodeURI == null) {
        return null;
    }
    Object dfs = null;
    ClassLoader classLoader = currentClassLoader;
    ClassLoader oldClassLoaderLoader = Thread.currentThread().getContextClassLoader();
    try {
        if (classLoader == null) {
            classLoader = getClassLoader(connection);
        }
        Thread.currentThread().setContextClassLoader(classLoader);
        Object conf = getConfiguration(connection, classLoader);
        // add the needed jars automatically
        List<String> classIds = EHadoopAdditionalJars.getBaseLoaderClassIds();
        for (String classId : classIds) {
            Object obj = ReflectionUtils.invokeMethod(conf, "get", new Object[] { classId }, String.class, String.class); //$NON-NLS-1$
            if (obj != null && obj instanceof String) {
                String[] addedJars = EHadoopAdditionalJarsMapping.getAddedJarsByClassName(obj.toString());
                if (classLoader instanceof DynamicClassLoader) {
                    classLoader = DynamicClassLoader.updateBaseLoader((DynamicClassLoader) classLoader, addedJars);
                    Thread.currentThread().setContextClassLoader(classLoader);
                }
            }
        }
        ReflectionUtils.invokeStaticMethod("org.apache.hadoop.security.UserGroupInformation", classLoader, //$NON-NLS-1$
                "setConfiguration", new Object[] { conf }); //$NON-NLS-1$
        boolean enableKerberos = connection.isEnableKerberos();
        String userName = StringUtils.trimToNull(connection.getUserName());
        if (userName != null) {
            userName = TalendQuoteUtils.removeQuotesIfExist(userName);
        }
        if (enableKerberos) {
            userName = null;
        }
        String group = StringUtils.trimToNull(connection.getGroup());
        if (userName == null || group != null) {
            dfs = ReflectionUtils.invokeStaticMethod("org.apache.hadoop.fs.FileSystem", classLoader, "get", //$NON-NLS-1$ //$NON-NLS-2$
                    new Object[] { conf });
        } else {
            dfs = ReflectionUtils.invokeStaticMethod("org.apache.hadoop.fs.FileSystem", classLoader, "get", //$NON-NLS-1$ //$NON-NLS-2$
                    new Object[] { new URI(EHadoopConfProperties.FS_DEFAULT_URI.get(conf)), conf, userName });
        }
    } catch (Exception e) {
        throw new HadoopServerException(e);
    } finally {
        Thread.currentThread().setContextClassLoader(oldClassLoaderLoader);
    }
    return dfs;
}
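A hedged usage sketch: obtaining the reflective FileSystem handle and listing the HDFS root. The helper name listRoot is an assumption, not project code; passing null as the class loader lets getDFS resolve one from the connection, as the code above shows:

// Hypothetical caller; lists "/" on the cluster described by the connection bean.
static Object[] listRoot(HDFSConnectionBean connection) throws HadoopServerException {
    try {
        Object dfs = HadoopServerUtil.getDFS(connection, null); // null: getDFS resolves the class loader
        if (dfs == null) {
            return new Object[0];
        }
        ClassLoader classLoader = dfs.getClass().getClassLoader();
        Object rootPath = ReflectionUtils.newInstance("org.apache.hadoop.fs.Path", classLoader, new Object[] { "/" }); //$NON-NLS-1$ //$NON-NLS-2$
        Class<?> pathClass = Class.forName("org.apache.hadoop.fs.Path", true, classLoader); //$NON-NLS-1$
        // FileSystem.listStatus(Path) returns a FileStatus[]; invoked reflectively as above
        return (Object[]) ReflectionUtils.invokeMethod(dfs, "listStatus", new Object[] { rootPath }, pathClass); //$NON-NLS-1$
    } catch (Exception e) {
        throw new HadoopServerException(e);
    }
}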
Use of org.talend.designer.hdfsbrowse.exceptions.HadoopServerException in project tbd-studio-se by Talend.
Class HadoopServerUtil, method getConfiguration:
public static Object getConfiguration(HDFSConnectionBean connection, ClassLoader currentClassLoader) throws HadoopServerException {
    Object conf = null;
    String userName = StringUtils.trimToNull(connection.getUserName());
    String namenodePrincipal = StringUtils.trimToNull(connection.getPrincipal());
    String group = StringUtils.trimToNull(connection.getGroup());
    boolean enableKerberos = connection.isEnableKerberos();
    boolean useKeytab = connection.isUseKeytab();
    String keytabPrincipal = StringUtils.trimToNull(connection.getKeytabPrincipal());
    String keytab = StringUtils.trimToNull(connection.getKeytab());
    String nameNodeURI = connection.getNameNodeURI();
    nameNodeURI = TalendQuoteUtils.removeQuotesIfExist(nameNodeURI);
    if (userName != null) {
        userName = TalendQuoteUtils.removeQuotesIfExist(userName);
    }
    if (namenodePrincipal != null) {
        namenodePrincipal = TalendQuoteUtils.removeQuotesIfExist(namenodePrincipal);
    }
    if (group != null) {
        group = TalendQuoteUtils.removeQuotesIfExist(group);
    }
    if (keytabPrincipal != null) {
        keytabPrincipal = TalendQuoteUtils.removeQuotesIfExist(keytabPrincipal);
    }
    if (keytab != null) {
        keytab = TalendQuoteUtils.removeQuotesIfExist(keytab);
    }
    if (HadoopClassLoaderUtil.isWebHDFS(nameNodeURI)) {
        IHadoopClusterService hadoopClusterService = HadoopRepositoryUtil.getHadoopClusterService();
        String hcId = connection.getRelativeHadoopClusterId();
        if (StringUtils.isNotBlank(hcId) && hadoopClusterService != null) {
            Map<String, String> parameters = hadoopClusterService.getHadoopDbParameters(hcId);
            if (parameters.size() > 0) {
                ContextType contextType = hadoopClusterService.getHadoopClusterContextType(hcId);
                if (contextType != null) {
                    connection.setParentContextType(contextType);
                }
                boolean isUseSSL = Boolean.parseBoolean(parameters.get(ConnParameterKeys.CONN_PARA_KEY_USE_WEBHDFS_SSL));
                String trustStorePath = connection.getRealValue(parameters.get(ConnParameterKeys.CONN_PARA_KEY_WEBHDFS_SSL_TRUST_STORE_PATH), true);
                String trustStorePassword = connection.getRealValue(parameters.get(ConnParameterKeys.CONN_PARA_KEY_WEBHDFS_SSL_TRUST_STORE_PASSWORD), true);
                HadoopRepositoryUtil.setSSLSystemProperty(isUseSSL, nameNodeURI, trustStorePath, trustStorePassword);
            }
        }
    }
    ClassLoader classLoader = currentClassLoader;
    ClassLoader oldClassLoaderLoader = Thread.currentThread().getContextClassLoader();
    try {
        if (classLoader == null) {
            classLoader = getClassLoader(connection);
        }
        Thread.currentThread().setContextClassLoader(classLoader);
        conf = Class.forName("org.apache.hadoop.conf.Configuration", true, classLoader).newInstance(); //$NON-NLS-1$
        EHadoopConfProperties.FS_DEFAULT_URI.set(conf, nameNodeURI);
        if (enableKerberos) {
            assert namenodePrincipal != null;
            userName = null;
            EHadoopConfProperties.KERBEROS_PRINCIPAL.set(conf, namenodePrincipal);
            EHadoopConfProperties.AUTHENTICATION.set(conf, "KERBEROS"); //$NON-NLS-1$
        }
        if (group != null) {
            assert userName != null;
            EHadoopConfProperties.JOB_UGI.set(conf, userName + GROUP_SEPARATOR + group);
        }
        if (useKeytab) {
            assert keytabPrincipal != null;
            assert keytab != null;
            ReflectionUtils.invokeStaticMethod("org.apache.hadoop.security.UserGroupInformation", classLoader, //$NON-NLS-1$
                    "loginUserFromKeytab", new String[] { keytabPrincipal, keytab }); //$NON-NLS-1$
        }
        Map<String, Object> configurations = connection.getConfigurations();
        Iterator<Entry<String, Object>> configsIterator = configurations.entrySet().iterator();
        while (configsIterator.hasNext()) {
            Entry<String, Object> configEntry = configsIterator.next();
            String key = configEntry.getKey();
            Object value = configEntry.getValue();
            if (key == null) {
                continue;
            }
            ReflectionUtils.invokeMethod(conf, "set", new Object[] { key, String.valueOf(value) }, String.class, //$NON-NLS-1$
                    String.class);
        }
        ReflectionUtils.invokeMethod(conf, "set", new Object[] { "dfs.client.use.datanode.hostname", "true" }, //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
                String.class, String.class);
    } catch (Exception e) {
        throw new HadoopServerException(e);
    } finally {
        Thread.currentThread().setContextClassLoader(oldClassLoaderLoader);
    }
    return conf;
}
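A usage sketch for sanity-checking the result: fs.defaultFS is the standard Hadoop key behind EHadoopConfProperties.FS_DEFAULT_URI, so it can be read back reflectively. The helper name describeConfiguration is an assumption, not project code:

// Hypothetical check: confirm the name-node URI landed in the reflective Configuration.
static String describeConfiguration(HDFSConnectionBean connection) throws HadoopServerException {
    try {
        Object conf = HadoopServerUtil.getConfiguration(connection, null);
        // Configuration.get(String) invoked reflectively, as in getDFS above
        Object defaultFs = ReflectionUtils.invokeMethod(conf, "get", new Object[] { "fs.defaultFS" }, String.class); //$NON-NLS-1$ //$NON-NLS-2$
        return String.valueOf(defaultFs);
    } catch (Exception e) {
        throw new HadoopServerException(e);
    }
}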
Use of org.talend.designer.hdfsbrowse.exceptions.HadoopServerException in project tbd-studio-se by Talend.
Class HadoopServerUtil, method hasAuthority:
public static boolean hasAuthority(Object status, String userName, String group, ELinuxAuthority authority) throws HadoopServerException {
    boolean hasAuthority = false;
    if (status == null) {
        return hasAuthority;
    }
    if (authority == null) {
        authority = ELinuxAuthority.READ;
    }
    try {
        Object permission = ReflectionUtils.invokeMethod(status, "getPermission", new Object[0]);
        if (permission == null) {
            return hasAuthority;
        }
        if (StringUtils.trimToNull(userName) != null) {
            userName = TalendQuoteUtils.removeQuotesIfExist(userName);
        }
        String[] groups = new String[0];
        if (StringUtils.trimToNull(group) != null) {
            group = TalendQuoteUtils.removeQuotesIfExist(group);
            groups = group.split(GROUP_SEPARATOR);
        }
        String fileOwner = (String) ReflectionUtils.invokeMethod(status, "getOwner", new Object[0]);
        fileOwner = TalendQuoteUtils.removeQuotesIfExist(fileOwner);
        String fileGroup = (String) ReflectionUtils.invokeMethod(status, "getGroup", new Object[0]);
        fileGroup = TalendQuoteUtils.removeQuotesIfExist(fileGroup);
        Object userAction = ReflectionUtils.invokeMethod(permission, "getUserAction", new Object[0]);
        Object groupAction = ReflectionUtils.invokeMethod(permission, "getGroupAction", new Object[0]);
        Object otherAction = ReflectionUtils.invokeMethod(permission, "getOtherAction", new Object[0]);
        switch (authority) {
        case READ:
            if (fileOwner != null && fileOwner.equals(userName)) {
                return hasReadAuthority(userAction);
            }
            if (fileGroup != null && ArrayUtils.contains(groups, fileGroup)) {
                return hasReadAuthority(groupAction);
            }
            return hasReadAuthority(otherAction);
        case WRITE:
            if (fileOwner != null && fileOwner.equals(userName)) {
                return hasWriteAuthority(userAction);
            }
            if (fileGroup != null && ArrayUtils.contains(groups, fileGroup)) {
                return hasWriteAuthority(groupAction);
            }
            return hasWriteAuthority(otherAction);
        case EXCUTE: // constant spelled this way in ELinuxAuthority
            if (fileOwner != null && fileOwner.equals(userName)) {
                return hasExcuteAuthority(userAction);
            }
            if (fileGroup != null && ArrayUtils.contains(groups, fileGroup)) {
                return hasExcuteAuthority(groupAction);
            }
            return hasExcuteAuthority(otherAction);
        default:
            break;
        }
    } catch (Exception e) {
        throw new HadoopServerException(e);
    }
    return hasAuthority;
}
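A hedged usage sketch: fetching the FileStatus that this method consumes and checking write access before an upload. The helper name canWrite is an assumption, not project code:

// Hypothetical guard; dfs is a reflective FileSystem handle from getDFS above.
static boolean canWrite(Object dfs, String path, String userName, String group) throws HadoopServerException {
    try {
        ClassLoader classLoader = dfs.getClass().getClassLoader();
        Object pathObj = ReflectionUtils.newInstance("org.apache.hadoop.fs.Path", classLoader, new Object[] { path }); //$NON-NLS-1$
        Class<?> pathClass = Class.forName("org.apache.hadoop.fs.Path", true, classLoader); //$NON-NLS-1$
        // FileSystem.getFileStatus(Path) returns the FileStatus consumed by hasAuthority
        Object status = ReflectionUtils.invokeMethod(dfs, "getFileStatus", new Object[] { pathObj }, pathClass); //$NON-NLS-1$
        return HadoopServerUtil.hasAuthority(status, userName, group, ELinuxAuthority.WRITE);
    } catch (Exception e) {
        throw new HadoopServerException(e);
    }
}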