use of com.dtstack.taier.pluginapi.exception.PluginDefineException in project Taier by DTStack.
the class HadoopClient method uploadStringToHdfs.
/**
 * Upload a string to HDFS as a file.
 *
 * @param bytes    the string content to write
 * @param hdfsPath the destination file path on HDFS
 * @return the full HDFS URL of the written file (fs.defaultFS + hdfsPath)
 */
@Override
public String uploadStringToHdfs(String bytes, String hdfsPath) {
    try {
        return KerberosUtils.login(config, () -> {
            FileSystem fs = null;
            try {
                ByteArrayInputStream is = new ByteArrayInputStream(bytes.getBytes());
                fs = FileSystem.get(conf);
                Path destP = new Path(hdfsPath);
                FSDataOutputStream os = fs.create(destP);
                // copyBytes with close=true closes both streams when the copy finishes
                IOUtils.copyBytes(is, os, 4096, true);
            } catch (IOException e) {
                LOG.error("submit file {} to hdfs error", hdfsPath, e);
                throw new PluginDefineException("failed to upload file to hdfs", e);
            } finally {
                if (null != fs) {
                    try {
                        fs.close();
                    } catch (IOException e) {
                        LOG.warn("close FileSystem error", e);
                    }
                }
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("submit file {} to hdfs success.", hdfsPath);
            }
            return conf.get("fs.defaultFS") + hdfsPath;
        }, conf);
    } catch (Exception e) {
        throw new PluginDefineException("failed to upload file to hdfs", e);
    }
}
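A minimal caller sketch for context (the hadoopClient variable and the paths below are illustrative assumptions, not part of the Taier source):

    // Hypothetical usage: persist a small JSON payload and keep the returned URL.
    String jobConf = "{\"jobId\":\"demo\"}";
    String hdfsUrl = hadoopClient.uploadStringToHdfs(jobConf, "/taier/demo/job.json");
    // hdfsUrl is conf.get("fs.defaultFS") + hdfsPath, e.g. hdfs://ns1/taier/demo/job.json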
use of com.dtstack.taier.pluginapi.exception.PluginDefineException in project Taier by DTStack.
the class HadoopClient method buildYarnClient.
private YarnClient buildYarnClient() {
    try {
        return KerberosUtils.login(config, () -> {
            LOG.info("buildYarnClient, init YarnClient!");
            YarnClient yarnClient1 = YarnClient.createYarnClient();
            yarnClient1.init(conf);
            yarnClient1.start();
            yarnClient = yarnClient1;
            return yarnClient;
        }, conf);
    } catch (Exception e) {
        LOG.error("buildYarnClient error", e);
        throw new PluginDefineException(e);
    }
}
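Note that the freshly built client is cached in the yarnClient field. A plausible accessor around that cache might look like the sketch below (an assumption; the real Taier accessor may additionally health-check and rebuild a stale client):

    // Hypothetical accessor sketch: build the YarnClient on first use, then reuse it.
    private synchronized YarnClient getYarnClient() {
        if (yarnClient == null) {
            yarnClient = buildYarnClient();
        }
        return yarnClient;
    }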
use of com.dtstack.taier.pluginapi.exception.PluginDefineException in project Taier by DTStack.
the class HadoopClient method getClusterResource.
@Override
public ClusterResource getClusterResource() {
    ClusterResource clusterResource = new ClusterResource();
    try {
        KerberosUtils.login(config, () -> {
            YarnClient resourceClient = null;
            try {
                resourceClient = YarnClient.createYarnClient();
                resourceClient.init(conf);
                resourceClient.start();
                List<NodeReport> nodes = resourceClient.getNodeReports(NodeState.RUNNING);
                List<ClusterResource.NodeDescription> clusterNodes = new ArrayList<>();
                int totalMem = 0;
                int totalCores = 0;
                int usedMem = 0;
                int usedCores = 0;
                for (NodeReport rep : nodes) {
                    ClusterResource.NodeDescription node = new ClusterResource.NodeDescription();
                    String nodeName = rep.getHttpAddress().split(":")[0];
                    node.setNodeName(nodeName);
                    node.setMemory(rep.getCapability().getMemory());
                    node.setUsedMemory(rep.getUsed().getMemory());
                    node.setUsedVirtualCores(rep.getUsed().getVirtualCores());
                    node.setVirtualCores(rep.getCapability().getVirtualCores());
                    clusterNodes.add(node);
                    // accumulate cluster-wide totals and usage
                    Resource capability = rep.getCapability();
                    Resource used = rep.getUsed();
                    totalMem += capability.getMemory();
                    totalCores += capability.getVirtualCores();
                    usedMem += used.getMemory();
                    usedCores += used.getVirtualCores();
                }
                ClusterResource.ResourceMetrics metrics = createResourceMetrics(totalMem, usedMem, totalCores, usedCores);
                clusterResource.setNodes(clusterNodes);
                String webAddress = getYarnWebAddress(yarnClient);
                String schedulerUrl = String.format(YARN_SCHEDULER_FORMAT, webAddress);
                String schedulerInfoMsg = PoolHttpClient.get(schedulerUrl, null);
                JSONObject schedulerInfo = JSONObject.parseObject(schedulerInfoMsg);
                if (schedulerInfo.containsKey("scheduler")) {
                    clusterResource.setScheduleInfo(schedulerInfo.getJSONObject("scheduler").getJSONObject("schedulerInfo"));
                }
                clusterResource.setQueues(getQueueResource(yarnClient));
                clusterResource.setResourceMetrics(metrics);
            } catch (Exception e) {
                LOG.error("get cluster resource error", e);
            } finally {
                if (null != resourceClient) {
                    try {
                        resourceClient.close();
                    } catch (IOException e) {
                        LOG.error("close resource client error", e);
                    }
                }
            }
            return clusterResource;
        }, conf);
    } catch (Exception e) {
        throw new PluginDefineException(e.getMessage());
    }
    return clusterResource;
}
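The createResourceMetrics helper is not shown on this page; presumably it reduces the four accumulated counters to utilization figures, along the lines of this hypothetical calculation:

    // Hypothetical reduction of the counters to utilization rates (illustrative values).
    int totalMem = 4096, usedMem = 1024, totalCores = 16, usedCores = 4;
    double memRate = totalMem == 0 ? 0.0 : usedMem * 100.0 / totalMem;
    double coresRate = totalCores == 0 ? 0.0 : usedCores * 100.0 / totalCores;
    System.out.printf("memory used: %.1f%%, vcores used: %.1f%%%n", memRate, coresRate);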
use of com.dtstack.taier.pluginapi.exception.PluginDefineException in project Taier by DTStack.
the class HadoopClient method beforeSubmitFunc.
@Override
public void beforeSubmitFunc(JobClient jobClient) {
    String sql = jobClient.getSql();
    List<String> sqlArr = DtStringUtil.splitIgnoreQuota(sql, ';');
    if (sqlArr.size() == 0) {
        return;
    }
    List<String> sqlList = Lists.newArrayList(sqlArr);
    Iterator<String> sqlItera = sqlList.iterator();
    List<String> fileList = Lists.newArrayList();
    while (sqlItera.hasNext()) {
        String tmpSql = sqlItera.next();
        // handle "add jar" statements and comments on the same line
        tmpSql = AddJarOperator.handleSql(tmpSql);
        if (AddJarOperator.verific(tmpSql)) {
            JarFileInfo jarFileInfo = AddJarOperator.parseSql(tmpSql);
            String addFilePath = jarFileInfo.getJarPath();
            // only HDFS paths are supported
            if (!addFilePath.startsWith(HDFS_PREFIX)) {
                throw new PluginDefineException("only support hdfs protocol for jar path");
            }
            String localJarPath = TMP_PATH + File.separator + UUID.randomUUID().toString() + ".jar";
            try {
                downloadHdfsFile(addFilePath, localJarPath);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            jarFileInfo.setJarPath(localJarPath);
            jobClient.setCoreJarInfo(jarFileInfo);
            fileList.add(localJarPath);
        }
    }
    cacheFile.put(jobClient.getJobId(), fileList);
}
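To illustrate what this hook operates on, here is a hypothetical input (the exact grammar accepted by AddJarOperator is an assumption):

    // Hypothetical "add jar" statement of the kind beforeSubmitFunc rewrites.
    String stmt = "add jar with hdfs://ns1/taier/udf/demo-udf.jar";
    // The jar is downloaded to TMP_PATH/<uuid>.jar, the JarFileInfo is pointed at
    // the local copy, and the path is recorded in cacheFile for later cleanup.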
use of com.dtstack.taier.pluginapi.exception.PluginDefineException in project Taier by DTStack.
the class AbstractClientFactory method createClientFactory.
public static IClientFactory createClientFactory(FlinkClientBuilder flinkClientBuilder) {
    FlinkConfig flinkConfig = flinkClientBuilder.getFlinkConfig();
    ClusterMode clusterMode = ClusterMode.getClusteMode(flinkConfig.getClusterMode());
    IClientFactory clientFactory;
    switch (clusterMode) {
        case PER_JOB:
            clientFactory = new PerJobClientFactory(flinkClientBuilder);
            break;
        case SESSION:
            clientFactory = new SessionClientFactory(flinkClientBuilder);
            break;
        case STANDALONE:
            clientFactory = new StandaloneClientFactory(flinkClientBuilder);
            break;
        default:
            throw new PluginDefineException("not support clusterMode: " + clusterMode);
    }
    return clientFactory;
}
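A minimal usage sketch (buildFlinkClientBuilder() below is an assumed helper, not part of the Taier source):

    // Hypothetical caller: the factory type is selected by flinkConfig.getClusterMode().
    FlinkClientBuilder builder = buildFlinkClientBuilder();
    IClientFactory factory = AbstractClientFactory.createClientFactory(builder);
    // PER_JOB -> PerJobClientFactory, SESSION -> SessionClientFactory,
    // STANDALONE -> StandaloneClientFactory; any other mode throws PluginDefineException.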