Usage of com.dtstack.taier.pluginapi.pojo.ClusterResource in the Taier project by DTStack:
the getClusterResource method of the HadoopClient class.
/**
 * Collects the YARN cluster's resource snapshot: per-node capacity/usage,
 * aggregate memory/vcore metrics, scheduler info and queue resources.
 * <p>
 * The query runs inside a Kerberos login context so that a freshly created,
 * authenticated {@link YarnClient} is used for every call.
 *
 * @return the populated {@link ClusterResource}; partially filled if the
 *         in-context query fails (the failure is logged, not rethrown)
 * @throws PluginDefineException if the Kerberos login itself fails
 */
@Override
public ClusterResource getClusterResource() {
    ClusterResource clusterResource = new ClusterResource();
    try {
        KerberosUtils.login(config, () -> {
            YarnClient resourceClient = null;
            try {
                resourceClient = YarnClient.createYarnClient();
                resourceClient.init(conf);
                resourceClient.start();
                List<NodeReport> nodes = resourceClient.getNodeReports(NodeState.RUNNING);
                List<ClusterResource.NodeDescription> clusterNodes = new ArrayList<>();
                // primitive ints: accumulators are never null, avoid autoboxing on every add
                int totalMem = 0;
                int totalCores = 0;
                int usedMem = 0;
                int usedCores = 0;
                for (NodeReport rep : nodes) {
                    ClusterResource.NodeDescription node = new ClusterResource.NodeDescription();
                    // http address is "host:port"; keep only the host part as the node name
                    String nodeName = rep.getHttpAddress().split(":")[0];
                    node.setNodeName(nodeName);
                    node.setMemory(rep.getCapability().getMemory());
                    node.setUsedMemory(rep.getUsed().getMemory());
                    node.setUsedVirtualCores(rep.getUsed().getVirtualCores());
                    node.setVirtualCores(rep.getCapability().getVirtualCores());
                    clusterNodes.add(node);
                    // accumulate cluster-wide totals and usage
                    Resource capability = rep.getCapability();
                    Resource used = rep.getUsed();
                    totalMem += capability.getMemory();
                    totalCores += capability.getVirtualCores();
                    usedMem += used.getMemory();
                    usedCores += used.getVirtualCores();
                }
                ClusterResource.ResourceMetrics metrics = createResourceMetrics(totalMem, usedMem, totalCores, usedCores);
                clusterResource.setNodes(clusterNodes);
                // FIX: query the freshly created, Kerberos-authenticated resourceClient
                // instead of the shared yarnClient field (consistent with DtYarnClient)
                String webAddress = getYarnWebAddress(resourceClient);
                String schedulerUrl = String.format(YARN_SCHEDULER_FORMAT, webAddress);
                String schedulerInfoMsg = PoolHttpClient.get(schedulerUrl, null);
                JSONObject schedulerInfo = JSONObject.parseObject(schedulerInfoMsg);
                if (schedulerInfo.containsKey("scheduler")) {
                    clusterResource.setScheduleInfo(schedulerInfo.getJSONObject("scheduler").getJSONObject("schedulerInfo"));
                }
                clusterResource.setQueues(getQueueResource(resourceClient));
                clusterResource.setResourceMetrics(metrics);
            } catch (Exception e) {
                // FIX: this catch guards the whole resource query, not the close() call;
                // log an accurate message (old text "close reource error" was misleading)
                LOG.error("get cluster resource error ", e);
            } finally {
                if (null != resourceClient) {
                    try {
                        resourceClient.close();
                    } catch (IOException e) {
                        LOG.error("close resource client error ", e);
                    }
                }
            }
            return clusterResource;
        }, conf);
    } catch (Exception e) {
        throw new PluginDefineException(e.getMessage());
    }
    return clusterResource;
}
Usage of com.dtstack.taier.pluginapi.pojo.ClusterResource in the Taier project by DTStack:
the main method of the HiveClient class.
/**
 * Standalone entry point: reads a ParamAction JSON (first line of the file
 * given as args[0]), initializes a HiveClient from its plugin info, fetches
 * the cluster resource and logs it.
 *
 * @param args args[0] = path to the params JSON file
 * @throws IOException declared for source compatibility; stream handling is
 *                     now fully managed by try-with-resources
 */
public static void main(String[] args) throws IOException {
    try {
        System.setProperty("HADOOP_USER_NAME", "admin");
        // input params json file path
        String filePath = args[0];
        File paramsFile = new File(filePath);
        // FIX: try-with-resources — the old finally block closed the streams only
        // when `reader` was non-null, leaking the FileInputStream/InputStreamReader
        // if an exception occurred between opening them and building the reader
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(paramsFile)))) {
            String request = reader.readLine();
            Map params = PublicUtil.jsonStrToObject(request, Map.class);
            ParamAction paramAction = PublicUtil.mapToObject(params, ParamAction.class);
            JobClient jobClient = new JobClient(paramAction);
            String pluginInfo = jobClient.getPluginInfo();
            Properties properties = PublicUtil.jsonStrToObject(pluginInfo, Properties.class);
            // md5 of the plugin info is recorded so the backend can detect config changes
            String md5plugin = MD5Util.getMd5String(pluginInfo);
            properties.setProperty("md5sum", md5plugin);
            HiveClient client = new HiveClient();
            client.init(properties);
            ClusterResource clusterResource = client.getClusterResource();
            LOG.info("submit success!");
            LOG.info(clusterResource.toString());
            System.exit(0);
        }
    } catch (Exception e) {
        LOG.error("submit error!", e);
    }
}
Usage of com.dtstack.taier.pluginapi.pojo.ClusterResource in the Taier project by DTStack:
the main method of the HadoopClient class.
/**
 * Standalone entry point: reads a ParamAction JSON (first line of the file
 * given as args[0]), initializes a HadoopClient from its plugin info, fetches
 * the cluster resource and logs it.
 *
 * @param args args[0] = path to the params JSON file
 * @throws Exception declared for source compatibility; stream handling is
 *                   now fully managed by try-with-resources
 */
public static void main(String[] args) throws Exception {
    try {
        System.setProperty("HADOOP_USER_NAME", "admin");
        // input params json file path
        String filePath = args[0];
        File paramsFile = new File(filePath);
        // FIX: try-with-resources — the old finally block closed the streams only
        // when `reader` was non-null, leaking the FileInputStream/InputStreamReader
        // if an exception occurred between opening them and building the reader
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(paramsFile)))) {
            String request = reader.readLine();
            Map params = PublicUtil.jsonStrToObject(request, Map.class);
            ParamAction paramAction = PublicUtil.mapToObject(params, ParamAction.class);
            JobClient jobClient = new JobClient(paramAction);
            String pluginInfo = jobClient.getPluginInfo();
            Properties properties = PublicUtil.jsonStrToObject(pluginInfo, Properties.class);
            // md5 of the plugin info is recorded so the backend can detect config changes
            String md5plugin = MD5Util.getMd5String(pluginInfo);
            properties.setProperty("md5sum", md5plugin);
            HadoopClient client = new HadoopClient();
            client.init(properties);
            ClusterResource clusterResource = client.getClusterResource();
            LOG.info("submit success!");
            LOG.info(clusterResource.toString());
            System.exit(0);
        }
    } catch (Exception e) {
        LOG.error("submit error!", e);
    }
}
Usage of com.dtstack.taier.pluginapi.pojo.ClusterResource in the Taier project by DTStack:
the main method of the DtYarnClient class.
/**
 * Standalone entry point: reads a ParamAction JSON (first line of the file
 * given as args[0]), initializes a DtYarnClient from its plugin info, fetches
 * the cluster resource and logs it.
 *
 * @param args args[0] = path to the params JSON file
 * @throws Exception on any read/parse/init failure (fail-fast CLI tool)
 */
public static void main(String[] args) throws Exception {
    System.setProperty("HADOOP_USER_NAME", "admin");
    // input params json file path
    String filePath = args[0];
    File paramsFile = new File(filePath);
    // FIX: try-with-resources — the reader (and underlying streams) was never closed
    try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(new FileInputStream(paramsFile)))) {
        String request = reader.readLine();
        Map params = PublicUtil.jsonStrToObject(request, Map.class);
        ParamAction paramAction = PublicUtil.mapToObject(params, ParamAction.class);
        JobClient jobClient = new JobClient(paramAction);
        String pluginInfo = jobClient.getPluginInfo();
        Properties properties = PublicUtil.jsonStrToObject(pluginInfo, Properties.class);
        // md5 of the plugin info is recorded so the backend can detect config changes
        String md5plugin = MD5Util.getMd5String(pluginInfo);
        properties.setProperty("md5sum", md5plugin);
        DtYarnClient client = new DtYarnClient();
        client.init(properties);
        ClusterResource clusterResource = client.getClusterResource();
        LOG.info("submit success!");
        LOG.info(clusterResource.toString());
        System.exit(0);
    }
}
Usage of com.dtstack.taier.pluginapi.pojo.ClusterResource in the Taier project by DTStack:
the getClusterResource method of the DtYarnClient class.
/**
 * Collects the YARN cluster's resource snapshot: per-node capacity/usage,
 * aggregate memory/vcore metrics, scheduler info and queue resources.
 * <p>
 * The query runs inside a Kerberos login context so that a freshly created,
 * authenticated {@link YarnClient} is used for every call.
 *
 * @return the populated {@link ClusterResource}; partially filled if the
 *         in-context query fails (the failure is logged, not rethrown)
 * @throws PluginDefineException if the Kerberos login itself fails
 */
@Override
public ClusterResource getClusterResource() {
    ClusterResource clusterResource = new ClusterResource();
    try {
        KerberosUtils.login(config, () -> {
            YarnClient resourceClient = null;
            try {
                resourceClient = YarnClient.createYarnClient();
                resourceClient.init(configuration);
                resourceClient.start();
                List<NodeReport> nodes = resourceClient.getNodeReports(NodeState.RUNNING);
                List<ClusterResource.NodeDescription> clusterNodes = new ArrayList<>();
                // primitive ints: accumulators are never null, avoid autoboxing on every add
                int totalMem = 0;
                int totalCores = 0;
                int usedMem = 0;
                int usedCores = 0;
                for (NodeReport rep : nodes) {
                    ClusterResource.NodeDescription node = new ClusterResource.NodeDescription();
                    // http address is "host:port"; keep only the host part as the node name
                    String nodeName = rep.getHttpAddress().split(":")[0];
                    node.setNodeName(nodeName);
                    node.setMemory(rep.getCapability().getMemory());
                    node.setUsedMemory(rep.getUsed().getMemory());
                    node.setUsedVirtualCores(rep.getUsed().getVirtualCores());
                    node.setVirtualCores(rep.getCapability().getVirtualCores());
                    clusterNodes.add(node);
                    // accumulate cluster-wide totals and usage
                    Resource capability = rep.getCapability();
                    Resource used = rep.getUsed();
                    totalMem += capability.getMemory();
                    totalCores += capability.getVirtualCores();
                    usedMem += used.getMemory();
                    usedCores += used.getVirtualCores();
                }
                ClusterResource.ResourceMetrics metrics = createResourceMetrics(totalMem, usedMem, totalCores, usedCores);
                clusterResource.setNodes(clusterNodes);
                String webAddress = getYarnWebAddress(resourceClient);
                String schedulerUrl = String.format(YARN_SCHEDULER_FORMAT, webAddress);
                String schedulerInfoMsg = PoolHttpClient.get(schedulerUrl, null);
                JSONObject schedulerInfo = JSONObject.parseObject(schedulerInfoMsg);
                if (schedulerInfo.containsKey("scheduler")) {
                    clusterResource.setScheduleInfo(schedulerInfo.getJSONObject("scheduler").getJSONObject("schedulerInfo"));
                }
                clusterResource.setQueues(getQueueResource(resourceClient));
                clusterResource.setResourceMetrics(metrics);
            } catch (Exception e) {
                // FIX: this catch guards the whole resource query, not the close() call;
                // log an accurate message (old text "close reource error" was misleading)
                LOG.error("get cluster resource error ", e);
            } finally {
                if (null != resourceClient) {
                    try {
                        resourceClient.close();
                    } catch (IOException e) {
                        LOG.error("close resource client error ", e);
                    }
                }
            }
            return clusterResource;
        }, configuration);
    } catch (Exception e) {
        throw new PluginDefineException(e.getMessage());
    }
    return clusterResource;
}
Aggregations