Use of com.dtstack.taier.pluginapi.JobParam in project Taier by DTStack.
From class FlinkClient, method submitJobWithJar:
private JobResult submitJobWithJar(JobClient jobClient, List<URL> classPaths, List<String> programArgList) {
    JobParam jobParam = new JobParam(jobClient);
    String jarPath = jobParam.getJarPath();
    if (jarPath == null) {
        logger.error("can not submit a job without jar path, please check it");
        return JobResult.createErrorResult("can not submit a job without jar path, please check it");
    }
    String args = jobParam.getClassArgs();
    if (StringUtils.isNotBlank(args)) {
        programArgList.addAll(DtStringUtil.splitIgnoreQuota(args, ' '));
    }
    // If the jar does not declare a main class, this parameter must supply it
    String entryPointClass = jobParam.getMainClass();
    SavepointRestoreSettings spSettings = buildSavepointSetting(jobClient);
    String[] programArgs = programArgList.toArray(new String[programArgList.size()]);
    PackagedProgram packagedProgram = null;
    JobGraph jobGraph = null;
    Pair<String, String> runResult;
    try {
        ClusterMode clusterMode = ClusterMode.getClusteMode(flinkConfig.getClusterMode());
        if (ClusterMode.isPerjob(clusterMode)) {
            // In per-job mode, defer creation of the PackagedProgram
            ClusterSpecification clusterSpecification = FlinkConfUtil.createClusterSpecification(flinkClientBuilder.getFlinkConfiguration(), jobClient.getApplicationPriority(), jobClient.getConfProperties());
            clusterSpecification.setClasspaths(classPaths);
            clusterSpecification.setEntryPointClass(entryPointClass);
            clusterSpecification.setJarFile(new File(jarPath));
            clusterSpecification.setSpSetting(spSettings);
            clusterSpecification.setProgramArgs(programArgs);
            clusterSpecification.setCreateProgramDelay(true);
            clusterSpecification.setYarnConfiguration(hadoopConf.getYarnConfiguration());
            logger.info("--------taskId: {} run by PerJob mode-----", jobClient.getJobId());
            runResult = runJobByPerJob(clusterSpecification, jobClient);
            jobGraph = clusterSpecification.getJobGraph();
            packagedProgram = clusterSpecification.getProgram();
        } else {
            Integer runParallelism = FlinkUtil.getJobParallelism(jobClient.getConfProperties());
            packagedProgram = FlinkUtil.buildProgram(jarPath, classPaths, jobClient.getJobType(), entryPointClass, programArgs, spSettings, flinkClientBuilder.getFlinkConfiguration(), filesystemManager);
            // runParallelism takes effect only when the program itself does not set a parallelism
            jobGraph = PackagedProgramUtils.createJobGraph(packagedProgram, flinkClientBuilder.getFlinkConfiguration(), runParallelism, false);
            clearClassPathShipfileLoadMode(packagedProgram);
            logger.info("--------taskId: {} run by Session mode-----", jobClient.getJobId());
            runResult = runJobBySession(jobGraph);
        }
        JobResult jobResult = JobResult.createSuccessResult(runResult.getSecond(), runResult.getFirst());
        jobResult.setExtraData(JobResultConstant.JOB_GRAPH, JobGraphBuildUtil.buildLatencyMarker(jobGraph));
        long checkpointInterval = jobGraph.getCheckpointingSettings().getCheckpointCoordinatorConfiguration().getCheckpointInterval();
        // Flink encodes "checkpointing disabled" as a Long.MAX_VALUE interval; normalize it to 0
        if (checkpointInterval >= Long.MAX_VALUE) {
            checkpointInterval = 0;
        }
        jobResult.setExtraData(JobResultConstant.FLINK_CHECKPOINT, String.valueOf(checkpointInterval));
        return jobResult;
    } catch (Throwable e) {
        return JobResult.createErrorResult(e);
    } finally {
        if (packagedProgram != null) {
            packagedProgram.deleteExtractedLibraries();
        }
    }
}
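The argument handling above depends on DtStringUtil.splitIgnoreQuota, which splits the class-args string on spaces while keeping quoted values intact. A minimal sketch of such a quote-aware splitter is shown below; it is a standalone illustration of the technique, not Taier's actual DtStringUtil implementation.

import java.util.ArrayList;
import java.util.List;

public class QuoteAwareSplitter {

    // Splits on the given delimiter, but treats text inside single or double
    // quotes as part of one token (a sketch of what splitIgnoreQuota provides).
    public static List<String> splitIgnoreQuota(String str, char delimiter) {
        List<String> tokens = new ArrayList<>();
        StringBuilder current = new StringBuilder();
        boolean inSingle = false;
        boolean inDouble = false;
        for (char c : str.toCharArray()) {
            if (c == '\'' && !inDouble) {
                inSingle = !inSingle;
                current.append(c);
            } else if (c == '"' && !inSingle) {
                inDouble = !inDouble;
                current.append(c);
            } else if (c == delimiter && !inSingle && !inDouble) {
                if (current.length() > 0) {
                    tokens.add(current.toString());
                    current.setLength(0);
                }
            } else {
                current.append(c);
            }
        }
        if (current.length() > 0) {
            tokens.add(current.toString());
        }
        return tokens;
    }

    public static void main(String[] args) {
        // The quoted "my job" stays together as one token despite the space
        System.out.println(splitIgnoreQuota("-jobName \"my job\" -p 4", ' '));
        // prints: [-jobName, "my job", -p, 4]
    }
}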
Use of com.dtstack.taier.pluginapi.JobParam in project Taier by DTStack.
From class HadoopClient, method submitJobWithJar:
private JobResult submitJobWithJar(JobClient jobClient) {
    try {
        setHadoopUserName(config);
        JobParam jobParam = new JobParam(jobClient);
        Map<String, Object> pluginInfo = PublicUtil.jsonStrToObject(jobClient.getPluginInfo(), Map.class);
        Configuration jobConf = fillJobConfig(jobClient, conf);
        if (pluginInfo.containsKey(QUEUE)) {
            jobConf.set(MRJobConfig.QUEUE_NAME, pluginInfo.get(QUEUE).toString());
        }
        MapReduceTemplate mr = new MapReduceTemplate(jobConf, jobParam);
        mr.run();
        LOG.info("mr jobId:{} jobName:{}", mr.getJobId(), jobParam.getJobName());
        return JobResult.createSuccessResult(mr.getJobId());
    } catch (Throwable ex) {
        LOG.error("submit mr job failed", ex);
        return JobResult.createErrorResult(ex);
    }
}
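Before submission, fillJobConfig layers the job's own properties onto the cluster-level Hadoop Configuration, so per-job settings (such as the YARN queue set above) override cluster defaults. The sketch below shows one way such a merge could work, assuming the job properties arrive as a java.util.Properties; the helper name and signature are illustrative and not Taier's actual fillJobConfig.

import java.util.Properties;
import org.apache.hadoop.conf.Configuration;

public class JobConfMerger {

    // Returns a copy of the base Configuration with the job's own properties
    // layered on top, so per-job settings win over cluster defaults.
    // Illustrative sketch only; Taier's fillJobConfig may differ.
    public static Configuration fillJobConfig(Properties jobProps, Configuration base) {
        Configuration jobConf = new Configuration(base); // copy; never mutate the shared conf
        if (jobProps != null) {
            for (String name : jobProps.stringPropertyNames()) {
                jobConf.set(name, jobProps.getProperty(name));
            }
        }
        return jobConf;
    }
}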