Use of com.dtstack.taier.pluginapi.JobClient in project Taier by DTStack.
From the class RdbsExeQueueTest, method testInit.
@Test
public void testInit() throws Exception {
    BlockingQueue<JobClient> waitQueue = Queues.newLinkedBlockingQueue();
    JobClient jobClient = new JobClient();
    jobClient.setJobName("test");
    jobClient.setJobId("test");
    jobClient.setSql("select * from tableTest;");
    jobClient.setTaskParams("{\"task\":\"test\"}");
    waitQueue.add(jobClient);
    MemberModifier.field(RdbsExeQueue.class, "waitQueue").set(rdbsExeQueue, waitQueue);
    MemberModifier.field(RdbsExeQueue.class, "minSize").set(rdbsExeQueue, 1);
    MemberModifier.field(RdbsExeQueue.class, "maxSize").set(rdbsExeQueue, 1);
    rdbsExeQueue.init();
}
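MemberModifier here is PowerMock's member-modification helper, used to inject values into RdbsExeQueue's private fields before init() runs. If PowerMock is not on the test classpath, the same injection can be done with plain reflection; the sketch below assumes only java.lang.reflect, and the field names are the ones used in the test above.

import java.lang.reflect.Field;

// A minimal stand-in for MemberModifier.field(...).set(...): writes a
// value into a private instance field of the target object.
final class ReflectionInjector {

    static void setField(Object target, String fieldName, Object value) throws Exception {
        Field field = target.getClass().getDeclaredField(fieldName);
        field.setAccessible(true); // lift the private modifier for this access
        field.set(target, value);
    }
}

With it, a setup line from the test reads ReflectionInjector.setField(rdbsExeQueue, "minSize", 1). Note that getDeclaredField only searches the class itself, not its superclasses, which is enough for this test.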
Use of com.dtstack.taier.pluginapi.JobClient in project Taier by DTStack.
From the class ScheduleActionService, method start.
/**
 * Accepts a request from the client and checks the node queue length.
 * If the job belongs to the current node, it is processed directly.
 */
public Boolean start(ParamActionExt paramActionExt) {
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("start actionParam: {}", JSONObject.toJSONString(paramActionExt, propertyFilter));
    }
    try {
        boolean canAccepted = receiveStartJob(paramActionExt);
        // duplicate submissions are validated and rejected here
        if (canAccepted) {
            JobClient jobClient = new JobClient(paramActionExt);
            jobClient.setType(getOrDefault(paramActionExt.getType(), EScheduleType.TEMP_JOB.getType()));
            jobDealer.addSubmitJob(jobClient);
            engineJobRetryMapper.delete(Wrappers.lambdaQuery(ScheduleEngineJobRetry.class).eq(ScheduleEngineJobRetry::getJobId, jobClient.getJobId()));
            return true;
        }
        LOGGER.warn("jobId:{} duplicate submissions are not allowed", paramActionExt.getJobId());
    } catch (Exception e) {
        runJobFail(paramActionExt, e, paramActionExt.getJobId());
    }
    return false;
}
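receiveStartJob (not shown here) is where the duplicate check mentioned in the comment happens: a jobId that was already accepted makes start return false instead of submitting twice. Below is a hypothetical sketch of such an idempotency guard using an in-memory concurrent set; the real Taier check consults persisted job records, so the class and method names here are illustrative only.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical duplicate-submission guard: add() on a concurrent set
// returns true only for the first caller that registers a given jobId.
final class DuplicateSubmitGuard {

    private final Set<String> acceptedJobIds = ConcurrentHashMap.newKeySet();

    boolean tryAccept(String jobId) {
        return acceptedJobIds.add(jobId);
    }
}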
Use of com.dtstack.taier.pluginapi.JobClient in project Taier by DTStack.
From the class GroupPriorityQueue, method emitJob2PriorityQueue.
/**
 * @return false: blocked | true: unblocked
 */
private boolean emitJob2PriorityQueue() {
    boolean empty = false;
    String localAddress = "";
    try {
        if (priorityQueueSize() >= getQueueSizeLimited()) {
            return false;
        }
        localAddress = environmentContext.getLocalAddress();
        long startId = 0L;
        outLoop:
        while (true) {
            List<ScheduleEngineJobCache> jobCaches = engineJobCacheService.listByStage(startId, localAddress, EJobCacheStage.DB.getStage(), jobResource);
            if (CollectionUtils.isEmpty(jobCaches)) {
                empty = true;
                break;
            }
            for (ScheduleEngineJobCache jobCache : jobCaches) {
                try {
                    ParamAction paramAction = PublicUtil.jsonStrToObject(jobCache.getJobInfo(), ParamAction.class);
                    JobClient jobClient = new JobClient(paramAction);
                    jobClient.setCallBack((jobStatus) -> {
                        jobDealer.updateJobStatus(jobClient.getJobId(), jobStatus);
                    });
                    boolean addInner = this.addInner(jobClient, false);
                    LOGGER.info("jobId:{} load from db, {} emit job to queue.", jobClient.getJobId(), addInner ? "success" : "failed");
                    if (!addInner) {
                        empty = false;
                        break outLoop;
                    }
                    startId = jobCache.getId();
                } catch (Exception e) {
                    LOGGER.error("", e);
                    // data conversion failed -- log it and mark the job as failed to submit
                    jobDealer.dealSubmitFailJob(jobCache.getJobId(), "This task stores information exception and cannot be converted." + e.toString());
                }
            }
        }
    } catch (Exception e) {
        LOGGER.error("emitJob2PriorityQueue localAddress:{} error:", localAddress, e);
    }
    if (empty) {
        blocked.set(false);
    }
    return empty;
}
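The startId cursor and the labeled outLoop implement keyset pagination over the job cache table: each listByStage call fetches rows with an id greater than the last one processed, so no row is read twice even as the table shrinks. A minimal generic sketch of that loop follows; Row, getId(), and fetchBatch are hypothetical stand-ins for ScheduleEngineJobCache and engineJobCacheService.listByStage.

import java.util.List;
import java.util.function.LongFunction;

final class KeysetPager {

    interface Row {
        long getId();
    }

    // Drains all rows batch by batch, advancing the id cursor as it goes.
    static void drain(LongFunction<List<Row>> fetchBatch) {
        long startId = 0L;
        while (true) {
            List<Row> batch = fetchBatch.apply(startId); // rows with id > startId
            if (batch.isEmpty()) {
                break; // nothing left in this stage
            }
            for (Row row : batch) {
                // ... process the row, e.g. build a JobClient from it ...
                startId = row.getId(); // move the cursor past this row
            }
        }
    }
}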
Use of com.dtstack.taier.pluginapi.JobClient in project Taier by DTStack.
From the class NodeRecoverService, method recoverJobCaches.
public void recoverJobCaches() {
    String localAddress = environmentContext.getLocalAddress();
    try {
        long startId = 0L;
        while (true) {
            List<ScheduleEngineJobCache> jobCaches = engineJobCacheService.listByStage(startId, localAddress, EJobCacheStage.SUBMITTED.getStage(), null);
            if (CollectionUtils.isEmpty(jobCaches)) {
                break;
            }
            List<JobClient> afterJobClients = new ArrayList<>(jobCaches.size());
            for (ScheduleEngineJobCache jobCache : jobCaches) {
                try {
                    ParamAction paramAction = PublicUtil.jsonStrToObject(jobCache.getJobInfo(), ParamAction.class);
                    JobClient jobClient = new JobClient(paramAction);
                    afterJobClients.add(jobClient);
                    startId = jobCache.getId();
                } catch (Exception e) {
                    LOGGER.error("", e);
                    // data conversion failed -- log it and mark the job as failed to submit
                    jobDealer.dealSubmitFailJob(jobCache.getJobId(), "This task stores information exception and cannot be converted." + ExceptionUtil.getErrorMessage(e));
                }
            }
            if (CollectionUtils.isNotEmpty(afterJobClients)) {
                jobDealer.afterSubmitJobVast(afterJobClients);
            }
        }
    } catch (Exception e) {
        LOGGER.error("----broker:{} RecoverDealer error:", localAddress, e);
    }
}
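Note how the inner try/catch keeps one corrupt cache entry from aborting the whole recovery batch: good rows are collected into afterJobClients while each bad row is reported through dealSubmitFailJob. A small generic sketch of that collect-successes, report-failures shape follows; parser and onFailure are hypothetical hooks, not Taier APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;

final class BatchConverter {

    // Converts every input it can; failures are reported and skipped
    // rather than propagated, so the batch always completes.
    static <I, O> List<O> convertAll(List<I> inputs, Function<I, O> parser, Consumer<I> onFailure) {
        List<O> converted = new ArrayList<>(inputs.size());
        for (I input : inputs) {
            try {
                converted.add(parser.apply(input));
            } catch (Exception e) {
                onFailure.accept(input);
            }
        }
        return converted;
    }
}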
Use of com.dtstack.taier.pluginapi.JobClient in project Taier by DTStack.
From the class HadoopClient, method main.
public static void main(String[] args) throws Exception {
    FileInputStream fileInputStream = null;
    InputStreamReader inputStreamReader = null;
    BufferedReader reader = null;
    try {
        System.setProperty("HADOOP_USER_NAME", "admin");
        // args[0] is the path to a JSON file holding the request parameters
        String filePath = args[0];
        File paramsFile = new File(filePath);
        fileInputStream = new FileInputStream(paramsFile);
        inputStreamReader = new InputStreamReader(fileInputStream);
        reader = new BufferedReader(inputStreamReader);
        String request = reader.readLine();
        Map params = PublicUtil.jsonStrToObject(request, Map.class);
        ParamAction paramAction = PublicUtil.mapToObject(params, ParamAction.class);
        JobClient jobClient = new JobClient(paramAction);
        String pluginInfo = jobClient.getPluginInfo();
        Properties properties = PublicUtil.jsonStrToObject(pluginInfo, Properties.class);
        String md5plugin = MD5Util.getMd5String(pluginInfo);
        properties.setProperty("md5sum", md5plugin);
        HadoopClient client = new HadoopClient();
        client.init(properties);
        ClusterResource clusterResource = client.getClusterResource();
        LOG.info("submit success!");
        LOG.info(clusterResource.toString());
        System.exit(0);
    } catch (Exception e) {
        LOG.error("submit error!", e);
    } finally {
        // close each stream independently so a partially built chain does not leak
        if (reader != null) {
            reader.close();
        }
        if (inputStreamReader != null) {
            inputStreamReader.close();
        }
        if (fileInputStream != null) {
            fileInputStream.close();
        }
    }
}
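The manual stream chain and finally block above predate try-with-resources; since the method only needs the first line of the params file, the read can be condensed as below. This is a sketch using only java.nio.file.Files, and ParamsFileReader.readFirstLine is a hypothetical helper, not part of HadoopClient.

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

final class ParamsFileReader {

    // Reads the single JSON line the main method expects in args[0];
    // try-with-resources closes the reader and its underlying stream.
    static String readFirstLine(String filePath) throws IOException {
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(filePath), StandardCharsets.UTF_8)) {
            return reader.readLine();
        }
    }
}

In the main method above, the whole stream setup and finally block would then reduce to String request = ParamsFileReader.readFirstLine(args[0]).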