Use of co.cask.cdap.api.schedule.TriggeringScheduleInfo in project cdap by caskdata.
In the class TriggeringScheduleInfoAdapterTest, the method testSerDeserScheduleInfo:
@Test
public void testSerDeserScheduleInfo() {
  // Workflow token carried by the program-status trigger info
  BasicWorkflowToken token = new BasicWorkflowToken(1);
  token.setCurrentNode("node");
  token.put("tokenKey", "tokenVal");
  List<TriggerInfo> triggerInfos = ImmutableList.<TriggerInfo>of(
    new DefaultProgramStatusTriggerInfo("ns", Specifications.from(new WebCrawlApp()), ProgramType.WORKFLOW,
                                        "workflow", RunIds.generate(), ProgramStatus.COMPLETED,
                                        token, Collections.<String, String>emptyMap()),
    new DefaultPartitionTriggerInfo("ns", "ds", 10, 11),
    new DefaultTimeTriggerInfo("1 * * * *", 0L));
  TriggeringScheduleInfo scheduleInfo =
    new DefaultTriggeringScheduleInfo("schedule", "description", triggerInfos, ImmutableMap.of("key", "value"));

  // Serializing and then deserializing the schedule info should produce identical JSON
  String scheduleInfoJson = GSON.toJson(scheduleInfo);
  TriggeringScheduleInfo deserializedScheduleInfo = GSON.fromJson(scheduleInfoJson, TriggeringScheduleInfo.class);
  Assert.assertEquals(scheduleInfoJson, GSON.toJson(deserializedScheduleInfo));

  // The program-status trigger info must survive the round trip, including the workflow token
  DefaultProgramStatusTriggerInfo expectedProgramStatusTriggerInfo =
    (DefaultProgramStatusTriggerInfo) triggerInfos.get(0);
  DefaultProgramStatusTriggerInfo deserializedProgramStatusTriggerInfo =
    (DefaultProgramStatusTriggerInfo) deserializedScheduleInfo.getTriggerInfos().get(0);
  Assert.assertEquals(expectedProgramStatusTriggerInfo.getApplicationSpecification().getName(),
                      deserializedProgramStatusTriggerInfo.getApplicationSpecification().getName());
  Assert.assertEquals(expectedProgramStatusTriggerInfo.getWorkflowToken().getAll(),
                      deserializedProgramStatusTriggerInfo.getWorkflowToken().getAll());
}
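The test above checks that the JSON round trip preserves the polymorphic list of TriggerInfo implementations. As a complement, here is a minimal sketch of how a consumer might inspect a deserialized TriggeringScheduleInfo by trigger type. It is not CDAP source: the GSON instance is assumed to be configured with the same TriggerInfo type adapters the test uses, the imports assume TriggerInfo lives alongside TriggeringScheduleInfo, and the helper class and its printing are illustrative only.

  import java.util.List;
  import co.cask.cdap.api.schedule.TriggerInfo;
  import co.cask.cdap.api.schedule.TriggeringScheduleInfo;
  import com.google.gson.Gson;

  public class ScheduleInfoInspector {

    // Assumed to be configured with the TriggerInfo type adapters; a plain Gson
    // cannot instantiate the TriggerInfo interface on its own.
    private final Gson gson;

    public ScheduleInfoInspector(Gson gson) {
      this.gson = gson;
    }

    // Hypothetical helper: parse the JSON produced by the scheduler and report
    // which kinds of triggers fired for this schedule.
    public void describe(String scheduleInfoJson) {
      TriggeringScheduleInfo info = gson.fromJson(scheduleInfoJson, TriggeringScheduleInfo.class);
      List<TriggerInfo> triggerInfos = info.getTriggerInfos();
      for (TriggerInfo triggerInfo : triggerInfos) {
        // TriggerInfo.Type.TIME is used in the tests in this listing; constants for the
        // partition and program-status triggers are assumed to exist alongside it.
        System.out.printf("Schedule '%s' has a trigger of type %s%n", info.getName(), triggerInfo.getType());
      }
    }
  }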
Use of co.cask.cdap.api.schedule.TriggeringScheduleInfo in project cdap by caskdata.
In the class ScheduleTaskRunner, the method launch:
public void launch(Job job) throws Exception {
  ProgramSchedule schedule = job.getSchedule();
  ProgramId programId = schedule.getProgramId();
  Map<String, String> userArgs = Maps.newHashMap();
  userArgs.putAll(schedule.getProperties());
  userArgs.putAll(propertiesResolver.getUserProperties(Id.Program.fromEntityId(programId)));
  Map<String, String> systemArgs = Maps.newHashMap();
  systemArgs.putAll(propertiesResolver.getSystemProperties(Id.Program.fromEntityId(programId)));
  // Let the triggers update the arguments first before setting the triggering schedule info
  ((SatisfiableTrigger) job.getSchedule().getTrigger())
    .updateLaunchArguments(job.getSchedule(), job.getNotifications(), userArgs, systemArgs);
  TriggeringScheduleInfo triggeringScheduleInfo = getTriggeringScheduleInfo(job);
  systemArgs.put(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO, GSON.toJson(triggeringScheduleInfo));
  execute(programId, systemArgs, userArgs);
  LOG.info("Successfully started program {} in schedule {}.", schedule.getProgramId(), schedule.getName());
}
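On the receiving side, the launched program run carries the serialized schedule info in its system arguments under the same key, which is how the CoreSchedulerServiceTest below reads it back from run records. A minimal sketch of that consumer side, assuming a GSON configured with the TriggerInfo type adapters; the import path shown for ProgramOptionConstants and the helper class itself are assumptions, not CDAP source:

  import java.util.Map;
  import co.cask.cdap.api.schedule.TriggeringScheduleInfo;
  // Package assumed for ProgramOptionConstants; adjust to the actual location in the codebase.
  import co.cask.cdap.internal.app.runtime.ProgramOptionConstants;
  import com.google.gson.Gson;

  public final class TriggeringScheduleInfoReader {

    private TriggeringScheduleInfoReader() { }

    // Returns the TriggeringScheduleInfo that ScheduleTaskRunner put into the system
    // arguments, or null if the run was not started by a schedule.
    public static TriggeringScheduleInfo fromSystemArgs(Gson gson, Map<String, String> systemArgs) {
      String json = systemArgs.get(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO);
      return json == null ? null : gson.fromJson(json, TriggeringScheduleInfo.class);
    }
  }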
Use of co.cask.cdap.api.schedule.TriggeringScheduleInfo in project cdap by caskdata.
In the class CoreSchedulerServiceTest, the method testRunScheduledJobs:
@Test
@Category(XSlowTests.class)
public void testRunScheduledJobs() throws Exception {
  CConfiguration cConf = getInjector().getInstance(CConfiguration.class);
  dataEventTopic = NamespaceId.SYSTEM.topic(cConf.get(Constants.Dataset.DATA_EVENT_TOPIC));

  // Deploy the app with version
  Id.Artifact appArtifactId = Id.Artifact.from(Id.Namespace.DEFAULT, "appwithschedules", VERSION1);
  addAppArtifact(appArtifactId, AppWithFrequentScheduledWorkflows.class);
  AppRequest<? extends Config> appRequest =
    new AppRequest<>(new ArtifactSummary(appArtifactId.getName(), appArtifactId.getVersion().getVersion()));
  deploy(APP_ID, appRequest);

  // Resume the schedules because schedules are initialized as paused
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_2);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  for (int i = 0; i < 5; i++) {
    testNewPartition(i + 1);
  }

  // Enable COMPOSITE_SCHEDULE before publishing events to DATASET_NAME2
  enableSchedule(AppWithFrequentScheduledWorkflows.COMPOSITE_SCHEDULE);

  // Disable the two partition schedules and send them notifications; they should not trigger
  int runs1 = getRuns(WORKFLOW_1, ProgramRunStatus.ALL);
  int runs2 = getRuns(WORKFLOW_2, ProgramRunStatus.ALL);
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME1);
  long minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // Make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);

  // Both workflows must run at least once.
  // If the testNewPartition() loop took longer than expected, it may be more (quartz fired multiple times)
  Tasks.waitFor(true, new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      return getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.COMPLETED) > 0
        && getRuns(SCHEDULED_WORKFLOW_2, ProgramRunStatus.COMPLETED) > 0;
    }
  }, 10, TimeUnit.SECONDS);

  // There shouldn't be any partition trigger in the job queue
  Assert.assertFalse(Iterables.any(getAllJobs(), new Predicate<Job>() {
    @Override
    public boolean apply(Job job) {
      return job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger;
    }
  }));

  ProgramId compositeWorkflow = APP_ID.workflow(AppWithFrequentScheduledWorkflows.COMPOSITE_WORKFLOW);
  // The workflow scheduled with the composite trigger has never been started
  Assert.assertEquals(0, getRuns(compositeWorkflow, ProgramRunStatus.ALL));

  // Publish two more new partition notifications to satisfy the partition trigger in the composite trigger,
  // and thus the whole composite trigger will be satisfied
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // Make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // Wait for 1 run to complete for compositeWorkflow
  waitForCompleteRuns(1, compositeWorkflow);

  for (RunRecordMeta runRecordMeta : store.getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.ALL,
                                                   0, Long.MAX_VALUE, Integer.MAX_VALUE).values()) {
    Map<String, String> sysArgs = runRecordMeta.getSystemArgs();
    Assert.assertNotNull(sysArgs);
    TriggeringScheduleInfo scheduleInfo =
      GSON.fromJson(sysArgs.get(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO), TriggeringScheduleInfo.class);
    Assert.assertEquals(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1, scheduleInfo.getName());
    List<TriggerInfo> triggerInfos = scheduleInfo.getTriggerInfos();
    // Only one notification is enough to satisfy the time trigger
    Assert.assertEquals(1, triggerInfos.size());
    Assert.assertEquals(TriggerInfo.Type.TIME, triggerInfos.get(0).getType());
  }

  // Also verify that the two partition schedules did not trigger
  Assert.assertEquals(runs1, getRuns(WORKFLOW_1, ProgramRunStatus.ALL));
  Assert.assertEquals(runs2, getRuns(WORKFLOW_2, ProgramRunStatus.ALL));

  // Re-enable partition schedule 2 and exercise schedule updates
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  testScheduleUpdate("disable");
  testScheduleUpdate("update");
  testScheduleUpdate("delete");
}
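The test leans on one pattern throughout: publish a notification, then poll until a condition holds or a timeout expires (Tasks.waitFor, waitUntilProcessed, waitForCompleteRuns). A generic sketch of that polling helper follows; it is independent of CDAP classes and meant only to illustrate the pattern, not to reproduce Tasks.waitFor.

  import java.util.concurrent.Callable;
  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.TimeoutException;

  public final class PollUntil {

    private PollUntil() { }

    // Polls the condition roughly every 100 ms until it returns true or the timeout elapses.
    public static void trueOrTimeout(Callable<Boolean> condition, long timeout, TimeUnit unit)
      throws Exception {
      long deadline = System.currentTimeMillis() + unit.toMillis(timeout);
      while (System.currentTimeMillis() < deadline) {
        if (Boolean.TRUE.equals(condition.call())) {
          return;
        }
        TimeUnit.MILLISECONDS.sleep(100);
      }
      throw new TimeoutException("Condition not met within " + timeout + " " + unit);
    }
  }

For instance, the wait for the two scheduled workflows above could be expressed by passing a Callable that checks both run counts and a 10-second timeout, which is essentially what the Tasks.waitFor call in the test does.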
Use of co.cask.cdap.api.schedule.TriggeringScheduleInfo in project cdap by caskdata.
In the class SmartWorkflow, the method initialize:
@Override
public void initialize(WorkflowContext context) throws Exception {
  super.initialize(context);
  TriggeringScheduleInfo scheduleInfo = context.getTriggeringScheduleInfo();
  if (scheduleInfo != null) {
    String propertiesMappingString = scheduleInfo.getProperties().get(TRIGGERING_PROPERTIES_MAPPING);
    if (propertiesMappingString != null) {
      TriggeringPropertyMapping propertiesMapping =
        GSON.fromJson(propertiesMappingString, TriggeringPropertyMapping.class);
      updateTokenWithTriggeringProperties(scheduleInfo, propertiesMapping, context.getToken());
    }
  }
  PipelineRuntime pipelineRuntime = new PipelineRuntime(context, workflowMetrics);
  WRAPPERLOGGER.info("Pipeline '{}' is started by user '{}' with arguments {}",
                     context.getApplicationSpecification().getName(),
                     UserGroupInformation.getCurrentUser().getShortUserName(),
                     pipelineRuntime.getArguments().asMap());
  alertPublishers = new HashMap<>();
  postActions = new LinkedHashMap<>();
  spec = GSON.fromJson(context.getWorkflowSpecification().getProperty(Constants.PIPELINE_SPEC_KEY),
                       BatchPipelineSpec.class);
  stageSpecs = new HashMap<>();
  MacroEvaluator macroEvaluator =
    new DefaultMacroEvaluator(pipelineRuntime.getArguments(), context.getLogicalStartTime(),
                              context, context.getNamespace());
  PluginContext pluginContext =
    new PipelinePluginContext(context, workflowMetrics, spec.isStageLoggingEnabled(), spec.isProcessTimingEnabled());
  for (ActionSpec actionSpec : spec.getEndingActions()) {
    String stageName = actionSpec.getName();
    postActions.put(stageName, (PostAction) pluginContext.newPluginInstance(stageName, macroEvaluator));
    stageSpecs.put(stageName, StageSpec.builder(stageName, actionSpec.getPluginSpec())
                     .setStageLoggingEnabled(spec.isStageLoggingEnabled())
                     .setProcessTimingEnabled(spec.isProcessTimingEnabled())
                     .build());
  }
  for (StageSpec stageSpec : spec.getStages()) {
    String stageName = stageSpec.getName();
    stageSpecs.put(stageName, stageSpec);
    if (AlertPublisher.PLUGIN_TYPE.equals(stageSpec.getPluginType())) {
      AlertPublisher alertPublisher = context.newPluginInstance(stageName, macroEvaluator);
      alertPublishers.put(stageName, alertPublisher);
    }
  }
  WRAPPERLOGGER.info("Pipeline '{}' running", context.getApplicationSpecification().getName());
}
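The TriggeringPropertyMapping above ties properties of the triggering run to the pipeline's own arguments, but its exact shape is not shown in this snippet. Purely as an illustration of the kind of work updateTokenWithTriggeringProperties performs, the following sketch copies selected schedule properties into the workflow token; the flat property-to-token-key mapping and the helper class are hypothetical, not the SmartWorkflow implementation.

  import java.util.Map;
  import co.cask.cdap.api.schedule.TriggeringScheduleInfo;
  import co.cask.cdap.api.workflow.WorkflowToken;

  public final class TriggeringPropertiesToToken {

    private TriggeringPropertiesToToken() { }

    // Hypothetical mapping: for each entry, copy the named schedule property into the
    // workflow token under the given token key, if the property is present.
    public static void copy(TriggeringScheduleInfo scheduleInfo,
                            Map<String, String> propertyToTokenKey,
                            WorkflowToken token) {
      Map<String, String> scheduleProperties = scheduleInfo.getProperties();
      for (Map.Entry<String, String> entry : propertyToTokenKey.entrySet()) {
        String value = scheduleProperties.get(entry.getKey());
        if (value != null) {
          token.put(entry.getValue(), value);
        }
      }
    }
  }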