
Example 6 with TriggerInfo

Use of co.cask.cdap.api.schedule.TriggerInfo in project cdap by caskdata.

From the class CoreSchedulerServiceTest, the method testRunScheduledJobs. The test deploys an app with time and partition schedules, publishes partition notifications, and then verifies via TriggeringScheduleInfo which TriggerInfo satisfied each triggered run.

@Test
@Category(XSlowTests.class)
public void testRunScheduledJobs() throws Exception {
    CConfiguration cConf = getInjector().getInstance(CConfiguration.class);
    dataEventTopic = NamespaceId.SYSTEM.topic(cConf.get(Constants.Dataset.DATA_EVENT_TOPIC));
    // Deploy the app with an artifact version
    Id.Artifact appArtifactId = Id.Artifact.from(Id.Namespace.DEFAULT, "appwithschedules", VERSION1);
    addAppArtifact(appArtifactId, AppWithFrequentScheduledWorkflows.class);
    AppRequest<? extends Config> appRequest = new AppRequest<>(new ArtifactSummary(appArtifactId.getName(), appArtifactId.getVersion().getVersion()));
    deploy(APP_ID, appRequest);
    // Resume the schedules, because schedules are initialized as paused
    enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1);
    enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_2);
    enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
    enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
    for (int i = 0; i < 5; i++) {
        testNewPartition(i + 1);
    }
    // Enable COMPOSITE_SCHEDULE before publishing events to DATASET_NAME2
    enableSchedule(AppWithFrequentScheduledWorkflows.COMPOSITE_SCHEDULE);
    // Disable the two partition schedules, then send them notifications (they should not trigger)
    int runs1 = getRuns(WORKFLOW_1, ProgramRunStatus.ALL);
    int runs2 = getRuns(WORKFLOW_2, ProgramRunStatus.ALL);
    disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
    disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
    publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME1);
    long minPublishTime = System.currentTimeMillis();
    publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
    // Make sure the subscriber has processed the data event
    waitUntilProcessed(dataEventTopic, minPublishTime);
    // Both workflows must run at least once. If the testNewPartition() loop took longer than expected,
    // there may be more runs (Quartz fired multiple times)
    Tasks.waitFor(true, new Callable<Boolean>() {

        @Override
        public Boolean call() throws Exception {
            return getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.COMPLETED) > 0 && getRuns(SCHEDULED_WORKFLOW_2, ProgramRunStatus.COMPLETED) > 0;
        }
    }, 10, TimeUnit.SECONDS);
    // There shouldn't be any partition trigger in the job queue
    Assert.assertFalse(Iterables.any(getAllJobs(), new Predicate<Job>() {

        @Override
        public boolean apply(Job job) {
            return job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger;
        }
    }));
    ProgramId compositeWorkflow = APP_ID.workflow(AppWithFrequentScheduledWorkflows.COMPOSITE_WORKFLOW);
    // The workflow scheduled with the composite trigger has not been started yet
    Assert.assertEquals(0, getRuns(compositeWorkflow, ProgramRunStatus.ALL));
    // Publish two more partition notifications to satisfy the partition trigger within the composite trigger,
    // so that the whole composite trigger is satisfied
    publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
    minPublishTime = System.currentTimeMillis();
    publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
    // Make sure the subscriber has processed the data event
    waitUntilProcessed(dataEventTopic, minPublishTime);
    // Wait for 1 run to complete for compositeWorkflow
    waitForCompleteRuns(1, compositeWorkflow);
    for (RunRecordMeta runRecordMeta : store.getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.ALL, 0, Long.MAX_VALUE, Integer.MAX_VALUE).values()) {
        Map<String, String> sysArgs = runRecordMeta.getSystemArgs();
        Assert.assertNotNull(sysArgs);
        TriggeringScheduleInfo scheduleInfo = GSON.fromJson(sysArgs.get(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO), TriggeringScheduleInfo.class);
        Assert.assertEquals(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1, scheduleInfo.getName());
        List<TriggerInfo> triggerInfos = scheduleInfo.getTriggerInfos();
        // A time trigger is satisfied on its own, so there is exactly one trigger info
        Assert.assertEquals(1, triggerInfos.size());
        Assert.assertEquals(TriggerInfo.Type.TIME, triggerInfos.get(0).getType());
    }
    // Also verify that the two partition schedules did not trigger
    Assert.assertEquals(runs1, getRuns(WORKFLOW_1, ProgramRunStatus.ALL));
    Assert.assertEquals(runs2, getRuns(WORKFLOW_2, ProgramRunStatus.ALL));
    // Re-enable partition schedule 2
    enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
    testScheduleUpdate("disable");
    testScheduleUpdate("update");
    testScheduleUpdate("delete");
}
Also used: RunRecordMeta (co.cask.cdap.internal.app.store.RunRecordMeta), TriggerInfo (co.cask.cdap.api.schedule.TriggerInfo), TriggeringScheduleInfo (co.cask.cdap.api.schedule.TriggeringScheduleInfo), ProgramId (co.cask.cdap.proto.id.ProgramId), CConfiguration (co.cask.cdap.common.conf.CConfiguration), Constraint (co.cask.cdap.internal.schedule.constraint.Constraint), ConflictException (co.cask.cdap.common.ConflictException), AlreadyExistsException (co.cask.cdap.common.AlreadyExistsException), NotFoundException (co.cask.cdap.common.NotFoundException), AppRequest (co.cask.cdap.proto.artifact.AppRequest), Predicate (com.google.common.base.Predicate), ArtifactSummary (co.cask.cdap.api.artifact.ArtifactSummary), WorkflowId (co.cask.cdap.proto.id.WorkflowId), TopicId (co.cask.cdap.proto.id.TopicId), Id (co.cask.cdap.common.id.Id), NamespaceId (co.cask.cdap.proto.id.NamespaceId), MessageId (co.cask.cdap.messaging.data.MessageId), ScheduleId (co.cask.cdap.proto.id.ScheduleId), DatasetId (co.cask.cdap.proto.id.DatasetId), ApplicationId (co.cask.cdap.proto.id.ApplicationId), ProgramRunId (co.cask.cdap.proto.id.ProgramRunId), Job (co.cask.cdap.internal.app.runtime.schedule.queue.Job), PartitionTrigger (co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger), Category (org.junit.experimental.categories.Category), Test (org.junit.Test)
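
The interesting part of this test is the verification step: it deserializes the TriggeringScheduleInfo that the scheduler recorded in each run's system arguments and checks which kind of TriggerInfo satisfied the schedule. Below is a minimal sketch of that inspection pattern on its own, using only calls that appear in the examples on this page; the helper name inspectTriggeringScheduleInfo and the plain Gson instance are assumptions for illustration, not part of CoreSchedulerServiceTest.

// Sketch: decode the triggering schedule info from a run's system arguments
// and report which trigger(s) satisfied the schedule. The helper name and
// the Gson instance passed in are hypothetical.
private void inspectTriggeringScheduleInfo(Map<String, String> sysArgs, Gson gson) {
    String json = sysArgs.get(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO);
    if (json == null) {
        // The run was not started by a schedule
        return;
    }
    TriggeringScheduleInfo scheduleInfo = gson.fromJson(json, TriggeringScheduleInfo.class);
    System.out.println("Schedule: " + scheduleInfo.getName());
    for (TriggerInfo info : scheduleInfo.getTriggerInfos()) {
        if (info.getType() == TriggerInfo.Type.TIME) {
            System.out.println("Satisfied by a time trigger");
        } else if (info instanceof ProgramStatusTriggerInfo) {
            ProgramStatusTriggerInfo statusInfo = (ProgramStatusTriggerInfo) info;
            System.out.println("Satisfied by run " + statusInfo.getRunId()
                + " of '" + statusInfo.getApplicationSpecification().getName()
                + "' in namespace '" + statusInfo.getNamespace() + "'");
        } else {
            System.out.println("Satisfied by a trigger of type " + info.getType());
        }
    }
}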

Example 7 with TriggerInfo

Use of co.cask.cdap.api.schedule.TriggerInfo in project cdap by caskdata.

From the class SmartWorkflow, the method updateTokenWithTriggeringProperties. It copies runtime arguments and resolved plugin properties from the triggering pipeline's run into the triggered pipeline's WorkflowToken, following the configured argument and plugin property mappings.

private void updateTokenWithTriggeringProperties(TriggeringScheduleInfo scheduleInfo, TriggeringPropertyMapping propertiesMapping, WorkflowToken token) {
    List<ProgramStatusTriggerInfo> programStatusTriggerInfos = new ArrayList<>();
    for (TriggerInfo info : scheduleInfo.getTriggerInfos()) {
        if (info instanceof ProgramStatusTriggerInfo) {
            programStatusTriggerInfos.add((ProgramStatusTriggerInfo) info);
        }
    }
    // If there is no ProgramStatusTriggerInfo, there is no need to override the existing runtime args
    if (programStatusTriggerInfos.isEmpty()) {
        return;
    }
    // Currently only expecting one trigger in a schedule
    ProgramStatusTriggerInfo triggerInfo = programStatusTriggerInfos.get(0);
    BasicArguments triggeringArguments = new BasicArguments(triggerInfo.getWorkflowToken(), triggerInfo.getRuntimeArguments());
    // Get the value of every triggering pipeline argument specified in the propertiesMapping and write it into the token
    List<ArgumentMapping> argumentMappings = propertiesMapping.getArguments();
    for (ArgumentMapping mapping : argumentMappings) {
        String sourceKey = mapping.getSource();
        if (sourceKey == null) {
            LOG.warn("The name of argument from the triggering pipeline cannot be null, " + "skip this argument mapping: '{}'.", mapping);
            continue;
        }
        String value = triggeringArguments.get(sourceKey);
        if (value == null) {
            LOG.warn("Runtime argument '{}' is not found in run '{}' of the triggering pipeline '{}' " + "in namespace '{}' ", sourceKey, triggerInfo.getRunId(), triggerInfo.getApplicationSpecification().getName(), triggerInfo.getNamespace());
            continue;
        }
        // Use the argument name in the triggering pipeline if target is not specified
        String targetKey = mapping.getTarget() == null ? sourceKey : mapping.getTarget();
        token.put(targetKey, value);
    }
    // Get the resolved plugin properties map from triggering pipeline's workflow token in triggeringArguments
    Map<String, Map<String, String>> resolvedProperties = GSON.fromJson(triggeringArguments.get(RESOLVED_PLUGIN_PROPERTIES_MAP), STAGE_PROPERTIES_MAP);
    for (PluginPropertyMapping mapping : propertiesMapping.getPluginProperties()) {
        String stageName = mapping.getStageName();
        if (stageName == null) {
            LOG.warn("The name of the stage cannot be null in plugin property mapping, skip this mapping: '{}'.", mapping);
            continue;
        }
        Map<String, String> pluginProperties = resolvedProperties.get(stageName);
        if (pluginProperties == null) {
            LOG.warn("No plugin properties can be found with stage name '{}' in triggering pipeline '{}' " + "in namespace '{}' ", mapping.getStageName(), triggerInfo.getApplicationSpecification().getName(), triggerInfo.getNamespace());
            continue;
        }
        String sourceKey = mapping.getSource();
        if (sourceKey == null) {
            LOG.warn("The name of argument from the triggering pipeline cannot be null, " + "skip this argument mapping: '{}'.", mapping);
            continue;
        }
        String value = pluginProperties.get(sourceKey);
        if (value == null) {
            LOG.warn("No property with name '{}' can be found in plugin '{}' of the triggering pipeline '{}' " + "in namespace '{}' ", sourceKey, stageName, triggerInfo.getApplicationSpecification().getName(), triggerInfo.getNamespace());
            continue;
        }
        // Use the argument name in the triggering pipeline if target is not specified
        String targetKey = mapping.getTarget() == null ? sourceKey : mapping.getTarget();
        token.put(targetKey, value);
    }
}
Also used: ArgumentMapping (co.cask.cdap.etl.proto.v2.ArgumentMapping), ProgramStatusTriggerInfo (co.cask.cdap.api.schedule.ProgramStatusTriggerInfo), ArrayList (java.util.ArrayList), TriggerInfo (co.cask.cdap.api.schedule.TriggerInfo), PluginPropertyMapping (co.cask.cdap.etl.proto.v2.PluginPropertyMapping), BasicArguments (co.cask.cdap.etl.common.BasicArguments), Map (java.util.Map), HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap)
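
Both loops in updateTokenWithTriggeringProperties repeat the same null-guarded pattern: resolve the source key in a map of values from the triggering run, fall back to the source name when no target is given, and write the result into the WorkflowToken. A condensed sketch of that shared step is shown below; the helper name copyMappedValue is hypothetical and not part of SmartWorkflow.

// Hypothetical helper capturing the mapping pattern used twice above:
// skip the mapping when the source key or its value is missing, and
// default the target key to the source key.
private void copyMappedValue(String sourceKey, String targetKey,
                             Map<String, String> triggeringValues, WorkflowToken token) {
    if (sourceKey == null) {
        LOG.warn("Source key is null; skipping this mapping.");
        return;
    }
    String value = triggeringValues.get(sourceKey);
    if (value == null) {
        LOG.warn("No value found for source key '{}'; skipping this mapping.", sourceKey);
        return;
    }
    // Use the source key as the target key when no target is specified
    token.put(targetKey == null ? sourceKey : targetKey, value);
}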

Aggregations

TriggerInfo (co.cask.cdap.api.schedule.TriggerInfo): 7
ProgramStatusTriggerInfo (co.cask.cdap.api.schedule.ProgramStatusTriggerInfo): 2
TriggeringScheduleInfo (co.cask.cdap.api.schedule.TriggeringScheduleInfo): 2
ArrayList (java.util.ArrayList): 2
HashMap (java.util.HashMap): 2
Map (java.util.Map): 2
Test (org.junit.Test): 2
WebCrawlApp (co.cask.cdap.WebCrawlApp): 1
ArtifactSummary (co.cask.cdap.api.artifact.ArtifactSummary): 1
AlreadyExistsException (co.cask.cdap.common.AlreadyExistsException): 1
ConflictException (co.cask.cdap.common.ConflictException): 1
NotFoundException (co.cask.cdap.common.NotFoundException): 1
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 1
Id (co.cask.cdap.common.id.Id): 1
BasicArguments (co.cask.cdap.etl.common.BasicArguments): 1
ArgumentMapping (co.cask.cdap.etl.proto.v2.ArgumentMapping): 1
PluginPropertyMapping (co.cask.cdap.etl.proto.v2.PluginPropertyMapping): 1
Job (co.cask.cdap.internal.app.runtime.schedule.queue.Job): 1
DefaultPartitionTriggerInfo (co.cask.cdap.internal.app.runtime.schedule.trigger.DefaultPartitionTriggerInfo): 1
DefaultProgramStatusTriggerInfo (co.cask.cdap.internal.app.runtime.schedule.trigger.DefaultProgramStatusTriggerInfo): 1