Use of io.cdap.cdap.api.Config in project cdap by caskdata.
The class ProgramLifecycleHttpHandlerTest, method testHistory.
private void testHistory(Class<?> app, Id.Program program) throws Exception {
  String namespace = program.getNamespaceId();

  // Deploy the app, verify the run history of the non-versioned program, then delete the app
  deploy(app, 200, Constants.Gateway.API_VERSION_3_TOKEN, namespace);
  verifyProgramHistory(program.toEntityId());
  deleteApp(program.getApplication(), 200);

  // Redeploy the same app as a versioned application and verify the history of the versioned program
  ApplicationId appId = new ApplicationId(namespace, program.getApplicationId(), VERSION1);
  ProgramId programId = appId.program(program.getType(), program.getId());
  Id.Artifact artifactId = Id.Artifact.from(program.getNamespace(), app.getSimpleName(), "1.0.0");
  addAppArtifact(artifactId, app);
  AppRequest<Config> request = new AppRequest<>(
      new ArtifactSummary(artifactId.getName(), artifactId.getVersion().getVersion()), null);
  Assert.assertEquals(200, deploy(appId, request).getResponseCode());
  verifyProgramHistory(programId);
  deleteApp(appId, 200);
}
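All of the examples on this page pass io.cdap.cdap.api.Config (or a subclass) as the type parameter of AppRequest; in testHistory above the config body is simply null. Below is a minimal sketch of how a typed configuration could be supplied instead, assuming only the Config and AppRequest API already shown above; the MyAppConfig class and its inputDataset field are hypothetical, not part of the test.

// Hypothetical configuration class: it only needs to extend io.cdap.cdap.api.Config
// and declare plain fields that are deserialized from the JSON body of the deploy request.
public class MyAppConfig extends Config {
  private final String inputDataset;  // hypothetical field

  public MyAppConfig(String inputDataset) {
    this.inputDataset = inputDataset;
  }

  public String getInputDataset() {
    return inputDataset;
  }
}

// Passing the config instead of null when building the AppRequest:
AppRequest<MyAppConfig> request = new AppRequest<>(
    new ArtifactSummary(artifactId.getName(), artifactId.getVersion().getVersion()),
    new MyAppConfig("purchases"));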
Use of io.cdap.cdap.api.Config in project cdap by caskdata.
The class CoreSchedulerServiceTest, method testRunScheduledJobs.
@Test
@Category(XSlowTests.class)
public void testRunScheduledJobs() throws Exception {
  CConfiguration cConf = getInjector().getInstance(CConfiguration.class);
  dataEventTopic = NamespaceId.SYSTEM.topic(cConf.get(Constants.Dataset.DATA_EVENT_TOPIC));

  // Deploy the app with a version
  Id.Artifact appArtifactId = Id.Artifact.from(Id.Namespace.DEFAULT, "appwithschedules", VERSION1);
  addAppArtifact(appArtifactId, AppWithFrequentScheduledWorkflows.class);
  AppRequest<? extends Config> appRequest =
      new AppRequest<>(new ArtifactSummary(appArtifactId.getName(), appArtifactId.getVersion().getVersion()));
  deploy(APP_ID, appRequest);

  // Resume the schedules because schedules are initialized as paused
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_2);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  for (int i = 0; i < 5; i++) {
    testNewPartition(i + 1);
  }

  // Enable COMPOSITE_SCHEDULE before publishing events to DATASET_NAME2
  enableSchedule(AppWithFrequentScheduledWorkflows.COMPOSITE_SCHEDULE);

  // Disable the two partition schedules, then send them notifications (they should not trigger)
  int runs1 = getRuns(WORKFLOW_1, ProgramRunStatus.ALL);
  int runs2 = getRuns(WORKFLOW_2, ProgramRunStatus.ALL);
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  // Ensure schedule 2 is disabled after schedule 1
  Thread.sleep(BUFFER);
  long disableBeforeTime = System.currentTimeMillis();
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  long disableAfterTime = System.currentTimeMillis() + 1;

  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME1);
  long minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // Make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);

  // Both workflows must run at least once.
  // If the testNewPartition() loop took longer than expected, there may be more runs (Quartz fired multiple times).
  Tasks.waitFor(true,
      () -> getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.COMPLETED) > 0
          && getRuns(SCHEDULED_WORKFLOW_2, ProgramRunStatus.COMPLETED) > 0,
      10, TimeUnit.SECONDS);

  // There shouldn't be any partition trigger left in the job queue
  Assert.assertFalse(Iterables.any(getAllJobs(),
      job -> job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger));

  ProgramId compositeWorkflow = APP_ID.workflow(AppWithFrequentScheduledWorkflows.COMPOSITE_WORKFLOW);
  // The workflow scheduled with the composite trigger has never been started
  Assert.assertEquals(0, getRuns(compositeWorkflow, ProgramRunStatus.ALL));

  // Publish two more new-partition notifications to satisfy the partition trigger in the composite trigger,
  // so that the whole composite trigger is satisfied
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // Make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // Wait for one run of compositeWorkflow to complete
  waitForCompleteRuns(1, compositeWorkflow);

  for (RunRecordDetail runRecordMeta :
      store.getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.ALL, 0, Long.MAX_VALUE, Integer.MAX_VALUE).values()) {
    Map<String, String> sysArgs = runRecordMeta.getSystemArgs();
    Assert.assertNotNull(sysArgs);
    TriggeringScheduleInfo scheduleInfo =
        GSON.fromJson(sysArgs.get(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO), TriggeringScheduleInfo.class);
    Assert.assertEquals(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1, scheduleInfo.getName());
    List<TriggerInfo> triggerInfos = scheduleInfo.getTriggerInfos();
    // A single notification is enough to satisfy the time trigger
    Assert.assertEquals(1, triggerInfos.size());
    Assert.assertEquals(TriggerInfo.Type.TIME, triggerInfos.get(0).getType());
  }

  // Also verify that the two partition schedules did not trigger
  Assert.assertEquals(runs1, getRuns(WORKFLOW_1, ProgramRunStatus.ALL));
  Assert.assertEquals(runs2, getRuns(WORKFLOW_2, ProgramRunStatus.ALL));

  // Re-enable partition schedule 2 and test reEnableSchedules
  scheduler.reEnableSchedules(NamespaceId.DEFAULT, disableBeforeTime, disableAfterTime);
  Assert.assertEquals(ProgramScheduleStatus.SCHEDULED,
      scheduler.getScheduleStatus(APP_ID.schedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2)));
  Assert.assertEquals(ProgramScheduleStatus.SUSPENDED,
      scheduler.getScheduleStatus(APP_ID.schedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1)));

  testScheduleUpdate("disable");
  testScheduleUpdate("update");
  testScheduleUpdate("delete");
}
Use of io.cdap.cdap.api.Config in project cdap by caskdata.
The class ScheduleFetcherTest, method testListSchedules.
@Test
public void testListSchedules() throws Exception {
  ScheduleFetcher fetcher = getScheduleFetcher(fetcherType);
  String namespace = TEST_NAMESPACE1;
  String appName = AppWithSchedule.NAME;

  // Deploy the application with 2 schedules on the workflow
  Config appConfig = new AppWithSchedule.AppConfig(true, true, true);
  deploy(AppWithSchedule.class, 200, Constants.Gateway.API_VERSION_3_TOKEN, namespace, appConfig);

  // List and validate the schedules
  ProgramId programId = new ProgramId(namespace, appName, ProgramType.WORKFLOW, AppWithSchedule.WORKFLOW_NAME);
  List<ScheduleDetail> scheduleList = fetcher.list(programId);
  Assert.assertEquals(2, scheduleList.size());
  Assert.assertEquals(AppWithSchedule.SCHEDULE, scheduleList.get(0).getName());
  Assert.assertEquals(AppWithSchedule.SCHEDULE_2, scheduleList.get(1).getName());

  // Delete the application
  Assert.assertEquals(200,
      doDelete(getVersionedAPIPath("apps/", Constants.Gateway.API_VERSION_3_TOKEN, namespace)).getResponseCode());
}
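The AppConfig passed to deploy above is itself a Config subclass whose boolean flags control what AppWithSchedule adds when it is configured. A rough sketch of that shape follows, assuming only the standard Config API; the field names and their meaning are assumptions, not the actual AppWithSchedule source.

// Sketch only; the real AppWithSchedule.AppConfig may name and use its flags differently.
public static class AppConfig extends Config {
  private final boolean addWorkflow;   // assumed: whether to add the workflow at all
  private final boolean addSchedule1;  // assumed: whether to attach the first schedule
  private final boolean addSchedule2;  // assumed: whether to attach the second schedule

  public AppConfig(boolean addWorkflow, boolean addSchedule1, boolean addSchedule2) {
    this.addWorkflow = addWorkflow;
    this.addSchedule1 = addSchedule1;
    this.addSchedule2 = addSchedule2;
  }
}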
Use of io.cdap.cdap.api.Config in project cdap by caskdata.
The class DefaultArtifactInspector, method inspectApplications.
private ArtifactClasses.Builder inspectApplications(Id.Artifact artifactId, ArtifactClasses.Builder builder,
                                                    Location artifactLocation, ClassLoader artifactClassLoader)
    throws IOException, InvalidArtifactException {
  // Right now we force users to include the application main class as an attribute in their manifest,
  // which forces them to have a single application class.
  // In the future, we may want to let users do this or maybe specify a list of classes or
  // a package that will be searched for applications, to allow multiple applications in a single artifact.
  String mainClassName;
  try {
    Manifest manifest = BundleJarUtil.getManifest(artifactLocation);
    if (manifest == null) {
      return builder;
    }
    Attributes manifestAttributes = manifest.getMainAttributes();
    if (manifestAttributes == null) {
      return builder;
    }
    mainClassName = manifestAttributes.getValue(ManifestFields.MAIN_CLASS);
  } catch (ZipException e) {
    throw new InvalidArtifactException(String.format(
        "Couldn't unzip artifact %s, please check it is a valid jar file.", artifactId), e);
  }
  if (mainClassName == null) {
    return builder;
  }
  try {
    Class<?> mainClass = artifactClassLoader.loadClass(mainClassName);
    if (!(Application.class.isAssignableFrom(mainClass))) {
      // Possible for 3rd party plugin artifacts to have the main class set
      return builder;
    }
    Application app = (Application) mainClass.newInstance();
    // If the Application class is parameterized with a specific Config subclass,
    // we can deserialize the config into that type. Otherwise it'll just be a Config.
    java.lang.reflect.Type configType;
    try {
      configType = Artifacts.getConfigType(app.getClass());
    } catch (Exception e) {
      throw new InvalidArtifactException(String.format(
          "Could not resolve config type for Application class %s in artifact %s. "
              + "The type must extend Config and cannot be parameterized.", mainClassName, artifactId));
    }
    Schema configSchema = configType == Config.class ? null : schemaGenerator.generate(configType);
    builder.addApp(new ApplicationClass(mainClassName, "", configSchema, getArtifactRequirements(app.getClass())));
  } catch (ClassNotFoundException e) {
    throw new InvalidArtifactException(String.format(
        "Could not find Application main class %s in artifact %s.", mainClassName, artifactId));
  } catch (UnsupportedTypeException e) {
    throw new InvalidArtifactException(String.format(
        "Config for Application %s in artifact %s has an unsupported schema. "
            + "The type must extend Config and cannot be parameterized.", mainClassName, artifactId));
  } catch (InstantiationException | IllegalAccessException e) {
    throw new InvalidArtifactException(String.format(
        "Could not instantiate Application class %s in artifact %s.", mainClassName, artifactId), e);
  }
  return builder;
}
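The schema generation above depends on Artifacts.getConfigType resolving the Application's Config type parameter to a concrete subclass. Below is a hedged sketch of the application shape this inspection expects, assuming the standard AbstractApplication and Config API; the MyApp and MyAppConfig names are hypothetical.

// Accepted: the type parameter is a concrete Config subclass, so the inspector
// can generate a configSchema describing its fields.
public class MyApp extends AbstractApplication<MyAppConfig> {
  @Override
  public void configure() {
    MyAppConfig config = getConfig();  // deserialized from the deploy request body
    setName("MyApp");
    // add datasets and programs here based on the config values
  }
}

// Not accepted by the check above: if the config type cannot be resolved to a plain
// Config subclass (for example a parameterized type), getConfigType fails and the
// inspector throws InvalidArtifactException.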
Use of io.cdap.cdap.api.Config in project cdap by caskdata.
The class AppLifecycleHttpHandlerTest, method testDeployUsingNonexistantArtifact404.
@Test
public void testDeployUsingNonexistantArtifact404() throws Exception {
  Id.Application appId = Id.Application.from(Id.Namespace.DEFAULT, "badapp");
  AppRequest<Config> appRequest = new AppRequest<>(new ArtifactSummary("something", "1.0.0"), null);
  HttpResponse response = deploy(appId, appRequest);
  Assert.assertEquals(404, response.getResponseCode());
}