Use of io.cdap.cdap.proto.id.ProgramId in project cdap by caskdata.
The class ServiceProgramRunner, method run.
@Override
public ProgramController run(Program program, ProgramOptions options) {
  int instanceId = Integer.parseInt(options.getArguments().getOption(ProgramOptionConstants.INSTANCE_ID, "-1"));
  Preconditions.checkArgument(instanceId >= 0, "Missing instance Id");
  int instanceCount = Integer.parseInt(options.getArguments().getOption(ProgramOptionConstants.INSTANCES, "0"));
  Preconditions.checkArgument(instanceCount > 0, "Invalid or missing instance count");
  RunId runId = ProgramRunners.getRunId(options);
  ApplicationSpecification appSpec = program.getApplicationSpecification();
  Preconditions.checkNotNull(appSpec, "Missing application specification.");
  ProgramType programType = program.getType();
  Preconditions.checkNotNull(programType, "Missing processor type.");
  Preconditions.checkArgument(programType == ProgramType.SERVICE, "Only Service process type is supported.");
  ServiceSpecification spec = appSpec.getServices().get(program.getName());
  String host = options.getArguments().getOption(ProgramOptionConstants.HOST);
  Preconditions.checkArgument(host != null, "No hostname is provided");
  // Setup dataset framework context, if required
  if (datasetFramework instanceof ProgramContextAware) {
    ProgramId programId = program.getId();
    ((ProgramContextAware) datasetFramework).setContext(new BasicProgramContext(programId.run(runId)));
  }
  final PluginInstantiator pluginInstantiator = createPluginInstantiator(options, program.getClassLoader());
  try {
    RetryStrategy retryStrategy = SystemArguments.getRetryStrategy(options.getUserArguments().asMap(),
                                                                   program.getType(), cConf);
    ArtifactManager artifactManager = artifactManagerFactory.create(program.getId().getNamespaceId(), retryStrategy);
    ServiceHttpServer component = new ServiceHttpServer(host, program, options, cConf, spec, instanceId, instanceCount,
                                                        serviceAnnouncer, metricsCollectionService, datasetFramework,
                                                        txClient, discoveryServiceClient, pluginInstantiator,
                                                        secureStore, secureStoreManager, messagingService,
                                                        artifactManager, metadataReader, metadataPublisher,
                                                        namespaceQueryAdmin, pluginFinder, fieldLineageWriter,
                                                        transactionRunner, preferencesFetcher, remoteClientFactory,
                                                        contextAccessEnforcer);
    // Add a service listener to make sure the plugin instantiator is closed when the http server is finished.
    component.addListener(createRuntimeServiceListener(Collections.singleton(pluginInstantiator)),
                          Threads.SAME_THREAD_EXECUTOR);
    ProgramController controller = new ServiceProgramControllerAdapter(component, program.getId().run(runId));
    component.start();
    return controller;
  } catch (Throwable t) {
    Closeables.closeQuietly(pluginInstantiator);
    throw t;
  }
}
Use of io.cdap.cdap.proto.id.ProgramId in project cdap by caskdata.
The class MetadataSubscriberServiceTest, method testProfileMetadata.
@Test
public void testProfileMetadata() throws Exception {
  Injector injector = getInjector();
  ApplicationSpecification appSpec = Specifications.from(new AppWithWorkflow());
  ApplicationId appId = NamespaceId.DEFAULT.app(appSpec.getName());
  ProgramId workflowId = appId.workflow("SampleWorkflow");
  ScheduleId scheduleId = appId.schedule("tsched1");
  // publish a creation event for a schedule that will never exist;
  // this tests that such a message is eventually discarded
  // (for this test we configure a fast retry strategy and a small number of retries,
  // so this costs only a few seconds of delay)
  publishBogusCreationEvent();
  // the mds should have no properties yet, since the MetadataSubscriberService hasn't started
  MetadataStorage mds = injector.getInstance(MetadataStorage.class);
  Assert.assertEquals(Collections.emptyMap(), mds.read(new Read(workflowId.toMetadataEntity())).getProperties());
  Assert.assertEquals(Collections.emptyMap(), mds.read(new Read(scheduleId.toMetadataEntity())).getProperties());
  // add an app with a workflow to the app meta store.
  // note: since we bypass app-fabric when adding this app, no ENTITY_CREATION message
  // will be published for the app (that happens in the app lifecycle service). Therefore this
  // app must exist before assigning the profile for the namespace, otherwise the app's
  // programs will not receive the profile metadata.
  Store store = injector.getInstance(DefaultStore.class);
  store.addApplication(appId, appSpec);
  // set the default namespace to use the profile; since the MetadataSubscriberService is not
  // started yet, this should not affect the mds
  PreferencesService preferencesService = injector.getInstance(PreferencesService.class);
  preferencesService.setProperties(NamespaceId.DEFAULT,
                                   Collections.singletonMap(SystemArguments.PROFILE_NAME,
                                                            ProfileId.NATIVE.getScopedName()));
  // add a schedule to the schedule store
  ProgramScheduleService scheduleService = injector.getInstance(ProgramScheduleService.class);
  scheduleService.add(new ProgramSchedule("tsched1", "one time schedule", workflowId, Collections.emptyMap(),
                                          new TimeTrigger("* * ? * 1"), ImmutableList.of()));
  // add a new profile in the default namespace
  ProfileService profileService = injector.getInstance(ProfileService.class);
  ProfileId myProfile = new ProfileId(NamespaceId.DEFAULT.getNamespace(), "MyProfile");
  Profile profile1 = new Profile("MyProfile", Profile.NATIVE.getLabel(), Profile.NATIVE.getDescription(),
                                 Profile.NATIVE.getScope(), Profile.NATIVE.getProvisioner());
  profileService.saveProfile(myProfile, profile1);
  // add a second profile in the default namespace
  ProfileId myProfile2 = new ProfileId(NamespaceId.DEFAULT.getNamespace(), "MyProfile2");
  Profile profile2 = new Profile("MyProfile2", Profile.NATIVE.getLabel(), Profile.NATIVE.getDescription(),
                                 Profile.NATIVE.getScope(), Profile.NATIVE.getProvisioner());
  profileService.saveProfile(myProfile2, profile2);
  try {
    // Verify the workflow profile metadata is updated to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, workflowId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, scheduleId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // set the default namespace to use MyProfile
    preferencesService.setProperties(NamespaceId.DEFAULT,
                                     Collections.singletonMap(SystemArguments.PROFILE_NAME, "USER:MyProfile"));
    // Verify the workflow profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, workflowId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, scheduleId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // set the app level to use MyProfile2
    preferencesService.setProperties(appId, Collections.singletonMap(SystemArguments.PROFILE_NAME, "USER:MyProfile2"));
    // set the instance level to the system profile
    preferencesService.setProperties(Collections.singletonMap(SystemArguments.PROFILE_NAME,
                                                              ProfileId.NATIVE.getScopedName()));
    // Verify the workflow profile metadata is updated to MyProfile2, which is set at the app level
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, workflowId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to MyProfile2, which is set at the app level
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, scheduleId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // removing the preferences at the instance level should not affect the metadata
    preferencesService.deleteProperties();
    // Verify the workflow profile metadata still resolves to MyProfile2
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, workflowId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata still resolves to MyProfile2
    Tasks.waitFor(myProfile2.getScopedName(), () -> getProfileProperty(mds, scheduleId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // removing the app-level preference lets the programs/schedules fall back to the namespace-level preference
    preferencesService.deleteProperties(appId);
    // Verify the workflow profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, workflowId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata is updated to MyProfile
    Tasks.waitFor(myProfile.getScopedName(), () -> getProfileProperty(mds, scheduleId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // remove the namespace-level preference so that no preference remains
    preferencesService.deleteProperties(NamespaceId.DEFAULT);
    // Verify the workflow profile metadata falls back to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, workflowId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Verify the schedule profile metadata falls back to the default profile
    Tasks.waitFor(ProfileId.NATIVE.getScopedName(), () -> getProfileProperty(mds, scheduleId),
                  10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  } finally {
    // stop and clean up the store
    preferencesService.deleteProperties(NamespaceId.DEFAULT);
    preferencesService.deleteProperties();
    preferencesService.deleteProperties(appId);
    store.removeAll(NamespaceId.DEFAULT);
    scheduleService.delete(scheduleId);
    profileService.disableProfile(myProfile);
    profileService.disableProfile(myProfile2);
    profileService.deleteAllProfiles(myProfile.getNamespaceId());
    mds.apply(new MetadataMutation.Drop(workflowId.toMetadataEntity()), MutationOptions.DEFAULT);
    mds.apply(new MetadataMutation.Drop(scheduleId.toMetadataEntity()), MutationOptions.DEFAULT);
  }
}
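The waitFor assertions above call a getProfileProperty helper that is not included in this excerpt. A plausible reconstruction, assuming it simply reads the SYSTEM-scope "profile" property from MetadataStorage (the same scope and key written in testAppDeletionMessage below); the actual helper in the test class may differ:

// Hypothetical reconstruction of the elided helper, for illustration only.
private String getProfileProperty(MetadataStorage mds, NamespacedEntityId entityId) throws IOException {
  return mds.read(new Read(entityId.toMetadataEntity()))
    .getProperties(MetadataScope.SYSTEM)
    .get("profile");
}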
Use of io.cdap.cdap.proto.id.ProgramId in project cdap by caskdata.
The class MetadataSubscriberServiceTest, method testAppDeletionMessage.
@Test
public void testAppDeletionMessage() throws Exception {
  Injector injector = getInjector();
  // get the alert publisher
  CConfiguration cConf = injector.getInstance(CConfiguration.class);
  MessagingService messagingService = injector.getInstance(MessagingService.class);
  MultiThreadMessagingContext messagingContext = new MultiThreadMessagingContext(messagingService);
  AdminEventPublisher publisher = new AdminEventPublisher(cConf, messagingContext);
  // get the mds and put some workflow metadata in it; publishing the app deletion message
  // should get that metadata deleted
  MetadataStorage mds = injector.getInstance(MetadataStorage.class);
  // use an app with all program types so that every specification type is exercised
  ApplicationId appId = NamespaceId.DEFAULT.app(AllProgramsApp.NAME);
  ProgramId workflowId = appId.workflow(AllProgramsApp.NoOpWorkflow.NAME);
  // generate an app spec from the application
  ApplicationSpecification appSpec = Specifications.from(new AllProgramsApp());
  // need to put metadata on the workflow since we currently only set or delete workflow metadata
  mds.apply(new MetadataMutation.Update(workflowId.toMetadataEntity(),
                                        new Metadata(MetadataScope.SYSTEM,
                                                     Collections.singletonMap("profile",
                                                                              ProfileId.NATIVE.getScopedName()))),
            MutationOptions.DEFAULT);
  Assert.assertEquals(ProfileId.NATIVE.getScopedName(), getProfileProperty(mds, workflowId));
  // publish the app deletion message
  publisher.publishAppDeletion(appId, appSpec);
  // Verify the workflow profile metadata is removed as a result of the app deletion message
  Tasks.waitFor(true, () -> mds.read(new Read(workflowId.toMetadataEntity())).isEmpty(),
                10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
}
Use of io.cdap.cdap.proto.id.ProgramId in project cdap by caskdata.
The class CoreSchedulerServiceTest, method testRunScheduledJobs.
@Test
@Category(XSlowTests.class)
public void testRunScheduledJobs() throws Exception {
  CConfiguration cConf = getInjector().getInstance(CConfiguration.class);
  dataEventTopic = NamespaceId.SYSTEM.topic(cConf.get(Constants.Dataset.DATA_EVENT_TOPIC));
  // Deploy the app with version
  Id.Artifact appArtifactId = Id.Artifact.from(Id.Namespace.DEFAULT, "appwithschedules", VERSION1);
  addAppArtifact(appArtifactId, AppWithFrequentScheduledWorkflows.class);
  AppRequest<? extends Config> appRequest =
    new AppRequest<>(new ArtifactSummary(appArtifactId.getName(), appArtifactId.getVersion().getVersion()));
  deploy(APP_ID, appRequest);
  // Resume the schedule because schedules are initialized as paused
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_2);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  enableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  for (int i = 0; i < 5; i++) {
    testNewPartition(i + 1);
  }
  // Enable COMPOSITE_SCHEDULE before publishing events to DATASET_NAME2
  enableSchedule(AppWithFrequentScheduledWorkflows.COMPOSITE_SCHEDULE);
  // disable the two partition schedules, send them notifications (but they should not trigger)
  int runs1 = getRuns(WORKFLOW_1, ProgramRunStatus.ALL);
  int runs2 = getRuns(WORKFLOW_2, ProgramRunStatus.ALL);
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1);
  // ensure schedule 2 is disabled after schedule 1
  Thread.sleep(BUFFER);
  long disableBeforeTime = System.currentTimeMillis();
  disableSchedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2);
  long disableAfterTime = System.currentTimeMillis() + 1;
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME1);
  long minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // This would make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // Both workflows must run at least once.
  // If the testNewPartition() loop took longer than expected, it may be more (quartz fired multiple times)
  Tasks.waitFor(true, () -> getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.COMPLETED) > 0
                  && getRuns(SCHEDULED_WORKFLOW_2, ProgramRunStatus.COMPLETED) > 0,
                10, TimeUnit.SECONDS);
  // There shouldn't be any partition trigger in the job queue
  Assert.assertFalse(Iterables.any(getAllJobs(),
                                   job -> job.getSchedule().getTrigger() instanceof ProtoTrigger.PartitionTrigger));
  ProgramId compositeWorkflow = APP_ID.workflow(AppWithFrequentScheduledWorkflows.COMPOSITE_WORKFLOW);
  // Workflow scheduled with the composite trigger has never been started
  Assert.assertEquals(0, getRuns(compositeWorkflow, ProgramRunStatus.ALL));
  // Publish two more new partition notifications to satisfy the partition trigger in the composite trigger,
  // and thus the whole composite trigger will be satisfied
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  minPublishTime = System.currentTimeMillis();
  publishNotification(dataEventTopic, NamespaceId.DEFAULT, AppWithFrequentScheduledWorkflows.DATASET_NAME2);
  // This would make sure the subscriber has processed the data event
  waitUntilProcessed(dataEventTopic, minPublishTime);
  // Wait for 1 run to complete for compositeWorkflow
  waitForCompleteRuns(1, compositeWorkflow);
  for (RunRecordDetail runRecordMeta : store.getRuns(SCHEDULED_WORKFLOW_1, ProgramRunStatus.ALL,
                                                     0, Long.MAX_VALUE, Integer.MAX_VALUE).values()) {
    Map<String, String> sysArgs = runRecordMeta.getSystemArgs();
    Assert.assertNotNull(sysArgs);
    TriggeringScheduleInfo scheduleInfo =
      GSON.fromJson(sysArgs.get(ProgramOptionConstants.TRIGGERING_SCHEDULE_INFO), TriggeringScheduleInfo.class);
    Assert.assertEquals(AppWithFrequentScheduledWorkflows.TEN_SECOND_SCHEDULE_1, scheduleInfo.getName());
    List<TriggerInfo> triggerInfos = scheduleInfo.getTriggerInfos();
    // Only one notification is enough to satisfy Time Trigger
    Assert.assertEquals(1, triggerInfos.size());
    Assert.assertEquals(TriggerInfo.Type.TIME, triggerInfos.get(0).getType());
  }
  // Also verify that the two partition schedules did not trigger
  Assert.assertEquals(runs1, getRuns(WORKFLOW_1, ProgramRunStatus.ALL));
  Assert.assertEquals(runs2, getRuns(WORKFLOW_2, ProgramRunStatus.ALL));
  // enable partition schedule 2 and test reEnableSchedules
  scheduler.reEnableSchedules(NamespaceId.DEFAULT, disableBeforeTime, disableAfterTime);
  Assert.assertEquals(ProgramScheduleStatus.SCHEDULED,
                      scheduler.getScheduleStatus(APP_ID.schedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_2)));
  Assert.assertEquals(ProgramScheduleStatus.SUSPENDED,
                      scheduler.getScheduleStatus(APP_ID.schedule(AppWithFrequentScheduledWorkflows.DATASET_PARTITION_SCHEDULE_1)));
  testScheduleUpdate("disable");
  testScheduleUpdate("update");
  testScheduleUpdate("delete");
}
Use of io.cdap.cdap.proto.id.ProgramId in project cdap by caskdata.
The class ScheduleFetcherTest, method testListSchedules.
@Test
public void testListSchedules() throws Exception {
  ScheduleFetcher fetcher = getScheduleFetcher(fetcherType);
  String namespace = TEST_NAMESPACE1;
  String appName = AppWithSchedule.NAME;
  // Deploy the application with 2 schedules on the workflow
  Config appConfig = new AppWithSchedule.AppConfig(true, true, true);
  deploy(AppWithSchedule.class, 200, Constants.Gateway.API_VERSION_3_TOKEN, namespace, appConfig);
  // Get and validate the schedules
  ProgramId programId = new ProgramId(namespace, appName, ProgramType.WORKFLOW, AppWithSchedule.WORKFLOW_NAME);
  List<ScheduleDetail> scheduleList = fetcher.list(programId);
  Assert.assertEquals(2, scheduleList.size());
  Assert.assertEquals(AppWithSchedule.SCHEDULE, scheduleList.get(0).getName());
  Assert.assertEquals(AppWithSchedule.SCHEDULE_2, scheduleList.get(1).getName());
  // Delete the application
  Assert.assertEquals(200,
                      doDelete(getVersionedAPIPath("apps/", Constants.Gateway.API_VERSION_3_TOKEN, namespace))
                        .getResponseCode());
}