Use of io.cdap.cdap.proto.id.ProgramRunId in project cdap by caskdata.
From the class DirectRuntimeRequestValidatorTest, method testInvalid:
@Test(expected = BadRequestException.class)
public void testInvalid() throws BadRequestException {
  ProgramRunId programRunId = NamespaceId.DEFAULT.app("app").spark("spark").run(RunIds.generate());
  // Validation should fail since this run was never recorded
  RuntimeRequestValidator validator = new DirectRuntimeRequestValidator(cConf, txRunner,
                                                                        new MockProgramRunRecordFetcher(),
                                                                        accessEnforcer, authenticationContext);
  validator.getProgramRunStatus(programRunId, new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"));
}
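The ProgramRunId above is built fluently from the namespace: NamespaceId.DEFAULT.app(...) selects the application, a program-type method such as spark(...) or workflow(...) selects the program, and run(...) attaches the run id. A minimal sketch using only the calls that appear in these tests (the application and program names are placeholders):

  // Spark program run id, as used in the validator tests above
  ProgramRunId sparkRun = NamespaceId.DEFAULT.app("app").spark("spark").run(RunIds.generate());
  // Workflow program run id; run(...) also accepts the string form of a generated run id
  ProgramRunId workflowRun = NamespaceId.DEFAULT.app("app").workflow("workflow").run(RunIds.generate().getId());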
Use of io.cdap.cdap.proto.id.ProgramRunId in project cdap by caskdata.
From the class DirectRuntimeRequestValidatorTest, method testValidProgramInStoppingState:
@Test
public void testValidProgramInStoppingState() throws BadRequestException {
  ProgramRunId programRunId = NamespaceId.DEFAULT.app("app").spark("spark").run(RunIds.generate());
  // Insert the run
  TransactionRunners.run(txRunner, context -> {
    AppMetadataStore store = AppMetadataStore.create(context);
    store.recordProgramProvisioning(programRunId, Collections.emptyMap(),
                                    Collections.singletonMap(SystemArguments.PROFILE_NAME, "system:default"),
                                    createSourceId(1), ARTIFACT_ID);
    store.recordProgramProvisioned(programRunId, 1, createSourceId(2));
    store.recordProgramStart(programRunId, null, Collections.emptyMap(), createSourceId(3));
    store.recordProgramRunning(programRunId, System.currentTimeMillis(), null, createSourceId(3));
    store.recordProgramStopping(programRunId, createSourceId(3), System.currentTimeMillis(),
                                System.currentTimeMillis() + 1000);
  });
  // Validation should pass
  RuntimeRequestValidator validator = new DirectRuntimeRequestValidator(cConf, txRunner,
                                                                        new MockProgramRunRecordFetcher(),
                                                                        accessEnforcer, authenticationContext);
  ProgramRunInfo programRunInfo = validator.getProgramRunStatus(
    programRunId, new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"));
  // After recording the stopping state in AppMetadataStore, the expected status is STOPPING
  Assert.assertEquals(ProgramRunStatus.STOPPING, programRunInfo.getProgramRunStatus());
  Assert.assertNotNull("Payload should not be null when program status is STOPPING", programRunInfo.getPayload());
}
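For comparison, stopping the lifecycle recording one step earlier (after recordProgramRunning) would leave the run in the RUNNING state. A plausible sketch of that variant, not part of this excerpt and built only from the calls shown above:

  @Test
  public void testValidProgramInRunningState() throws BadRequestException {
    ProgramRunId programRunId = NamespaceId.DEFAULT.app("app").spark("spark").run(RunIds.generate());
    TransactionRunners.run(txRunner, context -> {
      AppMetadataStore store = AppMetadataStore.create(context);
      store.recordProgramProvisioning(programRunId, Collections.emptyMap(),
                                      Collections.singletonMap(SystemArguments.PROFILE_NAME, "system:default"),
                                      createSourceId(1), ARTIFACT_ID);
      store.recordProgramProvisioned(programRunId, 1, createSourceId(2));
      store.recordProgramStart(programRunId, null, Collections.emptyMap(), createSourceId(3));
      store.recordProgramRunning(programRunId, System.currentTimeMillis(), null, createSourceId(3));
      // no recordProgramStopping(...) call, so the run should stay RUNNING
    });
    RuntimeRequestValidator validator = new DirectRuntimeRequestValidator(cConf, txRunner,
                                                                          new MockProgramRunRecordFetcher(),
                                                                          accessEnforcer, authenticationContext);
    ProgramRunInfo programRunInfo = validator.getProgramRunStatus(
      programRunId, new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"));
    Assert.assertEquals(ProgramRunStatus.RUNNING, programRunInfo.getProgramRunStatus());
  }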
Use of io.cdap.cdap.proto.id.ProgramRunId in project cdap by caskdata.
From the class RuntimeClientServiceTest, method beforeTest:
@Before
public void beforeTest() throws Exception {
  CConfiguration cConf = CConfiguration.create();
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, TEMP_FOLDER.newFolder().getAbsolutePath());
  cConf.set(Constants.RuntimeMonitor.TOPICS_CONFIGS, TOPIC_CONFIGS_VALUE);
  topicConfigs = RuntimeMonitors.createTopicConfigs(cConf);
  InMemoryDiscoveryService discoveryService = new InMemoryDiscoveryService();

  // Injector for the server side
  Injector injector = Guice.createInjector(
    new ConfigModule(cConf),
    new LocalLocationModule(),
    new MessagingServerRuntimeModule().getInMemoryModules(),
    new AuthenticationContextModules().getNoOpModule(),
    new RuntimeServerModule() {
      @Override
      protected void bindRequestValidator() {
        bind(RuntimeRequestValidator.class)
          .toInstance((programRunId, request) -> new ProgramRunInfo(ProgramRunStatus.COMPLETED, null));
      }

      @Override
      protected void bindLogProcessor() {
        bind(RemoteExecutionLogProcessor.class).toInstance(payloads -> {
        });
      }
    },
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class);
        bind(DiscoveryService.class).toInstance(discoveryService);
        bind(DiscoveryServiceClient.class).toInstance(discoveryService);
      }
    });

  messagingService = injector.getInstance(MessagingService.class);
  if (messagingService instanceof Service) {
    ((Service) messagingService).startAndWait();
  }
  runtimeServer = injector.getInstance(RuntimeServer.class);
  runtimeServer.startAndWait();

  // Injector for the client side
  clientCConf = CConfiguration.create();
  clientCConf.set(Constants.CFG_LOCAL_DATA_DIR, TEMP_FOLDER.newFolder().getAbsolutePath());
  clientCConf.set(Constants.RuntimeMonitor.TOPICS_CONFIGS, TOPIC_CONFIGS_VALUE);

  // Shorten the poll delay and grace period to speed up testing of program terminate state handling
  clientCConf.setLong(Constants.RuntimeMonitor.POLL_TIME_MS, 200);
  clientCConf.setLong(Constants.RuntimeMonitor.GRACEFUL_SHUTDOWN_MS, 3000);

  // Use a smaller batch size so that a fetch is broken into multiple fetches
  clientCConf.setInt(Constants.RuntimeMonitor.BATCH_SIZE, 1);

  injector = Guice.createInjector(
    new ConfigModule(clientCConf),
    RemoteAuthenticatorModules.getNoOpModule(),
    new MessagingServerRuntimeModule().getInMemoryModules(),
    new AuthenticationContextModules().getNoOpModule(),
    new AbstractModule() {
      @Override
      protected void configure() {
        bind(MetricsCollectionService.class).to(NoOpMetricsCollectionService.class);
        bind(DiscoveryService.class).toInstance(discoveryService);
        bind(DiscoveryServiceClient.class).toInstance(discoveryService);
        bind(ProgramRunId.class).toInstance(PROGRAM_RUN_ID);
      }
    });

  clientMessagingService = injector.getInstance(MessagingService.class);
  if (clientMessagingService instanceof Service) {
    ((Service) clientMessagingService).startAndWait();
  }
  runtimeClientService = injector.getInstance(RuntimeClientService.class);
  runtimeClientService.startAndWait();
}
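A matching teardown would stop these components in reverse order of startup. The corresponding @After method is not shown in this excerpt; the sketch below assumes the usual Guava Service lifecycle implied by the startAndWait() calls above:

  @After
  public void afterTest() {
    // stop the client side first, then the server side
    runtimeClientService.stopAndWait();
    if (clientMessagingService instanceof Service) {
      ((Service) clientMessagingService).stopAndWait();
    }
    runtimeServer.stopAndWait();
    if (messagingService instanceof Service) {
      ((Service) messagingService).stopAndWait();
    }
  }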
Use of io.cdap.cdap.proto.id.ProgramRunId in project cdap by caskdata.
From the class ConcurrencyConstraintTest, method testMaxConcurrentRuns:
@Test
public void testMaxConcurrentRuns() {
  Store store = AppFabricTestHelper.getInjector().getInstance(Store.class);
  try {
    long now = System.currentTimeMillis();
    ProgramSchedule schedule = new ProgramSchedule("SCHED1", "one partition schedule", WORKFLOW_ID,
                                                   ImmutableMap.of("prop3", "abc"),
                                                   new PartitionTrigger(DATASET_ID, 1), ImmutableList.of());
    SimpleJob job = new SimpleJob(schedule, 0, now, Collections.emptyList(), Job.State.PENDING_TRIGGER, 0L);
    ConcurrencyConstraint concurrencyConstraint = new ConcurrencyConstraint(2);
    ConstraintContext constraintContext = new ConstraintContext(job, now, store);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));

    ProgramRunId pid1 = WORKFLOW_ID.run(RunIds.generate().getId());
    ProgramRunId pid2 = WORKFLOW_ID.run(RunIds.generate().getId());
    ProgramRunId pid3 = WORKFLOW_ID.run(RunIds.generate().getId());

    // add a run for the schedule
    Map<String, String> systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, schedule.getName());
    setStartAndRunning(store, pid1, EMPTY_MAP, systemArgs);
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));

    // add a run for the program from a different schedule. Since there are now 2 running instances of the
    // workflow (regardless of the schedule name), the constraint is not met
    systemArgs = ImmutableMap.of(ProgramOptionConstants.SCHEDULE_NAME, "not" + schedule.getName());
    setStartAndRunning(store, pid2, EMPTY_MAP, systemArgs);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));

    // add a run for the program that wasn't started by a schedule;
    // there are now three concurrent runs, so the constraint will not be met
    setStartAndRunning(store, pid3);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));

    // stop the first program; the constraint is still not satisfied as there are still 2 running
    store.setStop(pid1, System.currentTimeMillis(), ProgramRunStatus.COMPLETED,
                  AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));

    // suspending/resuming the workflow doesn't reduce its concurrency count
    store.setSuspend(pid3, AppFabricTestHelper.createSourceId(++sourceId), -1);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));
    store.setResume(pid3, AppFabricTestHelper.createSourceId(++sourceId), -1);
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));

    // but the constraint will be satisfied when it completes, as only 1 run remains RUNNING
    store.setStop(pid3, System.currentTimeMillis(), ProgramRunStatus.KILLED,
                  AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));

    // add a run in the provisioning state; the constraint will not be satisfied since active runs increased to 2
    ProgramRunId pid4 = WORKFLOW_ID.run(RunIds.generate().getId());
    setProvisioning(store, pid4, Collections.emptyMap(), Collections.emptyMap());
    assertSatisfied(false, concurrencyConstraint.check(schedule, constraintContext));

    // stop the provisioning run; the constraint will be satisfied since active runs decreased to 1
    store.setStop(pid4, System.currentTimeMillis(), ProgramRunStatus.FAILED,
                  AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));

    // stopping the last running workflow will also satisfy the constraint
    store.setStop(pid2, System.currentTimeMillis(), ProgramRunStatus.FAILED,
                  AppFabricTestHelper.createSourceId(++sourceId));
    assertSatisfied(true, concurrencyConstraint.check(schedule, constraintContext));
  } finally {
    AppFabricTestHelper.shutdown();
  }
}
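The helpers assertSatisfied, setStartAndRunning, and setProvisioning are defined elsewhere in the test class and are not part of this excerpt. A plausible sketch of assertSatisfied, assuming check(...) returns a ConstraintResult that can be compared against a ConstraintResult.SATISFIED constant (both names are assumptions based on typical CDAP scheduler constraint APIs):

  private void assertSatisfied(boolean expectSatisfied, ConstraintResult constraintResult) {
    if (expectSatisfied) {
      // a satisfied constraint is expected to return the SATISFIED result
      Assert.assertEquals(ConstraintResult.SATISFIED, constraintResult);
    } else {
      Assert.assertNotEquals(ConstraintResult.SATISFIED, constraintResult);
    }
  }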
Use of io.cdap.cdap.proto.id.ProgramRunId in project cdap by caskdata.
From the class DefaultRuntimeJobTest, method testInjector:
@Test
public void testInjector() throws Exception {
  CConfiguration cConf = CConfiguration.create();
  cConf.set(Constants.CFG_LOCAL_DATA_DIR, TEMP_FOLDER.newFolder().toString());
  LocationFactory locationFactory = new LocalLocationFactory(TEMP_FOLDER.newFile());
  DefaultRuntimeJob defaultRuntimeJob = new DefaultRuntimeJob();
  Arguments systemArgs = new BasicArguments(Collections.singletonMap(SystemArguments.PROFILE_NAME, "test"));

  Node node = new Node("test", Node.Type.MASTER, "127.0.0.1", System.currentTimeMillis(), Collections.emptyMap());
  Cluster cluster = new Cluster("test", ClusterStatus.RUNNING, Collections.singleton(node), Collections.emptyMap());
  ProgramRunId programRunId = NamespaceId.DEFAULT.app("app").workflow("workflow").run(RunIds.generate());
  SimpleProgramOptions programOpts = new SimpleProgramOptions(programRunId.getParent(), systemArgs,
                                                              new BasicArguments());

  Injector injector = Guice.createInjector(defaultRuntimeJob.createModules(new RuntimeJobEnvironment() {
    @Override
    public LocationFactory getLocationFactory() {
      return locationFactory;
    }

    @Override
    public TwillRunner getTwillRunner() {
      return new NoopTwillRunnerService();
    }

    @Override
    public Map<String, String> getProperties() {
      return Collections.emptyMap();
    }
  }, cConf, programRunId, programOpts));

  injector.getInstance(LogAppenderInitializer.class);
  defaultRuntimeJob.createCoreServices(injector, systemArgs, cluster);
}
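The last constructor argument of SimpleProgramOptions, passed as an empty BasicArguments above, appears to carry the user (runtime) arguments, while systemArgs carries the system arguments. An illustrative variant; the "input.path" key is hypothetical and only shows where user arguments would go:

  SimpleProgramOptions programOptsWithUserArgs =
    new SimpleProgramOptions(programRunId.getParent(), systemArgs,
                             new BasicArguments(Collections.singletonMap("input.path", "/tmp/input")));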