Use of co.cask.cdap.proto.id.ProgramRunId in project cdap by caskdata.
The class ProgramLifecycleService, method issueStop.
/**
* Issues a command to stop the specified {@link RunId} of the specified {@link ProgramId} and returns a
* {@link ListenableFuture} with the {@link ProgramController} for it.
* Clients can wait for completion of the {@link ListenableFuture}.
*
* @param programId the {@link ProgramId program} to issue a stop for
* @param runId the runId of the program run to stop. If null, all runs of the program as returned by
* {@link ProgramRuntimeService} are stopped.
* @return a list of {@link ListenableFuture} with a {@link ProgramController} that clients can wait on for stop
* to complete.
* @throws NotFoundException if the app, program or run was not found
* @throws BadRequestException if an attempt is made to stop a program that is either not running or
* was started by a workflow
* @throws UnauthorizedException if the user issuing the command is not authorized to stop the program. To stop a
* program, a user requires {@link Action#EXECUTE} permission on the program.
*/
public List<ListenableFuture<ProgramController>> issueStop(ProgramId programId,
                                                            @Nullable String runId) throws Exception {
  authorizationEnforcer.enforce(programId, authenticationContext.getPrincipal(), Action.EXECUTE);
  List<ProgramRuntimeService.RuntimeInfo> runtimeInfos = findRuntimeInfo(programId, runId);
  if (runtimeInfos.isEmpty()) {
    if (!store.applicationExists(programId.getParent())) {
      throw new ApplicationNotFoundException(programId.getParent());
    } else if (!store.programExists(programId)) {
      throw new ProgramNotFoundException(programId);
    } else if (runId != null) {
      ProgramRunId programRunId = programId.run(runId);
      // Check if the program is running and is started by the Workflow
      RunRecordMeta runRecord = store.getRun(programId, runId);
      if (runRecord != null && runRecord.getProperties().containsKey("workflowrunid")
          && runRecord.getStatus().equals(ProgramRunStatus.RUNNING)) {
        String workflowRunId = runRecord.getProperties().get("workflowrunid");
        throw new BadRequestException(String.format(
          "Cannot stop the program '%s' started by the Workflow run '%s'. Please stop the Workflow.",
          programRunId, workflowRunId));
      }
      throw new NotFoundException(programRunId);
    }
    throw new BadRequestException(String.format("Program '%s' is not running.", programId));
  }
  List<ListenableFuture<ProgramController>> futures = new ArrayList<>();
  for (ProgramRuntimeService.RuntimeInfo runtimeInfo : runtimeInfos) {
    futures.add(runtimeInfo.getController().stop());
  }
  return futures;
}
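For context, a minimal sketch of how a caller might consume the returned futures. The programId values, the programLifecycleService field, and the use of Guava's Futures.successfulAsList are illustrative assumptions, not code from the CDAP repository.

// Hypothetical caller: stop all active runs of a program and wait for every stop attempt to finish.
ProgramId programId = new ProgramId("default", "PurchaseApp", ProgramType.WORKFLOW, "PurchaseWorkflow");
// A null runId asks issueStop to stop every run reported by ProgramRuntimeService.
List<ListenableFuture<ProgramController>> stopFutures = programLifecycleService.issueStop(programId, null);
// Futures.successfulAsList (Guava) completes once each underlying stop future has completed or failed.
List<ProgramController> controllers = Futures.successfulAsList(stopFutures).get();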
Use of co.cask.cdap.proto.id.ProgramRunId in project cdap by caskdata.
The class LineageAdminTest, method testDirectCycleTwoRuns.
@Test
public void testDirectCycleTwoRuns() throws Exception {
  // Lineage for:
  //
  // D1 -> P1 (run1)
  //
  // D1 <- P1 (run2)
  //
  LineageStore lineageStore = new LineageStore(getTxExecFactory(), getDatasetFramework(),
                                               NamespaceId.DEFAULT.dataset("testDirectCycleTwoRuns"));
  Store store = getInjector().getInstance(Store.class);
  MetadataStore metadataStore = getInjector().getInstance(MetadataStore.class);
  LineageAdmin lineageAdmin = new LineageAdmin(lineageStore, store, metadataStore,
                                               new NoOpEntityExistenceVerifier());
  // Add accesses
  addRuns(store, run1, run2, run3, run4, run5);
  // It is okay to use the current time here since access time is ignored during assertions
  lineageStore.addAccess(run1, dataset1, AccessType.READ, System.currentTimeMillis(), flowlet1);
  // The write is recorded in a different run: same program coordinates as run1, but run2's run id
  lineageStore.addAccess(new ProgramRunId(run1.getNamespace(), run1.getApplication(),
                                          run1.getParent().getType(), run1.getProgram(),
                                          run2.getEntityName()),
                         dataset1, AccessType.WRITE, System.currentTimeMillis(), flowlet1);
  Lineage expectedLineage = new Lineage(ImmutableSet.of(
    new Relation(dataset1, program1, AccessType.READ, twillRunId(run1), toSet(flowlet1)),
    new Relation(dataset1, program1, AccessType.WRITE, twillRunId(run2), toSet(flowlet1))));
  Assert.assertEquals(expectedLineage, lineageAdmin.computeLineage(dataset1, 500, 20000, 100));
}
Use of co.cask.cdap.proto.id.ProgramRunId in project cdap by caskdata.
The class ProgramLifecycleHttpHandler, method getMapReduceInfo.
/**
* Relays job-level and task-level information about a particular MapReduce program run.
*/
@GET
@Path("/apps/{app-id}/mapreduce/{mapreduce-id}/runs/{run-id}/info")
public void getMapReduceInfo(HttpRequest request, HttpResponder responder,
                             @PathParam("namespace-id") String namespaceId,
                             @PathParam("app-id") String appId,
                             @PathParam("mapreduce-id") String mapreduceId,
                             @PathParam("run-id") String runId) throws IOException, NotFoundException {
  ProgramId programId = new ProgramId(namespaceId, appId, ProgramType.MAPREDUCE, mapreduceId);
  ProgramRunId run = programId.run(runId);
  ApplicationSpecification appSpec = store.getApplication(programId.getParent());
  if (appSpec == null) {
    throw new NotFoundException(programId.getApplication());
  }
  if (!appSpec.getMapReduce().containsKey(mapreduceId)) {
    throw new NotFoundException(programId);
  }
  RunRecordMeta runRecordMeta = store.getRun(programId, runId);
  if (runRecordMeta == null) {
    throw new NotFoundException(run);
  }
  MRJobInfo mrJobInfo = mrJobInfoFetcher.getMRJobInfo(run.toId());
  mrJobInfo.setState(runRecordMeta.getStatus().name());
  // Multiply startTs / endTs by 1000 to be consistent with the task-level start/stop times returned by
  // JobClient in milliseconds. RunRecord returns values in seconds.
  mrJobInfo.setStartTime(TimeUnit.SECONDS.toMillis(runRecordMeta.getStartTs()));
  Long stopTs = runRecordMeta.getStopTs();
  if (stopTs != null) {
    mrJobInfo.setStopTime(TimeUnit.SECONDS.toMillis(stopTs));
  }
  // JobClient (in DistributedMRJobInfoFetcher) can return NaN for some of the values, and GSON would otherwise fail
  Gson gson = new GsonBuilder().serializeSpecialFloatingPointValues().create();
  responder.sendJson(HttpResponseStatus.OK, mrJobInfo, mrJobInfo.getClass(), gson);
}
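As an aside, a small self-contained sketch of why serializeSpecialFloatingPointValues() matters here; the MRTaskStats class and its field are made-up stand-ins for the NaN-prone MRJobInfo values, not CDAP code.

import com.google.gson.Gson;
import com.google.gson.GsonBuilder;

public class NanSerializationSketch {
  // Hypothetical stand-in for a metrics bean whose values may come back from JobClient as NaN.
  static class MRTaskStats {
    double mapProgress = Double.NaN;
  }

  public static void main(String[] args) {
    MRTaskStats stats = new MRTaskStats();
    try {
      // Plain Gson throws, because strict JSON has no representation for NaN or Infinity.
      new Gson().toJson(stats);
    } catch (IllegalArgumentException e) {
      System.out.println("default Gson fails: " + e.getMessage());
    }
    // With serializeSpecialFloatingPointValues(), Gson emits the lenient literal NaN instead of failing.
    Gson lenient = new GsonBuilder().serializeSpecialFloatingPointValues().create();
    System.out.println(lenient.toJson(stats)); // prints {"mapProgress":NaN}
  }
}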
Use of co.cask.cdap.proto.id.ProgramRunId in project cdap by caskdata.
The class AppMetadataStore, method getHistoricalRuns.
private Map<ProgramRunId, RunRecordMeta> getHistoricalRuns(@Nullable ProgramId programId, ProgramRunStatus status,
                                                           final long startTime, final long endTime, int limit,
                                                           @Nullable Predicate<RunRecordMeta> filter) {
  if (programId == null || !programId.getVersion().equals(ApplicationId.DEFAULT_VERSION)) {
    MDSKey key = getProgramKeyBuilder(TYPE_RUN_RECORD_COMPLETED, programId).build();
    return getHistoricalRuns(key, status, startTime, endTime, limit, null, filter);
  }
  Predicate<MDSKey> keyPredicate = new AppVersionPredicate(ApplicationId.DEFAULT_VERSION);
  MDSKey key = getProgramKeyBuilder(TYPE_RUN_RECORD_COMPLETED, programId).build();
  Map<ProgramRunId, RunRecordMeta> newRecords = getHistoricalRuns(key, status, startTime, endTime, limit,
                                                                  keyPredicate, filter);
  int remaining = limit - newRecords.size();
  if (remaining > 0 && !upgradeComplete.get()) {
    // We need to scan twice because the key is modified again in getHistoricalRuns, where startTime and
    // endTime are used to reduce the scan range
    key = getVersionLessProgramKeyBuilder(TYPE_RUN_RECORD_COMPLETED, programId).build();
    Map<ProgramRunId, RunRecordMeta> oldRecords = getHistoricalRuns(key, status, startTime, endTime, remaining,
                                                                    keyPredicate, filter);
    newRecords.putAll(oldRecords);
  }
  return newRecords;
}
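To make the limit bookkeeping easier to follow, here is a simplified, self-contained sketch of the same two-pass pattern: fill up to the limit from the current (versioned) key space, and top up from the legacy (version-less) key space only while the upgrade is still incomplete. The fetchVersioned/fetchVersionless helpers below are hypothetical stand-ins, not AppMetadataStore methods.

import java.util.LinkedHashMap;
import java.util.Map;

public class TwoPassScanSketch {
  // Hypothetical stand-ins for scans over the versioned and version-less run-record key spaces.
  static Map<String, String> fetchVersioned(int limit) { return new LinkedHashMap<>(); }
  static Map<String, String> fetchVersionless(int limit) { return new LinkedHashMap<>(); }

  static Map<String, String> historicalRuns(int limit, boolean upgradeComplete) {
    Map<String, String> records = new LinkedHashMap<>(fetchVersioned(limit));
    int remaining = limit - records.size();
    // Only consult the old key format if the quota is unfilled and records may still live under old keys.
    if (remaining > 0 && !upgradeComplete) {
      records.putAll(fetchVersionless(remaining));
    }
    return records;
  }
}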
Use of co.cask.cdap.proto.id.ProgramRunId in project cdap by caskdata.
The class AppMetadataStoreTest, method testgetRuns.
@Test
public void testgetRuns() throws Exception {
  DatasetId storeTable = NamespaceId.DEFAULT.dataset("testgetRuns");
  datasetFramework.addInstance(Table.class.getName(), storeTable, DatasetProperties.EMPTY);
  Table table = datasetFramework.getDataset(storeTable, ImmutableMap.<String, String>of(), null);
  Assert.assertNotNull(table);
  final AppMetadataStore metadataStoreDataset = new AppMetadataStore(table, cConf, new AtomicBoolean(false));
  TransactionExecutor txnl =
    txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) metadataStoreDataset));
  // Add some run records
  final Set<String> expected = new TreeSet<>();
  final Set<String> expectedHalf = new TreeSet<>();
  final Set<ProgramRunId> programRunIdSet = new HashSet<>();
  final Set<ProgramRunId> programRunIdSetHalf = new HashSet<>();
  for (int i = 0; i < 100; ++i) {
    ApplicationId application = NamespaceId.DEFAULT.app("app");
    final ProgramId program = application.program(ProgramType.FLOW, "program");
    final RunId runId = RunIds.generate((i + 1) * 10000);
    expected.add(runId.toString());
    final int index = i;
    // Add every other runId
    if ((i % 2) == 0) {
      expectedHalf.add(runId.toString());
    }
    ProgramRunId programRunId = new ProgramRunId(program.getNamespace(), program.getApplication(),
                                                 program.getType(), program.getProgram(), runId.toString());
    programRunIdSet.add(programRunId);
    // Add every other programRunId
    if ((i % 2) == 0) {
      programRunIdSetHalf.add(programRunId);
    }
    txnl.execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws Exception {
        // Start the program and stop it
        metadataStoreDataset.recordProgramStart(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                                null, null, null);
        metadataStoreDataset.recordProgramStop(program, runId.getId(), RunIds.getTime(runId, TimeUnit.SECONDS),
                                               ProgramRunStatus.values()[index % ProgramRunStatus.values().length],
                                               null);
      }
    });
  }
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Map<ProgramRunId, RunRecordMeta> runMap = metadataStoreDataset.getRuns(programRunIdSet);
      Set<String> actual = new TreeSet<>();
      for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMap.entrySet()) {
        actual.add(entry.getValue().getPid());
      }
      Assert.assertEquals(expected, actual);
      Map<ProgramRunId, RunRecordMeta> runMapHalf = metadataStoreDataset.getRuns(programRunIdSetHalf);
      Set<String> actualHalf = new TreeSet<>();
      for (Map.Entry<ProgramRunId, RunRecordMeta> entry : runMapHalf.entrySet()) {
        actualHalf.add(entry.getValue().getPid());
      }
      Assert.assertEquals(expectedHalf, actualHalf);
    }
  });
}