Use of io.cdap.cdap.proto.Notification in project cdap by cdapio.
The class MapReduceRunnerTestBase, method getDataNotifications.
/**
 * Returns a list of {@link Notification} objects fetched from the data event topic in TMS that were published
 * starting from the given time.
 */
protected List<Notification> getDataNotifications(long startTime) throws Exception {
  // Get data notifications from TMS
  List<Notification> notifications = new ArrayList<>();
  MessagingContext messagingContext = new MultiThreadMessagingContext(injector.getInstance(MessagingService.class));
  try (CloseableIterator<Message> messages = messagingContext.getMessageFetcher().fetch(
    NamespaceId.SYSTEM.getNamespace(),
    injector.getInstance(CConfiguration.class).get(Constants.Dataset.DATA_EVENT_TOPIC), 10, startTime)) {
    while (messages.hasNext()) {
      notifications.add(GSON.fromJson(new String(messages.next().getPayload(), StandardCharsets.UTF_8),
                                      Notification.class));
    }
  }
  return notifications;
}
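For context, a test could use this helper to verify that a program run published a data event. The following is a minimal sketch, not taken from the source: it assumes a test class that extends MapReduceRunnerTestBase, and runSomeProgram() is a hypothetical stand-in for whatever triggers the run.

// Sketch only: runSomeProgram() is hypothetical; polling is used because the notification is published
// asynchronously to TMS.
long startTime = System.currentTimeMillis();
runSomeProgram();
List<Notification> notifications = getDataNotifications(startTime);
long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
while (notifications.isEmpty() && System.currentTimeMillis() < deadline) {
  TimeUnit.MILLISECONDS.sleep(200);
  notifications = getDataNotifications(startTime);
}
Assert.assertFalse(notifications.isEmpty());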
Use of io.cdap.cdap.proto.Notification in project cdap by caskdata.
The class JobQueueTable, method addNotification.
@Override
public void addNotification(ProgramScheduleRecord record, Notification notification) throws IOException {
  boolean jobExists = false;
  ProgramSchedule schedule = record.getSchedule();
  // Only add notifications for enabled schedules
  if (record.getMeta().getStatus() != ProgramScheduleStatus.SCHEDULED) {
    return;
  }
  int nextGenerationId = 0;
  try (CloseableIterator<Job> jobs = getJobsForSchedule(schedule.getScheduleId())) {
    while (jobs.hasNext()) {
      Job job = jobs.next();
      if (job.getGenerationId() >= nextGenerationId) {
        nextGenerationId = job.getGenerationId() + 1;
      }
      if (job.getState() == Job.State.PENDING_TRIGGER) {
        if (job.isToBeDeleted()) {
          // ignore; it will be cleaned up by the ConstraintCheckerService
          continue;
        }
        long scheduleLastUpdated = record.getMeta().getLastUpdated();
        if (job.getScheduleLastUpdatedTime() != scheduleLastUpdated) {
          // the schedule has changed: this job is obsolete
          writeJobObsolete(job, System.currentTimeMillis());
        } else if (System.currentTimeMillis() - job.getCreationTime() > job.getSchedule().getTimeoutMillis()) {
          // the job has timed out; mark it obsolete
          writeJobObsolete(job, System.currentTimeMillis());
        } else {
          jobExists = true;
          addNotification(job, notification);
          break;
        }
      }
    }
  }
  // if no job exists for the scheduleId, add a new job with the first notification
  if (!jobExists) {
    List<Notification> notifications = Collections.singletonList(notification);
    Job.State jobState = isTriggerSatisfied(schedule, notifications)
      ? Job.State.PENDING_CONSTRAINT : Job.State.PENDING_TRIGGER;
    writeJob(new SimpleJob(schedule, nextGenerationId, System.currentTimeMillis(), notifications, jobState,
                           record.getMeta().getLastUpdated()));
  }
}
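As a rough usage sketch (how the table is obtained below is an assumption; the real caller wires it through its own transaction runner), the effect of the method is:

// Sketch only: obtainJobQueueTable() is a hypothetical stand-in for however the caller gets the table
// inside a transaction; record and notification come from the schedule store and the TMS message.
JobQueueTable jobQueue = obtainJobQueueTable();
jobQueue.addNotification(record, notification);
// A notification for a disabled schedule is silently dropped. Otherwise it is either appended to an
// existing PENDING_TRIGGER job, or a new SimpleJob is written with the next generation id, in state
// PENDING_CONSTRAINT if the trigger is already satisfied and PENDING_TRIGGER if not.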
Use of io.cdap.cdap.proto.Notification in project cdap by caskdata.
The class DynamicPartitionerWithAvroTest, method runDynamicPartitionerMR.
private void runDynamicPartitionerMR(final List<? extends GenericRecord> records,
                                     boolean allowConcurrentWriters,
                                     final boolean precreatePartitions,
                                     @Nullable final DynamicPartitioner.PartitionWriteOption partitionWriteOption,
                                     boolean expectedStatus) throws Exception {
  ApplicationWithPrograms app = deployApp(AppWithMapReduceUsingAvroDynamicPartitioner.class);
  final long now = System.currentTimeMillis();
  final Multimap<PartitionKey, GenericRecord> keyToRecordsMap = groupByPartitionKey(records, now);
  // write values to the input kvTable
  final KeyValueTable kvTable = datasetCache.getDataset(INPUT_DATASET);
  Transactions.createTransactionExecutor(txExecutorFactory, kvTable).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() {
      // the keys are not used; they only need to be unique
      for (int i = 0; i < records.size(); i++) {
        kvTable.write(Integer.toString(i), records.get(i).toString());
      }
    }
  });
  final PartitionedFileSet pfs = datasetCache.getDataset(OUTPUT_DATASET);
  if (precreatePartitions) {
    Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) pfs)
      .execute(new TransactionExecutor.Subroutine() {
        @Override
        public void apply() throws IOException {
          writeFile(pfs, createKey(now, 95111));
          writeFile(pfs, createKey(now, 98123));
          writeFile(pfs, createKey(now, 84125));
        }
      });
  }
  String allowConcurrencyKey =
    "dataset." + OUTPUT_DATASET + "." + PartitionedFileSetArguments.DYNAMIC_PARTITIONER_ALLOW_CONCURRENCY;
  // run the partition writer m/r with this output partition time
  Map<String, String> arguments = new HashMap<>();
  arguments.put(OUTPUT_PARTITION_KEY, Long.toString(now));
  arguments.put(allowConcurrencyKey, Boolean.toString(allowConcurrentWriters));
  if (partitionWriteOption != null) {
    arguments.put("partitionWriteOption", partitionWriteOption.name());
  }
  long startTime = System.currentTimeMillis();
  boolean status = runProgram(app, AppWithMapReduceUsingAvroDynamicPartitioner.DynamicPartitioningMapReduce.class,
                              new BasicArguments(arguments));
  Assert.assertEquals(expectedStatus, status);
  if (!expectedStatus) {
    // if we expect the program to fail, there is no need to check the output data for expected results
    return;
  }
  // Verify notifications
  List<Notification> notifications = getDataNotifications(startTime);
  Assert.assertEquals(1, notifications.size());
  Assert.assertEquals(NamespaceId.DEFAULT.dataset(OUTPUT_DATASET),
                      DatasetId.fromString(notifications.get(0).getProperties().get("datasetId")));
  // this should have created a partition in the pfs
  final Location pfsBaseLocation = pfs.getEmbeddedFileSet().getBaseLocation();
  Transactions.createTransactionExecutor(txExecutorFactory, (TransactionAware) pfs)
    .execute(new TransactionExecutor.Subroutine() {
      @Override
      public void apply() throws IOException {
        Map<PartitionKey, PartitionDetail> partitions = new HashMap<>();
        for (PartitionDetail partition : pfs.getPartitions(null)) {
          partitions.put(partition.getPartitionKey(), partition);
          // check that the mapreduce wrote the output partition metadata to all the output partitions
          Assert.assertEquals(getExpectedMetadata(precreatePartitions, partitionWriteOption),
                              partition.getMetadata().asMap());
          // if files were precreated and the write option is CREATE_OR_APPEND, the empty file should still exist;
          // if the write option overwrites the partition, the file should be gone
          Location preexistingFile = partition.getLocation().append("file");
          if (precreatePartitions && partitionWriteOption == DynamicPartitioner.PartitionWriteOption.CREATE_OR_APPEND) {
            Assert.assertTrue(preexistingFile.exists());
            try (InputStream inputStream = preexistingFile.getInputStream()) {
              Assert.assertEquals(-1, inputStream.read());
            }
          } else {
            Assert.assertFalse(preexistingFile.exists());
          }
        }
        Assert.assertEquals(3, partitions.size());
        Assert.assertEquals(keyToRecordsMap.keySet(), partitions.keySet());
        // Check the relative paths of the partitions, and that each location = pfs baseLocation + relativePath
        for (Map.Entry<PartitionKey, PartitionDetail> partitionKeyEntry : partitions.entrySet()) {
          PartitionDetail partitionDetail = partitionKeyEntry.getValue();
          String relativePath = partitionDetail.getRelativePath();
          int zip = (int) partitionKeyEntry.getKey().getField("zip");
          Assert.assertEquals(Long.toString(now) + Path.SEPARATOR + zip, relativePath);
          Assert.assertEquals(pfsBaseLocation.append(relativePath), partitionDetail.getLocation());
        }
        for (Map.Entry<PartitionKey, Collection<GenericRecord>> keyToRecordsEntry : keyToRecordsMap.asMap().entrySet()) {
          Set<GenericRecord> genericRecords = new HashSet<>(keyToRecordsEntry.getValue());
          Assert.assertEquals(genericRecords, readOutput(partitions.get(keyToRecordsEntry.getKey()).getLocation()));
        }
      }
    });
}
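A typical invocation from a test in this class might look like the following sketch; createAvroRecords() is a hypothetical helper standing in for however the test builds its list of GenericRecord values.

@Test
public void testDynamicPartitioning() throws Exception {
  // Sketch only: createAvroRecords() is hypothetical; the real tests build their own record lists.
  List<GenericRecord> records = createAvroRecords();
  // concurrent writers allowed, no precreated partitions, default write option, expect success
  runDynamicPartitionerMR(records, true, false, null, true);
}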
Use of io.cdap.cdap.proto.Notification in project cdap by caskdata.
The class TimeTrigger, method updateLaunchArguments.
@Override
public void updateLaunchArguments(ProgramSchedule schedule, List<Notification> notifications,
                                  Map<String, String> systemArgs, Map<String, String> userArgs) {
  for (Notification notification : notifications) {
    if (!isSatisfied(schedule, notification)) {
      continue;
    }
    String systemOverridesJson = notification.getProperties().get(ProgramOptionConstants.SYSTEM_OVERRIDES);
    String userOverridesJson = notification.getProperties().get(ProgramOptionConstants.USER_OVERRIDES);
    if (userOverridesJson == null || systemOverridesJson == null) {
      // Ignore the malformed notification
      continue;
    }
    systemArgs.putAll(GSON.<Map<String, String>>fromJson(systemOverridesJson, STRING_STRING_MAP));
    userArgs.putAll(GSON.<Map<String, String>>fromJson(userOverridesJson, STRING_STRING_MAP));
    return;
  }
}
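For illustration, a notification whose properties would be picked up by this method could be built roughly as in the sketch below. Only the SYSTEM_OVERRIDES and USER_OVERRIDES properties matter to the method; the override keys and values shown here are made up for the example.

// Sketch only: the override contents are illustrative, not taken from the source.
Map<String, String> properties = new HashMap<>();
properties.put(ProgramOptionConstants.SYSTEM_OVERRIDES,
               GSON.toJson(Collections.singletonMap("logicalStartTime", "1700000000000")));
properties.put(ProgramOptionConstants.USER_OVERRIDES,
               GSON.toJson(Collections.singletonMap("input.path", "/tmp/input")));
Notification notification = new Notification(Notification.Type.TIME, properties);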
Use of io.cdap.cdap.proto.Notification in project cdap by caskdata.
The class ProgramNotificationSubscriberService, method processWorkflowOnStop.
/**
 * On workflow program stop, inspects the inner program states and adjusts them if they are not already in an
 * end state.
 *
 * @param appMetadataStore the {@link AppMetadataStore} to write the status to
 * @param programHeartbeatTable the {@link ProgramHeartbeatTable} to write the status to
 * @param programRunId the program run of the completed program
 * @param programRunStatus the status of the completion
 * @param notification the {@link Notification} that carries information about the workflow completion
 * @param sourceId the source message id of the notification
 * @param runnables a {@link List} for collecting {@link Runnable} tasks to be executed after event handling
 *                  is completed
 * @throws Exception if the program status could not be updated
 */
private void processWorkflowOnStop(AppMetadataStore appMetadataStore, ProgramHeartbeatTable programHeartbeatTable,
                                   ProgramRunId programRunId, ProgramRunStatus programRunStatus,
                                   Notification notification, byte[] sourceId,
                                   List<Runnable> runnables) throws Exception {
  ApplicationId appId = programRunId.getParent().getParent();
  WorkflowSpecification workflowSpec = Optional.ofNullable(appMetadataStore.getApplication(appId))
    .map(appMeta -> appMeta.getSpec().getWorkflows().get(programRunId.getProgram()))
    .orElse(null);
  // If we cannot find the workflow spec (e.g. the app was deleted), there is nothing we can do.
  if (workflowSpec == null) {
    return;
  }
  // For all MR and Spark nodes, update the inner program run status if it is not in an end state yet.
  for (WorkflowNode workflowNode : workflowSpec.getNodeIdMap().values()) {
    if (!(workflowNode instanceof WorkflowActionNode)) {
      continue;
    }
    ScheduleProgramInfo programInfo = ((WorkflowActionNode) workflowNode).getProgram();
    if (!WORKFLOW_INNER_PROGRAM_TYPES.containsKey(programInfo.getProgramType())) {
      continue;
    }
    // Get all active runs of the inner program. If the parent workflow run id is the same as this one,
    // set a terminal state for the inner program run.
    ProgramId innerProgramId = appId.program(WORKFLOW_INNER_PROGRAM_TYPES.get(programInfo.getProgramType()),
                                             programInfo.getProgramName());
    Map<ProgramRunId, Notification> innerProgramNotifications = new LinkedHashMap<>();
    appMetadataStore.scanActiveRuns(innerProgramId, runRecord -> {
      Map<String, String> systemArgs = runRecord.getSystemArgs();
      String workflowName = systemArgs.get(ProgramOptionConstants.WORKFLOW_NAME);
      String workflowRun = systemArgs.get(ProgramOptionConstants.WORKFLOW_RUN_ID);
      if (workflowName == null || workflowRun == null) {
        return;
      }
      ProgramRunId workflowRunId = appId.program(ProgramType.WORKFLOW, workflowName).run(workflowRun);
      if (!programRunId.equals(workflowRunId)) {
        return;
      }
      Map<String, String> notificationProps = new HashMap<>(notification.getProperties());
      notificationProps.put(ProgramOptionConstants.PROGRAM_RUN_ID, GSON.toJson(runRecord.getProgramRunId()));
      innerProgramNotifications.put(runRecord.getProgramRunId(),
                                    new Notification(Notification.Type.PROGRAM_STATUS, notificationProps));
    });
    for (Map.Entry<ProgramRunId, Notification> entry : innerProgramNotifications.entrySet()) {
      handleProgramEvent(entry.getKey(), programRunStatus, entry.getValue(), sourceId,
                         appMetadataStore, programHeartbeatTable, runnables);
    }
  }
}
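The WORKFLOW_INNER_PROGRAM_TYPES map referenced above is only declared elsewhere in the class; based on the "MR and Spark nodes" comment, a mapping equivalent to the following sketch would make the loop skip every node type other than MapReduce and Spark. This is an assumed reconstruction, not confirmed by the snippet itself.

// Sketch of the assumed mapping from schedulable inner node types to run-record program types.
private static final Map<SchedulableProgramType, ProgramType> WORKFLOW_INNER_PROGRAM_TYPES =
  ImmutableMap.of(SchedulableProgramType.MAPREDUCE, ProgramType.MAPREDUCE,
                  SchedulableProgramType.SPARK, ProgramType.SPARK);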