Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class ProgramScheduleStoreDatasetTest, method testDeleteScheduleByTriggeringProgram.
@Test
public void testDeleteScheduleByTriggeringProgram() throws Exception {
  DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
  TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
  TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
  dsFramework.truncateInstance(Schedulers.STORE_DATASET_ID);
  final ProgramScheduleStoreDataset store =
    dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
  Assert.assertNotNull(store);
  TransactionExecutor txExecutor =
    txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
  SatisfiableTrigger prog1Trigger =
    new ProgramStatusTrigger(PROG1_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED);
  SatisfiableTrigger prog2Trigger =
    new ProgramStatusTrigger(PROG2_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED);
  final ProgramSchedule sched1 =
    new ProgramSchedule("sched1", "a program status trigger", PROG3_ID, ImmutableMap.of("propper", "popper"),
                        prog1Trigger, ImmutableList.<Constraint>of());
  final ProgramSchedule sched2 =
    new ProgramSchedule("sched2", "a program status trigger", PROG3_ID, ImmutableMap.of("propper", "popper"),
                        prog2Trigger, ImmutableList.<Constraint>of());
  final ProgramSchedule schedOr =
    new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"),
                        new OrTrigger(new PartitionTrigger(DS1_ID, 1),
                                      prog1Trigger,
                                      new AndTrigger(new OrTrigger(prog1Trigger, prog2Trigger),
                                                     new PartitionTrigger(DS2_ID, 1)),
                                      new OrTrigger(prog2Trigger)),
                        ImmutableList.<Constraint>of());
  final ProgramSchedule schedAnd =
    new ProgramSchedule("schedAnd", "an AND trigger", PROG3_ID, ImmutableMap.of("propper", "popper"),
                        new AndTrigger(new PartitionTrigger(DS1_ID, 1),
                                       prog2Trigger,
                                       new AndTrigger(prog1Trigger, new PartitionTrigger(DS2_ID, 1))),
                        ImmutableList.<Constraint>of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.addSchedules(ImmutableList.of(sched1, sched2, schedOr, schedAnd));
    }
  });
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // ProgramStatus event for PROG1_ID should trigger only sched1, schedOr, schedAnd
      Assert.assertEquals(ImmutableSet.of(sched1, schedOr, schedAnd),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
      // ProgramStatus event for PROG2_ID should trigger only sched2, schedOr, schedAnd
      Assert.assertEquals(ImmutableSet.of(sched2, schedOr, schedAnd),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
    }
  });
  // update or delete all schedules triggered by PROG1_ID
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.modifySchedulesTriggeredByDeletedProgram(PROG1_ID);
    }
  });
  final ProgramSchedule schedOrNew =
    new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"),
                        new OrTrigger(new PartitionTrigger(DS1_ID, 1),
                                      new AndTrigger(prog2Trigger, new PartitionTrigger(DS2_ID, 1)),
                                      prog2Trigger),
                        ImmutableList.of());
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // ProgramStatus event for PROG1_ID should trigger no schedules after modifying schedules triggered by it
      Assert.assertEquals(Collections.emptySet(),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
      Assert.assertEquals(Collections.emptySet(),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED))));
      Assert.assertEquals(Collections.emptySet(),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED))));
      // ProgramStatus event for PROG2_ID should trigger only sched2 and schedOrNew
      Assert.assertEquals(ImmutableSet.of(sched2, schedOrNew),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
    }
  });
  // update or delete all schedules triggered by PROG2_ID
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      store.modifySchedulesTriggeredByDeletedProgram(PROG2_ID);
    }
  });
  final ProgramSchedule schedOrNew1 =
    new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"),
                        new PartitionTrigger(DS1_ID, 1), ImmutableList.of());
  final Set<ProgramSchedule> ds1Schedules = new HashSet<>();
  txExecutor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // ProgramStatus event for PROG2_ID should trigger no schedules after modifying schedules triggered by it
      Assert.assertEquals(Collections.emptySet(),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.COMPLETED))));
      Assert.assertEquals(Collections.emptySet(),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
      Assert.assertEquals(Collections.emptySet(),
                          toScheduleSet(store.findSchedules(
                            Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.KILLED))));
      // event for DS1 should trigger only schedOrNew1 since all other schedules are deleted
      ds1Schedules.addAll(toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
    }
  });
  Assert.assertEquals(ImmutableSet.of(schedOrNew1), ds1Schedules);
}
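The assertions above go through a small toScheduleSet helper defined elsewhere in the test class. A minimal sketch of what such a helper might look like, assuming findSchedules returns ProgramScheduleRecord instances with a getSchedule accessor (both names are assumptions, not confirmed by this snippet):

private Set<ProgramSchedule> toScheduleSet(Collection<ProgramScheduleRecord> records) {
  // keep only the schedules and drop per-record metadata so sets can be compared directly
  Set<ProgramSchedule> schedules = new HashSet<>();
  for (ProgramScheduleRecord record : records) {
    schedules.add(record.getSchedule());
  }
  return schedules;
}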
Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class AppMetadataStore, method addWorkflowNodeState.
private void addWorkflowNodeState(ProgramRunId programRunId, Map<String, String> systemArgs,
                                  ProgramRunStatus status, @Nullable BasicThrowable failureCause, byte[] sourceId) {
  String workflowNodeId = systemArgs.get(ProgramOptionConstants.WORKFLOW_NODE_ID);
  String workflowName = systemArgs.get(ProgramOptionConstants.WORKFLOW_NAME);
  String workflowRun = systemArgs.get(ProgramOptionConstants.WORKFLOW_RUN_ID);
  ApplicationId appId = programRunId.getParent().getParent();
  ProgramRunId workflowRunId = appId.workflow(workflowName).run(workflowRun);
  // Node states will be stored with the following key:
  // workflowNodeState.namespace.app.WORKFLOW.workflowName.workflowRun.workflowNodeId
  MDSKey key = getProgramKeyBuilder(TYPE_WORKFLOW_NODE_STATE, workflowRunId).add(workflowNodeId).build();
  WorkflowNodeStateDetail nodeStateDetail =
    new WorkflowNodeStateDetail(workflowNodeId, ProgramRunStatus.toNodeStatus(status),
                                programRunId.getRun(), failureCause);
  write(key, nodeStateDetail);
  // Get the run record of the Workflow which started this program
  key = getProgramKeyBuilder(TYPE_RUN_RECORD_STARTED, workflowRunId).build();
  RunRecordMeta record = get(key, RunRecordMeta.class);
  if (record != null) {
    // Update the parent Workflow run record by adding the node id and program run id to its properties
    Map<String, String> properties = new HashMap<>(record.getProperties());
    properties.put(workflowNodeId, programRunId.getRun());
    write(key, RunRecordMeta.builder(record).setProperties(properties).setSourceId(sourceId).build());
  }
}
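For orientation, the system arguments read at the top of this method are the ones passed along when a program is launched as a node inside a workflow. A hedged sketch of what such a map might contain; the constant names come from ProgramOptionConstants as used above, while the concrete values are invented for illustration:

Map<String, String> systemArgs = ImmutableMap.of(
  ProgramOptionConstants.WORKFLOW_NAME, "DataPipelineWorkflow",  // name of the enclosing workflow (made up)
  ProgramOptionConstants.WORKFLOW_RUN_ID, "some-workflow-run-id", // run id of that workflow run (made up)
  ProgramOptionConstants.WORKFLOW_NODE_ID, "phase-1");            // node within the workflow (made up)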
Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class HBaseTable, method persist.
@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> updates) throws Exception {
  if (updates.isEmpty()) {
    return;
  }
  byte[] txId = tx == null ? null : Bytes.toBytes(tx.getTransactionId());
  byte[] txWritePointer = tx == null ? null : Bytes.toBytes(tx.getWritePointer());
  List<Mutation> mutations = new ArrayList<>();
  for (Map.Entry<byte[], NavigableMap<byte[], Update>> row : updates.entrySet()) {
    // create these only when they are needed
    PutBuilder put = null;
    PutBuilder incrementPut = null;
    IncrementBuilder increment = null;
    for (Map.Entry<byte[], Update> column : row.getValue().entrySet()) {
      // we want to support both tx and non-tx modes
      if (tx != null) {
        // TODO: hijacking timestamp... bad
        Update val = column.getValue();
        if (val instanceof IncrementValue) {
          if (safeReadlessIncrements) {
            increment = getIncrement(increment, row.getKey(), txId, txWritePointer);
            increment.add(columnFamily, column.getKey(), tx.getWritePointer(), ((IncrementValue) val).getValue());
          } else {
            incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
            incrementPut.add(columnFamily, column.getKey(), tx.getWritePointer(),
                             Bytes.toBytes(((IncrementValue) val).getValue()));
          }
        } else if (val instanceof PutValue) {
          put = getPut(put, row.getKey(), txId);
          put.add(columnFamily, column.getKey(), tx.getWritePointer(),
                  wrapDeleteIfNeeded(((PutValue) val).getValue()));
        }
      } else {
        Update val = column.getValue();
        if (val instanceof IncrementValue) {
          incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
          incrementPut.add(columnFamily, column.getKey(), Bytes.toBytes(((IncrementValue) val).getValue()));
        } else if (val instanceof PutValue) {
          put = getPut(put, row.getKey(), txId);
          put.add(columnFamily, column.getKey(), ((PutValue) val).getValue());
        }
      }
    }
    if (incrementPut != null) {
      mutations.add(incrementPut.build());
    }
    if (increment != null) {
      mutations.add(increment.build());
    }
    if (put != null) {
      mutations.add(put.build());
    }
  }
  if (!hbaseFlush(mutations)) {
    LOG.info("No writes to persist!");
  }
}
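To make the branching above concrete, here is an illustrative sketch of the shape of the updates map that persist() receives from the buffering layer. PutValue and IncrementValue are the two Update implementations the loop distinguishes; the row and column names are invented:

NavigableMap<byte[], NavigableMap<byte[], Update>> updates = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
NavigableMap<byte[], Update> columns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("visits"), new IncrementValue(1L));                      // becomes an increment mutation
columns.put(Bytes.toBytes("lastSeen"), new PutValue(Bytes.toBytes(1234567890L)));  // becomes a plain put
updates.put(Bytes.toBytes("user-42"), columns);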
Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class LevelDBTable, method persist.
@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> changes) throws Exception {
  persistedVersion = tx == null ? System.currentTimeMillis() : tx.getWritePointer();
  NavigableMap<byte[], NavigableMap<byte[], byte[]>> puts = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  NavigableMap<byte[], NavigableMap<byte[], Long>> increments = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  for (Map.Entry<byte[], NavigableMap<byte[], Update>> rowEntry : changes.entrySet()) {
    for (Map.Entry<byte[], Update> colEntry : rowEntry.getValue().entrySet()) {
      Update val = colEntry.getValue();
      if (val instanceof IncrementValue) {
        NavigableMap<byte[], Long> incrCols = increments.get(rowEntry.getKey());
        if (incrCols == null) {
          incrCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
          increments.put(rowEntry.getKey(), incrCols);
        }
        incrCols.put(colEntry.getKey(), ((IncrementValue) val).getValue());
      } else if (val instanceof PutValue) {
        NavigableMap<byte[], byte[]> putCols = puts.get(rowEntry.getKey());
        if (putCols == null) {
          putCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
          puts.put(rowEntry.getKey(), putCols);
        }
        putCols.put(colEntry.getKey(), ((PutValue) val).getValue());
      }
    }
  }
  if (!increments.isEmpty() || !puts.isEmpty()) {
    persist(increments, puts);
  }
}
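The get/null-check/put sequence used for both maps is the classic pre-Java-8 grouping idiom. On Java 8 and later the same grouping could be written inside the loop with Map.computeIfAbsent; this is a behaviourally equivalent sketch, not the actual CDAP code:

increments
  .computeIfAbsent(rowEntry.getKey(), r -> Maps.newTreeMap(Bytes.BYTES_COMPARATOR))
  .put(colEntry.getKey(), ((IncrementValue) val).getValue());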
Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class InMemoryTableService, method increment.
// todo: remove it from here: only used by "system" metrics table, which should be revised
@Deprecated
public static synchronized Map<byte[], Long> increment(String tableName, byte[] row, Map<byte[], Long> increments) {
  Map<byte[], Long> resultMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
  ConcurrentNavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, Update>>> table = tables.get(tableName);
  // get the correct row from the table, create it if it doesn't exist
  NavigableMap<byte[], NavigableMap<Long, Update>> rowMap = table.get(row);
  if (rowMap == null) {
    rowMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    table.put(row, rowMap);
  }
  // now increment each column, one by one
  long versionForWrite = System.currentTimeMillis();
  for (Map.Entry<byte[], Long> inc : increments.entrySet()) {
    IncrementValue increment = new IncrementValue(inc.getValue());
    // create the column in the row if it does not exist
    NavigableMap<Long, Update> colMap = rowMap.get(inc.getKey());
    Update last = null;
    if (colMap == null) {
      colMap = Maps.newTreeMap();
      rowMap.put(inc.getKey(), colMap);
    } else {
      last = colMap.lastEntry().getValue();
    }
    Update merged = Updates.mergeUpdates(last, increment);
    // put into the column with the given version
    long newValue = Bytes.toLong(merged.getBytes());
    resultMap.put(inc.getKey(), newValue);
    colMap.put(versionForWrite, merged);
  }
  return resultMap;
}
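The interesting step above is Updates.mergeUpdates, which collapses the latest stored value of a column with the new increment before the result is written back. A hedged sketch of the semantics this code relies on, assuming that merging two increments sums them (which is what the Bytes.toLong call above implies):

Update last = new IncrementValue(40L);
Update merged = Updates.mergeUpdates(last, new IncrementValue(2L));
// under that assumption, merged encodes the summed counter value
long newValue = Bytes.toLong(merged.getBytes());  // expected: 42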