
Example 1 with Update

Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

The class ProgramScheduleStoreDatasetTest defines the method testDeleteScheduleByTriggeringProgram.

@Test
public void testDeleteScheduleByTriggeringProgram() throws Exception {
    DatasetFramework dsFramework = getInjector().getInstance(DatasetFramework.class);
    TransactionSystemClient txClient = getInjector().getInstance(TransactionSystemClient.class);
    TransactionExecutorFactory txExecutorFactory = new DynamicTransactionExecutorFactory(txClient);
    dsFramework.truncateInstance(Schedulers.STORE_DATASET_ID);
    final ProgramScheduleStoreDataset store = dsFramework.getDataset(Schedulers.STORE_DATASET_ID, new HashMap<String, String>(), null);
    Assert.assertNotNull(store);
    TransactionExecutor txExecutor = txExecutorFactory.createExecutor(Collections.singleton((TransactionAware) store));
    SatisfiableTrigger prog1Trigger = new ProgramStatusTrigger(PROG1_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED);
    SatisfiableTrigger prog2Trigger = new ProgramStatusTrigger(PROG2_ID, ProgramStatus.COMPLETED, ProgramStatus.FAILED, ProgramStatus.KILLED);
    final ProgramSchedule sched1 = new ProgramSchedule("sched1", "a program status trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), prog1Trigger, ImmutableList.<Constraint>of());
    final ProgramSchedule sched2 = new ProgramSchedule("sched2", "a program status trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), prog2Trigger, ImmutableList.<Constraint>of());
    final ProgramSchedule schedOr = new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new OrTrigger(new PartitionTrigger(DS1_ID, 1), prog1Trigger, new AndTrigger(new OrTrigger(prog1Trigger, prog2Trigger), new PartitionTrigger(DS2_ID, 1)), new OrTrigger(prog2Trigger)), ImmutableList.<Constraint>of());
    final ProgramSchedule schedAnd = new ProgramSchedule("schedAnd", "an AND trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new AndTrigger(new PartitionTrigger(DS1_ID, 1), prog2Trigger, new AndTrigger(prog1Trigger, new PartitionTrigger(DS2_ID, 1))), ImmutableList.<Constraint>of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.addSchedules(ImmutableList.of(sched1, sched2, schedOr, schedAnd));
        }
    });
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // ProgramStatus event for PROG1_ID should trigger only sched1, schedOr, schedAnd
            Assert.assertEquals(ImmutableSet.of(sched1, schedOr, schedAnd), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
            // ProgramStatus event for PROG2_ID should trigger only sched2, schedOr, schedAnd
            Assert.assertEquals(ImmutableSet.of(sched2, schedOr, schedAnd), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
        }
    });
    // update or delete all schedules triggered by PROG1_ID
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.modifySchedulesTriggeredByDeletedProgram(PROG1_ID);
        }
    });
    final ProgramSchedule schedOrNew = new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new OrTrigger(new PartitionTrigger(DS1_ID, 1), new AndTrigger(prog2Trigger, new PartitionTrigger(DS2_ID, 1)), prog2Trigger), ImmutableList.of());
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // ProgramStatus event for PROG1_ID should trigger no schedules after modifying schedules triggered by it
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.COMPLETED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.FAILED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG1_ID, ProgramStatus.KILLED))));
            // ProgramStatus event for PROG2_ID should trigger only sched2 and schedOrNew
            Assert.assertEquals(ImmutableSet.of(sched2, schedOrNew), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
        }
    });
    // update or delete all schedules triggered by PROG2_ID
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            store.modifySchedulesTriggeredByDeletedProgram(PROG2_ID);
        }
    });
    final ProgramSchedule schedOrNew1 = new ProgramSchedule("schedOr", "an OR trigger", PROG3_ID, ImmutableMap.of("propper", "popper"), new PartitionTrigger(DS1_ID, 1), ImmutableList.of());
    final Set<ProgramSchedule> ds1Schedules = new HashSet<>();
    txExecutor.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // ProgramStatus event for PROG2_ID should trigger no schedules after modifying schedules triggered by it
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.COMPLETED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.FAILED))));
            Assert.assertEquals(Collections.emptySet(), toScheduleSet(store.findSchedules(Schedulers.triggerKeyForProgramStatus(PROG2_ID, ProgramStatus.KILLED))));
            // event for DS1 should trigger only schedOrNew1 since all other schedules are deleted
            ds1Schedules.addAll(toScheduleSet(store.findSchedules(Schedulers.triggerKeyForPartition(DS1_ID))));
        }
    });
    Assert.assertEquals(ImmutableSet.of(schedOrNew1), ds1Schedules);
}
Also used : OrTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.OrTrigger) DynamicTransactionExecutorFactory(co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory) TransactionExecutor(org.apache.tephra.TransactionExecutor) DatasetManagementException(co.cask.cdap.api.dataset.DatasetManagementException) TransactionExecutorFactory(co.cask.cdap.data2.transaction.TransactionExecutorFactory) DynamicTransactionExecutorFactory(co.cask.cdap.data.runtime.DynamicTransactionExecutorFactory) AndTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.AndTrigger) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) SatisfiableTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.SatisfiableTrigger) ProgramSchedule(co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule) TransactionAware(org.apache.tephra.TransactionAware) ProgramStatusTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.ProgramStatusTrigger) PartitionTrigger(co.cask.cdap.internal.app.runtime.schedule.trigger.PartitionTrigger) HashSet(java.util.HashSet) Test(org.junit.Test)
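The assertions above pass the result of store.findSchedules(...) through a toScheduleSet helper that is not included in this snippet. A minimal sketch of what such a helper might look like, assuming (for illustration only) that findSchedules returns a collection of ProgramScheduleRecord objects exposing the schedule via getSchedule(), and that java.util.Collection is imported by the test:

private Set<ProgramSchedule> toScheduleSet(Collection<ProgramScheduleRecord> records) {
    // Collect only the schedules so they can be compared against the expected ImmutableSet
    Set<ProgramSchedule> schedules = new HashSet<>();
    for (ProgramScheduleRecord record : records) {
        schedules.add(record.getSchedule());
    }
    return schedules;
}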

Example 2 with Update

Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

The class AppMetadataStore defines the method addWorkflowNodeState.

private void addWorkflowNodeState(ProgramRunId programRunId, Map<String, String> systemArgs, ProgramRunStatus status, @Nullable BasicThrowable failureCause, byte[] sourceId) {
    String workflowNodeId = systemArgs.get(ProgramOptionConstants.WORKFLOW_NODE_ID);
    String workflowName = systemArgs.get(ProgramOptionConstants.WORKFLOW_NAME);
    String workflowRun = systemArgs.get(ProgramOptionConstants.WORKFLOW_RUN_ID);
    ApplicationId appId = programRunId.getParent().getParent();
    ProgramRunId workflowRunId = appId.workflow(workflowName).run(workflowRun);
    // Node states will be stored with the following key:
    // workflowNodeState.namespace.app.WORKFLOW.workflowName.workflowRun.workflowNodeId
    MDSKey key = getProgramKeyBuilder(TYPE_WORKFLOW_NODE_STATE, workflowRunId).add(workflowNodeId).build();
    WorkflowNodeStateDetail nodeStateDetail = new WorkflowNodeStateDetail(workflowNodeId, ProgramRunStatus.toNodeStatus(status), programRunId.getRun(), failureCause);
    write(key, nodeStateDetail);
    // Get the run record of the Workflow which started this program
    key = getProgramKeyBuilder(TYPE_RUN_RECORD_STARTED, workflowRunId).build();
    RunRecordMeta record = get(key, RunRecordMeta.class);
    if (record != null) {
        // Update the parent Workflow run record by adding node id and program run id in the properties
        Map<String, String> properties = new HashMap<>(record.getProperties());
        properties.put(workflowNodeId, programRunId.getRun());
        write(key, RunRecordMeta.builder(record).setProperties(properties).setSourceId(sourceId).build());
    }
}
Also used : HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) MDSKey(co.cask.cdap.data2.dataset2.lib.table.MDSKey) ProgramRunId(co.cask.cdap.proto.id.ProgramRunId) ApplicationId(co.cask.cdap.proto.id.ApplicationId) WorkflowNodeStateDetail(co.cask.cdap.proto.WorkflowNodeStateDetail)
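getProgramKeyBuilder is internal to AppMetadataStore, so the key layout described in the comment is only hinted at here. A rough sketch of how the composed key could be built directly with MDSKey.Builder; the exact part order and the accessors used are assumptions for illustration, not the actual helper:

// Illustrative only: approximates the documented layout
// workflowNodeState.namespace.app.WORKFLOW.workflowName.workflowRun.workflowNodeId
MDSKey key = new MDSKey.Builder()
    .add(TYPE_WORKFLOW_NODE_STATE)            // "workflowNodeState"
    .add(workflowRunId.getNamespace())
    .add(workflowRunId.getApplication())
    .add(ProgramType.WORKFLOW.name())
    .add(workflowRunId.getProgram())          // workflowName
    .add(workflowRunId.getRun())              // workflowRun
    .add(workflowNodeId)
    .build();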

Example 3 with Update

Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

The class HBaseTable defines the method persist.

@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> updates) throws Exception {
    if (updates.isEmpty()) {
        return;
    }
    byte[] txId = tx == null ? null : Bytes.toBytes(tx.getTransactionId());
    byte[] txWritePointer = tx == null ? null : Bytes.toBytes(tx.getWritePointer());
    List<Mutation> mutations = new ArrayList<>();
    for (Map.Entry<byte[], NavigableMap<byte[], Update>> row : updates.entrySet()) {
        // create these only when they are needed
        PutBuilder put = null;
        PutBuilder incrementPut = null;
        IncrementBuilder increment = null;
        for (Map.Entry<byte[], Update> column : row.getValue().entrySet()) {
            // we want to support both tx and non-tx modes
            if (tx != null) {
                // TODO: hijacking timestamp... bad
                Update val = column.getValue();
                if (val instanceof IncrementValue) {
                    if (safeReadlessIncrements) {
                        increment = getIncrement(increment, row.getKey(), txId, txWritePointer);
                        increment.add(columnFamily, column.getKey(), tx.getWritePointer(), ((IncrementValue) val).getValue());
                    } else {
                        incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
                        incrementPut.add(columnFamily, column.getKey(), tx.getWritePointer(), Bytes.toBytes(((IncrementValue) val).getValue()));
                    }
                } else if (val instanceof PutValue) {
                    put = getPut(put, row.getKey(), txId);
                    put.add(columnFamily, column.getKey(), tx.getWritePointer(), wrapDeleteIfNeeded(((PutValue) val).getValue()));
                }
            } else {
                Update val = column.getValue();
                if (val instanceof IncrementValue) {
                    incrementPut = getPutForIncrement(incrementPut, row.getKey(), txId);
                    incrementPut.add(columnFamily, column.getKey(), Bytes.toBytes(((IncrementValue) val).getValue()));
                } else if (val instanceof PutValue) {
                    put = getPut(put, row.getKey(), txId);
                    put.add(columnFamily, column.getKey(), ((PutValue) val).getValue());
                }
            }
        }
        if (incrementPut != null) {
            mutations.add(incrementPut.build());
        }
        if (increment != null) {
            mutations.add(increment.build());
        }
        if (put != null) {
            mutations.add(put.build());
        }
    }
    if (!hbaseFlush(mutations)) {
        LOG.info("No writes to persist!");
    }
}
Also used : NavigableMap(java.util.NavigableMap) ArrayList(java.util.ArrayList) Update(co.cask.cdap.data2.dataset2.lib.table.Update) IncrementValue(co.cask.cdap.data2.dataset2.lib.table.IncrementValue) PutValue(co.cask.cdap.data2.dataset2.lib.table.PutValue) PutBuilder(co.cask.cdap.data2.util.hbase.PutBuilder) IncrementBuilder(co.cask.cdap.data2.util.hbase.IncrementBuilder) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) NavigableMap(java.util.NavigableMap)
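For context, the change set handed to persist() maps row keys to per-column updates, where each Update is either a PutValue (a regular write) or an IncrementValue (a readless increment). A hypothetical example of assembling such a map; the row key, column names, and values are made up for illustration:

NavigableMap<byte[], NavigableMap<byte[], Update>> updates = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
NavigableMap<byte[], Update> columns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("count"), new IncrementValue(1L));              // readless increment
columns.put(Bytes.toBytes("status"), new PutValue(Bytes.toBytes("ok")));  // regular write
updates.put(Bytes.toBytes("row1"), columns);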

Example 4 with Update

Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

The class LevelDBTable defines the method persist.

@Override
protected void persist(NavigableMap<byte[], NavigableMap<byte[], Update>> changes) throws Exception {
    persistedVersion = tx == null ? System.currentTimeMillis() : tx.getWritePointer();
    NavigableMap<byte[], NavigableMap<byte[], byte[]>> puts = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    NavigableMap<byte[], NavigableMap<byte[], Long>> increments = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], NavigableMap<byte[], Update>> rowEntry : changes.entrySet()) {
        for (Map.Entry<byte[], Update> colEntry : rowEntry.getValue().entrySet()) {
            Update val = colEntry.getValue();
            if (val instanceof IncrementValue) {
                NavigableMap<byte[], Long> incrCols = increments.get(rowEntry.getKey());
                if (incrCols == null) {
                    incrCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
                    increments.put(rowEntry.getKey(), incrCols);
                }
                incrCols.put(colEntry.getKey(), ((IncrementValue) val).getValue());
            } else if (val instanceof PutValue) {
                NavigableMap<byte[], byte[]> putCols = puts.get(rowEntry.getKey());
                if (putCols == null) {
                    putCols = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
                    puts.put(rowEntry.getKey(), putCols);
                }
                putCols.put(colEntry.getKey(), ((PutValue) val).getValue());
            }
        }
    }
    if (!increments.isEmpty() || !puts.isEmpty()) {
        persist(increments, puts);
    }
}
Also used : IncrementValue(co.cask.cdap.data2.dataset2.lib.table.IncrementValue) PutValue(co.cask.cdap.data2.dataset2.lib.table.PutValue) NavigableMap(java.util.NavigableMap) Update(co.cask.cdap.data2.dataset2.lib.table.Update) NavigableMap(java.util.NavigableMap) Map(java.util.Map)
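The null-check-then-put pattern used for both the increments and puts maps above can be expressed more compactly with Map.computeIfAbsent on Java 8+. A sketch of the increment branch under that assumption, using the same Guava and Bytes helpers already in scope:

// Equivalent to the explicit null check above, just more compact
NavigableMap<byte[], Long> incrCols =
    increments.computeIfAbsent(rowEntry.getKey(), k -> Maps.newTreeMap(Bytes.BYTES_COMPARATOR));
incrCols.put(colEntry.getKey(), ((IncrementValue) val).getValue());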

Example 5 with Update

Use of io.cdap.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.

The class InMemoryTableService defines the method increment.

// todo: remove it from here: only used by "system" metrics table, which should be revised
@Deprecated
public static synchronized Map<byte[], Long> increment(String tableName, byte[] row, Map<byte[], Long> increments) {
    Map<byte[], Long> resultMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    ConcurrentNavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, Update>>> table = tables.get(tableName);
    // get the correct row from the table, create it if it doesn't exist
    NavigableMap<byte[], NavigableMap<Long, Update>> rowMap = table.get(row);
    if (rowMap == null) {
        rowMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
        table.put(row, rowMap);
    }
    // now increment each column, one by one
    long versionForWrite = System.currentTimeMillis();
    for (Map.Entry<byte[], Long> inc : increments.entrySet()) {
        IncrementValue increment = new IncrementValue(inc.getValue());
        // create the column in the row if it does not exist
        NavigableMap<Long, Update> colMap = rowMap.get(inc.getKey());
        Update last = null;
        if (colMap == null) {
            colMap = Maps.newTreeMap();
            rowMap.put(inc.getKey(), colMap);
        } else {
            last = colMap.lastEntry().getValue();
        }
        Update merged = Updates.mergeUpdates(last, increment);
        // put into the column with given version
        long newValue = Bytes.toLong(merged.getBytes());
        resultMap.put(inc.getKey(), newValue);
        colMap.put(versionForWrite, merged);
    }
    return resultMap;
}
Also used : IncrementValue(co.cask.cdap.data2.dataset2.lib.table.IncrementValue) ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) NavigableMap(java.util.NavigableMap) Update(co.cask.cdap.data2.dataset2.lib.table.Update) ConcurrentNavigableMap(java.util.concurrent.ConcurrentNavigableMap) NavigableMap(java.util.NavigableMap) ConcurrentSkipListMap(java.util.concurrent.ConcurrentSkipListMap) Map(java.util.Map) SortedMap(java.util.SortedMap)
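Updates.mergeUpdates collapses the new increment with whatever value was last stored in the column, and the snippet then reads the merged result back as a long via getBytes(). A conceptual approximation of that merge for the increment path; this is a sketch of the intent, not the actual CDAP implementation, and the method name is made up:

// Sketch only: shows the intent of merging an increment into the previous column value
static Update mergeIncrement(Update last, IncrementValue increment) {
    if (last == null) {
        return increment;
    }
    if (last instanceof IncrementValue) {
        // two pending increments collapse into their sum
        return new IncrementValue(((IncrementValue) last).getValue() + increment.getValue());
    }
    // otherwise the previous update holds an encoded long the increment is applied to
    long current = Bytes.toLong(last.getBytes());
    return new PutValue(Bytes.toBytes(current + increment.getValue()));
}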

Aggregations

NavigableMap (java.util.NavigableMap): 15
Map (java.util.Map): 13
Test (org.junit.Test): 8
Update (co.cask.cdap.data2.dataset2.lib.table.Update): 7
Update (io.cdap.cdap.data2.dataset2.lib.table.Update): 7
DatasetSpecification (io.cdap.cdap.api.dataset.DatasetSpecification): 6
IOException (java.io.IOException): 6
ArrayList (java.util.ArrayList): 6
ConcurrentNavigableMap (java.util.concurrent.ConcurrentNavigableMap): 6
PutValue (co.cask.cdap.data2.dataset2.lib.table.PutValue): 4
PutValue (io.cdap.cdap.data2.dataset2.lib.table.PutValue): 4
Collection (java.util.Collection): 4
HashSet (java.util.HashSet): 4
Set (java.util.Set): 4
DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException): 3
IncrementValue (co.cask.cdap.data2.dataset2.lib.table.IncrementValue): 3
SortedMap (java.util.SortedMap): 3
ConcurrentSkipListMap (java.util.concurrent.ConcurrentSkipListMap): 3
Table (co.cask.cdap.api.dataset.table.Table): 2
InMemoryTableModule (co.cask.cdap.data2.dataset2.module.lib.inmemory.InMemoryTableModule): 2