
Example 1 with Delete

use of co.cask.cdap.api.dataset.table.Delete in project cdap by caskdata.

the class IndexedTable method compareAndSwap.

/**
 * Perform a swap operation by primary key.
 * Parameters are as if they were on a non-indexed table.
 * Note that if the swap is on the secondary key column,
 * then the index must be updated; otherwise, this is a
 * pass-through to the underlying table.
 */
@ReadWrite
@Override
public boolean compareAndSwap(byte[] row, byte[] column, byte[] expected, byte[] newValue) {
    // if the column being swapped is not indexed, this is a pass-through to the data table.
    // likewise, if the expected value is the same as the new value, then the index is not affected either.
    if (!indexedColumns.contains(column) || Arrays.equals(expected, newValue)) {
        return table.compareAndSwap(row, column, expected, newValue);
    }
    // the swap is on the index column. it will only succeed if the current
    // value matches the expected value of the swap. if that value is not null,
    // then we must remove the row key from the index for that value.
    Delete idxDelete = null;
    if (expected != null) {
        idxDelete = new Delete(createIndexKey(row, column, expected), IDX_COL);
    }
    // if the new value is not null, then we must add the rowkey to the index
    // for that value.
    Put idxPut = null;
    if (newValue != null) {
        idxPut = new Put(createIndexKey(row, column, newValue), IDX_COL, row);
    }
    // apply all operations to both tables
    boolean success = table.compareAndSwap(row, column, expected, newValue);
    if (!success) {
        // do nothing: no changes
        return false;
    }
    if (idxDelete != null) {
        index.delete(idxDelete);
    }
    if (idxPut != null) {
        index.put(idxPut);
    }
    return true;
}
Also used : Delete(co.cask.cdap.api.dataset.table.Delete) Put(co.cask.cdap.api.dataset.table.Put) ReadWrite(co.cask.cdap.api.annotation.ReadWrite)
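
The javadoc above notes that only a swap on an indexed column has to touch the index. A minimal caller-side sketch of that contract, assuming an IndexedTable instance that is indexed on a hypothetical "city" column (the row key and values are made up for illustration):

private void swapIndexedValue(IndexedTable users) {
    byte[] row = Bytes.toBytes("alice");
    byte[] cityCol = Bytes.toBytes("city");
    users.put(new Put(row).add(cityCol, Bytes.toBytes("Boston")));
    // the swap targets the indexed column, so the index entry under "Boston" is removed
    // and a new one is added under "Berlin", in addition to the data-table update
    boolean swapped = users.compareAndSwap(row, cityCol, Bytes.toBytes("Boston"), Bytes.toBytes("Berlin"));
    // a swap on a non-indexed column, or one where expected equals newValue,
    // is a plain pass-through to the underlying table and leaves the index untouched
}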

Example 2 with Delete

use of co.cask.cdap.api.dataset.table.Delete in project cdap by caskdata.

the class ProgramScheduleStoreDataset method deleteSchedules.

/**
 * Removes one or more schedules from the store, along with any rows stored under each schedule's trigger key prefix.
 *
 * @param scheduleIds the schedules to delete
 * @throws NotFoundException if one of the schedules does not exist in the store
 */
public void deleteSchedules(Iterable<? extends ScheduleId> scheduleIds) throws NotFoundException {
    for (ScheduleId scheduleId : scheduleIds) {
        String scheduleKey = rowKeyForSchedule(scheduleId);
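        // fail fast: the schedule row must exist before anything is deleted for it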
        if (store.get(new Get(scheduleKey)).isEmpty()) {
            throw new NotFoundException(scheduleId);
        }
        store.delete(new Delete(scheduleKey));
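        // also remove every row stored under this schedule's trigger key prefix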
        byte[] prefix = keyPrefixForTriggerScan(scheduleKey);
        try (Scanner scanner = store.scan(new Scan(prefix, Bytes.stopKeyForPrefix(prefix)))) {
            Row row;
            while ((row = scanner.next()) != null) {
                store.delete(row.getRow());
            }
        }
    }
}
Also used : Delete(co.cask.cdap.api.dataset.table.Delete) Scanner(co.cask.cdap.api.dataset.table.Scanner) Get(co.cask.cdap.api.dataset.table.Get) NotFoundException(co.cask.cdap.common.NotFoundException) Scan(co.cask.cdap.api.dataset.table.Scan) Row(co.cask.cdap.api.dataset.table.Row) ScheduleId(co.cask.cdap.proto.id.ScheduleId)
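
Because the method throws as soon as it hits a missing schedule, ids that appear earlier in the iterable have already been deleted when the NotFoundException surfaces. A minimal caller-side sketch of handling that, assuming the surrounding helper method is hypothetical and the schedule ids come from elsewhere:

private void removeSchedules(ProgramScheduleStoreDataset scheduleStore,
                             Iterable<? extends ScheduleId> scheduleIds) {
    try {
        scheduleStore.deleteSchedules(scheduleIds);
    } catch (NotFoundException e) {
        // deletion stops at the first missing schedule; everything before it in the
        // iterable is already gone, so treat this as a partial success (or rethrow)
    }
}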

Example 3 with Delete

use of co.cask.cdap.api.dataset.table.Delete in project cdap by caskdata.

the class TableTest method testEmptyDelete.

@Test
public void testEmptyDelete() throws Exception {
    DatasetAdmin admin = getTableAdmin(CONTEXT1, MY_TABLE);
    admin.create();
    try {
        Transaction tx = txClient.startShort();
        Table myTable = getTable(CONTEXT1, MY_TABLE);
        ((TransactionAware) myTable).startTx(tx);
        myTable.put(R1, C1, V1);
        myTable.put(R1, C2, V2);
        myTable.put(R1, C3, V3);
        // specifying empty columns means to delete nothing
        myTable.delete(R1, new byte[][] {});
        myTable.delete(new Delete(R1, new byte[][] {}));
        myTable.delete(new Delete(R1, ImmutableList.<byte[]>of()));
        myTable.delete(new Delete(Bytes.toString(R1), new String[] {}));
        myTable.delete(new Delete(Bytes.toString(R1), ImmutableList.<String>of()));
        // verify the above delete calls deleted none of the rows
        Row row = myTable.get(R1);
        Assert.assertEquals(3, row.getColumns().size());
        Assert.assertArrayEquals(R1, row.getRow());
        Assert.assertArrayEquals(V1, row.get(C1));
        Assert.assertArrayEquals(V2, row.get(C2));
        Assert.assertArrayEquals(V3, row.get(C3));
        // test deletion of only one column
        Delete delete = new Delete(R1);
        Assert.assertNull(delete.getColumns());
        delete.add(C1);
        Assert.assertNotNull(delete.getColumns());
        myTable.delete(delete);
        row = myTable.get(R1);
        Assert.assertEquals(2, row.getColumns().size());
        Assert.assertArrayEquals(R1, row.getRow());
        Assert.assertArrayEquals(V2, row.get(C2));
        Assert.assertArrayEquals(V3, row.get(C3));
        // test delete of all columns
        myTable.delete(new Delete(R1));
        Assert.assertEquals(0, myTable.get(R1).getColumns().size());
        txClient.abort(tx);
    } finally {
        admin.drop();
    }
}
Also used : Delete(co.cask.cdap.api.dataset.table.Delete) Table(co.cask.cdap.api.dataset.table.Table) HBaseTable(co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Row(co.cask.cdap.api.dataset.table.Row) Test(org.junit.Test)
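
Condensed, the Delete semantics this test verifies look as follows; a minimal sketch, assuming a Table named table and byte[] constants ROW and COL are in scope:

table.delete(new Delete(ROW, new byte[][] {}));  // explicit empty column list: deletes nothing
table.delete(new Delete(ROW, COL));              // deletes only column COL of row ROW
table.delete(new Delete(ROW));                   // no columns specified: deletes the entire row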

Example 4 with Delete

use of co.cask.cdap.api.dataset.table.Delete in project cdap by caskdata.

the class TableTest method testMetrics.

private void testMetrics(boolean readless) throws Exception {
    final String tableName = "survive";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    Table table = getTable(CONTEXT1, tableName, props);
    final Map<String, Long> metrics = Maps.newHashMap();
    ((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {

        @Override
        public void increment(String metricName, long value) {
            Long old = metrics.get(metricName);
            metrics.put(metricName, old == null ? value : old + value);
        }

        @Override
        public void gauge(String metricName, long value) {
            metrics.put(metricName, value);
        }
    });
    // Note that we don't need to finish tx for metrics to be reported
    Transaction tx0 = txClient.startShort();
    ((TransactionAware) table).startTx(tx0);
    int writes = 0;
    int reads = 0;
    table.put(new Put(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    // note: will not write anything as expected value will not match
    table.compareAndSwap(R1, C1, V1, V2);
    verifyDatasetMetrics(metrics, writes, ++reads);
    table.increment(new Increment(R2, C2, 1L));
    if (readless) {
        verifyDatasetMetrics(metrics, ++writes, reads);
    } else {
        verifyDatasetMetrics(metrics, ++writes, ++reads);
    }
    table.incrementAndGet(new Increment(R2, C2, 1L));
    verifyDatasetMetrics(metrics, ++writes, ++reads);
    table.get(new Get(R1, C1, V1));
    verifyDatasetMetrics(metrics, writes, ++reads);
    Scanner scanner = table.scan(new Scan(null, null));
    while (scanner.next() != null) {
        verifyDatasetMetrics(metrics, writes, ++reads);
    }
    table.delete(new Delete(R1, C1, V1));
    verifyDatasetMetrics(metrics, ++writes, reads);
    // drop table
    admin.drop();
}
Also used : MetricsCollector(co.cask.cdap.api.metrics.MetricsCollector) Delete(co.cask.cdap.api.dataset.table.Delete) Scanner(co.cask.cdap.api.dataset.table.Scanner) Table(co.cask.cdap.api.dataset.table.Table) HBaseTable(co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) DatasetProperties(co.cask.cdap.api.dataset.DatasetProperties) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Put(co.cask.cdap.api.dataset.table.Put) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) Increment(co.cask.cdap.api.dataset.table.Increment) Get(co.cask.cdap.api.dataset.table.Get) MeteredDataset(co.cask.cdap.api.dataset.metrics.MeteredDataset) Scan(co.cask.cdap.api.dataset.table.Scan)
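
The readless flag only changes the accounting for increment(): with readless increments enabled the call is metered as a single write, otherwise as a read plus a write, while incrementAndGet() always costs both because it returns the new value. A minimal sketch of enabling the property, using the same builder call the test uses:

// with this property set, table.increment(...) skips the read-before-write and is
// counted as one write only; incrementAndGet(...) still incurs a read as well
DatasetProperties readlessProps = TableProperties.builder()
    .setReadlessIncrementSupport(true)
    .build();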

Example 5 with Delete

use of co.cask.cdap.api.dataset.table.Delete in project cdap by caskdata.

the class IndexedTableTest method testIndexedOperations.

@Test
public void testIndexedOperations() throws Exception {
    TransactionExecutor txnl = dsFrameworkUtil.newTransactionExecutor(table);
    // start a new transaction
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // add a value c with idx = 1, and b with idx = 2
            table.put(new Put(keyC).add(idxCol, idx1).add(valCol, valC));
            table.put(new Put(keyB).add(idxCol, idx2).add(valCol, valB));
        }
    });
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // read by key c
            Row row = table.get(new Get(keyC, colIdxVal));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx1, valC });
            // read by key b
            row = table.get(new Get(keyB, colIdxVal));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx2, valB });
            // read by idx 1 -> c
            row = readFirst(table.readByIndex(idxCol, idx1));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx1, valC });
            // read by idx 2 -> b
            row = readFirst(table.readByIndex(idxCol, idx2));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx2, valB });
            // test read over empty index (idx 3)
            assertEmpty(table.readByIndex(idxCol, idx3));
        }
    });
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // add a value a with idx = 1
            table.put(new Put(keyA).add(idxCol, idx1).add(valCol, valA));
        }
    });
    // read by idx 1 -> a
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Row row = readFirst(table.readByIndex(idxCol, idx1));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx1, valA });
        }
    });
    // start a new transaction
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // delete value a
            table.delete(new Delete(keyA, colIdxVal));
        }
    });
    // read by idx 1 -> c
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Row row = readFirst(table.readByIndex(idxCol, idx1));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx1, valC });
        }
    });
    // start a new transaction
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // add a value aa with idx 2
            table.put(new Put(keyAA).add(idxCol, idx2).add(valCol, valAA));
        }
    });
    // read by idx 2 -> aa
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Row row = readFirst(table.readByIndex(idxCol, idx2));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx2, valAA });
        }
    });
    // start a new transaction
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // swap value for aa to ab
            Assert.assertTrue(table.compareAndSwap(keyAA, valCol, valAA, valAB));
        }
    });
    // read by idx 2 -> ab
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Row row = readFirst(table.readByIndex(idxCol, idx2));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx2, valAB });
        }
    });
    // start a new transaction
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // swap value for aa to bb
            Assert.assertTrue(table.compareAndSwap(keyAA, valCol, valAB, valBB));
        }
    });
    // read by idx 2 -> bb (value of key aa)
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Row row = readFirst(table.readByIndex(idxCol, idx2));
            TableAssert.assertColumns(row, colIdxVal, new byte[][] { idx2, valBB });
        }
    });
    // start a new transaction
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // swap value for aa to null
            Assert.assertTrue(table.compareAndSwap(keyAA, valCol, valBB, null));
        }
    });
    // read by idx 2 -> a row with only the index column left (the value was swapped to null)
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            Row row = readFirst(table.readByIndex(idxCol, idx2));
            TableAssert.assertColumn(row, idxCol, idx2);
        }
    });
    // start a new transaction
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // swap idx for c to 3
            Assert.assertTrue(table.compareAndSwap(keyC, idxCol, idx1, idx3));
        }
    });
    // read by idx 1 -> null (no row carries that index value any more)
    txnl.execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            assertEmpty(table.readByIndex(idxCol, idx1));
            // read by idx 3 -> c
            Row row = readFirst(table.readByIndex(idxCol, idx3));
            TableAssert.assertColumns(row, new byte[][] { idxCol, valCol }, new byte[][] { idx3, valC });
        }
    });
}
Also used : Delete(co.cask.cdap.api.dataset.table.Delete) Get(co.cask.cdap.api.dataset.table.Get) TransactionExecutor(org.apache.tephra.TransactionExecutor) Row(co.cask.cdap.api.dataset.table.Row) Put(co.cask.cdap.api.dataset.table.Put) Test(org.junit.Test)

Aggregations

Delete (co.cask.cdap.api.dataset.table.Delete) 7
Row (co.cask.cdap.api.dataset.table.Row) 5
Scanner (co.cask.cdap.api.dataset.table.Scanner) 4
Get (co.cask.cdap.api.dataset.table.Get) 3
Put (co.cask.cdap.api.dataset.table.Put) 3
DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin) 2
Scan (co.cask.cdap.api.dataset.table.Scan) 2
Table (co.cask.cdap.api.dataset.table.Table) 2
HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) 2
Transaction (org.apache.tephra.Transaction) 2
TransactionAware (org.apache.tephra.TransactionAware) 2
Test (org.junit.Test) 2
ReadWrite (co.cask.cdap.api.annotation.ReadWrite) 1
DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties) 1
MeteredDataset (co.cask.cdap.api.dataset.metrics.MeteredDataset) 1
Increment (co.cask.cdap.api.dataset.table.Increment) 1
MetricsCollector (co.cask.cdap.api.metrics.MetricsCollector) 1
NotFoundException (co.cask.cdap.common.NotFoundException) 1
MDSKey (co.cask.cdap.data2.dataset2.lib.table.MDSKey) 1
ScheduleId (co.cask.cdap.proto.id.ScheduleId) 1