
Example 1 with Get

Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.

Class ProgramScheduleStoreDataset, method addSchedules:

/**
   * Add one or more schedules to the store.
   *
   * @param schedules the schedules to add
   * @return the new schedules' last modified timestamp
   * @throws AlreadyExistsException if one of the schedules already exists
   */
public long addSchedules(Iterable<? extends ProgramSchedule> schedules) throws AlreadyExistsException {
    long currentTime = System.currentTimeMillis();
    for (ProgramSchedule schedule : schedules) {
        byte[] scheduleKey = rowKeyBytesForSchedule(schedule.getProgramId().getParent().schedule(schedule.getName()));
        // a non-empty Get result means a schedule is already stored under this key
        if (!store.get(new Get(scheduleKey)).isEmpty()) {
            throw new AlreadyExistsException(schedule.getProgramId().getParent().schedule(schedule.getName()));
        }
        Put schedulePut = new Put(scheduleKey);
        schedulePut.add(SCHEDULE_COLUMN_BYTES, GSON.toJson(schedule));
        schedulePut.add(UPDATED_COLUMN_BYTES, currentTime);
        // initially suspended
        schedulePut.add(STATUS_COLUMN_BYTES, ProgramScheduleStatus.SUSPENDED.toString());
        store.put(schedulePut);
        int count = 0;
        // write one trigger row per trigger key extracted from the schedule
        for (String triggerKey : extractTriggerKeys(schedule)) {
            byte[] triggerRowKey = rowKeyBytesForTrigger(scheduleKey, count++);
            store.put(new Put(triggerRowKey, TRIGGER_KEY_COLUMN_BYTES, triggerKey));
        }
    }
    }
    return currentTime;
}
Also used: AlreadyExistsException (co.cask.cdap.common.AlreadyExistsException), ProgramSchedule (co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule), Get (co.cask.cdap.api.dataset.table.Get), Put (co.cask.cdap.api.dataset.table.Put), Constraint (co.cask.cdap.internal.schedule.constraint.Constraint)
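
The Get here serves purely as an existence check: a Get built from just the row key fetches the whole row, and Row.isEmpty() reports whether anything is stored under that key. A minimal sketch of the idiom, assuming a Table named store and a made-up row key (the real method derives the key from the schedule's id):

byte[] rowKey = Bytes.toBytes("app.workflow.mySchedule");
// no columns on the Get means "fetch all columns of the row"
Row existing = store.get(new Get(rowKey));
if (!existing.isEmpty()) {
    // row already present; addSchedules() throws AlreadyExistsException at this point
}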

Example 2 with Get

Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.

Class CharCountProgram, method initialize:

@Override
public void initialize() throws Exception {
    SparkClientContext context = getContext();
    // configure Spark to use the LZF codec for internal data compression
    context.setSparkConf(new SparkConf().set("spark.io.compression.codec", "org.apache.spark.io.LZFCompressionCodec"));
    Table totals = context.getDataset("totals");
    // exercise the dataset with a read (the value is discarded), then reset the counter to 0
    totals.get(new Get("total").add("total")).getLong("total");
    totals.put(new Put("total").add("total", 0L));
}
Also used: Table (co.cask.cdap.api.dataset.table.Table), Get (co.cask.cdap.api.dataset.table.Get), SparkClientContext (co.cask.cdap.api.spark.SparkClientContext), SparkConf (org.apache.spark.SparkConf), Put (co.cask.cdap.api.dataset.table.Put)
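
Note that Row.getLong returns a boxed Long, which is null when the row or column is absent, so the read above cannot fail on an empty table; its result is simply discarded before the counter is reset. A minimal sketch of actually consuming the value, reusing the totals table from this example:

// getLong(...) yields null when the "total" row or column does not exist yet
Long total = totals.get(new Get("total").add("total")).getLong("total");
long current = (total == null) ? 0L : total;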

Example 3 with Get

Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.

Class TableTest, method testEmptyGet:

@Test
public void testEmptyGet() throws Exception {
    DatasetAdmin admin = getTableAdmin(CONTEXT1, MY_TABLE);
    admin.create();
    try {
        Transaction tx = txClient.startShort();
        Table myTable = getTable(CONTEXT1, MY_TABLE);
        ((TransactionAware) myTable).startTx(tx);
        myTable.put(R1, C1, V1);
        myTable.put(R1, C2, V2);
        // to be used for validation later
        TreeMap<byte[], byte[]> expectedColumns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        expectedColumns.put(C1, V1);
        expectedColumns.put(C2, V2);
        Result expectedResult = new Result(R1, expectedColumns);
        Result emptyResult = new Result(R1, ImmutableMap.<byte[], byte[]>of());
        ((TransactionAware) myTable).commitTx();
        txClient.commitOrThrow(tx);
        // start another transaction, so that the buffering table doesn't cache the values; the underlying Table
        // implementations are tested this way.
        tx = txClient.startShort();
        ((TransactionAware) myTable).startTx(tx);
        Row row = myTable.get(R1, new byte[][] { C1, C2 });
        assertEquals(expectedResult, row);
        // passing in empty columns returns empty result
        row = myTable.get(R1, new byte[][] {});
        assertEquals(emptyResult, row);
        // test all the Get constructors and their behavior
        // constructors specifying only rowkey retrieve all columns
        Get get = new Get(R1);
        Assert.assertNull(get.getColumns());
        assertEquals(expectedResult, myTable.get(get));
        get = new Get(Bytes.toString(R1));
        Assert.assertNull(get.getColumns());
        assertEquals(expectedResult, myTable.get(get));
        get.add(C1);
        get.add(Bytes.toString(C2));
        assertEquals(expectedResult, myTable.get(get));
        // constructor specifying columns, but with an empty array/collection retrieve 0 columns
        get = new Get(R1, new byte[][] {});
        Assert.assertNotNull(get.getColumns());
        assertEquals(emptyResult, myTable.get(get));
        get = new Get(R1, ImmutableList.<byte[]>of());
        Assert.assertNotNull(get.getColumns());
        assertEquals(emptyResult, myTable.get(get));
        get = new Get(Bytes.toString(R1), new String[] {});
        Assert.assertNotNull(get.getColumns());
        assertEquals(emptyResult, myTable.get(get));
        get = new Get(Bytes.toString(R1), ImmutableList.<String>of());
        Assert.assertNotNull(get.getColumns());
        assertEquals(emptyResult, myTable.get(get));
        row = myTable.get(R1, new byte[][] {});
        assertEquals(emptyResult, row);
        txClient.abort(tx);
    } finally {
        admin.drop();
    }
}
Also used: Table (co.cask.cdap.api.dataset.table.Table), HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable), Transaction (org.apache.tephra.Transaction), TransactionAware (org.apache.tephra.TransactionAware), Get (co.cask.cdap.api.dataset.table.Get), DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin), Row (co.cask.cdap.api.dataset.table.Row), TreeMap (java.util.TreeMap), Result (co.cask.cdap.api.dataset.table.Result), Test (org.junit.Test)
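
The constructor matrix this test pins down is easy to misread, so here is the asymmetry in compact form: a Get without columns means "all columns" (getColumns() is null), while a Get given an empty array or collection means "no columns" (getColumns() is non-null but empty). A condensed sketch with made-up row and column names:

Get fullRow  = new Get("r1");                      // getColumns() == null: fetch every column
Get selected = new Get("r1").add("c1").add("c2");  // fetch only c1 and c2
Get none     = new Get("r1", new String[] {});     // non-null but empty: fetch no columns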

Example 4 with Get

Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.

Class TableTest, method testMultiIncrementWithFlush:

private void testMultiIncrementWithFlush(boolean readless) throws Exception {
    final String tableName = "incrFlush";
    DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
    admin.create();
    Map<String, String> args = new HashMap<>();
    if (readless) {
        args.put(HBaseTable.SAFE_INCREMENTS, "true");
    }
    Table table = getTable(CONTEXT1, tableName, props, args);
    Transaction tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        // Write an increment, then flush it by calling commitTx.
        table.increment(new Increment(R1, C1, 10L));
        ((TransactionAware) table).commitTx();
    } finally {
        // invalidate the tx, leaving an excluded write in the table
        txClient.invalidate(tx.getTransactionId());
    }
    // validate the first write is not visible
    tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        Assert.assertEquals(null, table.get(new Get(R1, C1)).getLong(C1));
    } finally {
        txClient.commitOrThrow(tx);
    }
    tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        // Write an increment, then flush it by calling commitTx.
        table.increment(new Increment(R1, C1, 1L));
        ((TransactionAware) table).commitTx();
        // Write another increment, from both table instances
        table.increment(new Increment(R1, C1, 1L));
        if (readless) {
            Table table2 = getTable(CONTEXT1, tableName, props, args);
            ((TransactionAware) table2).startTx(tx);
            table2.increment(new Increment(R1, C1, 1L));
            ((TransactionAware) table2).commitTx();
        }
        ((TransactionAware) table).commitTx();
    } finally {
        txClient.commitOrThrow(tx);
    }
    // validate all increments are visible to a new tx
    tx = txClient.startShort();
    try {
        ((TransactionAware) table).startTx(tx);
        Assert.assertEquals(new Long(readless ? 3L : 2L), table.get(new Get(R1, C1)).getLong(C1));
    } finally {
        txClient.commitOrThrow(tx);
    }
    // drop table
    admin.drop();
}
Also used: Table (co.cask.cdap.api.dataset.table.Table), HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable), Transaction (org.apache.tephra.Transaction), HashMap (java.util.HashMap), TransactionAware (org.apache.tephra.TransactionAware), DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties), Increment (co.cask.cdap.api.dataset.table.Increment), Get (co.cask.cdap.api.dataset.table.Get), DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin)
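
The visibility checks above lean on the same boxed-Long behavior: a single-column Get followed by getLong yields null when no committed value is visible, which is how the invalidated first increment is detected. A minimal read-after-increment sketch under the test's R1/C1 constants:

table.increment(new Increment(R1, C1, 5L));
// getLong returns null when no visible, committed value exists for C1
Long counter = table.get(new Get(R1, C1)).getLong(C1);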

Example 5 with Get

Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.

Class TableTest, method testMultiGetWithTx:

@Test
public void testMultiGetWithTx() throws Exception {
    String testMultiGet = "testMultiGet";
    DatasetAdmin admin = getTableAdmin(CONTEXT1, testMultiGet);
    admin.create();
    try {
        Transaction tx = txClient.startShort();
        Table table = getTable(CONTEXT1, testMultiGet);
        ((TransactionAware) table).startTx(tx);
        for (int i = 0; i < 100; i++) {
            table.put(new Put(Bytes.toBytes("r" + i)).add(C1, V1).add(C2, V2));
        }
        txClient.canCommitOrThrow(tx, ((TransactionAware) table).getTxChanges());
        Assert.assertTrue(((TransactionAware) table).commitTx());
        txClient.commitOrThrow(tx);
        Transaction tx2 = txClient.startShort();
        ((TransactionAware) table).startTx(tx2);
        List<Get> gets = Lists.newArrayListWithCapacity(100);
        for (int i = 0; i < 100; i++) {
            gets.add(new Get(Bytes.toBytes("r" + i)));
        }
        List<Row> results = table.get(gets);
        txClient.commitOrThrow(tx2);
        for (int i = 0; i < 100; i++) {
            Row row = results.get(i);
            Assert.assertArrayEquals(Bytes.toBytes("r" + i), row.getRow());
            byte[] val = row.get(C1);
            Assert.assertNotNull(val);
            Assert.assertArrayEquals(V1, val);
            byte[] val2 = row.get(C2);
            Assert.assertNotNull(val2);
            Assert.assertArrayEquals(V2, val2);
        }
        Transaction tx3 = txClient.startShort();
        ((TransactionAware) table).startTx(tx3);
        gets = Lists.newArrayListWithCapacity(100);
        for (int i = 0; i < 100; i++) {
            gets.add(new Get("r" + i).add(C1));
        }
        results = table.get(gets);
        txClient.commitOrThrow(tx3);
        for (int i = 0; i < 100; i++) {
            Row row = results.get(i);
            Assert.assertArrayEquals(Bytes.toBytes("r" + i), row.getRow());
            byte[] val = row.get(C1);
            Assert.assertNotNull(val);
            Assert.assertArrayEquals(V1, val);
            // should have only returned column 1
            byte[] val2 = row.get(C2);
            Assert.assertNull(val2);
        }
        // retrieve different columns per row
        Transaction tx4 = txClient.startShort();
        ((TransactionAware) table).startTx(tx4);
        gets = Lists.newArrayListWithCapacity(100);
        for (int i = 0; i < 100; i++) {
            Get get = new Get("r" + i);
            // evens get C1, odds get C2
            get.add(i % 2 == 0 ? C1 : C2);
            gets.add(get);
        }
        results = table.get(gets);
        txClient.commitOrThrow(tx4);
        for (int i = 0; i < 100; i++) {
            Row row = results.get(i);
            Assert.assertArrayEquals(Bytes.toBytes("r" + i), row.getRow());
            byte[] val1 = row.get(C1);
            byte[] val2 = row.get(C2);
            if (i % 2 == 0) {
                Assert.assertNotNull(val1);
                Assert.assertArrayEquals(V1, val1);
                Assert.assertNull(val2);
            } else {
                Assert.assertNull(val1);
                Assert.assertNotNull(val2);
                Assert.assertArrayEquals(V2, val2);
            }
        }
    } finally {
        admin.drop();
    }
}
Also used: Table (co.cask.cdap.api.dataset.table.Table), HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable), Transaction (org.apache.tephra.Transaction), TransactionAware (org.apache.tephra.TransactionAware), Get (co.cask.cdap.api.dataset.table.Get), DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin), Row (co.cask.cdap.api.dataset.table.Row), Put (co.cask.cdap.api.dataset.table.Put), Test (org.junit.Test)
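
An implicit contract this test relies on: the List<Row> returned by the batched table.get(List<Get>) is positionally aligned with its input, which is why results.get(i) can be checked against row "r" + i. A minimal sketch of that alignment (ImmutableList is Guava, already imported by the test class):

List<Get> batch = ImmutableList.of(new Get("row-a"), new Get("row-b").add(C1));
List<Row> rows = table.get(batch);
// rows.get(0) is the full "row-a" row; rows.get(1) holds only column C1 of "row-b"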

Aggregations

Get (co.cask.cdap.api.dataset.table.Get): 36
Row (co.cask.cdap.api.dataset.table.Row): 19
Table (co.cask.cdap.api.dataset.table.Table): 18
Test (org.junit.Test): 17
Put (co.cask.cdap.api.dataset.table.Put): 15
Transaction (org.apache.tephra.Transaction): 10
TransactionAware (org.apache.tephra.TransactionAware): 10
DatasetAdmin (co.cask.cdap.api.dataset.DatasetAdmin): 8
KeyValueTable (co.cask.cdap.api.dataset.lib.KeyValueTable): 7
HBaseTable (co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable): 7
TransactionExecutor (org.apache.tephra.TransactionExecutor): 7
IOException (java.io.IOException): 6
NotFoundException (co.cask.cdap.common.NotFoundException): 5
DatasetProperties (co.cask.cdap.api.dataset.DatasetProperties): 4
Scanner (co.cask.cdap.api.dataset.table.Scanner): 4
ReadOnly (co.cask.cdap.api.annotation.ReadOnly): 3
Delete (co.cask.cdap.api.dataset.table.Delete): 3
Scan (co.cask.cdap.api.dataset.table.Scan): 3
ApplicationWithPrograms (co.cask.cdap.internal.app.deploy.pipeline.ApplicationWithPrograms): 3
ProgramSchedule (co.cask.cdap.internal.app.runtime.schedule.ProgramSchedule): 3