Search in sources :

Example 16 with Row

use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.

In class TableTest, the method testMultiGetWithTx:

@Test
public void testMultiGetWithTx() throws Exception {
    String tableName = "testMultiGet";
    DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName);
    admin.create();
    try {
        // Seed 100 rows "r0".."r99", each with columns C1=V1 and C2=V2, in one transaction.
        Transaction writeTx = txClient.startShort();
        Table table = getTable(CONTEXT1, tableName);
        TransactionAware txTable = (TransactionAware) table;
        txTable.startTx(writeTx);
        for (int rowIdx = 0; rowIdx < 100; rowIdx++) {
            table.put(new Put(Bytes.toBytes("r" + rowIdx)).add(C1, V1).add(C2, V2));
        }
        Assert.assertTrue(txClient.canCommit(writeTx, txTable.getTxChanges()));
        Assert.assertTrue(txTable.commitTx());
        Assert.assertTrue(txClient.commit(writeTx));

        // Multi-get with no column restriction: every row must return both columns.
        Transaction readAllTx = txClient.startShort();
        txTable.startTx(readAllTx);
        List<Get> requests = Lists.newArrayListWithCapacity(100);
        for (int rowIdx = 0; rowIdx < 100; rowIdx++) {
            requests.add(new Get(Bytes.toBytes("r" + rowIdx)));
        }
        List<Row> rows = table.get(requests);
        Assert.assertTrue(txClient.commit(readAllTx));
        for (int rowIdx = 0; rowIdx < 100; rowIdx++) {
            Row result = rows.get(rowIdx);
            Assert.assertArrayEquals(Bytes.toBytes("r" + rowIdx), result.getRow());
            byte[] c1Value = result.get(C1);
            Assert.assertNotNull(c1Value);
            Assert.assertArrayEquals(V1, c1Value);
            byte[] c2Value = result.get(C2);
            Assert.assertNotNull(c2Value);
            Assert.assertArrayEquals(V2, c2Value);
        }

        // Multi-get restricted to C1: C2 must not come back.
        Transaction readC1Tx = txClient.startShort();
        txTable.startTx(readC1Tx);
        requests = Lists.newArrayListWithCapacity(100);
        for (int rowIdx = 0; rowIdx < 100; rowIdx++) {
            requests.add(new Get("r" + rowIdx).add(C1));
        }
        rows = table.get(requests);
        Assert.assertTrue(txClient.commit(readC1Tx));
        for (int rowIdx = 0; rowIdx < 100; rowIdx++) {
            Row result = rows.get(rowIdx);
            Assert.assertArrayEquals(Bytes.toBytes("r" + rowIdx), result.getRow());
            byte[] c1Value = result.get(C1);
            Assert.assertNotNull(c1Value);
            Assert.assertArrayEquals(V1, c1Value);
            // should have only returned column 1
            Assert.assertNull(result.get(C2));
        }

        // retrieve different columns per row
        Transaction mixedTx = txClient.startShort();
        txTable.startTx(mixedTx);
        requests = Lists.newArrayListWithCapacity(100);
        for (int rowIdx = 0; rowIdx < 100; rowIdx++) {
            Get get = new Get("r" + rowIdx);
            // evens get C1, odds get C2
            get.add(rowIdx % 2 == 0 ? C1 : C2);
            requests.add(get);
        }
        rows = table.get(requests);
        Assert.assertTrue(txClient.commit(mixedTx));
        for (int rowIdx = 0; rowIdx < 100; rowIdx++) {
            Row result = rows.get(rowIdx);
            Assert.assertArrayEquals(Bytes.toBytes("r" + rowIdx), result.getRow());
            byte[] c1Value = result.get(C1);
            byte[] c2Value = result.get(C2);
            if (rowIdx % 2 == 0) {
                Assert.assertNotNull(c1Value);
                Assert.assertArrayEquals(V1, c1Value);
                Assert.assertNull(c2Value);
            } else {
                Assert.assertNull(c1Value);
                Assert.assertNotNull(c2Value);
                Assert.assertArrayEquals(V2, c2Value);
            }
        }
    } finally {
        // Always drop the dataset so repeated test runs start clean.
        admin.drop();
    }
}
Also used : Table(co.cask.cdap.api.dataset.table.Table) HBaseTable(co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseTable) Transaction(org.apache.tephra.Transaction) TransactionAware(org.apache.tephra.TransactionAware) Get(co.cask.cdap.api.dataset.table.Get) DatasetAdmin(co.cask.cdap.api.dataset.DatasetAdmin) Row(co.cask.cdap.api.dataset.table.Row) Put(co.cask.cdap.api.dataset.table.Put) Test(org.junit.Test)

Example 17 with Row

use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.

In class JobQueueDataset, the method retrieveSubscriberState:

@Override
public String retrieveSubscriberState(String topic) {
    // Look up the last-processed message id stored for this topic.
    Row stateRow = table.get(getRowKey(topic));
    byte[] messageId = stateRow.get(COL);
    if (messageId == null) {
        // No state has been persisted for this topic yet.
        return null;
    }
    return Bytes.toString(messageId);
}
Also used : Row(co.cask.cdap.api.dataset.table.Row)

Example 18 with Row

use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.

In class JobQueueDataset, the method markJobsForDeletion:

@Override
public void markJobsForDeletion(ScheduleId scheduleId, long markedTime) {
    byte[] prefix = getRowKeyPrefix(scheduleId);
    // Scan every job row belonging to this schedule and stamp a deletion marker on it.
    try (Scanner scanner = table.scan(prefix, Bytes.stopKeyForPrefix(prefix))) {
        for (Row row = scanner.next(); row != null; row = scanner.next()) {
            Job job = fromRow(row);
            boolean alreadyMarked = row.get(TO_DELETE_COL) != null;
            // only mark jobs that are not marked yet to avoid chance of conflict with concurrent delete
            // jobs that are pending launch will be deleted by the launcher anyway
            if (job.getState() != Job.State.PENDING_LAUNCH && !alreadyMarked) {
                table.put(row.getRow(), TO_DELETE_COL, Bytes.toBytes(markedTime));
            }
        }
    }
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) Row(co.cask.cdap.api.dataset.table.Row)

Example 19 with Row

use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.

In class DatasetBasedStreamSizeScheduleStore, the method upgradeVersionKeys:

// Return whether the upgrade process is complete - determined by checking if there were no rows that were
// upgraded after the invocation of this method.
private boolean upgradeVersionKeys(Table table, int maxNumberUpdateRows) {
    int upgradedCount = 0;
    try (Scanner scan = getScannerWithPrefix(table, KEY_PREFIX)) {
        // Upgrade only N rows in one transaction to reduce the probability of conflicts with regular Store operations.
        for (Row next = scan.next();
             next != null && upgradedCount < maxNumberUpdateRows;
             next = scan.next()) {
            if (isInvalidRow(next)) {
                LIMITED_LOG.debug("Stream Sized Schedule entry with Row key {} does not have all columns.", Bytes.toString(next.getRow()));
                continue;
            }
            byte[] oldRowKey = next.getRow();
            String oldRowKeyString = Bytes.toString(oldRowKey);
            String[] parts = oldRowKeyString.split(":");
            // streamSizeSchedule:namespace:application:type:program:schedule
            if (parts.length != 6) {
                LIMITED_LOG.debug("Skip upgrading StreamSizeSchedule {}. Expected row key " + "format 'streamSizeSchedule:namespace:application:type:program:schedule'", oldRowKeyString);
                continue;
            }
            // append application version after application name
            byte[] newRowKey = Bytes.toBytes(ScheduleUpgradeUtil.getNameWithDefaultVersion(parts, 3));
            // Check if a newRowKey is already present, if it is present, then simply delete the oldRowKey and continue;
            if (!table.get(newRowKey).isEmpty()) {
                table.delete(oldRowKey);
                upgradedCount++;
                continue;
            }
            // Copy every column of the old row to the new key, then remove the old row.
            Put copy = new Put(newRowKey);
            for (Map.Entry<byte[], byte[]> column : next.getColumns().entrySet()) {
                copy.add(column.getKey(), column.getValue());
            }
            table.put(copy);
            table.delete(oldRowKey);
            upgradedCount++;
        }
    }
    // If no rows were upgraded, notify that the upgrade process has completed.
    return upgradedCount == 0;
}
Also used : Scanner(co.cask.cdap.api.dataset.table.Scanner) Row(co.cask.cdap.api.dataset.table.Row) Map(java.util.Map) Put(co.cask.cdap.api.dataset.table.Put)

Example 20 with Row

use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.

In class DatasetBasedTimeScheduleStore, the method readJob:

private JobDetail readJob(Table table, JobKey key) {
    // The job is stored serialized in a single column named after its key string.
    byte[] column = Bytes.toBytes(key.toString());
    Row row = table.get(JOB_KEY, new byte[][] { column });
    byte[] serialized = row.isEmpty() ? null : row.get(column);
    if (serialized == null) {
        // No such job persisted.
        return null;
    }
    return (JobDetail) SerializationUtils.deserialize(serialized);
}
Also used : JobDetail(org.quartz.JobDetail) Row(co.cask.cdap.api.dataset.table.Row)

Aggregations

Row (co.cask.cdap.api.dataset.table.Row)111 Scanner (co.cask.cdap.api.dataset.table.Scanner)60 Test (org.junit.Test)23 Table (co.cask.cdap.api.dataset.table.Table)20 Get (co.cask.cdap.api.dataset.table.Get)16 ArrayList (java.util.ArrayList)16 TransactionExecutor (org.apache.tephra.TransactionExecutor)16 Map (java.util.Map)15 Put (co.cask.cdap.api.dataset.table.Put)14 HashMap (java.util.HashMap)10 Scan (co.cask.cdap.api.dataset.table.Scan)9 TransactionAware (org.apache.tephra.TransactionAware)9 MDSKey (co.cask.cdap.data2.dataset2.lib.table.MDSKey)8 QueueEntryRow (co.cask.cdap.data2.transaction.queue.QueueEntryRow)8 DatasetId (co.cask.cdap.proto.id.DatasetId)8 IOException (java.io.IOException)8 ImmutableMap (com.google.common.collect.ImmutableMap)7 Transaction (org.apache.tephra.Transaction)7 WriteOnly (co.cask.cdap.api.annotation.WriteOnly)6 Schema (co.cask.cdap.api.data.schema.Schema)6