
Example 46 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

From the class IncrementSummingScannerTest, the method testMultiColumnFlushAndCompact:

@Test
public void testMultiColumnFlushAndCompact() throws Exception {
    TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
    byte[] familyBytes = Bytes.toBytes("f");
    byte[] columnBytes = Bytes.toBytes("c");
    byte[] columnBytes2 = Bytes.toBytes("c2");
    HRegion region = createRegion(tableId, familyBytes);
    try {
        region.initialize();
        long now = 1;
        byte[] row1 = Bytes.toBytes("row1");
        byte[] row2 = Bytes.toBytes("row2");
        // Initial put to row1,c2
        Put row1P = new Put(row1);
        row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
        region.put(row1P);
        // Initial put to row2,c
        Put row2P = new Put(row2);
        row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
        region.put(row2P);
        // Generate some increments
        long ts = now;
        for (int i = 0; i < 50; i++) {
            region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
            region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
            ts++;
        }
        // The first scanner simulates the flush scanner
        RegionScanner scanner = new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()), ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
        // The second scanner is a user scan, which makes the asserts below simpler
        scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
        List<Cell> results = Lists.newArrayList();
        assertTrue(scanner.next(results, 10));
        assertEquals(2, results.size());
        Cell cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c", Bytes.toString(cell.getQualifier()));
        assertEquals(50, Bytes.toLong(cell.getValue()));
        cell = results.get(1);
        assertNotNull(cell);
        assertEquals("row1", Bytes.toString(cell.getRow()));
        assertEquals("c2", Bytes.toString(cell.getQualifier()));
        assertEquals(55, Bytes.toLong(cell.getValue()));
        results.clear();
        assertFalse(scanner.next(results, 10));
        assertEquals(1, results.size());
        cell = results.get(0);
        assertNotNull(cell);
        assertEquals("row2", Bytes.toString(cell.getRow()));
        assertEquals(60, Bytes.toLong(cell.getValue()));
    } finally {
        region.close();
    }
}
Also used : TableId(co.cask.cdap.data2.util.TableId) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) Scan(org.apache.hadoop.hbase.client.Scan) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) HBase96Test(co.cask.cdap.data.hbase.HBase96Test) Test(org.junit.Test)
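
The test relies on a generateIncrementPut helper that is not shown in this excerpt. A minimal sketch of what it likely does, assuming the HBaseTable.DELTA_WRITE attribute that CDAP uses to mark readless-increment writes (the constant name is an assumption here):

private Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
    Put p = new Put(row);
    // each increment contributes a delta of 1 at the given timestamp
    p.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
    // mark the put as an increment delta so IncrementSummingScanner sums it
    // instead of treating it as a plain overwrite (attribute name is an assumption)
    p.setAttribute(HBaseTable.DELTA_WRITE, Bytes.toBytes(true));
    return p;
}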

Example 47 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

From the class IncrementHandlerTest, the method createTable:

@Override
public HTable createTable(TableId tableId) throws Exception {
    HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
    HTableDescriptorBuilder tableDesc = tableUtil.buildHTableDescriptor(tableId);
    HColumnDescriptor columnDesc = new HColumnDescriptor(FAMILY);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(IncrementHandlerState.PROPERTY_TRANSACTIONAL, "false");
    tableDesc.addFamily(columnDesc);
    tableDesc.addCoprocessor(IncrementHandler.class.getName());
    HTableDescriptor htd = tableDesc.build();
    TEST_HBASE.getHBaseAdmin().createTable(htd);
    TEST_HBASE.waitUntilTableAvailable(htd.getName(), 5000);
    return tableUtil.createHTable(conf, tableId);
}
Also used : HTableDescriptorBuilder(co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) HBaseTableUtilFactory(co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory) HBaseTableUtil(co.cask.cdap.data2.util.hbase.HBaseTableUtil) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
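
For context, a hedged usage sketch of this createTable; the table name and row data are hypothetical, and TEST_HBASE, conf, and FAMILY are assumed to be provided by the test base class:

TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "incrementHandlerTest");
HTable table = createTable(tableId);
try {
    // with the IncrementHandler coprocessor attached, increments are stored as deltas
    Increment increment = new Increment(Bytes.toBytes("row1"));
    increment.addColumn(FAMILY, Bytes.toBytes("c"), 1L);
    table.increment(increment);
} finally {
    table.close();
}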

Example 48 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

From the class HBaseTableAdmin, the method create:

@Override
public void create() throws IOException {
    String columnFamily = Bytes.toString(TableProperties.getColumnFamilyBytes(spec.getProperties()));
    ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(columnFamily, hConf);
    if (TableProperties.getReadlessIncrementSupport(spec.getProperties())) {
        cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
    } else if (DatasetsUtil.isTransactional(spec.getProperties())) {
        // NOTE: we cannot limit the number of versions, since there is no hard limit on the number of transactions excluded from reads
        cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
    } else {
        cfdBuilder.setMaxVersions(1);
    }
    cfdBuilder.setBloomType(ColumnFamilyDescriptor.BloomType.ROW);
    Long ttl = TableProperties.getTTL(spec.getProperties());
    if (ttl != null) {
        // convert the TTL from seconds to milliseconds
        ttl = TimeUnit.SECONDS.toMillis(ttl);
        cfdBuilder.addProperty(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
    }
    final TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf);
    // if the dataset is configured for read-less increments, then set the table property to support upgrades
    boolean supportsReadlessIncrements = TableProperties.getReadlessIncrementSupport(spec.getProperties());
    if (supportsReadlessIncrements) {
        tdBuilder.addProperty(Table.PROPERTY_READLESS_INCREMENT, "true");
    }
    // if the dataset is configured to be non-transactional, then set the table property to support upgrades
    if (!DatasetsUtil.isTransactional(spec.getProperties())) {
        tdBuilder.addProperty(Constants.Dataset.TABLE_TX_DISABLED, "true");
        if (supportsReadlessIncrements) {
            // the read-less increment coprocessors assume a transactional table by default
            cfdBuilder.addProperty("dataset.table.readless.increment.transactional", "false");
        }
    }
    tdBuilder.addColumnFamily(cfdBuilder.build());
    CoprocessorJar coprocessorJar = createCoprocessorJar();
    for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
        tdBuilder.addCoprocessor(coprocessorManager.getCoprocessorDescriptor(coprocessor, coprocessorJar.getPriority(coprocessor)));
    }
    byte[][] splits = null;
    String splitsProperty = spec.getProperty(PROPERTY_SPLITS);
    if (splitsProperty != null) {
        splits = GSON.fromJson(splitsProperty, byte[][].class);
    }
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
        ddlExecutor.createTableIfNotExists(tdBuilder.build(), splits);
        try {
            Map<String, String> permissions = TableProperties.getTablePermissions(spec.getProperties());
            if (permissions != null && !permissions.isEmpty()) {
                tableUtil.grantPermissions(ddlExecutor, tableId, permissions);
            }
        } catch (IOException | RuntimeException e) {
            try {
                drop();
            } catch (Throwable t) {
                e.addSuppressed(t);
            }
            throw e;
        }
    }
}
Also used : HBaseDDLExecutor(co.cask.cdap.spi.hbase.HBaseDDLExecutor) ColumnFamilyDescriptorBuilder(co.cask.cdap.data2.util.hbase.ColumnFamilyDescriptorBuilder) TableDescriptorBuilder(co.cask.cdap.data2.util.hbase.TableDescriptorBuilder) IOException(java.io.IOException)
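
The branches above are driven by the dataset's properties. As an illustrative sketch, properties that would take the readless-increment path with a one-day TTL might be built like this (the property keys come from the CDAP Table API; the concrete values are assumptions):

DatasetProperties props = DatasetProperties.builder()
    // enables the readless-increment branch: unbounded max versions plus the table property
    .add(Table.PROPERTY_READLESS_INCREMENT, "true")
    // TTL is specified in seconds; create() converts it to milliseconds for the transaction property
    .add(Table.PROPERTY_TTL, "86400")
    .build();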

Example 49 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

From the class HBaseMetricsTable, the method put:

@Override
public void put(SortedMap<byte[], ? extends SortedMap<byte[], Long>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], ? extends SortedMap<byte[], Long>> row : updates.entrySet()) {
        PutBuilder put = tableUtil.buildPut(row.getKey());
        for (Map.Entry<byte[], Long> column : row.getValue().entrySet()) {
            put.add(columnFamily, column.getKey(), Bytes.toBytes(column.getValue()));
        }
        puts.add(put.build());
    }
    try {
        hTable.put(puts);
        hTable.flushCommits();
    } catch (IOException e) {
        throw new DataSetException("Put failed on table " + tableId, e);
    }
}
Also used : PutBuilder(co.cask.cdap.data2.util.hbase.PutBuilder) DataSetException(co.cask.cdap.api.dataset.DataSetException) IOException(java.io.IOException) Map(java.util.Map) NavigableMap(java.util.NavigableMap) SortedMap(java.util.SortedMap) Put(org.apache.hadoop.hbase.client.Put)
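
A short, hypothetical usage sketch for this put; the row and column names are illustrative, and metricsTable stands in for an instance of HBaseMetricsTable:

// batch two counter updates for a single row
SortedMap<byte[], SortedMap<byte[], Long>> updates = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
SortedMap<byte[], Long> columns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("requests"), 42L);
columns.put(Bytes.toBytes("errors"), 3L);
updates.put(Bytes.toBytes("service.frontend"), columns);
metricsTable.put(updates);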

Example 50 with TableId

Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.

From the class HBaseMetricsTable, the method putBytes:

@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
        PutBuilder put = tableUtil.buildPut(row.getKey());
        for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
            put.add(columnFamily, column.getKey(), column.getValue());
        }
        puts.add(put.build());
    }
    try {
        hTable.put(puts);
        hTable.flushCommits();
    } catch (IOException e) {
        throw new DataSetException("Put failed on table " + tableId, e);
    }
}
Also used : PutBuilder(co.cask.cdap.data2.util.hbase.PutBuilder) DataSetException(co.cask.cdap.api.dataset.DataSetException) IOException(java.io.IOException) Map(java.util.Map) NavigableMap(java.util.NavigableMap) SortedMap(java.util.SortedMap) Put(org.apache.hadoop.hbase.client.Put)
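
Note that putBytes mirrors put above; the only difference is that values are written as caller-supplied byte arrays rather than Bytes.toBytes(long) encodings. The explicit flushCommits() in both methods suggests the underlying HTable is used with autoflush disabled, so puts are buffered client-side until flushed.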

Aggregations

TableId (co.cask.cdap.data2.util.TableId): 100 usages
Test (org.junit.Test): 49 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 40 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 39 usages
Put (org.apache.hadoop.hbase.client.Put): 34 usages
NamespaceId (co.cask.cdap.proto.id.NamespaceId): 26 usages
Cell (org.apache.hadoop.hbase.Cell): 24 usages
Scan (org.apache.hadoop.hbase.client.Scan): 24 usages
HTableDescriptorBuilder (co.cask.cdap.data2.util.hbase.HTableDescriptorBuilder): 18 usages
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 17 usages
HBaseTableUtil (co.cask.cdap.data2.util.hbase.HBaseTableUtil): 15 usages
HBaseTableUtilFactory (co.cask.cdap.data2.util.hbase.HBaseTableUtilFactory): 15 usages
IOException (java.io.IOException): 13 usages
HTable (org.apache.hadoop.hbase.client.HTable): 11 usages
Delete (org.apache.hadoop.hbase.client.Delete): 9 usages
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 9 usages
HBaseDDLExecutor (co.cask.cdap.spi.hbase.HBaseDDLExecutor): 8 usages
Path (org.apache.hadoop.fs.Path): 8 usages
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 8 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 8 usages