Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class IncrementSummingScannerTest, method testMultiColumnFlushAndCompact.
@Test
public void testMultiColumnFlushAndCompact() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "testMultiColumnFlushAndCompact");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  byte[] columnBytes2 = Bytes.toBytes("c2");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long now = 1;
    byte[] row1 = Bytes.toBytes("row1");
    byte[] row2 = Bytes.toBytes("row2");
    // Initial put to row1,c2
    Put row1P = new Put(row1);
    row1P.add(familyBytes, columnBytes2, now - 1, Bytes.toBytes(5L));
    region.put(row1P);
    // Initial put to row2,c
    Put row2P = new Put(row2);
    row2P.add(familyBytes, columnBytes, now - 1, Bytes.toBytes(10L));
    region.put(row2P);
    // Generate some increments
    long ts = now;
    for (int i = 0; i < 50; i++) {
      region.put(generateIncrementPut(familyBytes, columnBytes, row1, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes, row2, ts));
      region.put(generateIncrementPut(familyBytes, columnBytes2, row1, ts));
      ts++;
    }
    // First scanner represents the flush scanner
    RegionScanner scanner =
      new IncrementSummingScanner(region, -1, region.getScanner(new Scan().setMaxVersions()),
                                  ScanType.COMPACT_RETAIN_DELETES, now + 15, -1);
    // Second scanner is a user scan, this is to help in easy asserts
    scanner = new IncrementSummingScanner(region, -1, scanner, ScanType.USER_SCAN);
    List<Cell> results = Lists.newArrayList();
    assertTrue(scanner.next(results, 10));
    assertEquals(2, results.size());
    Cell cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c", Bytes.toString(cell.getQualifier()));
    assertEquals(50, Bytes.toLong(cell.getValue()));
    cell = results.get(1);
    assertNotNull(cell);
    assertEquals("row1", Bytes.toString(cell.getRow()));
    assertEquals("c2", Bytes.toString(cell.getQualifier()));
    assertEquals(55, Bytes.toLong(cell.getValue()));
    results.clear();
    assertFalse(scanner.next(results, 10));
    assertEquals(1, results.size());
    cell = results.get(0);
    assertNotNull(cell);
    assertEquals("row2", Bytes.toString(cell.getRow()));
    assertEquals(60, Bytes.toLong(cell.getValue()));
  } finally {
    region.close();
  }
}
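The test depends on a generateIncrementPut helper that is not part of this excerpt. A minimal sketch of what such a helper might look like follows; the attribute key used to mark a Put as a delta (readless increment) write is an assumption, since CDAP defines its own constant for it.

  // Hypothetical sketch: marks a single-cell Put as a "delta" write so that
  // IncrementSummingScanner sums it into the running total instead of treating
  // it as a full overwrite. The attribute key "d" is an assumption.
  private Put generateIncrementPut(byte[] familyBytes, byte[] columnBytes, byte[] row, long ts) {
    Put p = new Put(row);
    p.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
    p.setAttribute("d", Bytes.toBytes("true")); // assumed attribute name and value
    return p;
  }

With 50 such delta puts per column, the summed results above come out to 50 for row1/c, 5 + 50 = 55 for row1/c2, and 10 + 50 = 60 for row2/c.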
Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class IncrementHandlerTest, method createTable.
@Override
public HTable createTable(TableId tableId) throws Exception {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder tableDesc = tableUtil.buildHTableDescriptor(tableId);
  HColumnDescriptor columnDesc = new HColumnDescriptor(FAMILY);
  columnDesc.setMaxVersions(Integer.MAX_VALUE);
  columnDesc.setValue(IncrementHandlerState.PROPERTY_TRANSACTIONAL, "false");
  tableDesc.addFamily(columnDesc);
  tableDesc.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor htd = tableDesc.build();
  TEST_HBASE.getHBaseAdmin().createTable(htd);
  TEST_HBASE.waitUntilTableAvailable(htd.getName(), 5000);
  return tableUtil.createHTable(conf, tableId);
}
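A table created this way can be written to with ordinary HBase Increment operations, which the attached IncrementHandler coprocessor stores as delta cells. A minimal usage sketch, assuming the surrounding test class's FAMILY constant and using an illustrative table name and row/column values:

  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "incrementTest");
  try (HTable table = createTable(tableId)) {
    // The IncrementHandler coprocessor intercepts this increment.
    Increment increment = new Increment(Bytes.toBytes("row1"));
    increment.addColumn(FAMILY, Bytes.toBytes("c"), 1L);
    table.increment(increment);
  }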
Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class HBaseTableAdmin, method create.
@Override
public void create() throws IOException {
  String columnFamily = Bytes.toString(TableProperties.getColumnFamilyBytes(spec.getProperties()));
  ColumnFamilyDescriptorBuilder cfdBuilder = HBaseTableUtil.getColumnFamilyDescriptorBuilder(columnFamily, hConf);
  if (TableProperties.getReadlessIncrementSupport(spec.getProperties())) {
    cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
  } else if (DatasetsUtil.isTransactional(spec.getProperties())) {
    // NOTE: we cannot limit the number of versions, as there is no hard limit on the number of
    // transactions excluded from reads
    cfdBuilder.setMaxVersions(Integer.MAX_VALUE);
  } else {
    cfdBuilder.setMaxVersions(1);
  }
  cfdBuilder.setBloomType(ColumnFamilyDescriptor.BloomType.ROW);
  Long ttl = TableProperties.getTTL(spec.getProperties());
  if (ttl != null) {
    // convert TTL from seconds to milliseconds
    ttl = TimeUnit.SECONDS.toMillis(ttl);
    cfdBuilder.addProperty(TxConstants.PROPERTY_TTL, String.valueOf(ttl));
  }
  final TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(tableId, cConf);
  // if the dataset is configured for read-less increments, set the table property to support upgrades
  boolean supportsReadlessIncrements = TableProperties.getReadlessIncrementSupport(spec.getProperties());
  if (supportsReadlessIncrements) {
    tdBuilder.addProperty(Table.PROPERTY_READLESS_INCREMENT, "true");
  }
  // if the dataset is configured to be non-transactional, set the table property to support upgrades
  if (!DatasetsUtil.isTransactional(spec.getProperties())) {
    tdBuilder.addProperty(Constants.Dataset.TABLE_TX_DISABLED, "true");
    if (supportsReadlessIncrements) {
      // by default, the read-less increment coprocessors assume the table is transactional
      cfdBuilder.addProperty("dataset.table.readless.increment.transactional", "false");
    }
  }
  tdBuilder.addColumnFamily(cfdBuilder.build());
  CoprocessorJar coprocessorJar = createCoprocessorJar();
  for (Class<? extends Coprocessor> coprocessor : coprocessorJar.getCoprocessors()) {
    tdBuilder.addCoprocessor(
      coprocessorManager.getCoprocessorDescriptor(coprocessor, coprocessorJar.getPriority(coprocessor)));
  }
  byte[][] splits = null;
  String splitsProperty = spec.getProperty(PROPERTY_SPLITS);
  if (splitsProperty != null) {
    splits = GSON.fromJson(splitsProperty, byte[][].class);
  }
  try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
    ddlExecutor.createTableIfNotExists(tdBuilder.build(), splits);
    try {
      Map<String, String> permissions = TableProperties.getTablePermissions(spec.getProperties());
      if (permissions != null && !permissions.isEmpty()) {
        tableUtil.grantPermissions(ddlExecutor, tableId, permissions);
      }
    } catch (IOException | RuntimeException e) {
      try {
        drop();
      } catch (Throwable t) {
        e.addSuppressed(t);
      }
      throw e;
    }
  }
}
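The PROPERTY_SPLITS value is expected to be a GSON-serialized byte[][], matching the fromJson call above. A hypothetical illustration of producing such a value when configuring the dataset (the split keys are arbitrary examples):

  // Illustrative only: pre-split keys serialized the same way create() deserializes them.
  byte[][] splitKeys = new byte[][] { Bytes.toBytes("g"), Bytes.toBytes("n"), Bytes.toBytes("u") };
  String splitsProperty = new Gson().toJson(splitKeys);
  // splitsProperty would then be set as the value of the dataset property named by PROPERTY_SPLITS.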
Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class HBaseMetricsTable, method put.
@Override
public void put(SortedMap<byte[], ? extends SortedMap<byte[], Long>> updates) {
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], Long>> row : updates.entrySet()) {
    PutBuilder put = tableUtil.buildPut(row.getKey());
    for (Map.Entry<byte[], Long> column : row.getValue().entrySet()) {
      put.add(columnFamily, column.getKey(), Bytes.toBytes(column.getValue()));
    }
    puts.add(put.build());
  }
  try {
    hTable.put(puts);
    hTable.flushCommits();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
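A hypothetical call site, building the nested sorted maps with a byte-array comparator (the row and column names are illustrative, and Bytes.BYTES_COMPARATOR is assumed to be the comparator constant available in this codebase):

  SortedMap<byte[], SortedMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  SortedMap<byte[], Long> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  columns.put(Bytes.toBytes("count"), 42L);
  updates.put(Bytes.toBytes("metric.row"), columns);
  metricsTable.put(updates); // one batched hTable.put(List) plus a single flushCommits()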
Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class HBaseMetricsTable, method putBytes.
@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
    PutBuilder put = tableUtil.buildPut(row.getKey());
    for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
      put.add(columnFamily, column.getKey(), column.getValue());
    }
    puts.add(put.build());
  }
  try {
    hTable.put(puts);
    hTable.flushCommits();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
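The two methods differ only in how values are encoded: put serializes each Long with Bytes.toBytes before writing, while putBytes writes the supplied byte arrays unchanged. Both collect all cells into a single batched hTable.put(List) call followed by one flushCommits, and both surface any IOException as a DataSetException naming the affected tableId.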