Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class HBaseStreamConsumerStateStoreFactory, method create:
@Override
public synchronized StreamConsumerStateStore create(StreamConfig streamConfig) throws IOException {
  NamespaceId namespace = streamConfig.getStreamId().getParent();
  TableId streamStateStoreTableId = StreamUtils.getStateStoreTableId(namespace);
  TableId hbaseTableId = tableUtil.createHTableId(new NamespaceId(streamStateStoreTableId.getNamespace()),
                                                  streamStateStoreTableId.getTableName());
  boolean tableExist;
  try (HBaseAdmin admin = new HBaseAdmin(hConf)) {
    tableExist = tableUtil.tableExists(admin, hbaseTableId);
  }
  if (!tableExist) {
    try (HBaseDDLExecutor ddlExecutor = ddlExecutorFactory.get()) {
      TableDescriptorBuilder tdBuilder = HBaseTableUtil.getTableDescriptorBuilder(hbaseTableId, cConf);
      ColumnFamilyDescriptorBuilder cfdBuilder =
        HBaseTableUtil.getColumnFamilyDescriptorBuilder(Bytes.toString(QueueEntryRow.COLUMN_FAMILY), hConf);
      tdBuilder.addColumnFamily(cfdBuilder.build());
      ddlExecutor.createTableIfNotExists(tdBuilder.build(), null);
    }
  }
  HTable hTable = tableUtil.createHTable(hConf, hbaseTableId);
  hTable.setWriteBufferSize(Constants.Stream.HBASE_WRITE_BUFFER_SIZE);
  hTable.setAutoFlushTo(false);
  return new HBaseStreamConsumerStateStore(streamConfig, hTable);
}
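For reference, a TableId is a (namespace, table name) pair; the factory above resolves that logical id into a physical HBase table id via tableUtil.createHTableId. A minimal sketch of constructing and unpacking a TableId, using only the methods that appear on this page (TableId.from, getNamespace, getTableName) and a hypothetical table name:

  // A minimal sketch, assuming only the TableId API visible in these snippets.
  // "my.table" is a hypothetical name, not one used by the factory above.
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "my.table");
  String namespace = tableId.getNamespace();   // "default"
  String tableName = tableId.getTableName();   // "my.table"
  // The factory then maps the logical id to the physical HBase table:
  // TableId hbaseTableId = tableUtil.createHTableId(new NamespaceId(namespace), tableName);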
Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class IncrementSummingScannerTest, method createRegion:
static HRegion createRegion(Configuration hConf, CConfiguration cConf, TableId tableId, HColumnDescriptor cfd) throws Exception {
  HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
  HTableDescriptorBuilder htd = tableUtil.buildHTableDescriptor(tableId);
  cfd.setMaxVersions(Integer.MAX_VALUE);
  cfd.setKeepDeletedCells(true);
  htd.addFamily(cfd);
  htd.addCoprocessor(IncrementHandler.class.getName());
  HTableDescriptor desc = htd.build();
  String tableName = desc.getNameAsString();
  Path tablePath = new Path("/tmp/" + tableName);
  Path hlogPath = new Path("/tmp/hlog-" + tableName);
  FileSystem fs = FileSystem.get(hConf);
  assertTrue(fs.mkdirs(tablePath));
  HLog hLog = HLogFactory.createHLog(fs, hlogPath, tableName, hConf);
  HRegionInfo regionInfo = new HRegionInfo(desc.getTableName());
  HRegionFileSystem regionFS = HRegionFileSystem.createRegionOnFileSystem(hConf, fs, tablePath, regionInfo);
  return new HRegion(regionFS, hLog, hConf, desc, new MockRegionServerServices(hConf, null));
}
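The tests that follow call a two-argument createRegion(tableId, familyBytes) overload that this page does not show. A plausible convenience wrapper (hypothetical, assuming default HBase and CDAP configurations) would build the descriptor arguments and delegate to the four-argument helper above:

  // Hypothetical two-argument overload; the real test class may construct
  // its Configuration/CConfiguration instances differently.
  static HRegion createRegion(TableId tableId, byte[] family) throws Exception {
    return createRegion(HBaseConfiguration.create(), CConfiguration.create(),
                        tableId, new HColumnDescriptor(family));
  }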
Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class IncrementSummingScannerTest, method testFlushAndCompact:
@Test
public void testFlushAndCompact() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestFlushAndCompact");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    // load an initial set of increments
    long ts = System.currentTimeMillis();
    byte[] row1 = Bytes.toBytes("row1");
    for (int i = 0; i < 50; i++) {
      Put p = new Put(row1);
      p.add(familyBytes, columnBytes, ts, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      ts++;
      region.put(p);
    }
    byte[] row2 = Bytes.toBytes("row2");
    ts = System.currentTimeMillis();
    // start with a full put
    Put row2P = new Put(row2);
    row2P.add(familyBytes, columnBytes, ts++, Bytes.toBytes(10L));
    region.put(row2P);
    for (int i = 0; i < 10; i++) {
      Put p = new Put(row2);
      p.add(familyBytes, columnBytes, ts++, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
    }
    // force a region flush
    region.flushcache();
    region.waitForFlushesAndCompactions();
    Result r1 = region.get(new Get(row1));
    assertNotNull(r1);
    assertFalse(r1.isEmpty());
    // row1 should have a full put aggregating all 50 increments
    Cell r1Cell = r1.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r1Cell);
    assertEquals(50L, Bytes.toLong(r1Cell.getValue()));
    Result r2 = region.get(new Get(row2));
    assertNotNull(r2);
    assertFalse(r2.isEmpty());
    // row2 should have a full put aggregating the prior put + 10 increments
    Cell r2Cell = r2.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r2Cell);
    assertEquals(20L, Bytes.toLong(r2Cell.getValue()));
    // add 30 more increments to row2
    for (int i = 0; i < 30; i++) {
      Put p = new Put(row2);
      p.add(familyBytes, columnBytes, ts++, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
    }
    // row2 should now have a full put aggregating the prior 20 value + 30 increments
    r2 = region.get(new Get(row2));
    assertNotNull(r2);
    assertFalse(r2.isEmpty());
    r2Cell = r2.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r2Cell);
    assertEquals(50L, Bytes.toLong(r2Cell.getValue()));
    // force another region flush
    region.flushcache();
    region.waitForFlushesAndCompactions();
    // add 100 more increments to row2
    for (int i = 0; i < 100; i++) {
      Put p = new Put(row2);
      p.add(familyBytes, columnBytes, ts++, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
    }
    // row2 should now have a full put aggregating the prior 50 value + 100 increments
    r2 = region.get(new Get(row2));
    assertNotNull(r2);
    assertFalse(r2.isEmpty());
    r2Cell = r2.getColumnLatestCell(familyBytes, columnBytes);
    assertNotNull(r2Cell);
    assertEquals(150L, Bytes.toLong(r2Cell.getValue()));
  } finally {
    region.close();
  }
}
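Every increment in the test above follows the same three-step pattern: encode the delta as the cell value, attach the HBaseTable.DELTA_WRITE attribute so the IncrementHandler coprocessor (attached in createRegion) treats the Put as a delta rather than a full value, and write it to the region; IncrementSummingScanner then folds the deltas into a single sum at flush, compaction, and read time. Factoring the pattern into a helper makes the loops easier to read; a sketch, where deltaIncrement is a hypothetical name:

  // Hypothetical helper mirroring the loop bodies above; TRUE is the same
  // marker byte array the test passes for the DELTA_WRITE attribute.
  private static Put deltaIncrement(byte[] row, byte[] family, byte[] qualifier, long ts, long delta) {
    Put p = new Put(row);
    p.add(family, qualifier, ts, Bytes.toBytes(delta)); // encoded delta value
    p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);       // flag as delta, not full put
    return p;
  }

  // Usage, replacing one loop body from the test:
  // region.put(deltaIncrement(row2, familyBytes, columnBytes, ts++, 1L));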
Use of co.cask.cdap.data2.util.TableId in project cdap by caskdata.
The class IncrementSummingScannerTest, method testIncrementScanningWithBatchAndUVB:
@Test
public void testIncrementScanningWithBatchAndUVB() throws Exception {
  TableId tableId = TableId.from(NamespaceId.DEFAULT.getNamespace(), "TestIncrementSummingScannerWithUpperVisibilityBound");
  byte[] familyBytes = Bytes.toBytes("f");
  byte[] columnBytes = Bytes.toBytes("c");
  HRegion region = createRegion(tableId, familyBytes);
  try {
    region.initialize();
    long start = 0;
    long now = start;
    long counter1 = 0;
    // adding 5 delta increments
    for (int i = 0; i < 5; i++) {
      Put p = new Put(Bytes.toBytes("r1"), now++);
      p.add(familyBytes, columnBytes, Bytes.toBytes(1L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
      counter1++;
    }
    // Also: we want different combinations of batch limit & uvbs
    for (int i = 0; i < 7; i++) {
      for (int k = 0; k < 4; k++) {
        long[] uvbs = new long[k];
        for (int l = 0; l < uvbs.length; l++) {
          uvbs[l] = start + (k + 1) * (l + 1);
        }
        verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1 }, i > 0 ? i : -1, uvbs);
      }
    }
    // Now test the same with two groups of increments
    int counter2 = 0;
    for (int i = 0; i < 5; i++) {
      Put p = new Put(Bytes.toBytes("r2"), now + i);
      p.add(familyBytes, columnBytes, Bytes.toBytes(2L));
      p.setAttribute(HBaseTable.DELTA_WRITE, TRUE);
      region.put(p);
      counter2 += 2;
    }
    for (int i = 0; i < 12; i++) {
      for (int k = 0; k < 4; k++) {
        long[] uvbs = new long[k];
        for (int l = 0; l < uvbs.length; l++) {
          uvbs[l] = start + (k + 1) * (l + 1);
        }
        verifyCounts(region, new Scan().setMaxVersions(), new long[] { counter1, counter2 }, i > 0 ? i : -1, uvbs);
      }
    }
  } finally {
    region.close();
  }
}
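The nested loops above exercise every combination of scan batch limit (i, with 0 passed as -1 to mean "no limit") and upper-visibility-bound arrays of length 0 through 3 against the expected counters. The uvb construction can be read in isolation; a sketch, where makeUvbs is a hypothetical name:

  // Hypothetical extraction of the uvb-array construction used above:
  // k bounds spaced (k + 1) timestamps apart, starting just after `start`.
  private static long[] makeUvbs(long start, int k) {
    long[] uvbs = new long[k];
    for (int l = 0; l < uvbs.length; l++) {
      uvbs[l] = start + (k + 1) * (l + 1);
    }
    return uvbs;
  }

  // Usage: verifyCounts(region, new Scan().setMaxVersions(),
  //                     new long[] { counter1 }, i > 0 ? i : -1, makeUvbs(start, k));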