Use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
The class SizeLimitedBulkIncrementerTest, method multipleTablesConcurrent.
@Test
public void multipleTablesConcurrent() throws Exception {
    // Given
    final int numTables = 50;
    final int numRowIds = 100;
    final int numColumnIds = 20;
    final int maxCallCount = 200;
    List<TestDataSet> testDataSets = BulkIncrementerTestClazz.createRandomTestDataSetList(numTables, numRowIds, numColumnIds, maxCallCount);
    List<TableName> tableNames = new ArrayList<>(numTables);
    for (int i = 0; i < numTables; i++) {
        tableNames.add(TableName.valueOf(i + ""));
    }
    final int maxNumTestDatas = numTables * numRowIds * numColumnIds * maxCallCount;
    List<TestData> testDatas = new ArrayList<>(maxNumTestDatas);
    for (TestDataSet testDataSet : testDataSets) {
        testDatas.addAll(testDataSet.getTestDatas());
    }
    Collections.shuffle(testDatas);
    // When
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch incrementorLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, incrementorLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, incrementorLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    flusherLatch.await(30L, TimeUnit.SECONDS);
    // Then
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    long actualTotalCount = 0;
    for (TestDataSet testDataSet : testDataSets) {
        TableName expectedTableName = testDataSet.getTableName();
        RowKey expectedRowKey = testDataSet.getRowKey();
        ColumnName expectedColumnName = testDataSet.getColumnName();
        Map<TableName, Map<ByteBuffer, Map<ByteBuffer, Long>>> resultMap = verifier.getResultMap();
        Map<ByteBuffer, Map<ByteBuffer, Long>> rows = resultMap.get(expectedTableName);
        if (rows == null) {
            continue;
        }
        Map<ByteBuffer, Long> keyValues = rows.get(ByteBuffer.wrap(expectedRowKey.getRowKey()));
        if (keyValues == null) {
            continue;
        }
        Long actualCount = keyValues.get(ByteBuffer.wrap(expectedColumnName.getColumnName()));
        if (actualCount == null) {
            continue;
        }
        actualTotalCount += actualCount;
    }
    Assert.assertTrue(actualTotalCount > bulkLimitSize);
}
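The Incrementer and Flusher workers referenced above come from BulkIncrementerTestClazz and are not shown in this snippet. As a rough, hypothetical sketch only (imports omitted, in the same snippet style as above; the real helper may differ, and the accumulator field is assumed to be of a BulkIncrementer type exposing the getIncrements method seen in the DefaultBulkWriter snippet below), the Flusher callable essentially waits for all Incrementer threads, drains the accumulated counters once, and hands the resulting Increment map back to the test:

// Hypothetical sketch of the Flusher callable used by the test above.
private static class Flusher implements Callable<Map<TableName, List<Increment>>> {

    private final BulkIncrementer bulkIncrementer;
    private final RowKeyDistributorByHashPrefix rowKeyDistributor;
    private final CountDownLatch incrementorLatch;
    private final CountDownLatch flusherLatch;

    Flusher(BulkIncrementer bulkIncrementer, RowKeyDistributorByHashPrefix rowKeyDistributor,
            CountDownLatch incrementorLatch, CountDownLatch flusherLatch) {
        this.bulkIncrementer = bulkIncrementer;
        this.rowKeyDistributor = rowKeyDistributor;
        this.incrementorLatch = incrementorLatch;
        this.flusherLatch = flusherLatch;
    }

    @Override
    public Map<TableName, List<Increment>> call() throws Exception {
        // Wait until every Incrementer thread has finished submitting its partition.
        incrementorLatch.await();
        // Drain everything accumulated so far into per-table Increment lists.
        Map<TableName, List<Increment>> increments = bulkIncrementer.getIncrements(rowKeyDistributor);
        // Signal the test thread that the flush result is ready.
        flusherLatch.countDown();
        return increments;
    }
}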
Use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
The class DefaultBulkWriter, method flushLink.
@Override
public void flushLink() {
    // update statistics by rowkey and column for now. need to update it by rowkey later.
    Map<TableName, List<Increment>> incrementMap = bulkIncrementer.getIncrements(rowKeyDistributorByHashPrefix);
    for (Map.Entry<TableName, List<Increment>> entry : incrementMap.entrySet()) {
        TableName tableName = entry.getKey();
        List<Increment> increments = entry.getValue();
        if (logger.isDebugEnabled()) {
            logger.debug("flush {} to [{}] Increment:{}", this.getClass().getSimpleName(), tableName, increments.size());
        }
        hbaseTemplate.increment(tableName, increments);
    }
}
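hbaseTemplate here is Pinpoint's own HBase abstraction. For comparison, a minimal sketch of the same flush written against the plain HBase client API might look like the following (imports and error handling omitted; an open org.apache.hadoop.hbase.client.Connection named connection is assumed):

// Minimal sketch: flushing a per-table Increment map with the raw HBase client.
for (Map.Entry<TableName, List<Increment>> entry : incrementMap.entrySet()) {
    try (Table table = connection.getTable(entry.getKey())) {
        List<Increment> increments = entry.getValue();
        // Table.batch submits all increments for this table together.
        Object[] results = new Object[increments.size()];
        table.batch(increments, results);
    }
}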
Use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
The class RowKeyMerge, method createBulkIncrement.
public Map<TableName, List<Increment>> createBulkIncrement(Map<RowInfo, Long> data, RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
    if (data.isEmpty()) {
        return Collections.emptyMap();
    }
    final Map<TableName, List<Increment>> tableIncrementMap = new HashMap<>();
    final Map<TableName, Map<RowKey, List<ColumnName>>> tableRowKeyMap = mergeRowKeys(data);
    for (Map.Entry<TableName, Map<RowKey, List<ColumnName>>> tableRowKeys : tableRowKeyMap.entrySet()) {
        final TableName tableName = tableRowKeys.getKey();
        final List<Increment> incrementList = new ArrayList<>();
        for (Map.Entry<RowKey, List<ColumnName>> rowKeyEntry : tableRowKeys.getValue().entrySet()) {
            Increment increment = createIncrement(rowKeyEntry, rowKeyDistributorByHashPrefix);
            incrementList.add(increment);
        }
        tableIncrementMap.put(tableName, incrementList);
    }
    return tableIncrementMap;
}
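createIncrement itself is not part of this snippet. A hypothetical sketch of such a helper is shown below; it assumes a byte[] column-family field named family on the class, a ColumnName.getCallCount() accessor returning the merged counter delta, and HBaseWD's RowKeyDistributorByHashPrefix.getDistributedKey for salting the row key. The real Pinpoint implementation may differ.

// Hypothetical helper: builds one Increment per logical row, salting the row key
// through the hash-prefix distributor and adding one counter column per ColumnName.
private Increment createIncrement(Map.Entry<RowKey, List<ColumnName>> rowKeyEntry,
        RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
    RowKey rowKey = rowKeyEntry.getKey();
    // Distribute (salt) the original row key to avoid region hot-spotting.
    byte[] distributedRowKey = rowKeyDistributorByHashPrefix.getDistributedKey(rowKey.getRowKey());
    Increment increment = new Increment(distributedRowKey);
    for (ColumnName columnName : rowKeyEntry.getValue()) {
        // "family" and getCallCount() are assumptions, as noted above.
        increment.addColumn(family, columnName.getColumnName(), columnName.getCallCount());
    }
    return increment;
}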
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
The class RowResource, method increment.
/**
 * Validates the input request parameters, parses columns from CellSetModel,
 * and invokes Increment on HTable.
 *
 * @param model instance of CellSetModel
 * @return Response 200 OK, 304 Not Modified, 400 Bad Request
 */
Response increment(final CellSetModel model) {
    Table table = null;
    Increment increment = null;
    try {
        table = servlet.getTable(tableResource.getName());
        if (model.getRows().size() != 1) {
            servlet.getMetrics().incrementFailedIncrementRequests(1);
            return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
                .entity("Bad request: Number of rows specified is not 1." + CRLF).build();
        }
        RowModel rowModel = model.getRows().get(0);
        byte[] key = rowModel.getKey();
        if (key == null) {
            key = rowspec.getRow();
        }
        if (key == null) {
            servlet.getMetrics().incrementFailedIncrementRequests(1);
            return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
                .entity("Bad request: Row key found to be null." + CRLF).build();
        }
        increment = new Increment(key);
        increment.setReturnResults(returnResult);
        int i = 0;
        for (CellModel cell : rowModel.getCells()) {
            byte[] col = cell.getColumn();
            if (col == null) {
                try {
                    col = rowspec.getColumns()[i++];
                } catch (ArrayIndexOutOfBoundsException e) {
                    col = null;
                }
            }
            if (col == null) {
                servlet.getMetrics().incrementFailedIncrementRequests(1);
                return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
                    .entity("Bad request: Column found to be null." + CRLF).build();
            }
            byte[][] parts = CellUtil.parseColumn(col);
            if (parts.length != 2) {
                servlet.getMetrics().incrementFailedIncrementRequests(1);
                return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT)
                    .entity("Bad request: Column incorrectly specified." + CRLF).build();
            }
            increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue())));
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("INCREMENT " + increment.toString());
        }
        Result result = table.increment(increment);
        if (returnResult) {
            if (result.isEmpty()) {
                servlet.getMetrics().incrementFailedIncrementRequests(1);
                return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT)
                    .entity("Increment return empty." + CRLF).build();
            }
            CellSetModel rModel = new CellSetModel();
            RowModel rRowModel = new RowModel(result.getRow());
            for (Cell cell : result.listCells()) {
                rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil.cloneValue(cell)));
            }
            rModel.addRow(rRowModel);
            servlet.getMetrics().incrementSucessfulIncrementRequests(1);
            return Response.ok(rModel).build();
        }
        ResponseBuilder response = Response.ok();
        servlet.getMetrics().incrementSucessfulIncrementRequests(1);
        return response.build();
    } catch (Exception e) {
        servlet.getMetrics().incrementFailedIncrementRequests(1);
        return processException(e);
    } finally {
        if (table != null) {
            try {
                table.close();
            } catch (IOException ioe) {
                LOG.debug("Exception received while closing the table " + table.getName(), ioe);
            }
        }
    }
}
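For reference, the same increment issued directly through the Java client rather than through the REST handler above could look like this minimal sketch (imports omitted; connection, the table name "t1", the row "row1", the column "cf:counter", and the amount 1 are placeholder assumptions):

// Minimal client-side equivalent of the REST handler above.
try (Table table = connection.getTable(TableName.valueOf("t1"))) {
    Increment increment = new Increment(Bytes.toBytes("row1"));
    increment.setReturnResults(true);
    // "cf:counter" corresponds to family "cf" and qualifier "counter",
    // split the same way the handler splits the column with CellUtil.parseColumn.
    byte[][] parts = CellUtil.parseColumn(Bytes.toBytes("cf:counter"));
    increment.addColumn(parts[0], parts[1], 1L);
    Result result = table.increment(increment);
    // newValue holds the post-increment counter value returned by the server.
    long newValue = Bytes.toLong(result.getValue(parts[0], parts[1]));
}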
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
The class TestProtobufUtil, method testIncrementNoTimestamp.
/**
 * Older clients may not send along a timestamp in the MutationProto. Check that we
 * default correctly.
 */
@Test
public void testIncrementNoTimestamp() throws IOException {
    MutationProto mutation = getIncrementMutation(null);
    Increment increment = ProtobufUtil.toIncrement(mutation, null);
    assertEquals(HConstants.LATEST_TIMESTAMP, increment.getTimestamp());
    increment.getFamilyCellMap().values().forEach(
        cells -> cells.forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp())));
}
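The same default applies to an Increment built directly on the client side, since the mutation's timestamp is only assigned by the server at write time. A minimal sketch (row, family, and qualifier are placeholders):

// Minimal sketch: a client-built Increment also reports HConstants.LATEST_TIMESTAMP
// until the server assigns a real timestamp when the mutation is applied.
Increment increment = new Increment(Bytes.toBytes("row"));
increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), 1L);
assertEquals(HConstants.LATEST_TIMESTAMP, increment.getTimestamp());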