Example usage of com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Flusher in the naver/pinpoint project:
class SizeLimitedBulkIncrementerTest, method multipleTablesConcurrent.
@Test
public void multipleTablesConcurrent() throws Exception {
    // Given: random increment data spread over many tables/rows/columns so that
    // concurrent increments interleave across tables.
    final int numTables = 50;
    final int numRowIds = 100;
    final int numColumnIds = 20;
    final int maxCallCount = 200;
    List<TestDataSet> testDataSets = BulkIncrementerTestClazz.createRandomTestDataSetList(numTables, numRowIds, numColumnIds, maxCallCount);
    final int maxNumTestDatas = numTables * numRowIds * numColumnIds * maxCallCount;
    List<TestData> testDatas = new ArrayList<>(maxNumTestDatas);
    for (TestDataSet testDataSet : testDataSets) {
        testDatas.addAll(testDataSet.getTestDatas());
    }
    // Shuffle so each worker thread receives a mixed slice of all data sets.
    Collections.shuffle(testDatas);

    // When: one flusher thread drains the incrementer while worker threads feed it.
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch incrementorLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, incrementorLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, incrementorLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    // Fail fast on timeout instead of silently continuing with incomplete data.
    Assert.assertTrue("flusher did not complete within 30s", flusherLatch.await(30L, TimeUnit.SECONDS));

    // Then: total flushed count must exceed the configured size limit, proving the
    // size-limited incrementer actually flushed. Individual data sets may have been
    // dropped by the limiter, hence the null-tolerant lookups below.
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    // Loop-invariant: the verifier's result map does not change between iterations.
    Map<TableName, Map<ByteBuffer, Map<ByteBuffer, Long>>> resultMap = verifier.getResultMap();
    long actualTotalCount = 0;
    for (TestDataSet testDataSet : testDataSets) {
        Map<ByteBuffer, Map<ByteBuffer, Long>> rows = resultMap.get(testDataSet.getTableName());
        if (rows == null) {
            continue;
        }
        Map<ByteBuffer, Long> keyValues = rows.get(ByteBuffer.wrap(testDataSet.getRowKey().getRowKey()));
        if (keyValues == null) {
            continue;
        }
        Long actualCount = keyValues.get(ByteBuffer.wrap(testDataSet.getColumnName().getColumnName()));
        if (actualCount == null) {
            continue;
        }
        actualTotalCount += actualCount;
    }
    Assert.assertTrue(actualTotalCount > bulkLimitSize);
}
Example usage of com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Flusher in the naver/pinpoint project:
class SizeLimitedBulkIncrementerTest, method singleTableConcurrent.
@Test
public void singleTableConcurrent() throws Exception {
    // Given: two large data sets targeting the same table and row, so concurrent
    // increments contend on the same counters.
    TableName tableA = TableName.valueOf("A");
    TestDataSet testDataSetA_0_0 = new TestDataSet(tableA, 0, 0, 1000000);
    TestDataSet testDataSetA_0_1 = new TestDataSet(tableA, 0, 1, 1000001);
    List<TestData> testDatas = new ArrayList<>();
    testDatas.addAll(testDataSetA_0_0.getTestDatas());
    testDatas.addAll(testDataSetA_0_1.getTestDatas());
    // Shuffle so each worker thread receives a mix of both data sets.
    Collections.shuffle(testDatas);

    // When: one flusher thread drains the incrementer while worker threads feed it.
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch completeLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, completeLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, completeLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    // Fail fast on timeout instead of silently continuing with incomplete data.
    Assert.assertTrue("flusher did not complete within 30s", flusherLatch.await(30L, TimeUnit.SECONDS));

    // Then: every increment from both data sets must be accounted for.
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    verifier.verify(testDataSetA_0_0);
    verifier.verify(testDataSetA_0_1);
}
Example usage of com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Flusher in the naver/pinpoint project:
class DefaultBulkUpdaterTest, method singleTableConcurrent.
@Test
public void singleTableConcurrent() throws Exception {
    // Given: two large data sets targeting the same table and row, so concurrent
    // increments contend on the same counters.
    TableName tableA = TableName.valueOf("A");
    TestDataSet testDataSetA_0_0 = new TestDataSet(tableA, 0, 0, 1000000);
    TestDataSet testDataSetA_0_1 = new TestDataSet(tableA, 0, 1, 1000001);
    List<TestData> testDatas = new ArrayList<>();
    testDatas.addAll(testDataSetA_0_0.getTestDatas());
    testDatas.addAll(testDataSetA_0_1.getTestDatas());
    // Shuffle so each worker thread receives a mix of both data sets.
    Collections.shuffle(testDatas);

    // When: one flusher thread drains the updater while worker threads feed it.
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch completeLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, completeLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, completeLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    // Fail fast on timeout instead of silently continuing with incomplete data.
    Assert.assertTrue("flusher did not complete within 30s", flusherLatch.await(30L, TimeUnit.SECONDS));

    // Then: every increment from both data sets must be accounted for.
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    verifier.verify(testDataSetA_0_0);
    verifier.verify(testDataSetA_0_1);
}
Example usage of com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Flusher in the naver/pinpoint project:
class DefaultBulkUpdaterTest, method multipleTablesConcurrent.
@Test
public void multipleTablesConcurrent() throws Exception {
    // Given: random increment data spread over many tables/rows/columns so that
    // concurrent increments interleave across tables.
    final int numTables = 50;
    final int numRowIds = 100;
    final int numColumnIds = 20;
    final int maxCallCount = 200;
    List<TestDataSet> testDataSets = BulkIncrementerTestClazz.createRandomTestDataSetList(numTables, numRowIds, numColumnIds, maxCallCount);
    final int maxNumTestDatas = numTables * numRowIds * numColumnIds * maxCallCount;
    List<TestData> testDatas = new ArrayList<>(maxNumTestDatas);
    for (TestDataSet testDataSet : testDataSets) {
        testDatas.addAll(testDataSet.getTestDatas());
    }
    // Shuffle so each worker thread receives a mixed slice of all data sets.
    Collections.shuffle(testDatas);

    // When: one flusher thread drains the updater while worker threads feed it.
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch incrementorLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, incrementorLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, incrementorLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    // Fail fast on timeout instead of silently continuing with incomplete data.
    Assert.assertTrue("flusher did not complete within 30s", flusherLatch.await(30L, TimeUnit.SECONDS));

    // Then: every increment from every data set must be accounted for (no size limit here).
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    for (TestDataSet testDataSet : testDataSets) {
        verifier.verify(testDataSet);
    }
}
End of aggregated usage examples.