Use of com.google.common.collect.Multimap in project android by JetBrains.
Class IdAnalyzer, method reserveAllIdsWithTransitiveReferencesToSelectedIds.
// Avoid creating a circular list of references
private void reserveAllIdsWithTransitiveReferencesToSelectedIds(@NotNull NlComponent parent) {
  // Reverse index: a referenced id mapped to the ids of the components referencing it.
  Multimap<String, String> referenceMap = HashMultimap.create();
  for (NlComponent component : parent.getChildren()) {
    String id = component.getId();
    if (!StringUtil.isEmpty(id)) {
      for (String attribute : myPropertyGroup.myAttributes) {
        String referenced = NlComponent.stripId(component.getAttribute(myPropertyGroup.myNamespace, attribute));
        if (referenced != null) {
          referenceMap.put(referenced, id);
        }
      }
    }
  }
  // Worklist: transitively reserve every id that references an already reserved id.
  Set<String> references = new HashSet<>(myReservedIds);
  while (!references.isEmpty()) {
    String reference = references.iterator().next();
    references.remove(reference);
    myReservedIds.add(reference);
    referenceMap.get(reference).stream().filter(id -> !myReservedIds.contains(id)).forEach(references::add);
  }
}
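The method above builds a reverse index (referenced id to referencing ids) in a HashMultimap and then walks it with a worklist to reserve every id that transitively references an already selected id. Below is a minimal standalone sketch of that pattern; the string ids are illustrative stand-ins for the NlComponent model, not part of the original code:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.HashSet;
import java.util.Set;

public class TransitiveReferencesDemo {
  public static void main(String[] args) {
    // Reverse index: referenced id -> ids of the components referencing it.
    // A Multimap is used because several components may reference the same id.
    Multimap<String, String> referenceMap = HashMultimap.create();
    referenceMap.put("a", "b");  // b references a
    referenceMap.put("a", "c");  // c references a
    referenceMap.put("c", "d");  // d references c
    referenceMap.put("x", "y");  // y references x, unrelated to "a"

    // Worklist: start from the initially selected ids and follow reverse
    // edges until no new referencing id is discovered.
    Set<String> reserved = new HashSet<>();
    Set<String> worklist = new HashSet<>();
    worklist.add("a");
    while (!worklist.isEmpty()) {
      String reference = worklist.iterator().next();
      worklist.remove(reference);
      reserved.add(reference);
      // Multimap.get() returns an empty collection (never null) for unknown keys.
      for (String id : referenceMap.get(reference)) {
        if (!reserved.contains(id)) {
          worklist.add(id);
        }
      }
    }
    System.out.println(reserved);  // [a, b, c, d] in some order; "y" is never reached
  }
}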
Use of com.google.common.collect.Multimap in project cdap by caskdata.
Class ServiceLifeCycleTestRun, method testContentConsumerLifecycle.
@Test
public void testContentConsumerLifecycle() throws Exception {
  // Use a single thread so that context capture and release can be tested
  System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
  try {
    ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
    final ServiceManager serviceManager = appManager.getServiceManager("test").start();
    CountDownLatch uploadLatch = new CountDownLatch(1);

    // Create five concurrent uploads
    List<ListenableFuture<Integer>> completions = new ArrayList<>();
    for (int i = 0; i < 5; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }

    // Get the states; there should be six handler instances initialized:
    // five for the in-progress uploads, one for the getStates call
    Tasks.waitFor(6, new Callable<Integer>() {
      @Override
      public Integer call() throws Exception {
        return getStates(serviceManager).size();
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

    // Finish the uploads
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);

    // Verify the results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }

    // Get the states; there should still be six handler instances initialized.
    final Multimap<Integer, String> states = getStates(serviceManager);
    Assert.assertEquals(6, states.size());

    // Do another round of six concurrent uploads. It should reuse all six of the existing contexts.
    completions.clear();
    uploadLatch = new CountDownLatch(1);
    for (int i = 0; i < 6; i++) {
      completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
    }

    // Get the states; there should be seven handler instances initialized:
    // six for the in-progress uploads, one for the getStates call.
    // Of the seven states, six should be the same as the old ones.
    Tasks.waitFor(true, new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        Multimap<Integer, String> newStates = getStates(serviceManager);
        if (newStates.size() != 7) {
          return false;
        }
        for (Map.Entry<Integer, String> entry : states.entries()) {
          if (!newStates.containsEntry(entry.getKey(), entry.getValue())) {
            return false;
          }
        }
        return true;
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

    // Complete the uploads
    uploadLatch.countDown();
    Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);

    // Verify the results
    for (ListenableFuture<Integer> future : completions) {
      Assert.assertEquals(200, future.get().intValue());
    }

    // Query the queue size metric. Expect the maximum to be 6: only the six contexts from the
    // concurrent uploads get captured and added back to the queue, while the one created for the
    // getStates() call stays in the thread cache, not in the queue.
    Tasks.waitFor(6L, new Callable<Long>() {
      @Override
      public Long call() throws Exception {
        Map<String, String> context = ImmutableMap.of(
          Constants.Metrics.Tag.NAMESPACE, Id.Namespace.DEFAULT.getId(),
          Constants.Metrics.Tag.APP, ServiceLifecycleApp.class.getSimpleName(),
          Constants.Metrics.Tag.SERVICE, "test");
        MetricDataQuery metricQuery = new MetricDataQuery(
          0, Integer.MAX_VALUE, Integer.MAX_VALUE,
          "system.context.pool.size", AggregationFunction.MAX, context, ImmutableList.<String>of());
        Iterator<MetricTimeSeries> result = getMetricsManager().query(metricQuery).iterator();
        return result.hasNext() ? result.next().getTimeValues().get(0).getValue() : 0L;
      }
    }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  } finally {
    System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
  }
}
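The assertions above rely on Multimap semantics that differ from Map: size() counts key-value pairs rather than distinct keys, entries() iterates every pair, and containsEntry(key, value) checks for one specific pair. A minimal sketch of those calls, independent of the CDAP test harness (the state strings are illustrative only):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.Map;

public class MultimapSemanticsDemo {
  public static void main(String[] args) {
    Multimap<Integer, String> states = HashMultimap.create();
    states.put(1, "INITIALIZED");  // two values under the same key...
    states.put(1, "RECEIVED");
    states.put(2, "INITIALIZED");

    // size() counts key-value pairs, not distinct keys.
    System.out.println(states.size());           // 3
    System.out.println(states.keySet().size());  // 2

    // entries() exposes every pair; containsEntry() checks one specific pair,
    // which is how the test verifies that the old states survive in the new snapshot.
    Multimap<Integer, String> newStates = HashMultimap.create(states);
    newStates.put(3, "INITIALIZED");
    for (Map.Entry<Integer, String> entry : states.entries()) {
      System.out.println(newStates.containsEntry(entry.getKey(), entry.getValue()));  // true
    }
  }
}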
Use of com.google.common.collect.Multimap in project hbase by apache.
Class TestLoadIncrementalHFilesSplitRecovery, method testGroupOrSplitFailure.
/**
 * This simulates a remote exception which should cause LoadIncrementalHFiles (LIHF)
 * to exit with an exception.
 */
@Test(expected = IOException.class, timeout = 120000)
public void testGroupOrSplitFailure() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
    setupTable(connection, tableName, 10);
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
      int i = 0;

      @Override
      protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups,
          final LoadQueueItem item, final Table table,
          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
        i++;
        if (i == 5) {
          throw new IOException("failure");
        }
        return super.groupOrSplit(regionGroups, item, table, startEndKeys);
      }
    };
    // create HFiles for different column families
    Path dir = buildBulkFiles(tableName, 1);
    try (Table t = connection.getTable(tableName);
        RegionLocator locator = connection.getRegionLocator(tableName);
        Admin admin = connection.getAdmin()) {
      lih.doBulkLoad(dir, admin, t, locator);
    }
  }
  fail("doBulkLoad should have thrown an exception");
}
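The regionGroups argument threaded through groupOrSplit is a Multimap because a single region, keyed by its start key, can receive several load-queue items. A minimal sketch of that grouping pattern, with plain String stand-ins for HBase's LoadQueueItem (the keys and file names here are illustrative, not HBase API):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class RegionGroupingDemo {
  public static void main(String[] args) {
    // regionGroups maps a region's start key to the items destined for that region;
    // a Multimap is needed because several HFiles can land in the same region.
    Multimap<ByteBuffer, String> regionGroups = HashMultimap.create();
    regionGroups.put(key("row-000"), "cf1/hfile-0");
    regionGroups.put(key("row-000"), "cf2/hfile-1");  // same region, second item
    regionGroups.put(key("row-500"), "cf1/hfile-2");

    System.out.println(regionGroups.keySet().size());     // 2 regions
    System.out.println(regionGroups.size());              // 3 queued items
    System.out.println(regionGroups.get(key("row-000"))); // both items for the first region
  }

  // ByteBuffer implements equals()/hashCode() by content, so wrapped byte[]
  // start keys group correctly as Multimap keys.
  private static ByteBuffer key(String s) {
    return ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8));
  }
}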
Use of com.google.common.collect.Multimap in project hbase by apache.
Class TestLoadIncrementalHFilesSplitRecovery, method testSplitWhileBulkLoadPhase.
/**
 * This test exercises the path where there is a split after initial
 * validation but before the atomic bulk load call. We cannot use presplitting
 * to test this path, so we actually inject a split just before the atomic
 * region load.
 */
@Test(timeout = 120000)
public void testSplitWhileBulkLoadPhase() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
    setupTable(connection, table, 10);
    populateTable(connection, table, 1);
    assertExpectedTable(table, ROWCOUNT, 1);

    // Now let's cause trouble. This will occur after the checks and cause the bulk
    // files to fail when they attempt to atomically import. This is recoverable.
    final AtomicInteger attemptedCalls = new AtomicInteger();
    LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {
      @Override
      protected void bulkLoadPhase(final Table htable, final Connection conn, ExecutorService pool,
          Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups,
          boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
        int i = attemptedCalls.incrementAndGet();
        if (i == 1) {
          // On the first attempt, force a split.
          forceSplit(table);
        }
        super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
      }
    };
    // create HFiles for different column families
    try (Table t = connection.getTable(table);
        RegionLocator locator = connection.getRegionLocator(table);
        Admin admin = connection.getAdmin()) {
      Path bulk = buildBulkFiles(table, 2);
      lih2.doBulkLoad(bulk, admin, t, locator);
    }
    // Check that the data was loaded. The three expected attempts are:
    // 1) a failure because the region needs to split, 2) the load of the split's
    // top half, and 3) the load of the split's bottom half.
    assertEquals(3, attemptedCalls.get());
    assertExpectedTable(table, ROWCOUNT, 2);
  }
}
Use of com.google.common.collect.Multimap in project hbase by apache.
Class TestLoadIncrementalHFilesSplitRecovery, method testGroupOrSplitPresplit.
/**
 * This test splits a table and attempts to bulk load. The bulk import files
 * should be split before being atomically imported.
 */
@Test(timeout = 120000)
public void testGroupOrSplitPresplit() throws Exception {
  final TableName table = TableName.valueOf(name.getMethodName());
  try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
    setupTable(connection, table, 10);
    populateTable(connection, table, 1);
    assertExpectedTable(connection, table, ROWCOUNT, 1);
    forceSplit(table);

    final AtomicInteger countedLqis = new AtomicInteger();
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {
      @Override
      protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups,
          final LoadQueueItem item, final Table htable,
          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
        Pair<List<LoadQueueItem>, String> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
        if (lqis != null && lqis.getFirst() != null) {
          countedLqis.addAndGet(lqis.getFirst().size());
        }
        return lqis;
      }
    };
    // create HFiles for different column families
    Path bulk = buildBulkFiles(table, 2);
    try (Table t = connection.getTable(table);
        RegionLocator locator = connection.getRegionLocator(table);
        Admin admin = connection.getAdmin()) {
      lih.doBulkLoad(bulk, admin, t, locator);
    }
    assertExpectedTable(connection, table, ROWCOUNT, 2);
    assertEquals(20, countedLqis.get());
  }
}
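All three HBase tests above use the same instrumentation pattern: subclass the loader, override one protected hook, count or sabotage invocations through an AtomicInteger, and delegate to super for the real work. A minimal self-contained sketch of that pattern, with a hypothetical Loader class standing in for LoadIncrementalHFiles:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

public class OverrideInstrumentationDemo {
  // Hypothetical stand-in for LoadIncrementalHFiles with one overridable hook.
  static class Loader {
    protected void bulkLoadPhase(String batch) throws IOException {
      System.out.println("loading " + batch);
    }

    void run() throws IOException {
      bulkLoadPhase("batch-1");
      bulkLoadPhase("batch-2");
    }
  }

  public static void main(String[] args) throws IOException {
    final AtomicInteger attemptedCalls = new AtomicInteger();
    Loader loader = new Loader() {
      @Override
      protected void bulkLoadPhase(String batch) throws IOException {
        // Count every attempt, optionally inject a fault on a chosen call,
        // then delegate to the real implementation.
        if (attemptedCalls.incrementAndGet() == 1) {
          System.out.println("injecting fault before " + batch);
        }
        super.bulkLoadPhase(batch);
      }
    };
    loader.run();
    System.out.println("attempts: " + attemptedCalls.get());  // 2
  }
}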