Example 21 with Multimap

use of com.google.common.collect.Multimap in project android by JetBrains.

the class IdAnalyzer method reserveAllIdsWithTransitiveReferencesToSelectedIds.

// Avoid creating a circular list of references
private void reserveAllIdsWithTransitiveReferencesToSelectedIds(@NotNull NlComponent parent) {
    Multimap<String, String> referenceMap = HashMultimap.create();
    for (NlComponent component : parent.getChildren()) {
        String id = component.getId();
        if (!StringUtil.isEmpty(id)) {
            for (String attribute : myPropertyGroup.myAttributes) {
                String referenced = NlComponent.stripId(component.getAttribute(myPropertyGroup.myNamespace, attribute));
                if (referenced != null) {
                    referenceMap.put(referenced, id);
                }
            }
        }
    }
    Set<String> references = new HashSet<>(myReservedIds);
    while (!references.isEmpty()) {
        String reference = references.iterator().next();
        references.remove(reference);
        myReservedIds.add(reference);
        referenceMap.get(reference).stream().filter(id -> !myReservedIds.contains(id)).forEach(references::add);
    }
}
Also used : HashSet(java.util.HashSet) List(java.util.List) HashMultimap(com.google.common.collect.HashMultimap) ImmutableList(com.google.common.collect.ImmutableList) NlComponent(com.android.tools.idea.uibuilder.model.NlComponent) SdkConstants(com.android.SdkConstants) StringUtil(com.intellij.openapi.util.text.StringUtil) Set(java.util.Set) NlProperty(com.android.tools.idea.uibuilder.property.NlProperty) Multimap(com.google.common.collect.Multimap) NotNull(org.jetbrains.annotations.NotNull) Collectors(java.util.stream.Collectors)
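
The pattern worth extracting from this example is independent of the Android specifics: build a reverse-reference Multimap (referenced id → ids that refer to it), then walk it to collect a transitive closure. Below is a minimal, self-contained sketch of that walk, assuming plain string ids in place of NlComponent; the class and method names are illustrative, not from the IdAnalyzer source.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;

public class TransitiveReferences {

    // Collect every id that transitively refers to one of the seed ids.
    // referenceMap maps a referenced id to the ids referring to it, the
    // same direction IdAnalyzer uses above.
    static Set<String> closure(Multimap<String, String> referenceMap, Set<String> seeds) {
        Set<String> reserved = new HashSet<>();
        Deque<String> work = new ArrayDeque<>(seeds);
        while (!work.isEmpty()) {
            String id = work.pop();
            // The reserved set doubles as a visited set, so cycles terminate.
            if (reserved.add(id)) {
                // Multimap.get returns an empty collection for absent keys,
                // so no null check is needed.
                work.addAll(referenceMap.get(id));
            }
        }
        return reserved;
    }

    public static void main(String[] args) {
        Multimap<String, String> refs = HashMultimap.create();
        refs.put("a", "b"); // b refers to a
        refs.put("b", "c"); // c refers to b
        refs.put("c", "a"); // a refers to c: a deliberate cycle
        System.out.println(closure(refs, Set.of("a"))); // [a, b, c] in some order
    }
}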

Example 22 with Multimap

use of com.google.common.collect.Multimap in project cdap by caskdata.

the class ServiceLifeCycleTestRun method testContentConsumerLifecycle.

@Test
public void testContentConsumerLifecycle() throws Exception {
    // Set to have one thread only for testing context capture and release
    System.setProperty(ServiceHttpServer.THREAD_POOL_SIZE, "1");
    try {
        ApplicationManager appManager = deployWithArtifact(ServiceLifecycleApp.class, artifactJar);
        final ServiceManager serviceManager = appManager.getServiceManager("test").start();
        CountDownLatch uploadLatch = new CountDownLatch(1);
        // Create five concurrent uploads
        List<ListenableFuture<Integer>> completions = new ArrayList<>();
        for (int i = 0; i < 5; i++) {
            completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
        }
        // Get the states; there should be six handler instances initialized:
        // five for the in-progress uploads, one for the getStates call.
        Tasks.waitFor(6, new Callable<Integer>() {

            @Override
            public Integer call() throws Exception {
                return getStates(serviceManager).size();
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Finish the upload
        uploadLatch.countDown();
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the result
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Get the states; there should still be six handler instances initialized.
        final Multimap<Integer, String> states = getStates(serviceManager);
        Assert.assertEquals(6, states.size());
        // Do another round of six concurrent uploads. They should reuse all six existing contexts
        completions.clear();
        uploadLatch = new CountDownLatch(1);
        for (int i = 0; i < 6; i++) {
            completions.add(slowUpload(serviceManager, "PUT", "upload", uploadLatch));
        }
        // Get the states; there should be seven handler instances initialized:
        // six for the in-progress uploads, one for the getStates call.
        // Out of the seven states, six should be the same as the old ones.
        Tasks.waitFor(true, new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                Multimap<Integer, String> newStates = getStates(serviceManager);
                if (newStates.size() != 7) {
                    return false;
                }
                for (Map.Entry<Integer, String> entry : states.entries()) {
                    if (!newStates.containsEntry(entry.getKey(), entry.getValue())) {
                        return false;
                    }
                }
                return true;
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
        // Complete the upload
        uploadLatch.countDown();
        Futures.successfulAsList(completions).get(10, TimeUnit.SECONDS);
        // Verify the result
        for (ListenableFuture<Integer> future : completions) {
            Assert.assertEquals(200, future.get().intValue());
        }
        // Query the queue size metric. Expect the maximum to be 6.
        // This is because only the six contexts from the concurrent uploads get captured and added back to the queue,
        // while the one created for the getStates() call stays in the thread cache, but not in the queue.
        Tasks.waitFor(6L, new Callable<Long>() {

            @Override
            public Long call() throws Exception {
                Map<String, String> context = ImmutableMap.of(Constants.Metrics.Tag.NAMESPACE, Id.Namespace.DEFAULT.getId(), Constants.Metrics.Tag.APP, ServiceLifecycleApp.class.getSimpleName(), Constants.Metrics.Tag.SERVICE, "test");
                MetricDataQuery metricQuery = new MetricDataQuery(0, Integer.MAX_VALUE, Integer.MAX_VALUE, "system.context.pool.size", AggregationFunction.MAX, context, ImmutableList.<String>of());
                Iterator<MetricTimeSeries> result = getMetricsManager().query(metricQuery).iterator();
                return result.hasNext() ? result.next().getTimeValues().get(0).getValue() : 0L;
            }
        }, 5, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    } finally {
        System.clearProperty(ServiceHttpServer.THREAD_POOL_SIZE);
    }
}
Also used : ApplicationManager(co.cask.cdap.test.ApplicationManager) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) IOException(java.io.IOException) Multimap(com.google.common.collect.Multimap) LinkedListMultimap(com.google.common.collect.LinkedListMultimap) ServiceManager(co.cask.cdap.test.ServiceManager) Iterator(java.util.Iterator) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) MetricDataQuery(co.cask.cdap.api.metrics.MetricDataQuery) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) Test(org.junit.Test)
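
The Tasks.waitFor block above verifies that a new Multimap of handler states is an entry-wise superset of an earlier snapshot. Reduced to a standalone helper it looks like this; the class and method names are ours, not CDAP's.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.util.Map;

public class MultimapSuperset {

    // True if every (key, value) entry of the old snapshot still appears in
    // the current one: the same loop the test runs over handler states.
    static <K, V> boolean containsAllEntries(Multimap<K, V> current, Multimap<K, V> old) {
        for (Map.Entry<K, V> entry : old.entries()) {
            if (!current.containsEntry(entry.getKey(), entry.getValue())) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        Multimap<Integer, String> old = HashMultimap.create();
        old.put(1, "INIT");
        old.put(2, "INIT");
        Multimap<Integer, String> now = HashMultimap.create(old); // copies all entries
        now.put(3, "INIT"); // a new handler instance appeared
        System.out.println(containsAllEntries(now, old)); // true
        System.out.println(containsAllEntries(old, now)); // false
    }
}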

Example 23 with Multimap

use of com.google.common.collect.Multimap in project hbase by apache.

the class TestLoadIncrementalHFilesSplitRecovery method testGroupOrSplitFailure.

/**
   * This simulates a remote exception which should cause LIHF to exit with an
   * exception.
   */
@Test(expected = IOException.class, timeout = 120000)
public void testGroupOrSplitFailure() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, tableName, 10);
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

            int i = 0;

            @Override
            protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table table, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
                i++;
                if (i == 5) {
                    throw new IOException("failure");
                }
                return super.groupOrSplit(regionGroups, item, table, startEndKeys);
            }
        };
        // create HFiles for different column families
        Path dir = buildBulkFiles(tableName, 1);
        try (Table t = connection.getTable(tableName);
            RegionLocator locator = connection.getRegionLocator(tableName);
            Admin admin = connection.getAdmin()) {
            lih.doBulkLoad(dir, admin, t, locator);
        }
    }
    fail("doBulkLoad should have thrown an exception");
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
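
For context, groupOrSplit buckets each LoadQueueItem under the start key of the region that should receive it, which is why regionGroups is a Multimap<ByteBuffer, LoadQueueItem>: one region can receive many HFiles. Here is a minimal sketch of that bucketing under stated assumptions, with a hypothetical Item class standing in for LoadQueueItem.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

public class RegionGrouping {

    // Hypothetical stand-in for LoadQueueItem: an HFile path tagged with
    // the first row key it contains.
    static class Item {
        final byte[] firstRowKey;
        final String path;
        Item(byte[] firstRowKey, String path) {
            this.firstRowKey = firstRowKey;
            this.path = path;
        }
    }

    // Bucket each item under the greatest region start key that is <= its
    // first row key. startKeys must be sorted ascending and begin with the
    // empty byte array (the first region's start key).
    static Multimap<ByteBuffer, Item> group(List<Item> items, List<byte[]> startKeys) {
        Multimap<ByteBuffer, Item> groups = HashMultimap.create();
        for (Item item : items) {
            byte[] target = startKeys.get(0);
            for (byte[] start : startKeys) {
                if (Arrays.compareUnsigned(start, item.firstRowKey) <= 0) {
                    target = start; // still at or below the item's key
                }
            }
            groups.put(ByteBuffer.wrap(target), item);
        }
        return groups;
    }

    public static void main(String[] args) {
        List<byte[]> starts = List.of(new byte[0], "m".getBytes(StandardCharsets.UTF_8));
        List<Item> items = List.of(
            new Item("a".getBytes(StandardCharsets.UTF_8), "hfile-1"),
            new Item("z".getBytes(StandardCharsets.UTF_8), "hfile-2"));
        System.out.println(group(items, starts).keySet().size()); // 2: one item per region
    }
}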

Example 24 with Multimap

use of com.google.common.collect.Multimap in project hbase by apache.

the class TestLoadIncrementalHFilesSplitRecovery method testSplitWhileBulkLoadPhase.

/**
   * This test exercises the path where there is a split after initial
   * validation but before the atomic bulk load call. We cannot use presplitting
   * to test this path, so we actually inject a split just before the atomic
   * region load.
   */
@Test(timeout = 120000)
public void testSplitWhileBulkLoadPhase() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(table, ROWCOUNT, 1);
        // Now let's cause trouble.  This will occur after the checks and cause the bulk
        // files to fail when we attempt to atomically import them.  This is recoverable.
        final AtomicInteger attemptedCalls = new AtomicInteger();
        LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected void bulkLoadPhase(final Table htable, final Connection conn, ExecutorService pool, Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
                int i = attemptedCalls.incrementAndGet();
                if (i == 1) {
                    // On first attempt force a split.
                    forceSplit(table);
                }
                super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
            }
        };
        // create HFiles for different column families
        try (Table t = connection.getTable(table);
            RegionLocator locator = connection.getRegionLocator(table);
            Admin admin = connection.getAdmin()) {
            Path bulk = buildBulkFiles(table, 2);
            lih2.doBulkLoad(bulk, admin, t, locator);
        }
        // check that data was loaded
        // The three expected attempts are: 1) failure because the region needs to split,
        // 2) load of the split top, 3) load of the split bottom.
        assertEquals(3, attemptedCalls.get());
        assertExpectedTable(table, ROWCOUNT, 2);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Deque(java.util.Deque) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Map(java.util.Map) NavigableMap(java.util.NavigableMap) Test(org.junit.Test)
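
A detail worth noting in these signatures: regionGroups keys on ByteBuffer rather than byte[]. Java arrays use identity-based equals/hashCode, so byte[] keys would never collide in a HashMultimap; ByteBuffer defines equality over its remaining bytes, so equal keys group together. A quick demonstration of the difference:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ByteBufferKeys {
    public static void main(String[] args) {
        byte[] startKey = "row-100".getBytes(StandardCharsets.UTF_8);

        // byte[] keys do not work: arrays hash by identity, so equal
        // contents still land under separate keys.
        Multimap<byte[], String> broken = HashMultimap.create();
        broken.put(startKey.clone(), "hfile-a");
        broken.put(startKey.clone(), "hfile-b");
        System.out.println(broken.keySet().size()); // 2

        // ByteBuffer hashes and compares by content, so equal arrays
        // share one key.
        Multimap<ByteBuffer, String> groups = HashMultimap.create();
        groups.put(ByteBuffer.wrap(startKey), "hfile-a");
        groups.put(ByteBuffer.wrap(startKey.clone()), "hfile-b");
        System.out.println(groups.get(ByteBuffer.wrap(startKey))); // both hfile-a and hfile-b
    }
}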

Example 25 with Multimap

use of com.google.common.collect.Multimap in project hbase by apache.

the class TestLoadIncrementalHFilesSplitRecovery method testGroupOrSplitPresplit.

/**
   * This test splits a table and attempts to bulk load.  The bulk import files
   * should be split before atomically importing.
   */
@Test(timeout = 120000)
public void testGroupOrSplitPresplit() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(connection, table, ROWCOUNT, 1);
        forceSplit(table);
        final AtomicInteger countedLqis = new AtomicInteger();
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
                Pair<List<LoadQueueItem>, String> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
                if (lqis != null && lqis.getFirst() != null) {
                    countedLqis.addAndGet(lqis.getFirst().size());
                }
                return lqis;
            }
        };
        // create HFiles for different column families
        Path bulk = buildBulkFiles(table, 2);
        try (Table t = connection.getTable(table);
            RegionLocator locator = connection.getRegionLocator(table);
            Admin admin = connection.getAdmin()) {
            lih.doBulkLoad(bulk, admin, t, locator);
        }
        assertExpectedTable(connection, table, ROWCOUNT, 2);
        assertEquals(20, countedLqis.get());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
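
The final assertion leans on Multimap counting semantics: size() counts key-value entries, not distinct keys, so the 20 here is presumably the ten column-family HFiles each split into a top and bottom half. The same semantics explain why states.size() is 6 in Example 22. A quick sketch of the distinction:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizes {
    public static void main(String[] args) {
        Multimap<String, String> m = ArrayListMultimap.create();
        m.put("regionA", "hfile-1");
        m.put("regionA", "hfile-2");
        m.put("regionB", "hfile-3");

        System.out.println(m.size());          // 3: counts entries, not keys
        System.out.println(m.keySet().size()); // 2: distinct keys
        System.out.println(m.get("regionA"));  // [hfile-1, hfile-2]
    }
}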

Aggregations

Multimap (com.google.common.collect.Multimap): 47
HashMultimap (com.google.common.collect.HashMultimap): 16
Test (org.junit.Test): 15
List (java.util.List): 13
InetAddress (java.net.InetAddress): 11
Map (java.util.Map): 9
IOException (java.io.IOException): 8
ImmutableList (com.google.common.collect.ImmutableList): 7
Collection (java.util.Collection): 7
ArrayListMultimap (com.google.common.collect.ArrayListMultimap): 6
ImmutableMap (com.google.common.collect.ImmutableMap): 6
ImmutableMultimap (com.google.common.collect.ImmutableMultimap): 6
ArrayList (java.util.ArrayList): 6
Set (java.util.Set): 6
ImmutableSet (com.google.common.collect.ImmutableSet): 5
LinkedListMultimap (com.google.common.collect.LinkedListMultimap): 5
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 5
Collectors (java.util.stream.Collectors): 5
Token (org.apache.cassandra.dht.Token): 4
HashMap (java.util.HashMap): 3