
Example 36 with Multimap

Use of com.google.common.collect.Multimap in project hbase by apache.

From the class TestLoadIncrementalHFilesSplitRecovery, method testGroupOrSplitFailure.

/**
   * This simulates a remote exception which should cause LoadIncrementalHFiles
   * (LIHF) to exit with an exception.
   */
@Test(expected = IOException.class, timeout = 120000)
public void testGroupOrSplitFailure() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, tableName, 10);
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

            int i = 0;

            @Override
            protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table table, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
                i++;
                if (i == 5) {
                    throw new IOException("failure");
                }
                return super.groupOrSplit(regionGroups, item, table, startEndKeys);
            }
        };
        // create HFiles for different column families
        Path dir = buildBulkFiles(tableName, 1);
        try (Table t = connection.getTable(tableName);
            RegionLocator locator = connection.getRegionLocator(tableName);
            Admin admin = connection.getAdmin()) {
            lih.doBulkLoad(dir, admin, t, locator);
        }
    }
    fail("doBulkLoad should have thrown an exception");
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)
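
For context, the regionGroups argument that groupOrSplit receives is a plain Guava Multimap, i.e. a map in which one key can hold several values. The sketch below is not the HBase API; it is a minimal, self-contained illustration of the grouping pattern these bulk-load tests rely on, with a hypothetical Item class standing in for LoadQueueItem.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Arrays;
import java.util.List;

public class GroupingSketch {

    // Hypothetical stand-in for a queued bulk-load item.
    static class Item {
        final String regionStartKey;
        final String hfile;

        Item(String regionStartKey, String hfile) {
            this.regionStartKey = regionStartKey;
            this.hfile = hfile;
        }
    }

    public static void main(String[] args) {
        List<Item> queue = Arrays.asList(
            new Item("regionA", "hfile-1"),
            new Item("regionA", "hfile-2"),
            new Item("regionB", "hfile-3"));

        // Group the queued items by the region they fall into; a key may map to many items.
        Multimap<String, Item> regionGroups = ArrayListMultimap.create();
        for (Item item : queue) {
            regionGroups.put(item.regionStartKey, item);
        }

        // get(key) returns every value stored under that key (an empty collection, never null).
        System.out.println(regionGroups.get("regionA").size()); // 2
        System.out.println(regionGroups.get("regionC").size()); // 0
    }
}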

Example 37 with Multimap

Use of com.google.common.collect.Multimap in project hbase by apache.

From the class TestLoadIncrementalHFilesSplitRecovery, method testSplitWhileBulkLoadPhase.

/**
   * This test exercises the path where there is a split after initial
   * validation but before the atomic bulk load call. We cannot use presplitting
   * to test this path, so we actually inject a split just before the atomic
   * region load.
   */
@Test(timeout = 120000)
public void testSplitWhileBulkLoadPhase() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(table, ROWCOUNT, 1);
        // Now let's cause trouble. This will occur after the checks and cause bulk
        // files to fail when we attempt to import atomically. This is recoverable.
        final AtomicInteger attemptedCalls = new AtomicInteger();
        LoadIncrementalHFiles lih2 = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected void bulkLoadPhase(final Table htable, final Connection conn, ExecutorService pool, Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups, boolean copyFile, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
                int i = attemptedCalls.incrementAndGet();
                if (i == 1) {
                    // On first attempt force a split.
                    forceSplit(table);
                }
                super.bulkLoadPhase(htable, conn, pool, queue, regionGroups, copyFile, item2RegionMap);
            }
        };
        // create HFiles for different column families
        try (Table t = connection.getTable(table);
            RegionLocator locator = connection.getRegionLocator(table);
            Admin admin = connection.getAdmin()) {
            Path bulk = buildBulkFiles(table, 2);
            lih2.doBulkLoad(bulk, admin, t, locator);
        }
        // check that data was loaded
        // The three expected attempts are: 1) a failure because a split is needed,
        // 2) load of the split top, 3) load of the split bottom.
        assertEquals(attemptedCalls.get(), 3);
        assertExpectedTable(table, ROWCOUNT, 2);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Deque(java.util.Deque) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Map(java.util.Map) NavigableMap(java.util.NavigableMap) Test(org.junit.Test)

Example 38 with Multimap

Use of com.google.common.collect.Multimap in project hbase by apache.

From the class TestLoadIncrementalHFilesSplitRecovery, method testGroupOrSplitPresplit.

/**
   * This test splits a table and attempts to bulk load.  The bulk import files
   * should be split before atomically importing.
   */
@Test(timeout = 120000)
public void testGroupOrSplitPresplit() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTable(connection, table, 10);
        populateTable(connection, table, 1);
        assertExpectedTable(connection, table, ROWCOUNT, 1);
        forceSplit(table);
        final AtomicInteger countedLqis = new AtomicInteger();
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration()) {

            @Override
            protected Pair<List<LoadQueueItem>, String> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item, final Table htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
                Pair<List<LoadQueueItem>, String> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
                if (lqis != null && lqis.getFirst() != null) {
                    countedLqis.addAndGet(lqis.getFirst().size());
                }
                return lqis;
            }
        };
        // create HFiles for different column families
        Path bulk = buildBulkFiles(table, 2);
        try (Table t = connection.getTable(table);
            RegionLocator locator = connection.getRegionLocator(table);
            Admin admin = connection.getAdmin()) {
            lih.doBulkLoad(bulk, admin, t, locator);
        }
        assertExpectedTable(connection, table, ROWCOUNT, 2);
        assertEquals(20, countedLqis.get());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) TableName(org.apache.hadoop.hbase.TableName) Multimap(com.google.common.collect.Multimap) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test)

Example 39 with Multimap

Use of com.google.common.collect.Multimap in project cassandra by apache.

From the class MoveTest, method testSimultaneousMove.

/*
     * Test ranges and write endpoints when multiple nodes are on the move simultaneously
     */
@Test
public void testSimultaneousMove() throws UnknownHostException {
    StorageService ss = StorageService.instance;
    final int RING_SIZE = 10;
    TokenMetadata tmd = ss.getTokenMetadata();
    IPartitioner partitioner = RandomPartitioner.instance;
    VersionedValue.VersionedValueFactory valueFactory = new VersionedValue.VersionedValueFactory(partitioner);
    ArrayList<Token> endpointTokens = new ArrayList<Token>();
    ArrayList<Token> keyTokens = new ArrayList<Token>();
    List<InetAddress> hosts = new ArrayList<InetAddress>();
    List<UUID> hostIds = new ArrayList<UUID>();
    // create a ring of 10 nodes
    Util.createInitialRing(ss, partitioner, endpointTokens, keyTokens, hosts, hostIds, RING_SIZE);
    // nodes 6, 8 and 9 move to new tokens
    final int[] MOVING = new int[] { 6, 8, 9 };
    Map<Integer, Token> newTokens = new HashMap<Integer, Token>();
    for (int movingIndex : MOVING) {
        Token newToken = positionToken(movingIndex);
        ss.onChange(hosts.get(movingIndex), ApplicationState.STATUS, valueFactory.moving(newToken));
        // storing token associated with a node index
        newTokens.put(movingIndex, newToken);
    }
    Collection<InetAddress> endpoints;
    tmd = tmd.cloneAfterAllSettled();
    ss.setTokenMetadataUnsafe(tmd);
    // boot two new nodes with keyTokens.get(5) and keyTokens.get(7)
    InetAddress boot1 = InetAddress.getByName("127.0.1.1");
    Gossiper.instance.initializeNodeUnsafe(boot1, UUID.randomUUID(), 1);
    Gossiper.instance.injectApplicationState(boot1, ApplicationState.TOKENS, valueFactory.tokens(Collections.singleton(keyTokens.get(5))));
    ss.onChange(boot1, ApplicationState.STATUS, valueFactory.bootstrapping(Collections.<Token>singleton(keyTokens.get(5))));
    PendingRangeCalculatorService.instance.blockUntilFinished();
    InetAddress boot2 = InetAddress.getByName("127.0.1.2");
    Gossiper.instance.initializeNodeUnsafe(boot2, UUID.randomUUID(), 1);
    Gossiper.instance.injectApplicationState(boot2, ApplicationState.TOKENS, valueFactory.tokens(Collections.singleton(keyTokens.get(7))));
    ss.onChange(boot2, ApplicationState.STATUS, valueFactory.bootstrapping(Collections.<Token>singleton(keyTokens.get(7))));
    PendingRangeCalculatorService.instance.blockUntilFinished();
    // avoid requiring a test update every time a new keyspace is added to test/conf/cassandra.yaml
    Map<String, AbstractReplicationStrategy> keyspaceStrategyMap = new HashMap<String, AbstractReplicationStrategy>();
    for (int i = 1; i <= 4; i++) {
        keyspaceStrategyMap.put("MoveTestKeyspace" + i, getStrategy("MoveTestKeyspace" + i, tmd));
    }
    /**
        *  Keyspace1 & Keyspace2 RF=1
        *  {
        *      /127.0.0.1=[(97,0]],
        *      /127.0.0.2=[(0,10]],
        *      /127.0.0.3=[(10,20]],
        *      /127.0.0.4=[(20,30]],
        *      /127.0.0.5=[(30,40]],
        *      /127.0.0.6=[(40,50]],
        *      /127.0.0.7=[(50,67]],
        *      /127.0.0.8=[(67,70]],
        *      /127.0.0.9=[(70,87]],
        *      /127.0.0.10=[(87,97]]
        *  }
        */
    Multimap<InetAddress, Range<Token>> keyspace1ranges = keyspaceStrategyMap.get(Simple_RF1_KeyspaceName).getAddressRanges();
    Collection<Range<Token>> ranges1 = keyspace1ranges.get(InetAddress.getByName("127.0.0.1"));
    assertEquals(1, collectionSize(ranges1));
    assertEquals(generateRange(97, 0), ranges1.iterator().next());
    Collection<Range<Token>> ranges2 = keyspace1ranges.get(InetAddress.getByName("127.0.0.2"));
    assertEquals(1, collectionSize(ranges2));
    assertEquals(generateRange(0, 10), ranges2.iterator().next());
    Collection<Range<Token>> ranges3 = keyspace1ranges.get(InetAddress.getByName("127.0.0.3"));
    assertEquals(1, collectionSize(ranges3));
    assertEquals(generateRange(10, 20), ranges3.iterator().next());
    Collection<Range<Token>> ranges4 = keyspace1ranges.get(InetAddress.getByName("127.0.0.4"));
    assertEquals(1, collectionSize(ranges4));
    assertEquals(generateRange(20, 30), ranges4.iterator().next());
    Collection<Range<Token>> ranges5 = keyspace1ranges.get(InetAddress.getByName("127.0.0.5"));
    assertEquals(1, collectionSize(ranges5));
    assertEquals(generateRange(30, 40), ranges5.iterator().next());
    Collection<Range<Token>> ranges6 = keyspace1ranges.get(InetAddress.getByName("127.0.0.6"));
    assertEquals(1, collectionSize(ranges6));
    assertEquals(generateRange(40, 50), ranges6.iterator().next());
    Collection<Range<Token>> ranges7 = keyspace1ranges.get(InetAddress.getByName("127.0.0.7"));
    assertEquals(1, collectionSize(ranges7));
    assertEquals(generateRange(50, 67), ranges7.iterator().next());
    Collection<Range<Token>> ranges8 = keyspace1ranges.get(InetAddress.getByName("127.0.0.8"));
    assertEquals(1, collectionSize(ranges8));
    assertEquals(generateRange(67, 70), ranges8.iterator().next());
    Collection<Range<Token>> ranges9 = keyspace1ranges.get(InetAddress.getByName("127.0.0.9"));
    assertEquals(1, collectionSize(ranges9));
    assertEquals(generateRange(70, 87), ranges9.iterator().next());
    Collection<Range<Token>> ranges10 = keyspace1ranges.get(InetAddress.getByName("127.0.0.10"));
    assertEquals(1, collectionSize(ranges10));
    assertEquals(generateRange(87, 97), ranges10.iterator().next());
    /**
        * Keyspace3 RF=5
        * {
        *      /127.0.0.1=[(97,0], (70,87], (50,67], (87,97], (67,70]],
        *      /127.0.0.2=[(97,0], (70,87], (87,97], (0,10], (67,70]],
        *      /127.0.0.3=[(97,0], (70,87], (87,97], (0,10], (10,20]],
        *      /127.0.0.4=[(97,0], (20,30], (87,97], (0,10], (10,20]],
        *      /127.0.0.5=[(97,0], (30,40], (20,30], (0,10], (10,20]],
        *      /127.0.0.6=[(40,50], (30,40], (20,30], (0,10], (10,20]],
        *      /127.0.0.7=[(40,50], (30,40], (50,67], (20,30], (10,20]],
        *      /127.0.0.8=[(40,50], (30,40], (50,67], (20,30], (67,70]],
        *      /127.0.0.9=[(40,50], (70,87], (30,40], (50,67], (67,70]],
        *      /127.0.0.10=[(40,50], (70,87], (50,67], (87,97], (67,70]]
        * }
        */
    Multimap<InetAddress, Range<Token>> keyspace3ranges = keyspaceStrategyMap.get(KEYSPACE3).getAddressRanges();
    ranges1 = keyspace3ranges.get(InetAddress.getByName("127.0.0.1"));
    assertEquals(collectionSize(ranges1), 5);
    assertTrue(ranges1.equals(generateRanges(97, 0, 70, 87, 50, 67, 87, 97, 67, 70)));
    ranges2 = keyspace3ranges.get(InetAddress.getByName("127.0.0.2"));
    assertEquals(collectionSize(ranges2), 5);
    assertTrue(ranges2.equals(generateRanges(97, 0, 70, 87, 87, 97, 0, 10, 67, 70)));
    ranges3 = keyspace3ranges.get(InetAddress.getByName("127.0.0.3"));
    assertEquals(collectionSize(ranges3), 5);
    assertTrue(ranges3.equals(generateRanges(97, 0, 70, 87, 87, 97, 0, 10, 10, 20)));
    ranges4 = keyspace3ranges.get(InetAddress.getByName("127.0.0.4"));
    assertEquals(collectionSize(ranges4), 5);
    assertTrue(ranges4.equals(generateRanges(97, 0, 20, 30, 87, 97, 0, 10, 10, 20)));
    ranges5 = keyspace3ranges.get(InetAddress.getByName("127.0.0.5"));
    assertEquals(collectionSize(ranges5), 5);
    assertTrue(ranges5.equals(generateRanges(97, 0, 30, 40, 20, 30, 0, 10, 10, 20)));
    ranges6 = keyspace3ranges.get(InetAddress.getByName("127.0.0.6"));
    assertEquals(collectionSize(ranges6), 5);
    assertTrue(ranges6.equals(generateRanges(40, 50, 30, 40, 20, 30, 0, 10, 10, 20)));
    ranges7 = keyspace3ranges.get(InetAddress.getByName("127.0.0.7"));
    assertEquals(collectionSize(ranges7), 5);
    assertTrue(ranges7.equals(generateRanges(40, 50, 30, 40, 50, 67, 20, 30, 10, 20)));
    ranges8 = keyspace3ranges.get(InetAddress.getByName("127.0.0.8"));
    assertEquals(collectionSize(ranges8), 5);
    assertTrue(ranges8.equals(generateRanges(40, 50, 30, 40, 50, 67, 20, 30, 67, 70)));
    ranges9 = keyspace3ranges.get(InetAddress.getByName("127.0.0.9"));
    assertEquals(collectionSize(ranges9), 5);
    assertTrue(ranges9.equals(generateRanges(40, 50, 70, 87, 30, 40, 50, 67, 67, 70)));
    ranges10 = keyspace3ranges.get(InetAddress.getByName("127.0.0.10"));
    assertEquals(collectionSize(ranges10), 5);
    assertTrue(ranges10.equals(generateRanges(40, 50, 70, 87, 50, 67, 87, 97, 67, 70)));
    /**
         * Keyspace4 RF=3
         * {
         *      /127.0.0.1=[(97,0], (70,87], (87,97]],
         *      /127.0.0.2=[(97,0], (87,97], (0,10]],
         *      /127.0.0.3=[(97,0], (0,10], (10,20]],
         *      /127.0.0.4=[(20,30], (0,10], (10,20]],
         *      /127.0.0.5=[(30,40], (20,30], (10,20]],
         *      /127.0.0.6=[(40,50], (30,40], (20,30]],
         *      /127.0.0.7=[(40,50], (30,40], (50,67]],
         *      /127.0.0.8=[(40,50], (50,67], (67,70]],
         *      /127.0.0.9=[(70,87], (50,67], (67,70]],
         *      /127.0.0.10=[(70,87], (87,97], (67,70]]
         *  }
         */
    Multimap<InetAddress, Range<Token>> keyspace4ranges = keyspaceStrategyMap.get(Simple_RF3_KeyspaceName).getAddressRanges();
    ranges1 = keyspace4ranges.get(InetAddress.getByName("127.0.0.1"));
    assertEquals(collectionSize(ranges1), 3);
    assertTrue(ranges1.equals(generateRanges(97, 0, 70, 87, 87, 97)));
    ranges2 = keyspace4ranges.get(InetAddress.getByName("127.0.0.2"));
    assertEquals(collectionSize(ranges2), 3);
    assertTrue(ranges2.equals(generateRanges(97, 0, 87, 97, 0, 10)));
    ranges3 = keyspace4ranges.get(InetAddress.getByName("127.0.0.3"));
    assertEquals(collectionSize(ranges3), 3);
    assertTrue(ranges3.equals(generateRanges(97, 0, 0, 10, 10, 20)));
    ranges4 = keyspace4ranges.get(InetAddress.getByName("127.0.0.4"));
    assertEquals(collectionSize(ranges4), 3);
    assertTrue(ranges4.equals(generateRanges(20, 30, 0, 10, 10, 20)));
    ranges5 = keyspace4ranges.get(InetAddress.getByName("127.0.0.5"));
    assertEquals(collectionSize(ranges5), 3);
    assertTrue(ranges5.equals(generateRanges(30, 40, 20, 30, 10, 20)));
    ranges6 = keyspace4ranges.get(InetAddress.getByName("127.0.0.6"));
    assertEquals(collectionSize(ranges6), 3);
    assertTrue(ranges6.equals(generateRanges(40, 50, 30, 40, 20, 30)));
    ranges7 = keyspace4ranges.get(InetAddress.getByName("127.0.0.7"));
    assertEquals(collectionSize(ranges7), 3);
    assertTrue(ranges7.equals(generateRanges(40, 50, 30, 40, 50, 67)));
    ranges8 = keyspace4ranges.get(InetAddress.getByName("127.0.0.8"));
    assertEquals(collectionSize(ranges8), 3);
    assertTrue(ranges8.equals(generateRanges(40, 50, 50, 67, 67, 70)));
    ranges9 = keyspace4ranges.get(InetAddress.getByName("127.0.0.9"));
    assertEquals(collectionSize(ranges9), 3);
    assertTrue(ranges9.equals(generateRanges(70, 87, 50, 67, 67, 70)));
    ranges10 = keyspace4ranges.get(InetAddress.getByName("127.0.0.10"));
    assertEquals(collectionSize(ranges10), 3);
    assertTrue(ranges10.equals(generateRanges(70, 87, 87, 97, 67, 70)));
    // pre-calculate the results.
    Map<String, Multimap<Token, InetAddress>> expectedEndpoints = new HashMap<String, Multimap<Token, InetAddress>>();
    expectedEndpoints.put(Simple_RF1_KeyspaceName, HashMultimap.<Token, InetAddress>create());
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("5"), makeAddrs("127.0.0.2"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("15"), makeAddrs("127.0.0.3"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("25"), makeAddrs("127.0.0.4"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("35"), makeAddrs("127.0.0.5"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("45"), makeAddrs("127.0.0.6"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("55"), makeAddrs("127.0.0.7", "127.0.1.1"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("65"), makeAddrs("127.0.0.7"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("75"), makeAddrs("127.0.0.9", "127.0.1.2"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("85"), makeAddrs("127.0.0.9"));
    expectedEndpoints.get(Simple_RF1_KeyspaceName).putAll(new BigIntegerToken("95"), makeAddrs("127.0.0.10"));
    expectedEndpoints.put(KEYSPACE2, HashMultimap.<Token, InetAddress>create());
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("5"), makeAddrs("127.0.0.2"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("15"), makeAddrs("127.0.0.3"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("25"), makeAddrs("127.0.0.4"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("35"), makeAddrs("127.0.0.5"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("45"), makeAddrs("127.0.0.6"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("55"), makeAddrs("127.0.0.7", "127.0.1.1"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("65"), makeAddrs("127.0.0.7"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("75"), makeAddrs("127.0.0.9", "127.0.1.2"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("85"), makeAddrs("127.0.0.9"));
    expectedEndpoints.get(KEYSPACE2).putAll(new BigIntegerToken("95"), makeAddrs("127.0.0.10"));
    expectedEndpoints.put(KEYSPACE3, HashMultimap.<Token, InetAddress>create());
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("5"), makeAddrs("127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5", "127.0.0.6"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("15"), makeAddrs("127.0.0.3", "127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.1.1"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("25"), makeAddrs("127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.0.8", "127.0.1.1"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("35"), makeAddrs("127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.0.8", "127.0.0.9", "127.0.1.1", "127.0.1.2"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("45"), makeAddrs("127.0.0.6", "127.0.0.7", "127.0.0.8", "127.0.0.9", "127.0.0.10", "127.0.1.1", "127.0.1.2"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("55"), makeAddrs("127.0.0.7", "127.0.0.8", "127.0.0.9", "127.0.0.10", "127.0.0.1", "127.0.1.1", "127.0.1.2"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("65"), makeAddrs("127.0.0.7", "127.0.0.8", "127.0.0.9", "127.0.0.10", "127.0.0.1", "127.0.1.2"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("75"), makeAddrs("127.0.0.9", "127.0.0.10", "127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.1.2"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("85"), makeAddrs("127.0.0.9", "127.0.0.10", "127.0.0.1", "127.0.0.2", "127.0.0.3"));
    expectedEndpoints.get(KEYSPACE3).putAll(new BigIntegerToken("95"), makeAddrs("127.0.0.10", "127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4"));
    expectedEndpoints.put(Simple_RF3_KeyspaceName, HashMultimap.<Token, InetAddress>create());
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("5"), makeAddrs("127.0.0.2", "127.0.0.3", "127.0.0.4"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("15"), makeAddrs("127.0.0.3", "127.0.0.4", "127.0.0.5"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("25"), makeAddrs("127.0.0.4", "127.0.0.5", "127.0.0.6"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("35"), makeAddrs("127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.1.1"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("45"), makeAddrs("127.0.0.6", "127.0.0.7", "127.0.0.8", "127.0.1.1"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("55"), makeAddrs("127.0.0.7", "127.0.0.8", "127.0.0.9", "127.0.1.1", "127.0.1.2"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("65"), makeAddrs("127.0.0.7", "127.0.0.8", "127.0.0.9", "127.0.1.2"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("75"), makeAddrs("127.0.0.9", "127.0.0.10", "127.0.0.1", "127.0.1.2"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("85"), makeAddrs("127.0.0.9", "127.0.0.10", "127.0.0.1"));
    expectedEndpoints.get(Simple_RF3_KeyspaceName).putAll(new BigIntegerToken("95"), makeAddrs("127.0.0.10", "127.0.0.1", "127.0.0.2"));
    for (Map.Entry<String, AbstractReplicationStrategy> keyspaceStrategy : keyspaceStrategyMap.entrySet()) {
        String keyspaceName = keyspaceStrategy.getKey();
        AbstractReplicationStrategy strategy = keyspaceStrategy.getValue();
        for (Token token : keyTokens) {
            endpoints = tmd.getWriteEndpoints(token, keyspaceName, strategy.getNaturalEndpoints(token));
            assertEquals(expectedEndpoints.get(keyspaceName).get(token).size(), endpoints.size());
            assertTrue(expectedEndpoints.get(keyspaceName).get(token).containsAll(endpoints));
        }
        // just to be sure that things still work according to the old tests, run them:
        if (strategy.getReplicationFactor() != 3)
            continue;
        // tokens 5, 15 and 25 should go to three nodes
        for (int i = 0; i < 3; i++) {
            endpoints = tmd.getWriteEndpoints(keyTokens.get(i), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(i)));
            assertEquals(3, endpoints.size());
            assertTrue(endpoints.contains(hosts.get(i + 1)));
            assertTrue(endpoints.contains(hosts.get(i + 2)));
            assertTrue(endpoints.contains(hosts.get(i + 3)));
        }
        // token 35 should go to nodes 4, 5, 6 and boot1
        endpoints = tmd.getWriteEndpoints(keyTokens.get(3), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(3)));
        assertEquals(4, endpoints.size());
        assertTrue(endpoints.contains(hosts.get(4)));
        assertTrue(endpoints.contains(hosts.get(5)));
        assertTrue(endpoints.contains(hosts.get(6)));
        assertTrue(endpoints.contains(boot1));
        // token 45 should go to nodes 5, 6, 7 and boot1
        endpoints = tmd.getWriteEndpoints(keyTokens.get(4), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(4)));
        assertEquals(4, endpoints.size());
        assertTrue(endpoints.contains(hosts.get(5)));
        assertTrue(endpoints.contains(hosts.get(6)));
        assertTrue(endpoints.contains(hosts.get(7)));
        assertTrue(endpoints.contains(boot1));
        // token 55 should go to nodes 6, 7, 8, boot1 and boot2
        endpoints = tmd.getWriteEndpoints(keyTokens.get(5), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(5)));
        assertEquals(5, endpoints.size());
        assertTrue(endpoints.contains(hosts.get(6)));
        assertTrue(endpoints.contains(hosts.get(7)));
        assertTrue(endpoints.contains(hosts.get(8)));
        assertTrue(endpoints.contains(boot1));
        assertTrue(endpoints.contains(boot2));
        // token 65 should go to nodes 6, 7, 8 and boot2
        endpoints = tmd.getWriteEndpoints(keyTokens.get(6), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(6)));
        assertEquals(4, endpoints.size());
        assertTrue(endpoints.contains(hosts.get(6)));
        assertTrue(endpoints.contains(hosts.get(7)));
        assertTrue(endpoints.contains(hosts.get(8)));
        assertTrue(endpoints.contains(boot2));
        // token 75 should go to nodes 8, 9, 0 and boot2
        endpoints = tmd.getWriteEndpoints(keyTokens.get(7), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(7)));
        assertEquals(4, endpoints.size());
        assertTrue(endpoints.contains(hosts.get(8)));
        assertTrue(endpoints.contains(hosts.get(9)));
        assertTrue(endpoints.contains(hosts.get(0)));
        assertTrue(endpoints.contains(boot2));
        // token 85 should go to nodes 8, 9 and 0
        endpoints = tmd.getWriteEndpoints(keyTokens.get(8), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(8)));
        assertEquals(3, endpoints.size());
        assertTrue(endpoints.contains(hosts.get(8)));
        assertTrue(endpoints.contains(hosts.get(9)));
        assertTrue(endpoints.contains(hosts.get(0)));
        // token 95 should go to nodes 9, 0 and 1
        endpoints = tmd.getWriteEndpoints(keyTokens.get(9), keyspaceName, strategy.getNaturalEndpoints(keyTokens.get(9)));
        assertEquals(3, endpoints.size());
        assertTrue(endpoints.contains(hosts.get(9)));
        assertTrue(endpoints.contains(hosts.get(0)));
        assertTrue(endpoints.contains(hosts.get(1)));
    }
    // bring all moving nodes back to the normal state
    for (Integer movingIndex : MOVING) {
        ss.onChange(hosts.get(movingIndex), ApplicationState.STATUS, valueFactory.normal(Collections.singleton(newTokens.get(movingIndex))));
    }
}
Also used : BigIntegerToken(org.apache.cassandra.dht.RandomPartitioner.BigIntegerToken) Token(org.apache.cassandra.dht.Token) IPartitioner(org.apache.cassandra.dht.IPartitioner) VersionedValue(org.apache.cassandra.gms.VersionedValue) BigIntegerToken(org.apache.cassandra.dht.RandomPartitioner.BigIntegerToken) TokenMetadata(org.apache.cassandra.locator.TokenMetadata) Range(org.apache.cassandra.dht.Range) Multimap(com.google.common.collect.Multimap) HashMultimap(com.google.common.collect.HashMultimap) AbstractReplicationStrategy(org.apache.cassandra.locator.AbstractReplicationStrategy) InetAddress(java.net.InetAddress) Test(org.junit.Test)
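
The expectedEndpoints structure above illustrates a recurring test pattern: an outer HashMap keyed by keyspace whose values are Guava HashMultimaps, so each token can carry several expected endpoints. The following is a minimal sketch of that pattern using placeholder keyspace, token and address strings rather than the Cassandra types used in the test.

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class ExpectedEndpointsSketch {

    public static void main(String[] args) {
        // Outer map: keyspace name -> (token -> expected endpoints).
        Map<String, Multimap<String, String>> expected = new HashMap<>();

        // HashMultimap keeps a set of values per key, so duplicate puts are collapsed.
        expected.put("ks_rf1", HashMultimap.<String, String>create());
        expected.get("ks_rf1").putAll("token-5", Arrays.asList("127.0.0.2"));
        expected.get("ks_rf1").putAll("token-55", Arrays.asList("127.0.0.7", "127.0.1.1"));

        // Verification mirrors the assertions in the test: compare sizes, then membership.
        Multimap<String, String> perKeyspace = expected.get("ks_rf1");
        System.out.println(perKeyspace.get("token-55").size() == 2); // true
        System.out.println(perKeyspace.get("token-55")
            .containsAll(Arrays.asList("127.0.0.7", "127.0.1.1"))); // true
    }
}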

Example 40 with Multimap

Use of com.google.common.collect.Multimap in project disconf by knightliao.

From the class ScanPrinterUtils, method printStoreMap.

/**
     * Print out the data held in the store map.
     */
public static void printStoreMap(Reflections reflections) {
    LOGGER.info("Now we will print store map......");
    Store store = reflections.getStore();
    Map<String /* indexName */, Multimap<String, String>> storeMap = store.getStoreMap();
    for (String indexName : storeMap.keySet()) {
        LOGGER.info("====================================");
        LOGGER.info("indexName:" + indexName);
        Multimap<String, String> multimap = storeMap.get(indexName);
        for (String firstName : multimap.keySet()) {
            Collection<String> lastNames = multimap.get(firstName);
            LOGGER.info("\t\t" + firstName + ": " + lastNames);
        }
    }
}
Also used : Multimap(com.google.common.collect.Multimap) Store(org.reflections.Store)
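
printStoreMap walks the multimap with keySet() and then get(key); Guava also offers entries() and asMap() views over the same data. A small, self-contained sketch of the three iteration styles follows, using made-up keys and values rather than the Reflections store.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class MultimapIterationSketch {

    public static void main(String[] args) {
        Multimap<String, String> index = ArrayListMultimap.create();
        index.put("fruits", "apple");
        index.put("fruits", "banana");
        index.put("vegetables", "carrot");

        // 1) Key by key, as printStoreMap does: one collection of values per key.
        for (String key : index.keySet()) {
            Collection<String> values = index.get(key);
            System.out.println(key + ": " + values);
        }

        // 2) Flat view: one Map.Entry per key/value pair.
        for (Map.Entry<String, String> entry : index.entries()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // 3) Map view: Map<K, Collection<V>>, useful for APIs that expect a plain Map.
        Map<String, Collection<String>> asMap = index.asMap();
        System.out.println(asMap.size()); // 2 distinct keys
    }
}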

Aggregations

Multimap (com.google.common.collect.Multimap): 61
HashMultimap (com.google.common.collect.HashMultimap): 25
List (java.util.List): 25
Test (org.junit.Test): 20
Map (java.util.Map): 17
ImmutableList (com.google.common.collect.ImmutableList): 15
Collection (java.util.Collection): 14
Set (java.util.Set): 14
ImmutableMap (com.google.common.collect.ImmutableMap): 13
HashMap (java.util.HashMap): 13
ImmutableMultimap (com.google.common.collect.ImmutableMultimap): 12
ImmutableSet (com.google.common.collect.ImmutableSet): 11
ArrayList (java.util.ArrayList): 11
Collectors (java.util.stream.Collectors): 11
ArrayListMultimap (com.google.common.collect.ArrayListMultimap): 10
Nullable (javax.annotation.Nullable): 10
IOException (java.io.IOException): 9
Stream (java.util.stream.Stream): 9
InetAddress (java.net.InetAddress): 8
Objects (java.util.Objects): 8