Example 66 with VoldemortServer

use of voldemort.server.VoldemortServer in project voldemort by voldemort.

the class RestServerProtocolTests method oneTimeSetUp.

/*
     * TODO REST-Server: Hard coded storeName and urlStr. They must be retrieved
     * from config
     */
@BeforeClass
public static void oneTimeSetUp() {
    storeNameStr = "test";
    urlStr = "http://localhost:8085/";
    config = VoldemortConfig.loadFromVoldemortHome("config/single_node_rest_server/");
    key1 = "The longest key ";
    vectorClock = new VectorClock();
    vectorClock.incrementVersion(config.getNodeId(), System.currentTimeMillis());
    eTag = RestUtils.getSerializedVectorClock(vectorClock);
    value1 = "The longest value";
    timeOut = 10000L;
    contentType = "text";
    routingType = 2;
    server = new VoldemortServer(config);
    if (!server.isStarted())
        server.start();
    System.out.println("********************Starting REST Server********************");
}
Also used : VectorClock(voldemort.versioning.VectorClock) VoldemortServer(voldemort.server.VoldemortServer) BeforeClass(org.junit.BeforeClass)
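A matching teardown is not shown on this page; a minimal sketch of one is below, assuming the @AfterClass hook (org.junit.AfterClass) and a stop() lifecycle method to pair with the isStarted()/start() calls in the setup above. The method name is illustrative, not taken from the test.

@AfterClass
public static void oneTimeTearDown() {
    // Assumption: VoldemortServer exposes stop() alongside the isStarted()/start() used in setup
    if (server != null && server.isStarted()) {
        server.stop();
    }
    System.out.println("********************Stopping REST Server********************");
}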

Example 67 with VoldemortServer

use of voldemort.server.VoldemortServer in project voldemort by voldemort.

the class NioStatsJmxTest method setUp.

@Before
public void setUp() throws Exception {
    String storesXmlfile = "test/common/voldemort/config/single-store.xml";
    ClientConfig clientConfig = new ClientConfig().setMaxConnectionsPerNode(1).setMaxThreads(1);
    SocketStoreFactory socketStoreFactory = new ClientRequestExecutorPool(clientConfig.getSelectors(), clientConfig.getMaxConnectionsPerNode(), clientConfig.getConnectionTimeout(TimeUnit.MILLISECONDS), clientConfig.getSocketTimeout(TimeUnit.MILLISECONDS), clientConfig.getSocketBufferSize(), clientConfig.getSocketKeepAlive());
    Properties props = new Properties();
    props.put("jmx.enable", "true");
    int numServers = 1;
    VoldemortServer[] servers = new VoldemortServer[numServers];
    Cluster cluster = ServerTestUtils.startVoldemortCluster(numServers, servers, null, socketStoreFactory, true, null, storesXmlfile, props);
    server = servers[0];
    for (Node node : cluster.getNodes()) {
        socketStore = ServerTestUtils.getSocketStore(socketStoreFactory, "test", node.getSocketPort(), clientConfig.getRequestFormatType());
    }
}
Also used : ClientRequestExecutorPool(voldemort.store.socket.clientrequest.ClientRequestExecutorPool) Node(voldemort.cluster.Node) Cluster(voldemort.cluster.Cluster) SocketStoreFactory(voldemort.store.socket.SocketStoreFactory) ClientConfig(voldemort.client.ClientConfig) Properties(java.util.Properties) VoldemortServer(voldemort.server.VoldemortServer) Before(org.junit.Before)
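The setup above leaves a running server and an open socket store; a corresponding @After teardown would release them. A minimal sketch, assuming close()/stop() methods on the store and server and that socketStore and server are the fields assigned in setUp (the teardown body itself is illustrative, not the test's actual code):

@After
public void tearDown() throws Exception {
    // Assumption: close the client-side socket store before stopping the embedded server
    if (socketStore != null) {
        socketStore.close();
    }
    if (server != null) {
        server.stop();
    }
}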

Example 68 with VoldemortServer

use of voldemort.server.VoldemortServer in project voldemort by voldemort.

the class AbstractZonedRebalanceTest method testProxyPutDuringRebalancing.

@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
    logger.info("Starting testProxyPutDuringRebalancing");
    try {
        Cluster currentCluster = ServerTestUtils.getLocalZonedCluster(6, 2, new int[] { 0, 0, 0, 1, 1, 1 }, new int[][] { { 0 }, { 1, 6 }, { 2 }, { 3 }, { 4, 7 }, { 5 } });
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Lists.newArrayList(7));
        finalCluster = UpdateClusterUtils.createUpdatedCluster(finalCluster, 5, Lists.newArrayList(6));
        /**
             * Original partition map
             * 
             * [s0 : p0] [s1 : p1, p6] [s2 : p2]
             * 
             * [s3 : p3] [s4 : p4, p7] [s5 : p5]
             * 
             * final server partition ownership
             * 
             * [s0 : p0] [s1 : p1] [s2 : p2, p7]
             * 
             * [s3 : p3] [s4 : p4] [s5 : p5, p6]
             * 
             * Note that rwStoreDefFileWithReplication is a "2/1/1" store def.
             * 
             * Original server n-ary partition ownership
             * 
             * [s0 : p0, p3-7] [s1 : p0-p7] [s2 : p1-2]
             * 
             * [s3 : p0-3, p6-7] [s4 : p0-p7] [s5 : p4-5]
             * 
             * final server n-ary partition ownership
             * 
             * [s0 : p0, p2-7] [s1 : p0-1] [s2 : p1-p7]
             * 
             * [s3 : p0-3, p5-7] [s4 : p0-4, p7] [s5 : p4-6]
             */
        List<Integer> serverList = Arrays.asList(0, 1, 2, 3, 4, 5);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster, rwStoreDefFileWithReplication, serverList, configProps);
        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());
        // It is imperative that we test in a single shot since multiple
        // batches would mean the proxy bridges being torn down and
        // established multiple times and we cannot test against the source
        // cluster topology then. getRebalanceKit uses batch size of
        // infinite, so this should be fine.
        String bootstrapUrl = getBootstrapUrl(updatedCurrentCluster, 0);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, maxParallel, finalCluster);
        populateData(currentCluster, rwStoreDefWithReplication);
        final AdminClient adminClient = rebalanceKit.controller.getAdminClient();
        // the plan would cause these partitions to move:
        // Partition : Donor -> stealer
        //
        // p2 (Z-SEC) : s1 -> s0
        // p3-6 (Z-PRI) : s1 -> s2
        // p7 (Z-PRI) : s0 -> s2
        //
        // p5 (Z-SEC): s4 -> s3
        // p6 (Z-PRI): s4 -> s5
        //
        // Therefore, rebalancing will run on servers 0, 2, 3, & 5
        final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient, 1, rwStoreDefWithReplication.getName(), Arrays.asList(6), 20);
        assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
        final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
        final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
        final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
        final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
        for (String key : baselineTuples.keySet()) {
            baselineVersions.put(key, new VectorClock());
        }
        final CountDownLatch latch = new CountDownLatch(2);
        // start get operation.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                SocketStoreClientFactory factory = null;
                try {
                    // wait for the rebalancing to begin
                    List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0), serverMap.get(2), serverMap.get(3), serverMap.get(5));
                    while (!rebalancingComplete.get()) {
                        Iterator<VoldemortServer> serverIterator = serverList.iterator();
                        while (serverIterator.hasNext()) {
                            VoldemortServer server = serverIterator.next();
                            if (ByteUtils.getString(server.getMetadataStore().get(MetadataStore.SERVER_STATE_KEY, null).get(0).getValue(), "UTF-8").compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                logger.info("Server " + server.getIdentityNode().getId() + " transitioned into REBALANCING MODE");
                                serverIterator.remove();
                            }
                        }
                        if (serverList.size() == 0) {
                            rebalancingStarted.set(true);
                            break;
                        }
                    }
                    if (rebalancingStarted.get()) {
                        factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 0)).setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS).setClientZoneId(1));
                        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(testStoreNameRW, null, factory, 3);
                        // now with zero vector clock
                        for (ByteArray movingKey : movingKeysList) {
                            try {
                                String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                String valStr = "proxy_write";
                                storeClientRW.put(keyStr, valStr);
                                baselineTuples.put(keyStr, valStr);
                                // all these keys will have a [5:1] vector
                                // clock, since node 5 is the new pseudo master
                                baselineVersions.get(keyStr).incrementVersion(5, System.currentTimeMillis());
                                proxyWritesDone.set(true);
                                if (rebalancingComplete.get()) {
                                    break;
                                }
                            } catch (InvalidMetadataException e) {
                                // let this go
                                logger.error("Encountered an invalid metadata exception.. ", e);
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy write thread..", e);
                    exceptions.add(e);
                } finally {
                    if (factory != null)
                        factory.close();
                    latch.countDown();
                }
            }
        });
        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    rebalanceKit.rebalance();
                } catch (Exception e) {
                    logger.error("Error in rebalancing... ", e);
                    exceptions.add(e);
                } finally {
                    rebalancingComplete.set(true);
                    latch.countDown();
                }
            }
        });
        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);
        assertEquals("Client did not see all server transition into rebalancing state", rebalancingStarted.get(), true);
        assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
        checkEntriesPostRebalance(updatedCurrentCluster, finalCluster, Lists.newArrayList(rwStoreDefWithReplication), Arrays.asList(0, 1, 2, 3, 4, 5), baselineTuples, baselineVersions);
        checkConsistentMetadata(finalCluster, serverList);
        // check No Exception
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }
        // check that the proxy writes were made to the original donor, node 1
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
        for (Integer nodeid : serverList) clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
        VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
        for (Integer nodeid : serverList) adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
        adminClient.setAdminClientCluster(currentCluster);
        checkForTupleEquivalence(adminClient, 1, testStoreNameRW, movingKeysList, baselineTuples, baselineVersions);
        // stop servers
        try {
            stopServer(serverList);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
        throw ae;
    }
}
Also used : DefaultStoreClient(voldemort.client.DefaultStoreClient) StoreClient(voldemort.client.StoreClient) HashMap(java.util.HashMap) InvalidMetadataException(voldemort.store.InvalidMetadataException) ArrayList(java.util.ArrayList) VoldemortServer(voldemort.server.VoldemortServer) SocketStoreClientFactory(voldemort.client.SocketStoreClientFactory) Iterator(java.util.Iterator) ByteArray(voldemort.utils.ByteArray) List(java.util.List) ArrayList(java.util.ArrayList) ClientConfig(voldemort.client.ClientConfig) VectorClock(voldemort.versioning.VectorClock) Cluster(voldemort.cluster.Cluster) CountDownLatch(java.util.concurrent.CountDownLatch) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) IOException(java.io.IOException) InvalidMetadataException(voldemort.store.InvalidMetadataException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ClusterTestUtils(voldemort.ClusterTestUtils) ExecutorService(java.util.concurrent.ExecutorService) ClockEntry(voldemort.versioning.ClockEntry) AdminClient(voldemort.client.protocol.admin.AdminClient) Test(org.junit.Test)
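The first runnable above embeds a polling loop that watches each server's SERVER_STATE_KEY in the MetadataStore and drops a server from its watch list once it reports REBALANCING_MASTER_SERVER. The same pattern, pulled out as a standalone helper for readability (the helper name and signature are hypothetical; the calls mirror the loop in the runnable):

// Hypothetical helper; mirrors the state-polling loop inside the proxy-write runnable above
private static boolean waitForRebalancingState(List<VoldemortServer> watched,
                                               AtomicBoolean rebalancingComplete) {
    while (!rebalancingComplete.get()) {
        Iterator<VoldemortServer> serverIterator = watched.iterator();
        while (serverIterator.hasNext()) {
            VoldemortServer server = serverIterator.next();
            String state = ByteUtils.getString(server.getMetadataStore()
                                                     .get(MetadataStore.SERVER_STATE_KEY, null)
                                                     .get(0)
                                                     .getValue(), "UTF-8");
            if (state.equals(VoldemortState.REBALANCING_MASTER_SERVER.toString())) {
                serverIterator.remove();
            }
        }
        if (watched.isEmpty()) {
            // every watched server has been observed in REBALANCING_MASTER_SERVER state
            return true;
        }
    }
    // rebalancing finished before all watched servers were seen in that state
    return false;
}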

Example 69 with VoldemortServer

use of voldemort.server.VoldemortServer in project voldemort by voldemort.

the class AdminRebalanceTest method testRebalanceNodeRW2.

@Test(timeout = 60000)
public void testRebalanceNodeRW2() throws IOException {
    try {
        startFourNodeRW();
        // Start another node for only this unit test
        HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_SIZE);
        SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(Lists.newArrayList("tcp://" + currentCluster.getNodeById(0).getHost() + ":" + currentCluster.getNodeById(0).getSocketPort())));
        StoreClient<Object, Object> storeClient1 = factory.getStoreClient("test"), storeClient2 = factory.getStoreClient("test2");
        List<Integer> primaryPartitionsMoved = Lists.newArrayList(0);
        List<Integer> secondaryPartitionsMoved = Lists.newArrayList(8, 9, 10, 11);
        List<Integer> tertiaryPartitionsMoved = Lists.newArrayList(4, 5, 6, 7);
        HashMap<ByteArray, byte[]> primaryEntriesMoved = Maps.newHashMap();
        HashMap<ByteArray, byte[]> secondaryEntriesMoved = Maps.newHashMap();
        HashMap<ByteArray, byte[]> tertiaryEntriesMoved = Maps.newHashMap();
        RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef2, currentCluster);
        for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) {
            storeClient1.put(new String(entry.getKey().get()), new String(entry.getValue()));
            storeClient2.put(new String(entry.getKey().get()), new String(entry.getValue()));
            List<Integer> pList = strategy.getPartitionList(entry.getKey().get());
            if (primaryPartitionsMoved.contains(pList.get(0))) {
                primaryEntriesMoved.put(entry.getKey(), entry.getValue());
            } else if (secondaryPartitionsMoved.contains(pList.get(0))) {
                secondaryEntriesMoved.put(entry.getKey(), entry.getValue());
            } else if (tertiaryPartitionsMoved.contains(pList.get(0))) {
                tertiaryEntriesMoved.put(entry.getKey(), entry.getValue());
            }
        }
        // Set into rebalancing state
        for (RebalanceTaskInfo partitionPlan : plans) {
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, partitionPlan.getInitialCluster());
        }
        // Update the cluster metadata on all three nodes
        for (VoldemortServer server : servers) {
            server.getMetadataStore().put(MetadataStore.CLUSTER_KEY, finalCluster);
        }
        // Actually run it
        try {
            for (RebalanceTaskInfo currentPlan : plans) {
                int asyncId = adminClient.rebalanceOps.rebalanceNode(currentPlan);
                assertNotSame("Got a valid rebalanceAsyncId", -1, asyncId);
                getAdminClient().rpcOps.waitForCompletion(currentPlan.getStealerId(), asyncId, 300, TimeUnit.SECONDS);
                // Test that plan has been removed from the list
                assertFalse(getServer(currentPlan.getStealerId()).getMetadataStore().getRebalancerState().getAll().contains(currentPlan));
            }
        } catch (Exception e) {
            e.printStackTrace();
            fail("Should not throw any exceptions");
        }
        Store<ByteArray, byte[], byte[]> storeTest0 = getStore(0, "test2");
        Store<ByteArray, byte[], byte[]> storeTest1 = getStore(1, "test2");
        Store<ByteArray, byte[], byte[]> storeTest3 = getStore(3, "test2");
        Store<ByteArray, byte[], byte[]> storeTest00 = getStore(0, "test");
        Store<ByteArray, byte[], byte[]> storeTest30 = getStore(3, "test");
        // Primary
        for (Entry<ByteArray, byte[]> entry : primaryEntriesMoved.entrySet()) {
            // Test 2
            // Present on Node 0
            assertSame("entry should be present at store", 1, storeTest0.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest0.get(entry.getKey(), null).get(0).getValue()));
            // Present on Node 1
            assertSame("entry should be present at store", 1, storeTest1.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest1.get(entry.getKey(), null).get(0).getValue()));
            // Present on Node 3
            assertSame("entry should be present at store", 1, storeTest3.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest3.get(entry.getKey(), null).get(0).getValue()));
            // Test
            // Present on Node 0
            assertSame("entry should be present at store", 1, storeTest00.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest00.get(entry.getKey(), null).get(0).getValue()));
            // Present on Node 3
            assertSame("entry should be present at store", 1, storeTest30.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest30.get(entry.getKey(), null).get(0).getValue()));
        }
        // Secondary
        for (Entry<ByteArray, byte[]> entry : secondaryEntriesMoved.entrySet()) {
            // Test 2
            // Present on Node 0
            assertSame("entry should be present at store", 1, storeTest0.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest0.get(entry.getKey(), null).get(0).getValue()));
            // Present on Node 3
            assertSame("entry should be present at store", 1, storeTest3.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest3.get(entry.getKey(), null).get(0).getValue()));
            // Test
            // Present on Node 3
            assertSame("entry should be present at store", 1, storeTest30.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest30.get(entry.getKey(), null).get(0).getValue()));
        }
        // Tertiary
        for (Entry<ByteArray, byte[]> entry : tertiaryEntriesMoved.entrySet()) {
            // Test 2
            // Present on Node 3
            assertSame("entry should be present at store", 1, storeTest3.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest3.get(entry.getKey(), null).get(0).getValue()));
        }
        // All servers should be back to normal state
        for (VoldemortServer server : servers) {
            assertEquals(server.getMetadataStore().getRebalancerState(), new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
            assertEquals(server.getMetadataStore().getServerStateUnlocked(), MetadataStore.VoldemortState.NORMAL_SERVER);
        }
    } finally {
        shutDown();
    }
}
Also used : RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) ArrayList(java.util.ArrayList) VoldemortServer(voldemort.server.VoldemortServer) AlreadyRebalancingException(voldemort.server.rebalance.AlreadyRebalancingException) VoldemortRebalancingException(voldemort.server.rebalance.VoldemortRebalancingException) VoldemortException(voldemort.VoldemortException) IOException(java.io.IOException) SocketStoreClientFactory(voldemort.client.SocketStoreClientFactory) RoutingStrategy(voldemort.routing.RoutingStrategy) ByteArray(voldemort.utils.ByteArray) RebalancerState(voldemort.server.rebalance.RebalancerState) ClientConfig(voldemort.client.ClientConfig) Test(org.junit.Test)
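The assertion blocks above repeat the same two checks (presence and value) against every store; a small hypothetical helper makes that intent explicit. It is not part of the test, just a condensed restatement of the calls used above:

// Hypothetical helper; performs the same presence/value check repeated for each store above
private static void assertEntryPresent(Store<ByteArray, byte[], byte[]> store,
                                       Entry<ByteArray, byte[]> entry) {
    assertSame("entry should be present at store", 1, store.get(entry.getKey(), null).size());
    assertEquals("entry value should match",
                 new String(entry.getValue()),
                 new String(store.get(entry.getKey(), null).get(0).getValue()));
}

With such a helper, the primary-entry loop reduces to calls like assertEntryPresent(storeTest0, entry).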

Example 70 with VoldemortServer

use of voldemort.server.VoldemortServer in project voldemort by voldemort.

the class AdminRebalanceTest method testRebalanceNodeRW.

@Test(timeout = 60000)
public void testRebalanceNodeRW() throws IOException {
    try {
        startThreeNodeRW();
        // Start another node for only this unit test
        HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(TEST_SIZE);
        SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(Lists.newArrayList("tcp://" + currentCluster.getNodeById(0).getHost() + ":" + currentCluster.getNodeById(0).getSocketPort())));
        StoreClient<Object, Object> storeClient1 = factory.getStoreClient("test"), storeClient2 = factory.getStoreClient("test2");
        List<Integer> primaryPartitionsMoved = Lists.newArrayList(0);
        List<Integer> secondaryPartitionsMoved = Lists.newArrayList(4, 5, 6, 7);
        HashMap<ByteArray, byte[]> primaryEntriesMoved = Maps.newHashMap();
        HashMap<ByteArray, byte[]> secondaryEntriesMoved = Maps.newHashMap();
        RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef2, currentCluster);
        for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) {
            storeClient1.put(new String(entry.getKey().get()), new String(entry.getValue()));
            storeClient2.put(new String(entry.getKey().get()), new String(entry.getValue()));
            List<Integer> pList = strategy.getPartitionList(entry.getKey().get());
            if (primaryPartitionsMoved.contains(pList.get(0))) {
                primaryEntriesMoved.put(entry.getKey(), entry.getValue());
            } else if (secondaryPartitionsMoved.contains(pList.get(0))) {
                secondaryEntriesMoved.put(entry.getKey(), entry.getValue());
            }
        }
        try {
            adminClient.rebalanceOps.rebalanceNode(plans.get(0));
            fail("Should have thrown an exception since not in rebalancing state");
        } catch (VoldemortException e) {
        }
        // Set into rebalancing state
        for (RebalanceTaskInfo partitionPlan : plans) {
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.SERVER_STATE_KEY, MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, partitionPlan.getInitialCluster());
        }
        try {
            adminClient.rebalanceOps.rebalanceNode(plans.get(0));
            fail("Should have thrown an exception since no steal info");
        } catch (VoldemortException e) {
        }
        // Put a plan different from the plan that we actually want to
        // execute
        int incorrectStealerId = (plans.get(0).getStealerId() + 1) % 3;
        getServer(plans.get(0).getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(new RebalanceTaskInfo(incorrectStealerId, plans.get(0).getDonorId(), plans.get(0).getStoreToPartitionIds(), plans.get(0).getInitialCluster()))));
        try {
            adminClient.rebalanceOps.rebalanceNode(plans.get(0));
            fail("Should have thrown an exception since the two plans eventhough have the same donor are different");
        } catch (VoldemortException e) {
        }
        // Set the rebalance info on the stealer node
        for (RebalanceTaskInfo partitionPlan : plans) {
            getServer(partitionPlan.getStealerId()).getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO, new RebalancerState(Lists.newArrayList(RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
        }
        // Update the cluster metadata on all three nodes
        for (VoldemortServer server : servers) {
            server.getMetadataStore().put(MetadataStore.CLUSTER_KEY, finalCluster);
        }
        // Actually run it
        try {
            for (RebalanceTaskInfo currentPlan : plans) {
                int asyncId = adminClient.rebalanceOps.rebalanceNode(currentPlan);
                // AlreadyRebalancingException
                try {
                    adminClient.rebalanceOps.rebalanceNode(currentPlan);
                    fail("Should have thrown an exception since it is already rebalancing");
                } catch (AlreadyRebalancingException e) {
                }
                assertNotSame("Got a valid rebalanceAsyncId", -1, asyncId);
                getAdminClient().rpcOps.waitForCompletion(currentPlan.getStealerId(), asyncId, 300, TimeUnit.SECONDS);
                // Test that plan has been removed from the list
                assertFalse(getServer(currentPlan.getStealerId()).getMetadataStore().getRebalancerState().getAll().contains(currentPlan));
            }
        } catch (Exception e) {
            e.printStackTrace();
            fail("Should not throw any exceptions");
        }
        Store<ByteArray, byte[], byte[]> storeTest0 = getStore(0, "test2");
        Store<ByteArray, byte[], byte[]> storeTest2 = getStore(2, "test2");
        Store<ByteArray, byte[], byte[]> storeTest20 = getStore(2, "test");
        // Primary is on Node 0 and not on Node 1
        for (Entry<ByteArray, byte[]> entry : primaryEntriesMoved.entrySet()) {
            assertSame("entry should be present at store", 1, storeTest0.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest0.get(entry.getKey(), null).get(0).getValue()));
            // Check in other store
            assertSame("entry should be present in store test2 ", 1, storeTest20.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest20.get(entry.getKey(), null).get(0).getValue()));
        }
        // Secondary is on Node 2 and not on Node 0
        for (Entry<ByteArray, byte[]> entry : secondaryEntriesMoved.entrySet()) {
            assertSame("entry should be present at store", 1, storeTest2.get(entry.getKey(), null).size());
            assertEquals("entry value should match", new String(entry.getValue()), new String(storeTest2.get(entry.getKey(), null).get(0).getValue()));
        }
        // All servers should be back to normal state
        for (VoldemortServer server : servers) {
            assertEquals(server.getMetadataStore().getRebalancerState(), new RebalancerState(new ArrayList<RebalanceTaskInfo>()));
            assertEquals(server.getMetadataStore().getServerStateUnlocked(), MetadataStore.VoldemortState.NORMAL_SERVER);
        }
    } finally {
        shutDown();
    }
}
Also used : RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) ArrayList(java.util.ArrayList) VoldemortServer(voldemort.server.VoldemortServer) VoldemortException(voldemort.VoldemortException) AlreadyRebalancingException(voldemort.server.rebalance.AlreadyRebalancingException) VoldemortRebalancingException(voldemort.server.rebalance.VoldemortRebalancingException) VoldemortException(voldemort.VoldemortException) IOException(java.io.IOException) AlreadyRebalancingException(voldemort.server.rebalance.AlreadyRebalancingException) SocketStoreClientFactory(voldemort.client.SocketStoreClientFactory) RoutingStrategy(voldemort.routing.RoutingStrategy) ByteArray(voldemort.utils.ByteArray) RebalancerState(voldemort.server.rebalance.RebalancerState) ClientConfig(voldemort.client.ClientConfig) Test(org.junit.Test)
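Both testRebalanceNodeRW and testRebalanceNodeRW2 prime each stealer node the same way before running a plan: set SERVER_STATE_KEY to REBALANCING_MASTER_SERVER, store the steal info, and record the source cluster XML. A sketch of that sequence as a standalone helper (the method name is hypothetical; every call appears verbatim in the tests above):

// Hypothetical helper mirroring the "Set into rebalancing state" blocks in both tests above
private void markStealersRebalancing(List<RebalanceTaskInfo> plans) {
    for (RebalanceTaskInfo partitionPlan : plans) {
        MetadataStore metadataStore = getServer(partitionPlan.getStealerId()).getMetadataStore();
        metadataStore.put(MetadataStore.SERVER_STATE_KEY,
                          MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
        metadataStore.put(MetadataStore.REBALANCING_STEAL_INFO,
                          new RebalancerState(Lists.newArrayList(
                                  RebalanceTaskInfo.create(partitionPlan.toJsonString()))));
        metadataStore.put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML,
                          partitionPlan.getInitialCluster());
    }
}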

Aggregations

VoldemortServer (voldemort.server.VoldemortServer) 86
Properties (java.util.Properties) 36
VoldemortConfig (voldemort.server.VoldemortConfig) 24
Test (org.junit.Test) 23
Node (voldemort.cluster.Node) 23
Before (org.junit.Before) 21
SocketStoreFactory (voldemort.store.socket.SocketStoreFactory) 20
ClientRequestExecutorPool (voldemort.store.socket.clientrequest.ClientRequestExecutorPool) 18
ClientConfig (voldemort.client.ClientConfig) 17
ByteArray (voldemort.utils.ByteArray) 17
ArrayList (java.util.ArrayList) 16
After (org.junit.After) 15
StoreDefinition (voldemort.store.StoreDefinition) 15
SocketStoreClientFactory (voldemort.client.SocketStoreClientFactory) 14
Cluster (voldemort.cluster.Cluster) 14
IOException (java.io.IOException) 11
HashMap (java.util.HashMap) 11
AdminClient (voldemort.client.protocol.admin.AdminClient) 11
VectorClock (voldemort.versioning.VectorClock) 10
StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper) 9