Example 46 with AdminClient

use of voldemort.client.protocol.admin.AdminClient in project voldemort by voldemort.

the class ConsistencyFixWorkerTest method testRepair.

public void testRepair(int[] putNodes, boolean orphan) {
    byte[] bKey = TestUtils.randomBytes(10);
    String hexKey = ByteUtils.toHexString(bKey);
    ByteArray baKey = new ByteArray(bKey);
    BadKey badKey;
    QueryKeyResult queryKeyResult;
    if (!orphan) {
        badKey = new BadKey(hexKey, hexKey + "\n");
        queryKeyResult = null;
    } else {
        StringBuilder orphanInput = new StringBuilder();
        orphanInput.append(hexKey + "," + "1\n");
        List<Versioned<byte[]>> values = new ArrayList<Versioned<byte[]>>(0);
        int arbitraryNodeId = 2;
        Versioned<byte[]> versioned = TestUtils.getVersioned(TestUtils.randomBytes(25), arbitraryNodeId);
        orphanInput.append(ByteUtils.toHexString(versioned.getValue()));
        orphanInput.append("," + versioned.toString() + "\n");
        values.add(versioned);
        badKey = new BadKey(hexKey, orphanInput.toString());
        queryKeyResult = new QueryKeyResult(baKey, values);
    }
    Versioned<byte[]> value = TestUtils.getVersioned(TestUtils.randomBytes(25), 0);
    String url = ConsistencyFixTest.setUpCluster();
    ConsistencyFix consistencyFix = new ConsistencyFix(url, ConsistencyFixTest.STORE_NAME, 100, 100, false, false);
    AdminClient adminClient = consistencyFix.getAdminClient();
    System.out.println("Initial get");
    for (int i = 0; i < 4; ++i) {
        List<Versioned<byte[]>> results;
        results = adminClient.storeOps.getNodeKey(ConsistencyFixTest.STORE_NAME, i, baKey);
        assertTrue(results.size() == 0);
    }
    System.out.println("Puts");
    for (int putNode : putNodes) {
        NodeValue<ByteArray, byte[]> nodeKeyValue;
        nodeKeyValue = new NodeValue<ByteArray, byte[]>(putNode, baKey, value);
        adminClient.storeOps.putNodeKeyValue(ConsistencyFixTest.STORE_NAME, nodeKeyValue);
    }
    // Construct normal consistency fix worker
    ConsistencyFixWorker consistencyFixWorker = null;
    if (!orphan) {
        consistencyFixWorker = new ConsistencyFixWorker(badKey, consistencyFix, null);
    } else {
        consistencyFixWorker = new ConsistencyFixWorker(badKey, consistencyFix, null, queryKeyResult);
    }
    consistencyFixWorker.run();
    System.out.println("Second get");
    int expectedNumVersions = 0;
    if (putNodes.length > 0) {
        expectedNumVersions++;
    }
    if (orphan) {
        expectedNumVersions++;
    }
    for (int i = 0; i < 4; ++i) {
        System.out.println("Node : " + i);
        List<Versioned<byte[]>> results;
        results = adminClient.storeOps.getNodeKey(ConsistencyFixTest.STORE_NAME, i, baKey);
        for (Versioned<byte[]> v : results) {
            System.out.println("\t" + v.getVersion());
        }
        assertTrue(results.size() == expectedNumVersions);
    }
}
Also used : Versioned(voldemort.versioning.Versioned) ArrayList(java.util.ArrayList) QueryKeyResult(voldemort.client.protocol.admin.QueryKeyResult) BadKey(voldemort.utils.ConsistencyFix.BadKey) AdminClient(voldemort.client.protocol.admin.AdminClient)
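
Distilled from the test above: the two node-targeted calls on adminClient.storeOps form a simple round trip, where putNodeKeyValue writes a key straight to one node (bypassing normal routing) and getNodeKey reads back every version that node holds. A minimal standalone sketch of that round trip follows; the bootstrap URL, store name, and node id are illustrative placeholders, not values from the test.

import java.util.List;

import voldemort.client.protocol.admin.AdminClient;
import voldemort.store.routed.NodeValue;
import voldemort.utils.ByteArray;
import voldemort.versioning.Versioned;

public class NodeKeyRoundTrip {

    public static void main(String[] args) {
        // Hypothetical bootstrap URL and store name, for illustration only.
        AdminClient adminClient = new AdminClient("tcp://localhost:6666");
        ByteArray key = new ByteArray("hello".getBytes());
        Versioned<byte[]> value = new Versioned<byte[]>("world".getBytes());
        // Write the value directly to node 0, bypassing normal routing.
        adminClient.storeOps.putNodeKeyValue("test-store", new NodeValue<ByteArray, byte[]>(0, key, value));
        // Read back every version that node 0 holds for this key.
        List<Versioned<byte[]>> results = adminClient.storeOps.getNodeKey("test-store", 0, key);
        for (Versioned<byte[]> v : results) {
            System.out.println(v.getVersion());
        }
        adminClient.close();
    }
}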

Example 47 with AdminClient

use of voldemort.client.protocol.admin.AdminClient in project voldemort by voldemort.

the class StreamingSlopPusherJob method run.

public void run() {
    // load the metadata before each run, in case the cluster is changed
    loadMetadata();
    // don't try to run slop pusher job when rebalancing
    if (metadataStore.getServerStateUnlocked().equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
        logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
        return;
    }
    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);
    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;
    if (adminClient == null) {
        adminClient = new AdminClient(cluster, new AdminClientConfig().setMaxConnectionsPerNode(1));
    }
    if (voldemortConfig.getSlopZonesDownToTerminate() > 0) {
        // Populating the zone mapping for early termination
        zoneMapping.clear();
        for (Node n : cluster.getNodes()) {
            if (failureDetector.isAvailable(n)) {
                Set<Integer> nodes = zoneMapping.get(n.getZoneId());
                if (nodes == null) {
                    nodes = Sets.newHashSet();
                    zoneMapping.put(n.getZoneId(), nodes);
                }
                nodes.add(n.getId());
            }
        }
        // Check how many zones are down
        int zonesDown = 0;
        for (Zone zone : cluster.getZones()) {
            if (zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0)
                zonesDown++;
        }
        // Terminate early
        if (voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size() && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
            logger.info("Completed streaming slop pusher job at " + startTime + " early because " + zonesDown + " zones are down");
            stopAdminClient();
            return;
        }
    }
    // Clearing the statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for (Node node : cluster.getNodes()) {
        attemptedByNode.put(node.getId(), 0L);
        succeededByNode.put(node.getId(), 0L);
    }
    Set<String> storeNames = StoreDefinitionUtils.getStoreNamesSet(metadataStore.getStoreDefList());
    acquireRepairPermit();
    try {
        StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
        iterator = slopStore.entries();
        while (iterator.hasNext()) {
            Pair<ByteArray, Versioned<Slop>> keyAndVal;
            try {
                keyAndVal = iterator.next();
                Versioned<Slop> versioned = keyAndVal.getSecond();
                // Track the scan progress
                if (this.streamStats != null) {
                    this.streamStats.reportStreamingSlopScan();
                }
                // Retrieve the node
                int nodeId = versioned.getValue().getNodeId();
                // check for dead slops
                if (isSlopDead(cluster, storeNames, versioned.getValue())) {
                    handleDeadSlop(slopStorageEngine, keyAndVal);
                    // dead slop has been handled; skip to the next entry
                    continue;
                }
                Node node = cluster.getNodeById(nodeId);
                attemptedPushes.incrementAndGet();
                Long attempted = attemptedByNode.get(nodeId);
                attemptedByNode.put(nodeId, attempted + 1L);
                if (attemptedPushes.get() % 10000 == 0)
                    logger.info("Attempted pushing " + attemptedPushes + " slops");
                if (logger.isTraceEnabled())
                    logger.trace("Pushing slop for " + versioned.getValue().getNodeId() + " and store  " + versioned.getValue().getStoreName() + " of key: " + versioned.getValue().getKey());
                if (failureDetector.isAvailable(node)) {
                    SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
                    if (slopQueue == null) {
                        // No previous slop queue, add one
                        slopQueue = new SynchronousQueue<Versioned<Slop>>();
                        slopQueues.put(nodeId, slopQueue);
                        consumerResults.add(consumerExecutor.submit(new SlopConsumer(nodeId, slopQueue, slopStorageEngine)));
                    }
                    boolean offered = slopQueue.offer(versioned, voldemortConfig.getClientRoutingTimeoutMs(), TimeUnit.MILLISECONDS);
                    if (!offered) {
                        if (logger.isDebugEnabled())
                            logger.debug("No consumer appeared for slop in " + voldemortConfig.getClientRoutingTimeoutMs() + " ms");
                    }
                    readThrottler.maybeThrottle(nBytesRead(keyAndVal));
                } else {
                    logger.trace(node + " declared down, won't push slop");
                }
            } catch (RejectedExecutionException e) {
                throw new VoldemortException("Ran out of threads in executor", e);
            }
        }
    } catch (InterruptedException e) {
        logger.warn("Interrupted exception", e);
        terminatedEarly = true;
    } catch (Exception e) {
        logger.error(e, e);
        terminatedEarly = true;
    } finally {
        try {
            if (iterator != null)
                iterator.close();
        } catch (Exception e) {
            logger.warn("Failed to close iterator cleanly as database might be closed", e);
        }
        // Adding the poison pill
        for (SynchronousQueue<Versioned<Slop>> slopQueue : slopQueues.values()) {
            try {
                slopQueue.put(END);
            } catch (InterruptedException e) {
                logger.warn("Error putting poison pill", e);
            }
        }
        for (Future result : consumerResults) {
            try {
                result.get();
            } catch (Exception e) {
                logger.warn("Exception in consumer", e);
            }
        }
        // Only update the counts if no exception occurred
        if (!terminatedEarly) {
            Map<Integer, Long> outstanding = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - " + succeededByNode.get(nodeId) + " - Attempted - " + attemptedByNode.get(nodeId));
                outstanding.put(nodeId, attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
            }
            slopStorageEngine.resetStats(outstanding);
            logger.info("Completed streaming slop pusher job which started at " + startTime);
        } else {
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - " + succeededByNode.get(nodeId) + " - Attempted - " + attemptedByNode.get(nodeId));
            }
            logger.info("Completed early streaming slop pusher job which started at " + startTime);
        }
        // Shut down the admin client so as not to waste connections
        consumerResults.clear();
        slopQueues.clear();
        stopAdminClient();
        this.repairPermits.release(this.getClass().getCanonicalName());
    }
}
Also used : Versioned(voldemort.versioning.Versioned) Node(voldemort.cluster.Node) VoldemortException(voldemort.VoldemortException) ByteArray(voldemort.utils.ByteArray) SlopStorageEngine(voldemort.store.slop.SlopStorageEngine) Pair(voldemort.utils.Pair) AdminClientConfig(voldemort.client.protocol.admin.AdminClientConfig) Zone(voldemort.cluster.Zone) Date(java.util.Date) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) VoldemortException(voldemort.VoldemortException) UnreachableStoreException(voldemort.store.UnreachableStoreException) AtomicLong(java.util.concurrent.atomic.AtomicLong) AtomicLong(java.util.concurrent.atomic.AtomicLong) Future(java.util.concurrent.Future) Slop(voldemort.store.slop.Slop) AdminClient(voldemort.client.protocol.admin.AdminClient)
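
Note how the job builds its AdminClient lazily from the in-memory Cluster, capping the pool with AdminClientConfig.setMaxConnectionsPerNode(1) so a single streaming pusher does not starve regular client traffic. A minimal sketch of just that construction pattern, assuming the Cluster object is already at hand:

import voldemort.client.protocol.admin.AdminClient;
import voldemort.client.protocol.admin.AdminClientConfig;
import voldemort.cluster.Cluster;

public class SlopPusherAdminClientFactory {

    // One connection per node is enough for a single streaming pusher
    // and avoids starving regular client traffic.
    public static AdminClient createForSlopPushing(Cluster cluster) {
        return new AdminClient(cluster, new AdminClientConfig().setMaxConnectionsPerNode(1));
    }
}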

Example 48 with AdminClient

use of voldemort.client.protocol.admin.AdminClient in project voldemort by voldemort.

the class ZoneShrinkageEndToEndTest method executeShrinkZone.

public void executeShrinkZone() {
    AdminClient adminClient;
    logger.info("-------------------------------");
    logger.info("        UPDATING BOTH XML      ");
    logger.info("-------------------------------");
    adminClient = new AdminClient(bootstrapURL);
    // set stores metadata (simulating admin tools)
    String validatedStoresXML = storeDefinitionsMapper.writeStoreList(storeDefinitionsMapper.readStoreList(new StringReader(finalStoresXML)));
    String validatedClusterXML = clusterMapper.writeCluster(clusterMapper.readCluster(new StringReader(finalClusterXML)));
    VoldemortAdminTool.executeSetMetadataPair(-1, adminClient, "cluster.xml", validatedClusterXML, "stores.xml", validatedStoresXML);
    adminClient.close();
    logger.info("-------------------------------");
    logger.info("        UPDATED BOTH XML       ");
    logger.info("-------------------------------");
}
Also used : StringReader(java.io.StringReader) AdminClient(voldemort.client.protocol.admin.AdminClient)
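
The heart of executeShrinkZone is a validate-then-push sequence: each XML document is round-tripped through its mapper to catch malformed input early, and executeSetMetadataPair then updates cluster.xml and stores.xml together (node id -1 targets all nodes) so no node ever sees an inconsistent pair. A sketch of the same sequence as a standalone helper, with the XML strings assumed as caller-supplied inputs:

import java.io.StringReader;

import voldemort.VoldemortAdminTool;
import voldemort.client.protocol.admin.AdminClient;
import voldemort.xml.ClusterMapper;
import voldemort.xml.StoreDefinitionsMapper;

public class MetadataPairUpdate {

    public static void updateBothXml(String bootstrapUrl, String clusterXML, String storesXML) {
        AdminClient adminClient = new AdminClient(bootstrapUrl);
        try {
            // Round-trip both documents through their mappers to fail fast
            // on malformed XML before touching any node.
            ClusterMapper clusterMapper = new ClusterMapper();
            StoreDefinitionsMapper storesMapper = new StoreDefinitionsMapper();
            String validatedClusterXML = clusterMapper.writeCluster(clusterMapper.readCluster(new StringReader(clusterXML)));
            String validatedStoresXML = storesMapper.writeStoreList(storesMapper.readStoreList(new StringReader(storesXML)));
            // -1 targets all nodes; both keys are updated as one pair.
            VoldemortAdminTool.executeSetMetadataPair(-1, adminClient, "cluster.xml", validatedClusterXML, "stores.xml", validatedStoresXML);
        } finally {
            adminClient.close();
        }
    }
}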

Example 49 with AdminClient

use of voldemort.client.protocol.admin.AdminClient in project voldemort by voldemort.

the class ZonedRebalanceNonContiguousZonesTest method testRebalanceCleanPrimarySecondary.

@Test(timeout = 600000)
public void testRebalanceCleanPrimarySecondary() throws Exception {
    logger.info("Starting testRebalanceCleanPrimary");
    try {
        int[] zoneIds = new int[] { 1, 3 };
        int[][] nodesPerZone = new int[][] { { 3, 4, 5 }, { 9, 10, 11 } };
        int[][] partitionMap = new int[][] { { 0 }, { 1, 6 }, { 2 }, { 3 }, { 4, 7 }, { 5 } };
        Cluster currentCluster = ServerTestUtils.getLocalNonContiguousZonedCluster(zoneIds, nodesPerZone, partitionMap, ClusterTestUtils.getClusterPorts());
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 5, Lists.newArrayList(7));
        finalCluster = UpdateClusterUtils.createUpdatedCluster(finalCluster, 11, Lists.newArrayList(6));
        /**
         * original server partition ownership
         *
         * [s3 : p0,p3,p4,p5,p6,p7] [s4 : p1-p7] [s5 : p1,p2] [s9 : p0,p1,p2,p3,p6,p7] [s10 : p1-p7] [s11 : p4,p5]
         *
         * final server partition ownership
         *
         * [s3 : p0,p2,p3,p4,p5,p6,p7] [s4 : p0,p1] [s5 : p1-p7] [s9 : p0,p1,p2,p3,p5,p6,p7]
         * [s10 : p0,p1,p2,p3,p4,p7] [s11 : p4,p5,p6]
         */
        // start servers
        List<Integer> serverList = Arrays.asList(3, 4, 5, 9, 10, 11);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("enable.repair", "true");
        currentCluster = startServers(currentCluster, rwStoreDefFileWithReplication, serverList, configProps);
        String bootstrapUrl = getBootstrapUrl(currentCluster, 3);
        ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, finalCluster);
        try {
            populateData(currentCluster, rwStoreDefWithReplication);
            AdminClient admin = rebalanceKit.controller.getAdminClient();
            List<ByteArray> p6KeySamples = sampleKeysFromPartition(admin, 4, rwStoreDefWithReplication.getName(), Arrays.asList(6), 20);
            List<ByteArray> p1KeySamples = sampleKeysFromPartition(admin, 4, rwStoreDefWithReplication.getName(), Arrays.asList(1), 20);
            List<ByteArray> p3KeySamples = sampleKeysFromPartition(admin, 3, rwStoreDefWithReplication.getName(), Arrays.asList(3), 20);
            List<ByteArray> p2KeySamples = sampleKeysFromPartition(admin, 4, rwStoreDefWithReplication.getName(), Arrays.asList(2), 20);
            List<ByteArray> p7KeySamples = sampleKeysFromPartition(admin, 10, rwStoreDefWithReplication.getName(), Arrays.asList(7), 20);
            rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, Arrays.asList(3, 4, 5, 9));
            checkConsistentMetadata(finalCluster, serverList);
            // Do the cleanup operation
            for (int i = 0; i < 6; i++) {
                admin.storeMntOps.repairJob(serverList.get(i));
            }
            // wait for the repairs to complete
            for (int i = 0; i < 6; i++) {
                ServerTestUtils.waitForAsyncOperationOnServer(serverMap.get(serverList.get(i)), "Repair", 5000);
            }
            // confirm a primary movement in zone 1: p6 : s4 -> s5. The zone 1
            // primary changes when p6 moves across zones; check for the
            // existence of p6 on server 5
            checkForKeyExistence(admin, 5, rwStoreDefWithReplication.getName(), p6KeySamples);
            // confirm a secondary movement in zone 1: p2 : s4 -> s3; check
            // for its existence on server 3 and, after the repair job, its
            // absence from server 4
            checkForKeyExistence(admin, 3, rwStoreDefWithReplication.getName(), p2KeySamples);
            // also check that p1 is stable on server 4 [primary stability]
            checkForKeyExistence(admin, 4, rwStoreDefWithReplication.getName(), p1KeySamples);
            // check that p3 is stable in server 3 [Secondary stability]
            checkForKeyExistence(admin, 3, rwStoreDefWithReplication.getName(), p3KeySamples);
            // finally, test server 10, which changed from primary to
            // secondary for p7
            checkForKeyExistence(admin, 10, rwStoreDefWithReplication.getName(), p7KeySamples);
        } finally {
            // stop servers
            stopServer(serverList);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testRebalanceCleanPrimarySecondary ", ae);
        throw ae;
    }
}
Also used : HashMap(java.util.HashMap) Cluster(voldemort.cluster.Cluster) ClusterTestUtils(voldemort.ClusterTestUtils) ByteArray(voldemort.utils.ByteArray) AdminClient(voldemort.client.protocol.admin.AdminClient) Test(org.junit.Test)
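
The post-rebalance cleanup in this test is just a loop over admin.storeMntOps.repairJob, which asynchronously deletes data a node no longer owns under the new topology. A minimal sketch, assuming a reachable cluster; the bootstrap URL is a placeholder and the node ids mirror the test:

import java.util.Arrays;
import java.util.List;

import voldemort.client.protocol.admin.AdminClient;

public class RepairAllNodes {

    public static void main(String[] args) {
        // Hypothetical bootstrap URL; node ids mirror the test above.
        AdminClient admin = new AdminClient("tcp://localhost:6666");
        List<Integer> nodeIds = Arrays.asList(3, 4, 5, 9, 10, 11);
        // repairJob is asynchronous: each node deletes data it no longer
        // owns under the current cluster topology.
        for (int nodeId : nodeIds) {
            admin.storeMntOps.repairJob(nodeId);
        }
        admin.close();
    }
}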

Example 50 with AdminClient

use of voldemort.client.protocol.admin.AdminClient in project voldemort by voldemort.

the class AbstractZonedRebalanceTest method testProxyPutDuringRebalancing.

@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
    logger.info("Starting testProxyPutDuringRebalancing");
    try {
        Cluster currentCluster = ServerTestUtils.getLocalZonedCluster(6, 2, new int[] { 0, 0, 0, 1, 1, 1 }, new int[][] { { 0 }, { 1, 6 }, { 2 }, { 3 }, { 4, 7 }, { 5 } });
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Lists.newArrayList(7));
        finalCluster = UpdateClusterUtils.createUpdatedCluster(finalCluster, 5, Lists.newArrayList(6));
        /**
         * Original partition map
         *
         * [s0 : p0] [s1 : p1, p6] [s2 : p2]
         *
         * [s3 : p3] [s4 : p4, p7] [s5 : p5]
         *
         * final server partition ownership
         *
         * [s0 : p0] [s1 : p1] [s2 : p2, p7]
         *
         * [s3 : p3] [s4 : p4] [s5 : p5, p6]
         *
         * Note that rwStoreDefFileWithReplication is a "2/1/1" store def.
         *
         * Original server n-ary partition ownership
         *
         * [s0 : p0, p3-7] [s1 : p0-p7] [s2 : p1-2]
         *
         * [s3 : p0-3, p6-7] [s4 : p0-p7] [s5 : p4-5]
         *
         * final server n-ary partition ownership
         *
         * [s0 : p0, p2-7] [s1 : p0-1] [s2 : p1-p7]
         *
         * [s3 : p0-3, p5-7] [s4 : p0-4, p7] [s5 : p4-6]
         */
        List<Integer> serverList = Arrays.asList(0, 1, 2, 3, 4, 5);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster, rwStoreDefFileWithReplication, serverList, configProps);
        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());
        // It is imperative that we test in a single shot, since multiple
        // batches would mean the proxy bridges being torn down and
        // re-established multiple times, and we could not then test against
        // the source cluster topology. getRebalanceKit uses an infinite
        // batch size, so this should be fine.
        String bootstrapUrl = getBootstrapUrl(updatedCurrentCluster, 0);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, maxParallel, finalCluster);
        populateData(currentCluster, rwStoreDefWithReplication);
        final AdminClient adminClient = rebalanceKit.controller.getAdminClient();
        // the plan would cause these partitions to move:
        // Partition : Donor -> stealer
        // 
        // p2 (Z-SEC) : s1 -> s0
        // p3-6 (Z-PRI) : s1 -> s2
        // p7 (Z-PRI) : s0 -> s2
        // 
        // p5 (Z-SEC): s4 -> s3
        // p6 (Z-PRI): s4 -> s5
        // 
        // therefore, rebalancing will run on servers 0, 2, 3, & 5
        final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient, 1, rwStoreDefWithReplication.getName(), Arrays.asList(6), 20);
        assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
        final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
        final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
        final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
        final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
        for (String key : baselineTuples.keySet()) {
            baselineVersions.put(key, new VectorClock());
        }
        final CountDownLatch latch = new CountDownLatch(2);
        // start get operation.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                SocketStoreClientFactory factory = null;
                try {
                    // wait for the rebalancing to begin
                    List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(0), serverMap.get(2), serverMap.get(3), serverMap.get(5));
                    while (!rebalancingComplete.get()) {
                        Iterator<VoldemortServer> serverIterator = serverList.iterator();
                        while (serverIterator.hasNext()) {
                            VoldemortServer server = serverIterator.next();
                            if (ByteUtils.getString(server.getMetadataStore().get(MetadataStore.SERVER_STATE_KEY, null).get(0).getValue(), "UTF-8").compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                logger.info("Server " + server.getIdentityNode().getId() + " transitioned into REBALANCING MODE");
                                serverIterator.remove();
                            }
                        }
                        if (serverList.size() == 0) {
                            rebalancingStarted.set(true);
                            break;
                        }
                    }
                    if (rebalancingStarted.get()) {
                        factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 0)).setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS).setClientZoneId(1));
                        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(testStoreNameRW, null, factory, 3);
                        // now with zero vector clock
                        for (ByteArray movingKey : movingKeysList) {
                            try {
                                String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                String valStr = "proxy_write";
                                storeClientRW.put(keyStr, valStr);
                                baselineTuples.put(keyStr, valStr);
                                // all these keys will have a [5:1] vector
                                // clock, as node 5 is the new pseudo-master
                                baselineVersions.get(keyStr).incrementVersion(5, System.currentTimeMillis());
                                proxyWritesDone.set(true);
                                if (rebalancingComplete.get()) {
                                    break;
                                }
                            } catch (InvalidMetadataException e) {
                                // let this go
                                logger.error("Encountered an invalid metadata exception.. ", e);
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy write thread..", e);
                    exceptions.add(e);
                } finally {
                    if (factory != null)
                        factory.close();
                    latch.countDown();
                }
            }
        });
        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    rebalanceKit.rebalance();
                } catch (Exception e) {
                    logger.error("Error in rebalancing... ", e);
                    exceptions.add(e);
                } finally {
                    rebalancingComplete.set(true);
                    latch.countDown();
                }
            }
        });
        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);
        assertEquals("Client did not see all server transition into rebalancing state", rebalancingStarted.get(), true);
        assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
        checkEntriesPostRebalance(updatedCurrentCluster, finalCluster, Lists.newArrayList(rwStoreDefWithReplication), Arrays.asList(0, 1, 2, 3, 4, 5), baselineTuples, baselineVersions);
        checkConsistentMetadata(finalCluster, serverList);
        // check that no exceptions occurred
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }
        // check that the proxy writes were made to the original donor, node 1
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
        for (Integer nodeid : serverList) clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
        VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
        for (Integer nodeid : serverList) adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
        adminClient.setAdminClientCluster(currentCluster);
        checkForTupleEquivalence(adminClient, 1, testStoreNameRW, movingKeysList, baselineTuples, baselineVersions);
        // stop servers
        try {
            stopServer(serverList);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
        throw ae;
    }
}
Also used : DefaultStoreClient(voldemort.client.DefaultStoreClient) StoreClient(voldemort.client.StoreClient) HashMap(java.util.HashMap) InvalidMetadataException(voldemort.store.InvalidMetadataException) ArrayList(java.util.ArrayList) VoldemortServer(voldemort.server.VoldemortServer) SocketStoreClientFactory(voldemort.client.SocketStoreClientFactory) Iterator(java.util.Iterator) ByteArray(voldemort.utils.ByteArray) List(java.util.List) ArrayList(java.util.ArrayList) ClientConfig(voldemort.client.ClientConfig) VectorClock(voldemort.versioning.VectorClock) Cluster(voldemort.cluster.Cluster) CountDownLatch(java.util.concurrent.CountDownLatch) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) IOException(java.io.IOException) InvalidMetadataException(voldemort.store.InvalidMetadataException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ClusterTestUtils(voldemort.ClusterTestUtils) ExecutorService(java.util.concurrent.ExecutorService) ClockEntry(voldemort.versioning.ClockEntry) AdminClient(voldemort.client.protocol.admin.AdminClient) Test(org.junit.Test)
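
The closing verification rolls the cluster metadata back to the pre-rebalance topology: build a VectorClock with an entry per node so it dominates whatever clock the rebalance wrote, push the original cluster.xml to each node via metadataMgmtOps.updateRemoteCluster, and repoint the AdminClient with setAdminClientCluster. A sketch of that rollback in isolation, with the AdminClient and original Cluster assumed to be supplied by the caller:

import java.util.ArrayList;
import java.util.List;

import voldemort.client.protocol.admin.AdminClient;
import voldemort.cluster.Cluster;
import voldemort.versioning.ClockEntry;
import voldemort.versioning.VectorClock;

public class ClusterXmlRollback {

    public static void rollBack(AdminClient adminClient, Cluster originalCluster, List<Integer> nodeIds) {
        // Build a clock with an entry per node so it supersedes whatever
        // clock the rebalance left behind.
        long now = System.currentTimeMillis();
        List<ClockEntry> entries = new ArrayList<ClockEntry>(nodeIds.size());
        for (Integer nodeId : nodeIds) {
            entries.add(new ClockEntry(nodeId.shortValue(), now));
        }
        VectorClock clock = new VectorClock(entries, now);
        // Push the original topology to every node, then repoint the client.
        for (Integer nodeId : nodeIds) {
            adminClient.metadataMgmtOps.updateRemoteCluster(nodeId, originalCluster, clock);
        }
        adminClient.setAdminClientCluster(originalCluster);
    }
}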

Aggregations

AdminClient (voldemort.client.protocol.admin.AdminClient): 80
Test (org.junit.Test): 35
Cluster (voldemort.cluster.Cluster): 26
Node (voldemort.cluster.Node): 26
Properties (java.util.Properties): 19
StoreDefinition (voldemort.store.StoreDefinition): 19
ArrayList (java.util.ArrayList): 18
AdminClientConfig (voldemort.client.protocol.admin.AdminClientConfig): 18
VoldemortException (voldemort.VoldemortException): 17
IOException (java.io.IOException): 14
Before (org.junit.Before): 14
ByteArray (voldemort.utils.ByteArray): 14
HashMap (java.util.HashMap): 13
StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper): 13
File (java.io.File): 11
VoldemortServer (voldemort.server.VoldemortServer): 11
ClientConfig (voldemort.client.ClientConfig): 10
VectorClock (voldemort.versioning.VectorClock): 10
Versioned (voldemort.versioning.Versioned): 9
ClusterMapper (voldemort.xml.ClusterMapper): 9