
Example 26 with VectorClock

use of voldemort.versioning.VectorClock in project voldemort by voldemort.

From the class ZoneShrinkageEndToEndTest, the method testAllServersSendingOutSlopsCorrectly:

@Test(timeout = 60000)
public void testAllServersSendingOutSlopsCorrectly() throws InterruptedException {
    final Serializer<ByteArray> slopKeySerializer = new ByteArraySerializer();
    final Serializer<Slop> slopValueSerializer = new SlopSerializer();
    final SlopSerializer slopSerializer = new SlopSerializer();
    StoreDefinition storeDef = storeDefs.get(0);
    TestSocketStoreFactory ssf = new TestSocketStoreFactory();
    Map<Integer, SocketStore> slopStoresCreatedBeforeShrink = new HashMap<Integer, SocketStore>();
    Map<Integer, SocketStore> slopStoresCreatedAfterShrink = new HashMap<Integer, SocketStore>();
    // generate keys destined for each server, hosted as slops on every other
    // server (2*N*(N-1) keys)
    // Map<Integer slopFinalDestinationNodeId, List<Pair<ByteArray key,
    // Integer hostNodeId>>>
    Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys = new HashMap<Integer, List<Pair<ByteArray, Integer>>>();
    for (Node slopFinalDestinationNode : cluster.getNodes()) {
        serverKeys.put(slopFinalDestinationNode.getId(), new ArrayList<Pair<ByteArray, Integer>>());
    }
    // make socket stores to all servers before shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedBeforeShrink.put(nodeId, slopStore);
    }
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedBeforeShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    // update metadata
    executeShrinkZone();
    logger.info("-------------------------------");
    logger.info("    CONNECTING SLOP STORES     ");
    logger.info("-------------------------------");
    // make socket stores to all servers after shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedAfterShrink.put(nodeId, slopStore);
    }
    logger.info("-------------------------------");
    logger.info("     CONNECTED SLOP STORES     ");
    logger.info("-------------------------------");
    logger.info("-------------------------------");
    logger.info("         SENDING SLOPS         ");
    logger.info("-------------------------------");
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedAfterShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    logger.info("-------------------------------");
    logger.info("           SENT SLOPS          ");
    logger.info("-------------------------------");
    ServerTestUtils.waitForSlopDrain(vservers, 30000L);
    // verify that every slop was handled properly (either arrived or was dropped)
    boolean hasError = false;
    int goodCount = 0;
    int errorCount = 0;
    for (Integer nodeId : serverKeys.keySet()) {
        VoldemortServer vs = vservers.get(nodeId);
        Store<ByteArray, byte[], byte[]> store = vs.getStoreRepository().getStorageEngine(storeDef.getName());
        List<Pair<ByteArray, Integer>> keySet = serverKeys.get(nodeId);
        for (Pair<ByteArray, Integer> keyHostIdPair : keySet) {
            ByteArray key = keyHostIdPair.getFirst();
            Integer hostId = keyHostIdPair.getSecond();
            Integer nodeZoneId = cluster.getNodeById(nodeId).getZoneId();
            List<Versioned<byte[]>> result = store.get(key, null);
            if (cluster.getNodeById(nodeId).getZoneId() == droppingZoneId) {
                if (!result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should be gone but exists\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            } else {
                if (result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should exist but not\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            }
        }
    }
    logger.info(String.format("Good keys count: %d; Error keys count: %d", goodCount, errorCount));
    Assert.assertFalse("Error Occurred BAD:" + errorCount + "; GOOD: " + goodCount + ". Check log.", hasError);
}
Also used : Versioned(voldemort.versioning.Versioned) HashMap(java.util.HashMap) Node(voldemort.cluster.Node) VoldemortServer(voldemort.server.VoldemortServer) ByteArraySerializer(voldemort.serialization.ByteArraySerializer) TestSocketStoreFactory(voldemort.store.socket.TestSocketStoreFactory) StoreDefinition(voldemort.store.StoreDefinition) ByteArray(voldemort.utils.ByteArray) ArrayList(java.util.ArrayList) List(java.util.List) IdentitySerializer(voldemort.serialization.IdentitySerializer) Pair(voldemort.utils.Pair) VectorClock(voldemort.versioning.VectorClock) SocketStore(voldemort.store.socket.SocketStore) Date(java.util.Date) SlopSerializer(voldemort.serialization.SlopSerializer) Slop(voldemort.store.slop.Slop) Test(org.junit.Test)
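
The pattern above of registering a slop on a hosting node can be reduced to a few lines. A minimal sketch, using only the classes shown in this example; the store name and node id are illustrative, not taken from the test:

// Build a slop destined for node 2 and wrap it in a Versioned with an empty
// VectorClock, exactly as the test does for its freshly generated keys.
Slop slop = new Slop("test-store", Slop.Operation.PUT,
                     "key".getBytes(), "value".getBytes(),
                     2, // final destination node id (illustrative)
                     new Date());
Versioned<byte[]> versionedSlop =
        new Versioned<byte[]>(new SlopSerializer().toBytes(slop), new VectorClock());
// slop.makeKey() produces the key used for the slop-store put, as in the loop above:
// slopStore.put(slop.makeKey(), versionedSlop, null);
ByteArray slopKey = slop.makeKey();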

Example 27 with VectorClock

use of voldemort.versioning.VectorClock in project voldemort by voldemort.

From the class ZonedRebalanceNonContiguousZonesTest, the method testProxyPutDuringRebalancing:

@Test(timeout = 600000)
public void testProxyPutDuringRebalancing() throws Exception {
    logger.info("Starting testProxyPutDuringRebalancing");
    try {
        int[] zoneIds = new int[] { 1, 3 };
        int[][] nodesPerZone = new int[][] { { 3, 4, 5 }, { 9, 10, 11 } };
        int[][] partitionMap = new int[][] { { 0 }, { 1, 6 }, { 2 }, { 3 }, { 4, 7 }, { 5 } };
        Cluster currentCluster = ServerTestUtils.getLocalNonContiguousZonedCluster(zoneIds, nodesPerZone, partitionMap, ClusterTestUtils.getClusterPorts());
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 5, Lists.newArrayList(7));
        finalCluster = UpdateClusterUtils.createUpdatedCluster(finalCluster, 11, Lists.newArrayList(6));
        /**
             * Original partition map
             *
             * [s3 : p0] [s4 : p1, p6] [s5 : p2]
             *
             * [s9 : p3] [s10 : p4, p7] [s11 : p5]
             *
             * final server partition ownership
             *
             * [s3 : p0] [s4 : p1] [s5 : p2, p7]
             *
             * [s9 : p3] [s10 : p4] [s11 : p5, p6]
             *
             * Note that rwStoreDefFileWithReplication is a "2/1/1" store def.
             *
             * Original server n-ary partition ownership
             *
             * [s3 : p0, p3-7] [s4 : p0-p7] [s5 : p1-2]
             *
             * [s9 : p0-3, p6-7] [s10 : p0-p7] [s11 : p4-5]
             *
             * final server n-ary partition ownership
             *
             * [s3 : p0, p2-7] [s4 : p0-1] [s5 : p1-p7]
             *
             * [s9 : p0-3, p5-7] [s10 : p0-4, p7] [s11 : p4-6]
             */
        List<Integer> serverList = Arrays.asList(3, 4, 5, 9, 10, 11);
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("admin.max.threads", "5");
        final Cluster updatedCurrentCluster = startServers(currentCluster, rwStoreDefFileWithReplication, serverList, configProps);
        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
        final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());
        // It is imperative that we test in a single shot: multiple batches would mean the proxy
        // bridges being torn down and re-established multiple times, and we could no longer test
        // against the source cluster topology. getRebalanceKit uses an infinite batch size, so this should be fine.
        String bootstrapUrl = getBootstrapUrl(updatedCurrentCluster, 3);
        int maxParallel = 2;
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, maxParallel, finalCluster);
        populateData(currentCluster, rwStoreDefWithReplication);
        final AdminClient adminClient = rebalanceKit.controller.getAdminClient();
        // the plan would cause these partitions to move:
        // Partition : Donor -> stealer
        //
        // p2 (Z-SEC) : s4 -> s3
        // p3-6 (Z-PRI) : s4 -> s5
        // p7 (Z-PRI) : s3 -> s5
        //
        // p5 (Z-SEC): s10 -> s9
        // p6 (Z-PRI): s10 -> s11
        //
        // Rebalancing will run on servers 3, 5, 9, & 11
        final List<ByteArray> movingKeysList = sampleKeysFromPartition(adminClient, 4, rwStoreDefWithReplication.getName(), Arrays.asList(6), 20);
        assertTrue("Empty list of moving keys...", movingKeysList.size() > 0);
        final AtomicBoolean rebalancingStarted = new AtomicBoolean(false);
        final AtomicBoolean proxyWritesDone = new AtomicBoolean(false);
        final HashMap<String, String> baselineTuples = new HashMap<String, String>(testEntries);
        final HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
        for (String key : baselineTuples.keySet()) {
            baselineVersions.put(key, new VectorClock());
        }
        final CountDownLatch latch = new CountDownLatch(2);
        // start get operation.
        executors.execute(new Runnable() {

            @Override
            public void run() {
                SocketStoreClientFactory factory = null;
                try {
                    // wait for the rebalancing to begin
                    List<VoldemortServer> serverList = Lists.newArrayList(serverMap.get(3), serverMap.get(5), serverMap.get(9), serverMap.get(11));
                    while (!rebalancingComplete.get()) {
                        Iterator<VoldemortServer> serverIterator = serverList.iterator();
                        while (serverIterator.hasNext()) {
                            VoldemortServer server = serverIterator.next();
                            if (ByteUtils.getString(server.getMetadataStore().get(MetadataStore.SERVER_STATE_KEY, null).get(0).getValue(), "UTF-8").compareTo(VoldemortState.REBALANCING_MASTER_SERVER.toString()) == 0) {
                                logger.info("Server " + server.getIdentityNode().getId() + " transitioned into REBALANCING MODE");
                                serverIterator.remove();
                            }
                        }
                        if (serverList.size() == 0) {
                            rebalancingStarted.set(true);
                            break;
                        }
                    }
                    if (rebalancingStarted.get()) {
                        factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(updatedCurrentCluster, 3)).setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS).setClientZoneId(3));
                        final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(testStoreNameRW, null, factory, 3);
                        // Initially, all data carries a zero vector clock
                        for (ByteArray movingKey : movingKeysList) {
                            try {
                                String keyStr = ByteUtils.getString(movingKey.get(), "UTF-8");
                                String valStr = "proxy_write";
                                storeClientRW.put(keyStr, valStr);
                                baselineTuples.put(keyStr, valStr);
                                baselineVersions.get(keyStr).incrementVersion(11, System.currentTimeMillis());
                                proxyWritesDone.set(true);
                                if (rebalancingComplete.get()) {
                                    break;
                                }
                            } catch (InvalidMetadataException e) {
                                logger.error("Encountered an invalid metadata exception.. ", e);
                            }
                        }
                    }
                } catch (Exception e) {
                    logger.error("Exception in proxy write thread..", e);
                    exceptions.add(e);
                } finally {
                    if (factory != null)
                        factory.close();
                    latch.countDown();
                }
            }
        });
        executors.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    rebalanceKit.rebalance();
                } catch (Exception e) {
                    logger.error("Error in rebalancing... ", e);
                    exceptions.add(e);
                } finally {
                    rebalancingComplete.set(true);
                    latch.countDown();
                }
            }
        });
        latch.await();
        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);
        assertEquals("Client did not see all server transition into rebalancing state", rebalancingStarted.get(), true);
        assertEquals("Not enough time to begin proxy writing", proxyWritesDone.get(), true);
        checkEntriesPostRebalance(updatedCurrentCluster, finalCluster, Lists.newArrayList(rwStoreDefWithReplication), Arrays.asList(3, 4, 5, 9, 10, 11), baselineTuples, baselineVersions);
        checkConsistentMetadata(finalCluster, serverList);
        // check No Exception
        if (exceptions.size() > 0) {
            for (Exception e : exceptions) {
                e.printStackTrace();
            }
            fail("Should not see any exceptions.");
        }
        // check that the proxy writes were made to the original donor, node 4
        List<ClockEntry> clockEntries = new ArrayList<ClockEntry>(serverList.size());
        for (Integer nodeid : serverList) clockEntries.add(new ClockEntry(nodeid.shortValue(), System.currentTimeMillis()));
        VectorClock clusterXmlClock = new VectorClock(clockEntries, System.currentTimeMillis());
        for (Integer nodeid : serverList) adminClient.metadataMgmtOps.updateRemoteCluster(nodeid, currentCluster, clusterXmlClock);
        adminClient.setAdminClientCluster(currentCluster);
        checkForTupleEquivalence(adminClient, 4, testStoreNameRW, movingKeysList, baselineTuples, baselineVersions);
        // stop servers
        try {
            stopServer(serverList);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testProxyPutDuringRebalancing ", ae);
        throw ae;
    }
}
Also used : DefaultStoreClient(voldemort.client.DefaultStoreClient) StoreClient(voldemort.client.StoreClient) HashMap(java.util.HashMap) InvalidMetadataException(voldemort.store.InvalidMetadataException) ArrayList(java.util.ArrayList) VoldemortServer(voldemort.server.VoldemortServer) SocketStoreClientFactory(voldemort.client.SocketStoreClientFactory) Iterator(java.util.Iterator) ByteArray(voldemort.utils.ByteArray) List(java.util.List) ArrayList(java.util.ArrayList) ClientConfig(voldemort.client.ClientConfig) VectorClock(voldemort.versioning.VectorClock) Cluster(voldemort.cluster.Cluster) CountDownLatch(java.util.concurrent.CountDownLatch) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) IOException(java.io.IOException) InvalidMetadataException(voldemort.store.InvalidMetadataException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ClusterTestUtils(voldemort.ClusterTestUtils) ExecutorService(java.util.concurrent.ExecutorService) ClockEntry(voldemort.versioning.ClockEntry) AdminClient(voldemort.client.protocol.admin.AdminClient) Test(org.junit.Test)
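
The metadata rollback near the end of this test builds a VectorClock explicitly from ClockEntry objects. A minimal sketch of that construction, mirroring the test; the node ids are illustrative, entries are added in ascending node-id order as in the test, and the current time is used as each entry's version, presumably so the clock supersedes whatever the servers already hold for cluster.xml:

List<Integer> nodeIds = Arrays.asList(3, 4, 5, 9, 10, 11); // illustrative
List<ClockEntry> entries = new ArrayList<ClockEntry>(nodeIds.size());
long now = System.currentTimeMillis();
for (Integer nodeId : nodeIds) {
    // ClockEntry pairs a node id with that node's version counter.
    entries.add(new ClockEntry(nodeId.shortValue(), now));
}
// An entry for every node, each set to the current time, is intended to supersede
// the clock each server already holds, so updateRemoteCluster accepts the rollback.
VectorClock clusterXmlClock = new VectorClock(entries, now);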

Example 28 with VectorClock

use of voldemort.versioning.VectorClock in project voldemort by voldemort.

From the class CoordinatorRestAPITest, the method doPut:

private VectorClock doPut(String key, String payload, VectorClock vc, Map<String, Object> options) {
    VectorClock successfulPutVC = null;
    int expectedResponseCode = 201;
    try {
        // Create the right URL and Http connection
        HttpURLConnection conn = null;
        String base64Key = new String(Base64.encodeBase64(key.getBytes()));
        URL url = new URL(this.coordinatorURL + "/" + STORE_NAME + "/" + base64Key);
        conn = (HttpURLConnection) url.openConnection();
        // Set the right headers
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setDoInput(true);
        conn.setRequestProperty("Content-Type", "binary");
        conn.setRequestProperty("Content-Length", "" + payload.length());
        conn.setRequestProperty(RestMessageHeaders.X_VOLD_REQUEST_TIMEOUT_MS, "1000");
        conn.setRequestProperty(RestMessageHeaders.X_VOLD_REQUEST_ORIGIN_TIME_MS, Long.toString(System.currentTimeMillis()));
        // options
        if (options != null) {
            if (options.get("timeout") != null && options.get("timeout") instanceof String) {
                conn.setRequestProperty(RestMessageHeaders.X_VOLD_REQUEST_TIMEOUT_MS, (String) options.get("timeout"));
            }
            if (options.get("responseCode") != null && options.get("responseCode") instanceof Integer) {
                expectedResponseCode = (Integer) options.get("responseCode");
            }
        }
        if (vc != null) {
            String eTag = RestUtils.getSerializedVectorClock(vc);
            conn.setRequestProperty("ETag", eTag);
        }
        // Write the payload
        OutputStream out = conn.getOutputStream();
        out.write(payload.getBytes());
        out.close();
        if (conn.getResponseCode() != expectedResponseCode) {
            System.err.println("Illegal response during PUT : " + conn.getResponseMessage());
            fail("Incorrect response received for a HTTP put request :" + conn.getResponseCode());
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error in sending the REST request");
    }
    return successfulPutVC;
}
Also used : HttpURLConnection(java.net.HttpURLConnection) VectorClock(voldemort.versioning.VectorClock) OutputStream(java.io.OutputStream) URL(java.net.URL)
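
doPut carries the client's view of the version in the ETag request header. A small round-trip sketch using only the RestUtils calls visible in this and the next example; the clock entry is illustrative, and deserializeVectorClock is assumed here to be the inverse of getSerializedVectorClock:

VectorClock clock = new VectorClock();
clock.incrementVersion(1, System.currentTimeMillis()); // illustrative node id
// doPut above sends the clock to the coordinator in this serialized form:
String eTag = RestUtils.getSerializedVectorClock(clock);
// doGet (next example) recovers a clock from the X_VOLD_VECTOR_CLOCK response
// header with the corresponding parse call:
VectorClock parsed = RestUtils.deserializeVectorClock(eTag);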

Example 29 with VectorClock

use of voldemort.versioning.VectorClock in project voldemort by voldemort.

From the class CoordinatorRestAPITest, the method doGet:

private TestVersionedValue doGet(String key, Map<String, Object> options) {
    HttpURLConnection conn = null;
    String response = null;
    TestVersionedValue responseObj = null;
    int expectedResponseCode = 200;
    try {
        // Create the right URL and Http connection
        String base64Key = new String(Base64.encodeBase64(key.getBytes()));
        URL url = new URL(this.coordinatorURL + "/" + STORE_NAME + "/" + base64Key);
        conn = (HttpURLConnection) url.openConnection();
        // Set the right headers
        conn.setRequestMethod("GET");
        conn.setDoInput(true);
        conn.setRequestProperty(RestMessageHeaders.X_VOLD_REQUEST_TIMEOUT_MS, "1000");
        conn.setRequestProperty(RestMessageHeaders.X_VOLD_REQUEST_ORIGIN_TIME_MS, Long.toString(System.currentTimeMillis()));
        // options
        if (options != null) {
            if (options.get("timeout") != null && options.get("timeout") instanceof String) {
                conn.setRequestProperty(RestMessageHeaders.X_VOLD_REQUEST_TIMEOUT_MS, (String) options.get("timeout"));
            }
            if (options.get("responseCode") != null && options.get("responseCode") instanceof Integer) {
                expectedResponseCode = (Integer) options.get("responseCode");
            }
        }
        // Check for the right response code
        if (conn.getResponseCode() != expectedResponseCode) {
            System.err.println("Illegal response during GET : " + conn.getResponseMessage());
            fail("Incorrect response received for a HTTP GET request :" + conn.getResponseCode());
        }
        if (conn.getResponseCode() == 404 || conn.getResponseCode() == 408) {
            return null;
        }
        // Buffer the result into a string
        ByteArrayDataSource ds = new ByteArrayDataSource(conn.getInputStream(), "multipart/mixed");
        MimeMultipart mp = new MimeMultipart(ds);
        assertEquals("The number of body parts expected is not 1", 1, mp.getCount());
        MimeBodyPart part = (MimeBodyPart) mp.getBodyPart(0);
        VectorClock vc = RestUtils.deserializeVectorClock(part.getHeader(RestMessageHeaders.X_VOLD_VECTOR_CLOCK)[0]);
        int contentLength = Integer.parseInt(part.getHeader(RestMessageHeaders.CONTENT_LENGTH)[0]);
        byte[] bodyPartBytes = new byte[contentLength];
        part.getInputStream().read(bodyPartBytes);
        response = new String(bodyPartBytes);
        responseObj = new TestVersionedValue(response, vc);
    } catch (Exception e) {
        e.printStackTrace();
        fail("Error in sending the REST request");
    } finally {
        if (conn != null) {
            conn.disconnect();
        }
    }
    return responseObj;
}
Also used : HttpURLConnection(java.net.HttpURLConnection) MimeMultipart(javax.mail.internet.MimeMultipart) VectorClock(voldemort.versioning.VectorClock) MimeBodyPart(javax.mail.internet.MimeBodyPart) ByteArrayDataSource(javax.mail.util.ByteArrayDataSource) URL(java.net.URL)
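
Together with doPut from the previous example, this helper supports a simple read-modify-write against the coordinator. A hypothetical usage sketch: the key and value are made up, the null options fall back to the helpers' defaults, and the getVersion() accessor on TestVersionedValue is assumed, since that class is not shown in this listing:

// Read the current value and its vector clock, then write back with that clock
// so the coordinator can order the update against the version we observed.
TestVersionedValue current = doGet("some-key", null);
VectorClock observedClock = (current == null) ? null : current.getVersion(); // accessor name assumed
doPut("some-key", "updated-value", observedClock, null);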

Example 30 with VectorClock

use of voldemort.versioning.VectorClock in project voldemort by voldemort.

From the class AbstractRequestFormatTest, the method testDeleteRequests:

@Test
public void testDeleteRequests() throws Exception {
    // test that pre-existing keys are deleted
    testDeleteRequest(new ByteArray(), new VectorClock(), new Versioned<byte[]>("hello".getBytes()), true);
    testDeleteRequest(TestUtils.toByteArray("hello"), new VectorClock(), new Versioned<byte[]>("world".getBytes()), true);
    // test that non-existent keys aren't deleted
    testDeleteRequest(TestUtils.toByteArray("hello"), new VectorClock(), null, false);
}
Also used : VectorClock(voldemort.versioning.VectorClock) ByteArray(voldemort.utils.ByteArray) Test(org.junit.Test)
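
These delete tests hinge on how two VectorClocks compare. A minimal sketch of that relationship, assuming the incremented() copy-and-advance method and the Occurred enum from voldemort.versioning:

VectorClock original = new VectorClock();
// incremented() returns a copy with the given node's counter advanced by one.
VectorClock later = original.incremented(1, System.currentTimeMillis());
// The empty clock happened strictly before its incremented copy, and vice versa,
// which is what lets a delete issued at "later" cover a value written at "original".
assert original.compare(later) == Occurred.BEFORE;
assert later.compare(original) == Occurred.AFTER;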

Aggregations

VectorClock (voldemort.versioning.VectorClock): 162
Versioned (voldemort.versioning.Versioned): 73
Test (org.junit.Test): 65
ByteArray (voldemort.utils.ByteArray): 65
ArrayList (java.util.ArrayList): 33
IOException (java.io.IOException): 25
VoldemortException (voldemort.VoldemortException): 24
List (java.util.List): 22
HashMap (java.util.HashMap): 21
ObsoleteVersionException (voldemort.versioning.ObsoleteVersionException): 21
Version (voldemort.versioning.Version): 16
Node (voldemort.cluster.Node): 15
AbstractByteArrayStoreTest (voldemort.store.AbstractByteArrayStoreTest): 11
StoreDefinition (voldemort.store.StoreDefinition): 11
AdminClient (voldemort.client.protocol.admin.AdminClient): 10
VoldemortServer (voldemort.server.VoldemortServer): 10
Pair (voldemort.utils.Pair): 10
ClockEntry (voldemort.versioning.ClockEntry): 10
File (java.io.File): 8
Map (java.util.Map): 8