
Example 6 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

the class CatBdbStore method main.

public static void main(String[] args) throws Exception {
    if (args.length != 3)
        Utils.croak("USAGE: java " + CatBdbStore.class.getName() + " bdb_dir" + " storeName" + " server.properties.path");
    String bdbDir = args[0];
    String storeName = args[1];
    String serverProperties = args[2];
    VoldemortConfig config = new VoldemortConfig(new Props(new File(serverProperties)));
    EnvironmentConfig environmentConfig = new EnvironmentConfig();
    environmentConfig.setDurability(Durability.COMMIT_NO_SYNC);
    environmentConfig.setAllowCreate(true);
    environmentConfig.setTransactional(config.isBdbWriteTransactionsEnabled());
    Environment environment = new Environment(new File(bdbDir), environmentConfig);
    DatabaseConfig databaseConfig = new DatabaseConfig();
    databaseConfig.setAllowCreate(true);
    databaseConfig.setTransactional(config.isBdbWriteTransactionsEnabled());
    databaseConfig.setSortedDuplicates(false);
    Database database = environment.openDatabase(null, storeName, databaseConfig);
    StorageEngine<ByteArray, byte[], byte[]> store = null;
    if (config.getBdbPrefixKeysWithPartitionId()) {
        store = new PartitionPrefixedBdbStorageEngine(storeName, environment, database, new BdbRuntimeConfig(), TestUtils.makeSingleNodeRoutingStrategy());
    } else {
        store = new BdbStorageEngine(storeName, environment, database, new BdbRuntimeConfig());
    }
    StorageEngine<String, String, String> stringStore = SerializingStorageEngine.wrap(store, new StringSerializer(), new StringSerializer(), new StringSerializer());
    Iterator<Pair<String, Versioned<String>>> iter = stringStore.entries();
    while (iter.hasNext()) {
        Pair<String, Versioned<String>> entry = iter.next();
        System.out.println(entry.getFirst() + " => " + entry.getSecond().getValue());
    }
}
Also used: BdbRuntimeConfig (voldemort.store.bdb.BdbRuntimeConfig), Versioned (voldemort.versioning.Versioned), EnvironmentConfig (com.sleepycat.je.EnvironmentConfig), Props (voldemort.utils.Props), VoldemortConfig (voldemort.server.VoldemortConfig), PartitionPrefixedBdbStorageEngine (voldemort.store.bdb.PartitionPrefixedBdbStorageEngine), BdbStorageEngine (voldemort.store.bdb.BdbStorageEngine), Database (com.sleepycat.je.Database), Environment (com.sleepycat.je.Environment), ByteArray (voldemort.utils.ByteArray), File (java.io.File), StringSerializer (voldemort.serialization.StringSerializer), DatabaseConfig (com.sleepycat.je.DatabaseConfig), Pair (voldemort.utils.Pair)
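
The pattern that matters here is how Pair is consumed: entries() hands back Pair<K, Versioned<V>> objects and the loop unpacks them with getFirst()/getSecond(). Below is a minimal, self-contained sketch of that access pattern, using only JDK types plus the Pair constructor and accessors already visible above (the class name PairIterationSketch is made up for illustration):

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

import voldemort.utils.Pair;

public class PairIterationSketch {

    public static void main(String[] args) {
        // Build a couple of pairs with the same constructor used in the examples above.
        List<Pair<String, String>> entries = Arrays.asList(new Pair<String, String>("key-1", "value-1"),
                                                           new Pair<String, String>("key-2", "value-2"));
        // Iterate and unpack exactly like CatBdbStore does with its entries() iterator.
        Iterator<Pair<String, String>> iter = entries.iterator();
        while (iter.hasNext()) {
            Pair<String, String> entry = iter.next();
            System.out.println(entry.getFirst() + " => " + entry.getSecond());
        }
    }
}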

Example 7 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

the class StorageService method calculateStats.

private DataSetStats calculateStats(StorageEngine<ByteArray, byte[], byte[]> store) {
    DataSetStats stats = new DataSetStats();
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iter = store.entries();
    try {
        int count = 0;
        while (iter.hasNext()) {
            Pair<ByteArray, Versioned<byte[]>> pair = iter.next();
            VectorClock clock = (VectorClock) pair.getSecond().getVersion();
            stats.countEntry(pair.getFirst().length(), pair.getSecond().getValue().length + clock.sizeInBytes());
            if (count % 10000 == 0)
                logger.debug("Processing key " + count);
            count++;
        }
    } finally {
        iter.close();
    }
    return stats;
}
Also used: Versioned (voldemort.versioning.Versioned), VectorClock (voldemort.versioning.VectorClock), ByteArray (voldemort.utils.ByteArray), DataSetStats (voldemort.store.stats.DataSetStats), Pair (voldemort.utils.Pair)
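
calculateStats() charges each entry with its key length plus the value length plus the serialized vector clock, and always closes the iterator in a finally block. Here is a hypothetical helper (not part of Voldemort) performing the same per-entry accounting, assuming ClosableIterator lives in voldemort.utils as the snippets above suggest:

import voldemort.utils.ByteArray;
import voldemort.utils.ClosableIterator;
import voldemort.utils.Pair;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

public final class EntrySizeSketch {

    /** Sums key bytes + value bytes + vector clock bytes over all entries, then closes the iterator. */
    public static long totalBytes(ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iter) {
        long total = 0L;
        try {
            while (iter.hasNext()) {
                Pair<ByteArray, Versioned<byte[]>> pair = iter.next();
                VectorClock clock = (VectorClock) pair.getSecond().getVersion();
                total += pair.getFirst().length() + pair.getSecond().getValue().length + clock.sizeInBytes();
            }
        } finally {
            // Mirror calculateStats(): the iterator must be closed even if iteration fails.
            iter.close();
        }
        return total;
    }
}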

Example 8 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

the class BlockingSlopPusherJob method run.

/**
     * Loop over entries in the slop table and attempt to push them to the
     * deserving server
     */
public void run() {
    // don't try to run slop pusher job when rebalancing
    if (metadataStore.getServerStateUnlocked().equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
        logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
        return;
    }
    logger.info("Started blocking slop pusher job at " + new Date());
    Cluster cluster = metadataStore.getCluster();
    failureDetector.getConfig().setCluster(cluster);
    Set<String> storeNames = StoreDefinitionUtils.getStoreNamesSet(metadataStore.getStoreDefList());
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;
    Map<Integer, Long> attemptedByNode = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
    Map<Integer, Long> succeededByNode = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
    long slopsPushed = 0L;
    long attemptedPushes = 0L;
    for (Node node : cluster.getNodes()) {
        attemptedByNode.put(node.getId(), 0L);
        succeededByNode.put(node.getId(), 0L);
    }
    acquireRepairPermit();
    try {
        SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
        StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
        EventThrottler throttler = new EventThrottler(maxWriteBytesPerSec);
        iterator = slopStore.entries();
        while (iterator.hasNext()) {
            if (Thread.interrupted())
                throw new InterruptedException("Slop pusher job cancelled");
            try {
                Pair<ByteArray, Versioned<Slop>> keyAndVal;
                try {
                    keyAndVal = iterator.next();
                } catch (Exception e) {
                    logger.error("Exception in iterator, escaping the loop ", e);
                    break;
                }
                Versioned<Slop> versioned = keyAndVal.getSecond();
                Slop slop = versioned.getValue();
                int nodeId = slop.getNodeId();
                // check for dead slops
                if (isSlopDead(cluster, storeNames, versioned.getValue())) {
                    handleDeadSlop(slopStorageEngine, keyAndVal);
                    // Dead slop has been handled; move on to the next slop.
                    continue;
                }
                Node node = cluster.getNodeById(nodeId);
                attemptedPushes++;
                if (attemptedPushes % 10000 == 0) {
                    logger.info("Attempted pushing " + attemptedPushes + " slops");
                }
                Long attempted = attemptedByNode.get(nodeId);
                attemptedByNode.put(nodeId, attempted + 1L);
                if (failureDetector.isAvailable(node)) {
                    Store<ByteArray, byte[], byte[]> store = storeRepo.getNodeStore(slop.getStoreName(), node.getId());
                    long startNs = System.nanoTime();
                    int nBytes = 0;
                    try {
                        nBytes = slop.getKey().length();
                        if (slop.getOperation() == Operation.PUT) {
                            store.put(slop.getKey(), new Versioned<byte[]>(slop.getValue(), versioned.getVersion()), slop.getTransforms());
                            nBytes += slop.getValue().length + ((VectorClock) versioned.getVersion()).sizeInBytes() + 1;
                        } else if (slop.getOperation() == Operation.DELETE) {
                            nBytes += ((VectorClock) versioned.getVersion()).sizeInBytes() + 1;
                            store.delete(slop.getKey(), versioned.getVersion());
                        } else {
                            logger.error("Unknown slop operation: " + slop.getOperation());
                            continue;
                        }
                        failureDetector.recordSuccess(node, deltaMs(startNs));
                        slopStore.delete(slop.makeKey(), versioned.getVersion());
                        slopsPushed++;
                        // Increment succeeded
                        Long succeeded = succeededByNode.get(nodeId);
                        succeededByNode.put(nodeId, succeeded + 1L);
                        // Throttle the bytes...
                        throttler.maybeThrottle(nBytes);
                    } catch (ObsoleteVersionException e) {
                        // okay it is old, just delete it
                        slopStore.delete(slop.makeKey(), versioned.getVersion());
                        slopsPushed++;
                        // Increment succeeded
                        Long succeeded = succeededByNode.get(nodeId);
                        succeededByNode.put(nodeId, succeeded + 1L);
                        // Throttle the bytes...
                        throttler.maybeThrottle(nBytes);
                    } catch (UnreachableStoreException e) {
                        failureDetector.recordException(node, deltaMs(startNs), e);
                    }
                }
            } catch (Exception e) {
                logger.error(e, e);
            }
        }
        // Only if we reached here do we update stats
        logger.log(attemptedPushes > 0 ? Level.INFO : Level.DEBUG, "Attempted " + attemptedPushes + " hinted handoff pushes of which " + slopsPushed + " succeeded.");
        Map<Integer, Long> outstanding = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
        for (int nodeId : succeededByNode.keySet()) {
            outstanding.put(nodeId, attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
        }
        slopStorageEngine.resetStats(outstanding);
    } catch (Exception e) {
        logger.error(e, e);
    } finally {
        try {
            if (iterator != null)
                iterator.close();
        } catch (Exception e) {
            logger.error("Failed to close iterator.", e);
        }
        this.repairPermits.release(this.getClass().getCanonicalName());
    }
}
Also used: Versioned (voldemort.versioning.Versioned), Node (voldemort.cluster.Node), ByteArray (voldemort.utils.ByteArray), SlopStorageEngine (voldemort.store.slop.SlopStorageEngine), UnreachableStoreException (voldemort.store.UnreachableStoreException), Pair (voldemort.utils.Pair), EventThrottler (voldemort.utils.EventThrottler), Cluster (voldemort.cluster.Cluster), Date (java.util.Date), ObsoleteVersionException (voldemort.versioning.ObsoleteVersionException), Slop (voldemort.store.slop.Slop)
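
Most of the bookkeeping in run() is the attempted/succeeded tally per node id, which ends up in resetStats() as attempted minus succeeded. The following stripped-down, hypothetical counter (JDK-only, not Voldemort code) isolates just that arithmetic:

import java.util.HashMap;
import java.util.Map;

public final class SlopPushCounters {

    private final Map<Integer, Long> attemptedByNode = new HashMap<Integer, Long>();
    private final Map<Integer, Long> succeededByNode = new HashMap<Integer, Long>();

    public void recordAttempt(int nodeId) {
        attemptedByNode.merge(nodeId, 1L, Long::sum);
    }

    public void recordSuccess(int nodeId) {
        succeededByNode.merge(nodeId, 1L, Long::sum);
    }

    /** outstanding = attempted - succeeded per node, the shape of the map handed to resetStats() above. */
    public Map<Integer, Long> outstanding() {
        Map<Integer, Long> out = new HashMap<Integer, Long>();
        for (Map.Entry<Integer, Long> entry : attemptedByNode.entrySet()) {
            long succeeded = succeededByNode.getOrDefault(entry.getKey(), 0L);
            out.put(entry.getKey(), entry.getValue() - succeeded);
        }
        return out;
    }
}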

Example 9 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

the class AbstractRebalanceTest method checkEntriesPostRebalance.

/**
     * Makes sure that all expected partition-stores are on each server after
     * the rebalance.
     * 
     * @param currentCluster
     * @param finalCluster
     * @param storeDefs
     * @param nodeCheckList
     * @param baselineTuples
     * @param baselineVersions
     */
protected void checkEntriesPostRebalance(Cluster currentCluster, Cluster finalCluster, List<StoreDefinition> storeDefs, List<Integer> nodeCheckList, HashMap<String, String> baselineTuples, HashMap<String, VectorClock> baselineVersions) {
    for (StoreDefinition storeDef : storeDefs) {
        Map<Integer, Set<Pair<Integer, Integer>>> currentNodeToPartitionTuples = ROTestUtils.getNodeIdToAllPartitions(currentCluster, storeDef, true);
        Map<Integer, Set<Pair<Integer, Integer>>> finalNodeToPartitionTuples = ROTestUtils.getNodeIdToAllPartitions(finalCluster, storeDef, true);
        for (int nodeId : nodeCheckList) {
            Set<Pair<Integer, Integer>> currentPartitionTuples = currentNodeToPartitionTuples.get(nodeId);
            Set<Pair<Integer, Integer>> finalPartitionTuples = finalNodeToPartitionTuples.get(nodeId);
            HashMap<Integer, List<Integer>> flattenedPresentTuples = ROTestUtils.flattenPartitionTuples(Utils.getAddedInTarget(currentPartitionTuples, finalPartitionTuples));
            Store<ByteArray, byte[], byte[]> store = getSocketStore(storeDef.getName(), finalCluster.getNodeById(nodeId).getHost(), finalCluster.getNodeById(nodeId).getSocketPort());
            checkGetEntries(finalCluster.getNodeById(nodeId), finalCluster, storeDef, store, flattenedPresentTuples, baselineTuples, baselineVersions);
        }
    }
}
Also used: Set (java.util.Set), StoreDefinition (voldemort.store.StoreDefinition), ByteArray (voldemort.utils.ByteArray), ArrayList (java.util.ArrayList), List (java.util.List), Pair (voldemort.utils.Pair)
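
The interesting step above is Utils.getAddedInTarget(currentPartitionTuples, finalPartitionTuples): the (replica, partition) pairs a node gains in the final layout. A hypothetical restatement of that set difference (not the actual Voldemort Utils implementation), assuming Pair compares by value via equals/hashCode:

import java.util.HashSet;
import java.util.Set;

import voldemort.utils.Pair;

public final class AddedTuplesSketch {

    /** Returns the tuples present in the target layout but absent from the current one. */
    public static Set<Pair<Integer, Integer>> addedInTarget(Set<Pair<Integer, Integer>> current,
                                                            Set<Pair<Integer, Integer>> target) {
        Set<Pair<Integer, Integer>> added = new HashSet<Pair<Integer, Integer>>(target);
        if (current != null) {
            // Set difference relies on Pair comparing by its two components.
            added.removeAll(current);
        }
        return added;
    }
}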

Example 10 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

the class ZoneShrinkageEndToEndTest method testAllServersSendingOutSlopsCorrectly.

@Test(timeout = 60000)
public void testAllServersSendingOutSlopsCorrectly() throws InterruptedException {
    final Serializer<ByteArray> slopKeySerializer = new ByteArraySerializer();
    final Serializer<Slop> slopValueSerializer = new SlopSerializer();
    final SlopSerializer slopSerializer = new SlopSerializer();
    StoreDefinition storeDef = storeDefs.get(0);
    TestSocketStoreFactory ssf = new TestSocketStoreFactory();
    Map<Integer, SocketStore> slopStoresCreatedBeforeShrink = new HashMap<Integer, SocketStore>();
    Map<Integer, SocketStore> slopStoresCreatedAfterShrink = new HashMap<Integer, SocketStore>();
    // For each destination server, generate keys that will be slopped on every
    // other server except itself, and do it twice (2*N*(N-1) keys in total).
    // Map<Integer slopFinalDestinationNodeId, List<Pair<ByteArray key,
    // Integer hostNodeId>>>
    Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys = new HashMap<Integer, List<Pair<ByteArray, Integer>>>();
    for (Node slopFinalDestinationNode : cluster.getNodes()) {
        serverKeys.put(slopFinalDestinationNode.getId(), new ArrayList<Pair<ByteArray, Integer>>());
    }
    // make socket stores to all servers before shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedBeforeShrink.put(nodeId, slopStore);
    }
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedBeforeShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    // update metadata
    executeShrinkZone();
    logger.info("-------------------------------");
    logger.info("    CONNECTING SLOP STORES     ");
    logger.info("-------------------------------");
    // make socket stores to all servers after shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedAfterShrink.put(nodeId, slopStore);
    }
    logger.info("-------------------------------");
    logger.info("     CONNECTED SLOP STORES     ");
    logger.info("-------------------------------");
    logger.info("-------------------------------");
    logger.info("         SENDING SLOPS         ");
    logger.info("-------------------------------");
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedAfterShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    logger.info("-------------------------------");
    logger.info("           SENT SLOPS          ");
    logger.info("-------------------------------");
    ServerTestUtils.waitForSlopDrain(vservers, 30000L);
    // verify that all slops were processed properly (either arrived or dropped)
    boolean hasError = false;
    int goodCount = 0;
    int errorCount = 0;
    for (Integer nodeId : serverKeys.keySet()) {
        VoldemortServer vs = vservers.get(nodeId);
        Store<ByteArray, byte[], byte[]> store = vs.getStoreRepository().getStorageEngine(storeDef.getName());
        List<Pair<ByteArray, Integer>> keySet = serverKeys.get(nodeId);
        for (Pair<ByteArray, Integer> keyHostIdPair : keySet) {
            ByteArray key = keyHostIdPair.getFirst();
            Integer hostId = keyHostIdPair.getSecond();
            Integer nodeZoneId = cluster.getNodeById(nodeId).getZoneId();
            List<Versioned<byte[]>> result = store.get(key, null);
            if (nodeZoneId == droppingZoneId) {
                if (!result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should be gone but exists\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            } else {
                if (result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should exist but not\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            }
        }
    }
    logger.info(String.format("Good keys count: %d; Error keys count: %d", goodCount, errorCount));
    Assert.assertFalse("Errors occurred. BAD: " + errorCount + "; GOOD: " + goodCount + ". Check log.", hasError);
}
Also used: Versioned (voldemort.versioning.Versioned), HashMap (java.util.HashMap), Node (voldemort.cluster.Node), VoldemortServer (voldemort.server.VoldemortServer), ByteArraySerializer (voldemort.serialization.ByteArraySerializer), TestSocketStoreFactory (voldemort.store.socket.TestSocketStoreFactory), StoreDefinition (voldemort.store.StoreDefinition), ByteArray (voldemort.utils.ByteArray), ArrayList (java.util.ArrayList), List (java.util.List), IdentitySerializer (voldemort.serialization.IdentitySerializer), Pair (voldemort.utils.Pair), VectorClock (voldemort.versioning.VectorClock), SocketStore (voldemort.store.socket.SocketStore), Date (java.util.Date), SlopSerializer (voldemort.serialization.SlopSerializer), Slop (voldemort.store.slop.Slop), Test (org.junit.Test)
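
The test keeps a Map<Integer, List<Pair<ByteArray, Integer>>> keyed by the slop's final destination node, with each Pair recording the key and the node it was slopped on. A hypothetical tracker class (illustration only, not part of the test) isolating that bookkeeping:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import voldemort.utils.ByteArray;
import voldemort.utils.Pair;

public final class SlopKeyTracker {

    // destination node id -> (key, host node id) pairs, as in serverKeys above
    private final Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys =
            new HashMap<Integer, List<Pair<ByteArray, Integer>>>();

    public void record(int destinationNodeId, ByteArray key, int slopHostId) {
        List<Pair<ByteArray, Integer>> keys = serverKeys.get(destinationNodeId);
        if (keys == null) {
            keys = new ArrayList<Pair<ByteArray, Integer>>();
            serverKeys.put(destinationNodeId, keys);
        }
        keys.add(new Pair<ByteArray, Integer>(key, slopHostId));
    }

    public List<Pair<ByteArray, Integer>> keysFor(int destinationNodeId) {
        List<Pair<ByteArray, Integer>> keys = serverKeys.get(destinationNodeId);
        return keys != null ? keys : new ArrayList<Pair<ByteArray, Integer>>();
    }
}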

Aggregations

Pair (voldemort.utils.Pair)45 ByteArray (voldemort.utils.ByteArray)28 Versioned (voldemort.versioning.Versioned)25 VoldemortException (voldemort.VoldemortException)15 Node (voldemort.cluster.Node)15 IOException (java.io.IOException)14 StoreDefinition (voldemort.store.StoreDefinition)13 Test (org.junit.Test)11 File (java.io.File)10 VectorClock (voldemort.versioning.VectorClock)10 ArrayList (java.util.ArrayList)8 HashMap (java.util.HashMap)8 RoutingStrategyFactory (voldemort.routing.RoutingStrategyFactory)7 Cluster (voldemort.cluster.Cluster)6 DataOutputStream (java.io.DataOutputStream)5 FileNotFoundException (java.io.FileNotFoundException)5 Map (java.util.Map)5 ExecutionException (java.util.concurrent.ExecutionException)5 VoldemortFilter (voldemort.client.protocol.VoldemortFilter)5 DataInputStream (java.io.DataInputStream)4