Usage of voldemort.store.StoreDefinition in the voldemort project.
Class VerifyOrAddStoreTest, method verifyStoreAddedOnAllNodes.
/**
 * Verifies that the newly added store is visible with the expected
 * definition cluster-wide: first via a single (randomly routed) metadata
 * fetch, then node by node, also checking that the default storage-space
 * quota was applied on every node.
 */
private void verifyStoreAddedOnAllNodes() {
    // Get from a random node, verify
    StoreDefinition retrieved = adminClient.metadataMgmtOps.getStoreDefinition(newStoreName);
    assertNotNull("Created store can't be retrieved", retrieved);
    assertEquals("Store Created and retrieved are different ", newStoreDef, retrieved);
    // Get from one by one verify
    for (Integer nodeId : cluster.getNodeIds()) {
        retrieved = retrieveStoreOnNode(newStoreName, nodeId);
        assertNotNull("Created store can't be retrieved", retrieved);
        assertEquals("Store Created and retrieved are different ", newStoreDef, retrieved);
        Long quota = getQuotaForNode(newStoreName, QuotaType.STORAGE_SPACE, nodeId);
        // Guard before unboxing: a missing quota should surface as a clear
        // assertion failure, not a NullPointerException on longValue().
        assertNotNull("Default quota missing on node " + nodeId, quota);
        assertEquals("Default quota mismatch", defaultQuota, quota.longValue());
    }
}
Usage of voldemort.store.StoreDefinition in the voldemort project.
Class StreamingClientTest, method configs.
@Parameterized.Parameters
public static Collection<Object[]> configs() {
    // Store routed with the plain consistent-hashing strategy.
    StoreDefinition consistentStoreDef = new StoreDefinitionBuilder().setName(TEST_STORE_NAME)
                                                                     .setType(BdbStorageConfiguration.TYPE_NAME)
                                                                     .setKeySerializer(new SerializerDefinition("string"))
                                                                     .setValueSerializer(new SerializerDefinition("string"))
                                                                     .setRoutingPolicy(RoutingTier.CLIENT)
                                                                     .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                                     .setReplicationFactor(2)
                                                                     .setPreferredReads(1)
                                                                     .setRequiredReads(1)
                                                                     .setPreferredWrites(2)
                                                                     .setRequiredWrites(2)
                                                                     .build();
    // Two replicas in each of zones 1 and 3.
    HashMap<Integer, Integer> zoneRepFactor = new HashMap<Integer, Integer>();
    zoneRepFactor.put(1, 2);
    zoneRepFactor.put(3, 2);
    // Store routed with the zone-aware strategy.
    StoreDefinition zonedStoreDef = new StoreDefinitionBuilder().setName(TEST_STORE_NAME)
                                                                .setType(BdbStorageConfiguration.TYPE_NAME)
                                                                .setKeySerializer(new SerializerDefinition("string"))
                                                                .setValueSerializer(new SerializerDefinition("string"))
                                                                .setRoutingPolicy(RoutingTier.CLIENT)
                                                                .setRoutingStrategyType(RoutingStrategyType.ZONE_STRATEGY)
                                                                .setReplicationFactor(4)
                                                                .setPreferredReads(1)
                                                                .setRequiredReads(1)
                                                                .setPreferredWrites(1)
                                                                .setRequiredWrites(1)
                                                                .setZoneCountReads(0)
                                                                .setZoneCountWrites(0)
                                                                .setZoneReplicationFactor(zoneRepFactor)
                                                                .setHintedHandoffStrategy(HintedHandoffStrategyType.PROXIMITY_STRATEGY)
                                                                .build();
    // { numServers, cluster, serverIdToStream, storeDef }
    return Arrays.asList(new Object[][] {
            { 2, ServerTestUtils.getLocalCluster(2, new int[][] { { 0, 1, 2, 3 }, { 4, 5, 6, 7 } }), 0, consistentStoreDef },
            { 2, ServerTestUtils.getLocalNonContiguousNodesCluster(new int[] { 1, 3 }, new int[][] { { 0, 1, 2, 3 }, { 4, 5, 6, 7 } }), 1, consistentStoreDef },
            { 6, ClusterTestUtils.getZ1Z3ClusterWithNonContiguousNodeIds(), 3, zonedStoreDef } });
}
Usage of voldemort.store.StoreDefinition in the voldemort project.
Class DataCleanupJobTest, method updateStoreDef.
/**
 * Rewrites the store definition in the metadata store with the given
 * retention period, bumping the metadata version so the change is seen
 * as newer than the existing definition.
 *
 * @param retentionDays retention period, in days, for the test store
 */
private void updateStoreDef(int retentionDays) {
    StoreDefinition storeDef = getStoreDef(retentionDays);
    StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
    String storeStr = mapper.writeStoreList(Arrays.asList(storeDef));
    // Capture the timestamp once so the clock's creation time and the
    // version increment cannot straddle a millisecond boundary.
    long now = System.currentTimeMillis();
    VectorClock clock = new VectorClock(now);
    clock.incrementVersion(0, now);
    Versioned<byte[]> storeSerialized = new Versioned<byte[]>(ByteUtils.getBytes(storeStr, "UTF-8"), clock);
    metadataStore.updateStoreDefinitions(storeSerialized);
}
Usage of voldemort.store.StoreDefinition in the voldemort project.
Class SlopPusherDeadSlopTest, method testAutoPurge.
/**
 * Verifies that the slop pusher auto-purges slops destined for a dead node
 * or a deleted store, while slops for valid destinations are still
 * delivered (puts arrive, deletes leave no entry).
 *
 * @throws Exception so any unexpected failure surfaces in the test report
 *         with its full stack trace, instead of being swallowed by a
 *         catch-and-fail("unexpected exception") that hides the cause
 */
@Test
public void testAutoPurge() throws Exception {
    // generate slops for a non existent node 2.
    List<Versioned<Slop>> deadNodeSlops = ServerTestUtils.createRandomSlops(2, 40, false, "test");
    // generate slops for a non existent store "deleted_store"
    List<Versioned<Slop>> deadStoreSlops = ServerTestUtils.createRandomSlops(0, 40, false, "deleted_store");
    // generate some valid slops and make sure they go into the
    // destination store
    List<Versioned<Slop>> validStoreSlops = ServerTestUtils.createRandomSlops(1, 40, false, "test");
    List<Versioned<Slop>> slops = new ArrayList<Versioned<Slop>>();
    slops.addAll(deadStoreSlops);
    slops.addAll(deadNodeSlops);
    slops.addAll(validStoreSlops);
    SlopSerializer slopSerializer = new SlopSerializer();
    // Populate the store with the slops
    for (Versioned<Slop> slop : slops) {
        VectorClock clock = TestUtils.getClock(1);
        NodeValue<ByteArray, byte[]> nodeValue = new NodeValue<ByteArray, byte[]>(0, slop.getValue().makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop.getValue()), clock));
        adminClient.storeOps.putNodeKeyValue("slop", nodeValue);
    }
    // wait for twice the slop interval (in case a slop push was
    // underway as we populated)
    Thread.sleep(SLOP_FREQUENCY_MS * 2);
    // Confirm the dead slops are all gone now..
    for (List<Versioned<Slop>> deadSlops : Arrays.asList(deadStoreSlops, deadNodeSlops)) {
        for (Versioned<Slop> slop : deadSlops) {
            List<Versioned<byte[]>> slopEntry = adminClient.storeOps.getNodeKey("slop", 0, slop.getValue().makeKey());
            assertEquals("Slop should be purged", 0, slopEntry.size());
        }
    }
    StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
    List<StoreDefinition> storeDefs = mapper.readStoreList(new StringReader(VoldemortTestConstants.getSingleStoreDefinitionsXml()));
    BaseStoreRoutingPlan rPlan = new BaseStoreRoutingPlan(adminClient.getAdminClientCluster(), StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs, "test"));
    // Confirm the valid ones made it
    for (Versioned<Slop> slop : validStoreSlops) {
        ByteArray key = slop.getValue().getKey();
        // Only check keys that route to node 1 under the store's replication plan
        if (rPlan.getReplicationNodeList(key.get()).contains(1)) {
            List<Versioned<byte[]>> slopEntry = adminClient.storeOps.getNodeKey("test", 1, key);
            if (slop.getValue().getOperation() == Operation.DELETE) {
                assertTrue("Delete Slop should have not reached destination", slopEntry.size() == 0);
            } else {
                assertTrue("Put Slop should have reached destination", slopEntry.size() > 0);
            }
        }
    }
}
Usage of voldemort.store.StoreDefinition in the voldemort project.
Class AbstractRebalanceTest, method checkEntriesPostRebalance.
/**
 * Makes sure that all expected partition-stores are on each server after
 * the rebalance: for every store and every node in the check list, the
 * partition tuples the node gained in the final cluster layout must be
 * readable from that node.
 *
 * @param currentCluster cluster layout before the rebalance
 * @param finalCluster cluster layout after the rebalance
 * @param storeDefs store definitions to verify
 * @param nodeCheckList node ids whose gained partitions are checked
 * @param baselineTuples expected key/value pairs
 * @param baselineVersions expected vector clocks per key
 */
protected void checkEntriesPostRebalance(Cluster currentCluster, Cluster finalCluster, List<StoreDefinition> storeDefs, List<Integer> nodeCheckList, HashMap<String, String> baselineTuples, HashMap<String, VectorClock> baselineVersions) {
    for (StoreDefinition def : storeDefs) {
        // node id -> all (replica, partition) tuples, before and after the rebalance
        Map<Integer, Set<Pair<Integer, Integer>>> tuplesBefore = ROTestUtils.getNodeIdToAllPartitions(currentCluster, def, true);
        Map<Integer, Set<Pair<Integer, Integer>>> tuplesAfter = ROTestUtils.getNodeIdToAllPartitions(finalCluster, def, true);
        for (int nodeId : nodeCheckList) {
            Set<Pair<Integer, Integer>> before = tuplesBefore.get(nodeId);
            Set<Pair<Integer, Integer>> after = tuplesAfter.get(nodeId);
            // Tuples this node gained in the move, flattened to partition -> replica list
            HashMap<Integer, List<Integer>> gainedTuples = ROTestUtils.flattenPartitionTuples(Utils.getAddedInTarget(before, after));
            Store<ByteArray, byte[], byte[]> store = getSocketStore(def.getName(), finalCluster.getNodeById(nodeId).getHost(), finalCluster.getNodeById(nodeId).getSocketPort());
            checkGetEntries(finalCluster.getNodeById(nodeId), finalCluster, def, store, gainedTuples, baselineTuples, baselineVersions);
        }
    }
}
Aggregations