Use of voldemort.routing.BaseStoreRoutingPlan in project voldemort by voldemort.
The class VersionedPutPruneJobTest, method setup.
@Before
public void setup() throws Exception {
    socketStoreMap = new HashMap<Integer, Store<ByteArray, byte[], byte[]>>();
    socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 32 * 1024);
    final int numServers = 4;
    servers = new VoldemortServer[numServers];
    int[][] currentPartitionMap = { { 0, 4 }, { 2, 6 }, { 1, 5 }, { 3, 7 } };
    cluster = ServerTestUtils.startVoldemortCluster(numServers,
                                                    servers,
                                                    currentPartitionMap,
                                                    socketStoreFactory,
                                                    true,
                                                    null,
                                                    "test/common/voldemort/config/single-store-322.xml",
                                                    new Properties());
    StringReader reader = new StringReader(VoldemortTestConstants.getSingleStore322Xml());
    StoreDefinition storeDef = new StoreDefinitionsMapper().readStoreList(reader).get(0);
    currentRoutingPlan = new BaseStoreRoutingPlan(cluster, storeDef);
    String bootStrapUrl = "";
    for (VoldemortServer server : servers) {
        Node node = server.getIdentityNode();
        socketStoreMap.put(node.getId(),
                           ServerTestUtils.getSocketStore(socketStoreFactory,
                                                          "test",
                                                          node.getHost(),
                                                          node.getSocketPort(),
                                                          RequestFormatType.PROTOCOL_BUFFERS,
                                                          false,
                                                          true));
        bootStrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
    }
    testEntries = ServerTestUtils.createRandomKeyValueString(100);
    int[][] oldPartitionMap = { { 3, 6 }, { 1, 4 }, { 7, 2 }, { 5, 0 } };
    oldRoutingPlan = new BaseStoreRoutingPlan(ServerTestUtils.getLocalCluster(numServers, oldPartitionMap),
                                              storeDef);
    SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(bootStrapUrl));
    storeClient = factory.getStoreClient("test");
}
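The test builds both an old and a current routing plan from the two partition maps above. A minimal, hypothetical helper showing how the two plans could be compared for a single key; this is illustrative only, not the test's or the prune job's actual logic:

private boolean replicasMoved(BaseStoreRoutingPlan oldPlan,
                              BaseStoreRoutingPlan currentPlan,
                              byte[] key) {
    // Replica node ids under the old and current partition maps
    List<Integer> oldReplicas = oldPlan.getReplicationNodeList(key);
    List<Integer> currentReplicas = currentPlan.getReplicationNodeList(key);
    // If the ordered lists differ, this key's replicas were rebalanced
    // between the two cluster topologies
    return !oldReplicas.equals(currentReplicas);
}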
Use of voldemort.routing.BaseStoreRoutingPlan in project voldemort by voldemort.
The class VoldemortAdminTool, method executeShowRoutingPlan.
private static void executeShowRoutingPlan(AdminClient adminClient, String storeName, List<String> keyList)
        throws DecoderException {
    Cluster cluster = adminClient.getAdminClientCluster();
    List<StoreDefinition> storeDefs = adminClient.metadataMgmtOps.getRemoteStoreDefList().getValue();
    StoreDefinition storeDef = StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs, storeName);
    StoreRoutingPlan routingPlan = new StoreRoutingPlan(cluster, storeDef);
    BaseStoreRoutingPlan bRoutingPlan = new BaseStoreRoutingPlan(cluster, storeDef);
    final int COLUMN_WIDTH = 30;
    for (String keyStr : keyList) {
        byte[] key = ByteUtils.fromHexString(keyStr);
        System.out.println("Key :" + keyStr);
        System.out.println("Replicating Partitions :" + routingPlan.getReplicatingPartitionList(key));
        System.out.println("Replicating Nodes :");
        List<Integer> nodeList = routingPlan.getReplicationNodeList(routingPlan.getMasterPartitionId(key));
        for (int i = 0; i < nodeList.size(); i++) {
            System.out.println(nodeList.get(i) + "\t" + cluster.getNodeById(nodeList.get(i)).getHost());
        }
        System.out.println("Zone Nary information :");
        HashMap<Integer, Integer> zoneRepMap = storeDef.getZoneReplicationFactor();
        for (Zone zone : cluster.getZones()) {
            System.out.println("\tZone #" + zone.getId());
            int numReplicas = -1;
            if (zoneRepMap == null) {
                // non zoned cluster
                numReplicas = storeDef.getReplicationFactor();
            } else {
                // zoned cluster
                if (!zoneRepMap.containsKey(zone.getId())) {
                    Utils.croak("Repfactor for Zone " + zone.getId() + " not found in storedef");
                }
                numReplicas = zoneRepMap.get(zone.getId());
            }
            String FormatString = "%s %s %s\n";
            System.out.format(FormatString,
                              Utils.paddedString("REPLICA#", COLUMN_WIDTH),
                              Utils.paddedString("PARTITION", COLUMN_WIDTH),
                              Utils.paddedString("NODE", COLUMN_WIDTH));
            for (int i = 0; i < numReplicas; i++) {
                Integer nodeId = bRoutingPlan.getNodeIdForZoneNary(zone.getId(), i, key);
                Integer partitionId = routingPlan.getNodesPartitionIdForKey(nodeId, key);
                System.out.format(FormatString,
                                  Utils.paddedString(i + "", COLUMN_WIDTH),
                                  Utils.paddedString(partitionId.toString(), COLUMN_WIDTH),
                                  Utils.paddedString(nodeId + "(" + cluster.getNodeById(nodeId).getHost() + ")",
                                                     COLUMN_WIDTH));
            }
            System.out.println();
        }
        System.out.println("-----------------------------------------------");
        System.out.println();
    }
}
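For reference, the per-key lookups this method performs can be condensed into a short, illustrative sketch. The method name is hypothetical, and the cluster, store definition, zone id, and key are assumed to be supplied by the caller:

private static void printRoutingSummary(Cluster cluster, StoreDefinition storeDef, int zoneId, byte[] key) {
    StoreRoutingPlan plan = new StoreRoutingPlan(cluster, storeDef);
    BaseStoreRoutingPlan basePlan = new BaseStoreRoutingPlan(cluster, storeDef);
    int masterPartition = plan.getMasterPartitionId(key);                 // partition that owns the key
    List<Integer> partitions = plan.getReplicatingPartitionList(key);     // all partitions replicating the key
    List<Integer> nodes = plan.getReplicationNodeList(masterPartition);   // nodes hosting those partitions
    Integer zonePrimary = basePlan.getNodeIdForZoneNary(zoneId, 0, key);  // zone n-ary 0, i.e. the zone primary
    System.out.println("Master partition: " + masterPartition
                       + ", replicating partitions: " + partitions
                       + ", nodes: " + nodes
                       + ", zone primary: " + zonePrimary);
}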
Use of voldemort.routing.BaseStoreRoutingPlan in project voldemort by voldemort.
The class SlopPusherDeadSlopTest, method testAutoPurge.
@Test
public void testAutoPurge() {
    try {
        // generate slops for a non-existent node 2
        List<Versioned<Slop>> deadNodeSlops = ServerTestUtils.createRandomSlops(2, 40, false, "test");
        // generate slops for a non-existent store "deleted_store"
        List<Versioned<Slop>> deadStoreSlops = ServerTestUtils.createRandomSlops(0, 40, false, "deleted_store");
        // generate some valid slops and make sure they go into the
        // destination store
        List<Versioned<Slop>> validStoreSlops = ServerTestUtils.createRandomSlops(1, 40, false, "test");
        List<Versioned<Slop>> slops = new ArrayList<Versioned<Slop>>();
        slops.addAll(deadStoreSlops);
        slops.addAll(deadNodeSlops);
        slops.addAll(validStoreSlops);
        SlopSerializer slopSerializer = new SlopSerializer();
        // Populate the store with the slops
        for (Versioned<Slop> slop : slops) {
            VectorClock clock = TestUtils.getClock(1);
            NodeValue<ByteArray, byte[]> nodeValue = new NodeValue<ByteArray, byte[]>(0,
                                                                                      slop.getValue().makeKey(),
                                                                                      new Versioned<byte[]>(slopSerializer.toBytes(slop.getValue()),
                                                                                                            clock));
            adminClient.storeOps.putNodeKeyValue("slop", nodeValue);
        }
        // wait for twice the slop interval (in case a slop push was
        // underway as we populated)
        Thread.sleep(SLOP_FREQUENCY_MS * 2);
        // Confirm the dead slops are all gone now
        for (List<Versioned<Slop>> deadSlops : Arrays.asList(deadStoreSlops, deadNodeSlops)) {
            for (Versioned<Slop> slop : deadSlops) {
                List<Versioned<byte[]>> slopEntry = adminClient.storeOps.getNodeKey("slop", 0, slop.getValue().makeKey());
                assertEquals("Slop should be purged", 0, slopEntry.size());
            }
        }
        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        List<StoreDefinition> storeDefs = mapper.readStoreList(new StringReader(VoldemortTestConstants.getSingleStoreDefinitionsXml()));
        BaseStoreRoutingPlan rPlan = new BaseStoreRoutingPlan(adminClient.getAdminClientCluster(),
                                                              StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs, "test"));
        // Confirm the valid ones made it
        for (Versioned<Slop> slop : validStoreSlops) {
            ByteArray key = slop.getValue().getKey();
            if (rPlan.getReplicationNodeList(key.get()).contains(1)) {
                List<Versioned<byte[]>> slopEntry = adminClient.storeOps.getNodeKey("test", 1, key);
                if (slop.getValue().getOperation() == Operation.DELETE) {
                    assertTrue("Delete Slop should not have reached the destination", slopEntry.size() == 0);
                } else {
                    assertTrue("Put Slop should have reached the destination", slopEntry.size() > 0);
                }
            }
        }
    } catch (Exception e) {
        logger.error("Test failed with", e);
        fail("unexpected exception");
    }
}
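The membership check at the heart of the final loop, asserting delivery only if node 1 is actually a replica for the slop's key, could be factored into a small helper. The name is hypothetical; this is a sketch, not the test's code:

private boolean isReplicaFor(BaseStoreRoutingPlan plan, ByteArray key, int nodeId) {
    // True when nodeId appears in the key's replica node list under this plan
    return plan.getReplicationNodeList(key.get()).contains(nodeId);
}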
Use of voldemort.routing.BaseStoreRoutingPlan in project voldemort by voldemort.
The class RoutedStoreTest, method testReadRepairWithFailures.
/**
 * See Issue #89: Sequential retrieval in RoutedStore.get doesn't consider
 * repairReads.
 */
@Test
public void testReadRepairWithFailures() throws Exception {
    cluster = getNineNodeCluster();
    RoutedStore routedStore = getStore(cluster, 2, 2, 1, 0);
    BaseStoreRoutingPlan routingPlan = new BaseStoreRoutingPlan(cluster, this.storeDef);
    List<Integer> replicatingNodes = routingPlan.getReplicationNodeList(aKey.get());
    // This is node 1
    Node primaryNode = Iterables.get(cluster.getNodes(), replicatingNodes.get(0));
    // This is node 6
    Node secondaryNode = Iterables.get(cluster.getNodes(), replicatingNodes.get(1));
    // Disable the primary node so that the first put happens with 6 as the
    // pseudo master
    recordException(failureDetector, primaryNode);
    Store<ByteArray, byte[], byte[]> store = new InconsistencyResolvingStore<ByteArray, byte[], byte[]>(routedStore,
                                                                                                        new VectorClockInconsistencyResolver<byte[]>());
    store.put(aKey, new Versioned<byte[]>(aValue), null);
    byte[] anotherValue = "john".getBytes();
    /*
     * Disable the secondary node and enable the primary node to prevent the
     * secondary from getting the new version
     */
    recordException(failureDetector, secondaryNode);
    recordSuccess(failureDetector, primaryNode);
    // Generate the clock based off the secondary so that the resulting clock
    // will be [1:1, 6:1] across the replicas, except for the secondary
    // which will be [6:1]
    VectorClock clock = getClock(6);
    store.put(aKey, new Versioned<byte[]>(anotherValue, clock), null);
    // Enable the secondary and disable the primary; the following get should
    // cause a read repair on the secondary in the code path that is only
    // executed if there are failures. This should repair the secondary with
    // the superseding clock [1:1, 6:1]
    recordException(failureDetector, primaryNode);
    recordSuccess(failureDetector, secondaryNode);
    List<Versioned<byte[]>> versioneds = store.get(aKey, null);
    assertEquals(1, versioneds.size());
    assertEquals(new ByteArray(anotherValue), new ByteArray(versioneds.get(0).getValue()));
    // Read repairs are done asynchronously, so we sleep for a short period.
    // It may be a good idea to use a synchronous executor service.
    Thread.sleep(500);
    for (Map.Entry<Integer, Store<ByteArray, byte[], byte[]>> innerStoreEntry : routedStore.getInnerStores().entrySet()) {
        // Only look at the nodes in the pref list
        if (replicatingNodes.contains(innerStoreEntry.getKey())) {
            List<Versioned<byte[]>> innerVersioneds = innerStoreEntry.getValue().get(aKey, null);
            assertEquals(1, innerVersioneds.size());
            assertEquals(new ByteArray(anotherValue), new ByteArray(innerVersioneds.get(0).getValue()));
        }
    }
}
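The comment about the fixed 500 ms sleep suggests the wait could be made less timing-dependent. A hedged sketch of one alternative, polling the secondary's inner store until the repaired version appears or a timeout elapses; variable names refer to the test above, and this is illustrative rather than the test's actual approach:

// Poll instead of a fixed sleep: wait until the secondary holds exactly the
// repaired version, or give up after a bounded timeout.
long deadline = System.currentTimeMillis() + 5000;
boolean repaired = false;
while (!repaired && System.currentTimeMillis() < deadline) {
    List<Versioned<byte[]>> onSecondary = routedStore.getInnerStores()
                                                     .get(replicatingNodes.get(1))
                                                     .get(aKey, null);
    repaired = onSecondary.size() == 1
               && new ByteArray(anotherValue).equals(new ByteArray(onSecondary.get(0).getValue()));
    if (!repaired) {
        Thread.sleep(50);
    }
}
assertTrue("Secondary was not read-repaired within the timeout", repaired);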
Use of voldemort.routing.BaseStoreRoutingPlan in project voldemort by voldemort.
The class RedirectingStore, method getProxyNode.
/**
 * Wrapper around
 * {@link RedirectingStore#getProxyNode(BaseStoreRoutingPlan, StoreDefinition, byte[])}
 *
 * @param key
 * @return
 */
private Integer getProxyNode(byte[] key) {
    Cluster currentCluster = metadata.getCluster();
    StoreDefinition storeDef = metadata.getStoreDef(getName());
    // TODO Ideally, this object construction should be done only when
    // metadata changes using a listener mechanism
    BaseStoreRoutingPlan currentRoutingPlan = new BaseStoreRoutingPlan(currentCluster, storeDef);
    return getProxyNode(currentRoutingPlan, storeDef, key);
}
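The TODO above notes that the plan should ideally be rebuilt only when metadata changes. A rough sketch of that idea, caching the plan behind a version token; the field names, method name, and the version token itself are hypothetical and not part of Voldemort's metadata API:

// Hypothetical caching of the routing plan, rebuilt only when a metadata
// version token changes; illustrative only.
private volatile Object cachedMetadataVersion;
private volatile BaseStoreRoutingPlan cachedRoutingPlan;

private BaseStoreRoutingPlan routingPlanFor(Object currentVersion) {
    if (cachedRoutingPlan == null || !currentVersion.equals(cachedMetadataVersion)) {
        cachedRoutingPlan = new BaseStoreRoutingPlan(metadata.getCluster(),
                                                     metadata.getStoreDef(getName()));
        cachedMetadataVersion = currentVersion;
    }
    return cachedRoutingPlan;
}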