Example usage of voldemort.store.StoreDefinition in the voldemort project: class AvroAddStoreTest, method testUpdateAvroSchema.
@Test
public void testUpdateAvroSchema() throws Exception {
    // Sanity check: the "test" store must not exist on any server yet.
    for (VoldemortServer server : vservers.values()) {
        assertNull(server.getStoreRepository().getLocalStore("test"));
    }
    logger.info("Now inserting stores with backward compatible schema. Should not see exception");
    StoreDefinition compatibleDef = new StoreDefinitionsMapper().readStore(new StringReader(storeXmlWithBackwardCompatibleSchema));
    adminClient.storeMgmtOps.addStore(compatibleDef);
    try {
        logger.info("Now updating store with non backward compatible schema. Should see exception");
        StoreDefinition incompatibleDef = new StoreDefinitionsMapper().readStore(new StringReader(storeXmlWithBackwardIncompatibleSchema));
        List<StoreDefinition> incompatibleStores = new ArrayList<StoreDefinition>();
        incompatibleStores.add(incompatibleDef);
        adminClient.metadataMgmtOps.updateRemoteStoreDefList(incompatibleStores);
        Assert.fail("Did not throw exception");
    } catch (VoldemortException expected) {
        // Expected: the backward-incompatible schema must be rejected.
    }
    // The original store must survive the failed update on every server.
    for (VoldemortServer server : vservers.values()) {
        assertNotNull(server.getStoreRepository().getLocalStore("test"));
    }
}
Example usage of voldemort.store.StoreDefinition in the voldemort project: class StoreOperationsTest, method testStoreAddAndDelete.
@Test
public void testStoreAddAndDelete() throws Exception {
    // create new stores_key object
    final String newStoreXMLFilePrefix = "updated.stores";
    final String newStoreXMLFileSuffix = "xml";
    // Derive a fresh set of store definitions ("<name>_new") from the existing stores.
    List<StoreDefinition> newStores = new ArrayList<StoreDefinition>();
    List<String> newStoreNames = Lists.newArrayList();
    for (StoreDefinition storeDef : stores) {
        StoreDefinitionBuilder sb = AdminToolTestUtils.storeDefToBuilder(storeDef);
        sb.setName(sb.getName() + "_new");
        newStores.add(sb.build());
        newStoreNames.add(sb.getName());
    }
    // create stores.xml
    File newStoresXMLFolder = TestUtils.createTempDir();
    File newStoreXMLFile = File.createTempFile(newStoreXMLFilePrefix, newStoreXMLFileSuffix, newStoresXMLFolder);
    // Close the writer in a finally block so the file handle is not leaked
    // if writeStoreList() or write() throws.
    FileWriter fwriter = new FileWriter(newStoreXMLFile);
    try {
        fwriter.write(new StoreDefinitionsMapper().writeStoreList(newStores));
    } finally {
        fwriter.close();
    }
    // execute store-add command
    AdminCommand.executeCommand(new String[] { "store", "add", "-f", newStoreXMLFile.getAbsolutePath(), "-u", bsURL });
    // check if stores have been added
    Integer nodeId = adminClient.getAdminClientCluster().getNodes().iterator().next().getId();
    List<StoreDefinition> newStoresToVerify = adminClient.metadataMgmtOps.getRemoteStoreDefList(nodeId).getValue();
    for (StoreDefinition newStore : newStores) {
        assertTrue(newStoresToVerify.contains(newStore));
    }
    // execute store-delete command
    AdminCommand.executeCommand(new String[] { "store", "delete", "-s", Joiner.on(",").join(newStoreNames), "-u", bsURL, "--confirm" });
    // check if stores have been deleted
    newStoresToVerify = adminClient.metadataMgmtOps.getRemoteStoreDefList(nodeId).getValue();
    for (StoreDefinition newStore : newStores) {
        assertTrue(!newStoresToVerify.contains(newStore));
    }
}
Example usage of voldemort.store.StoreDefinition in the voldemort project: class ConsistencyCheckTest, method testOnePartitionEndToEndBasedOnVersion.
@Test
public void testOnePartitionEndToEndBasedOnVersion() throws Exception {
    long now = System.currentTimeMillis();
    // setup four nodes with one store and one partition each
    final SocketStoreFactory socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 32 * 1024);
    VoldemortServer[] servers = new VoldemortServer[4];
    int[][] partitionMap = { { 0 }, { 1 }, { 2 }, { 3 } };
    Cluster cluster = ServerTestUtils.startVoldemortCluster(4, servers, partitionMap, socketStoreFactory, true, null, STORES_XML, new Properties());
    try {
        Node node = cluster.getNodeById(0);
        String bootstrapUrl = "tcp://" + node.getHost() + ":" + node.getSocketPort();
        AdminClient adminClient = new AdminClient(bootstrapUrl);
        byte[] value = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
        byte[] value2 = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
        // make versions
        VectorClock vc1 = new VectorClock();
        VectorClock vc2 = new VectorClock();
        VectorClock vc3 = new VectorClock();
        // [0:1]
        vc1.incrementVersion(0, now);
        // [1:1]
        vc2.incrementVersion(1, now - 5000);
        // [0:1], over a day old
        vc3.incrementVersion(0, now - 89000000);
        ArrayList<Pair<ByteArray, Versioned<byte[]>>> n0store = new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
        ArrayList<Pair<ByteArray, Versioned<byte[]>>> n1store = new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
        ArrayList<Pair<ByteArray, Versioned<byte[]>>> n2store = new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
        ArrayList<Pair<ByteArray, Versioned<byte[]>>> n3store = new ArrayList<Pair<ByteArray, Versioned<byte[]>>>();
        ArrayList<ByteArray> keysHashedToPar0 = new ArrayList<ByteArray>();
        // find the store definition for STORE_NAME on node 0
        Versioned<List<StoreDefinition>> storeDefinitions = adminClient.metadataMgmtOps.getRemoteStoreDefList(0);
        List<StoreDefinition> allStoreDefs = storeDefinitions.getValue();
        StoreDefinition storeDefinition = null;
        for (StoreDefinition def : allStoreDefs) {
            if (def.getName().equals(STORE_NAME)) {
                storeDefinition = def;
                break;
            }
        }
        assertNotNull("No such store found: " + STORE_NAME, storeDefinition);
        RoutingStrategy router = new RoutingStrategyFactory().updateRoutingStrategy(storeDefinition, cluster);
        // collect 7 random keys whose master partition is partition 0
        while (keysHashedToPar0.size() < 7) {
            // generate random key
            Map<ByteArray, byte[]> map = ServerTestUtils.createRandomKeyValuePairs(1);
            ByteArray key = map.keySet().iterator().next();
            // tag the first byte with the key's index for readability in failures
            key.get()[0] = (byte) keysHashedToPar0.size();
            Integer masterPartition = router.getMasterPartition(key.get());
            if (masterPartition == 0) {
                keysHashedToPar0.add(key);
            } else {
                continue;
            }
        }
        ByteArray k6 = keysHashedToPar0.get(6);
        ByteArray k5 = keysHashedToPar0.get(5);
        ByteArray k4 = keysHashedToPar0.get(4);
        ByteArray k3 = keysHashedToPar0.get(3);
        ByteArray k2 = keysHashedToPar0.get(2);
        ByteArray k1 = keysHashedToPar0.get(1);
        ByteArray k0 = keysHashedToPar0.get(0);
        // insert K6 into node 0,1,2
        Versioned<byte[]> v6 = new Versioned<byte[]>(value, vc1);
        n0store.add(Pair.create(k6, v6));
        n1store.add(Pair.create(k6, v6));
        n2store.add(Pair.create(k6, v6));
        // insert K6(conflicting value and version) into node 0,1,2,3
        Versioned<byte[]> v6ConflictEarly = new Versioned<byte[]>(value2, vc2);
        n0store.add(Pair.create(k6, v6ConflictEarly));
        n1store.add(Pair.create(k6, v6ConflictEarly));
        n2store.add(Pair.create(k6, v6ConflictEarly));
        n3store.add(Pair.create(k6, v6ConflictEarly));
        // insert K4,K5 into all four nodes (fully consistent keys)
        Versioned<byte[]> v5 = new Versioned<byte[]>(value, vc1);
        Versioned<byte[]> v4 = new Versioned<byte[]>(value, vc1);
        n0store.add(Pair.create(k5, v5));
        n1store.add(Pair.create(k5, v5));
        n2store.add(Pair.create(k5, v5));
        n3store.add(Pair.create(k5, v5));
        n0store.add(Pair.create(k4, v4));
        n1store.add(Pair.create(k4, v4));
        n2store.add(Pair.create(k4, v4));
        n3store.add(Pair.create(k4, v4));
        // insert K3 into node 0,1,2
        Versioned<byte[]> v3 = new Versioned<byte[]>(value, vc2);
        n0store.add(Pair.create(k3, v3));
        n1store.add(Pair.create(k3, v3));
        n2store.add(Pair.create(k3, v3));
        // insert K3(conflicting but latest version) into node 0,1,2,3
        Versioned<byte[]> v3ConflictLate = new Versioned<byte[]>(value, vc1);
        n0store.add(Pair.create(k3, v3ConflictLate));
        n1store.add(Pair.create(k3, v3ConflictLate));
        n2store.add(Pair.create(k3, v3ConflictLate));
        n3store.add(Pair.create(k3, v3ConflictLate));
        // insert K2 into node 0,1
        Versioned<byte[]> v2 = new Versioned<byte[]>(value, vc1);
        n0store.add(Pair.create(k2, v2));
        n1store.add(Pair.create(k2, v2));
        // insert K1 into node 0
        Versioned<byte[]> v1 = new Versioned<byte[]>(value, vc1);
        n0store.add(Pair.create(k1, v1));
        // insert K0(out of retention) into node 0,1,2
        Versioned<byte[]> v0 = new Versioned<byte[]>(value, vc3);
        n0store.add(Pair.create(k0, v0));
        n1store.add(Pair.create(k0, v0));
        n2store.add(Pair.create(k0, v0));
        // stream to store
        adminClient.streamingOps.updateEntries(0, STORE_NAME, n0store.iterator(), null);
        adminClient.streamingOps.updateEntries(1, STORE_NAME, n1store.iterator(), null);
        adminClient.streamingOps.updateEntries(2, STORE_NAME, n2store.iterator(), null);
        adminClient.streamingOps.updateEntries(3, STORE_NAME, n3store.iterator(), null);
        // should have FULL:2(K4,K5), LATEST_CONSISTENT:1(K3),
        // INCONSISTENT:2(K6,K2), ignored(K1,K0)
        List<String> urls = new ArrayList<String>();
        urls.add(bootstrapUrl);
        ConsistencyCheck.ComparisonType[] comparisonTypes = ConsistencyCheck.ComparisonType.values();
        for (ConsistencyCheck.ComparisonType type : comparisonTypes) {
            StringWriter sw = new StringWriter();
            ConsistencyCheck checker = new ConsistencyCheck(urls, STORE_NAME, 0, sw, type);
            Reporter reporter = null;
            checker.connect();
            reporter = checker.execute();
            assertEquals(7 - 2, reporter.numTotalKeys);
            assertEquals(3, reporter.numGoodKeys);
        }
    } finally {
        // Tear down even when an assertion above fails, so leftover servers
        // and sockets cannot interfere with subsequent tests. Null-check in
        // case cluster startup failed before filling the array.
        for (VoldemortServer vs : servers) {
            if (vs != null) {
                vs.stop();
            }
        }
        socketStoreFactory.close();
    }
}
Example usage of voldemort.store.StoreDefinition in the voldemort project: class RepartitionerTest, method testShuffleWithinZoneWithNonContiguousZoneAndNodeIds.
@Test
public void testShuffleWithinZoneWithNonContiguousZoneAndNodeIds() {
    // Two zone cluster: restrict shuffling to zone 1 only.
    Cluster cluster = ClusterTestUtils.getZ1Z3ImbalancedClusterWithNonContiguousNodeIds();
    List<StoreDefinition> defs = ClusterTestUtils.getZ1Z3StoreDefsInMemory();
    List<Integer> zones = new ArrayList<Integer>();
    zones.add(1);
    verifyRandomSwapsWithinZoneOnlyShufflesPartitionsInThatZone(cluster, defs, zones);
    // Three zone cluster: run the same check with progressively larger zone sets.
    cluster = ClusterTestUtils.getZ1Z3Z5ImbalancedClusterWithNonContiguousNodeIds();
    defs = ClusterTestUtils.getZ1Z3Z5StoreDefsInMemory();
    // Shuffle only within zone 3.
    zones = new ArrayList<Integer>();
    zones.add(3);
    verifyRandomSwapsWithinZoneOnlyShufflesPartitionsInThatZone(cluster, defs, zones);
    // Shuffle only within zones 1 and 3.
    zones = new ArrayList<Integer>();
    zones.add(1);
    zones.add(3);
    verifyRandomSwapsWithinZoneOnlyShufflesPartitionsInThatZone(cluster, defs, zones);
    // Shuffle within zones 1, 3, and 5.
    zones = new ArrayList<Integer>();
    zones.add(1);
    zones.add(3);
    zones.add(5);
    verifyRandomSwapsWithinZoneOnlyShufflesPartitionsInThatZone(cluster, defs, zones);
}
Example usage of voldemort.store.StoreDefinition in the voldemort project: class RepartitionerTest, method testShuffleWithNonContiguousZoneAndNodeIds.
@Test
public void testShuffleWithNonContiguousZoneAndNodeIds() {
    // Two zone cluster: every repartition strategy must improve (or preserve) balance.
    Cluster cluster = ClusterTestUtils.getZ1Z3ImbalancedClusterWithNonContiguousNodeIds();
    List<StoreDefinition> defs = ClusterTestUtils.getZ1Z3StoreDefsInMemory();
    verifyBalanceZoneAndNode(cluster, defs, cluster, defs);
    verifyBalanceNodesNotZones(cluster, defs, cluster, defs);
    verifyRepartitionNoop(cluster, defs, cluster, defs);
    verifyRandomSwapsImproveBalance(cluster, defs);
    verifyGreedySwapsImproveBalance(cluster, defs);
    // Three zone cluster: repeat the full battery of checks.
    cluster = ClusterTestUtils.getZ1Z3Z5ImbalancedClusterWithNonContiguousNodeIds();
    defs = ClusterTestUtils.getZ1Z3Z5StoreDefsInMemory();
    verifyBalanceZoneAndNode(cluster, defs, cluster, defs);
    verifyBalanceNodesNotZones(cluster, defs, cluster, defs);
    verifyRepartitionNoop(cluster, defs, cluster, defs);
    verifyRandomSwapsImproveBalance(cluster, defs);
    verifyGreedySwapsImproveBalance(cluster, defs);
}
Aggregations