Use of voldemort.xml.StoreDefinitionsMapper in project voldemort by voldemort.
From the class SlopPusherDeadSlopTest, method testAutoPurge.
@Test
public void testAutoPurge() {
    try {
        // Generate slops for a non-existent node 2
        List<Versioned<Slop>> deadNodeSlops = ServerTestUtils.createRandomSlops(2, 40, false, "test");
        // Generate slops for a non-existent store "deleted_store"
        List<Versioned<Slop>> deadStoreSlops = ServerTestUtils.createRandomSlops(0, 40, false, "deleted_store");
        // Generate some valid slops and make sure they go into the
        // destination store
        List<Versioned<Slop>> validStoreSlops = ServerTestUtils.createRandomSlops(1, 40, false, "test");
        List<Versioned<Slop>> slops = new ArrayList<Versioned<Slop>>();
        slops.addAll(deadStoreSlops);
        slops.addAll(deadNodeSlops);
        slops.addAll(validStoreSlops);
        SlopSerializer slopSerializer = new SlopSerializer();
        // Populate the slop store with all three batches
        for (Versioned<Slop> slop : slops) {
            VectorClock clock = TestUtils.getClock(1);
            NodeValue<ByteArray, byte[]> nodeValue =
                    new NodeValue<ByteArray, byte[]>(0,
                                                     slop.getValue().makeKey(),
                                                     new Versioned<byte[]>(slopSerializer.toBytes(slop.getValue()), clock));
            adminClient.storeOps.putNodeKeyValue("slop", nodeValue);
        }
        // Wait for twice the slop interval (in case a slop push was
        // underway as we populated)
        Thread.sleep(SLOP_FREQUENCY_MS * 2);
        // Confirm the dead slops are all gone now
        for (List<Versioned<Slop>> deadSlops : Arrays.asList(deadStoreSlops, deadNodeSlops)) {
            for (Versioned<Slop> slop : deadSlops) {
                List<Versioned<byte[]>> slopEntry =
                        adminClient.storeOps.getNodeKey("slop", 0, slop.getValue().makeKey());
                assertEquals("Slop should be purged", 0, slopEntry.size());
            }
        }
        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        List<StoreDefinition> storeDefs =
                mapper.readStoreList(new StringReader(VoldemortTestConstants.getSingleStoreDefinitionsXml()));
        BaseStoreRoutingPlan rPlan =
                new BaseStoreRoutingPlan(adminClient.getAdminClientCluster(),
                                         StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs, "test"));
        // Confirm the valid slops made it to the destination
        for (Versioned<Slop> slop : validStoreSlops) {
            ByteArray key = slop.getValue().getKey();
            if (rPlan.getReplicationNodeList(key.get()).contains(1)) {
                List<Versioned<byte[]>> slopEntry = adminClient.storeOps.getNodeKey("test", 1, key);
                if (slop.getValue().getOperation() == Operation.DELETE) {
                    assertTrue("Delete slop should not have reached the destination", slopEntry.size() == 0);
                } else {
                    assertTrue("Put slop should have reached the destination", slopEntry.size() > 0);
                }
            }
        }
    } catch (Exception e) {
        logger.error("Test failed with", e);
        fail("Unexpected exception");
    }
}
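Distilled to its essentials, the mapper usage in this test is a two-step pattern: parse a stores.xml document from a Reader with readStoreList, then select the wanted definition by name. A minimal sketch of that pattern, with the import paths assumed from the voldemort source tree:

import java.io.Reader;
import java.util.List;

import voldemort.store.StoreDefinition;
import voldemort.utils.StoreDefinitionUtils;
import voldemort.xml.StoreDefinitionsMapper;

public class StoreXmlParseSketch {

    // Parses a stores.xml document and returns the definition named storeName
    public static StoreDefinition parseStore(Reader storesXml, String storeName) {
        List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(storesXml);
        return StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs, storeName);
    }
}

In the test above, the parsed definition feeds BaseStoreRoutingPlan, which decides on which nodes each slop's destination key is replicated.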
Use of voldemort.xml.StoreDefinitionsMapper in project voldemort by voldemort.
From the class RedirectingStoreTest, method setUp.
@Before
public void setUp() throws IOException, InterruptedException {
    currentCluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0, 1 }, { 2, 3 }, {} });
    targetCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Arrays.asList(0));
    this.primaryPartitionsMoved = Lists.newArrayList(0);
    this.secondaryPartitionsMoved = Lists.newArrayList(2, 3);
    this.storeDef = new StoreDefinitionBuilder().setName("test")
                                                .setType(BdbStorageConfiguration.TYPE_NAME)
                                                .setKeySerializer(new SerializerDefinition("string"))
                                                .setValueSerializer(new SerializerDefinition("string"))
                                                .setRoutingPolicy(RoutingTier.CLIENT)
                                                .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                .setReplicationFactor(2)
                                                .setPreferredReads(1)
                                                .setRequiredReads(1)
                                                .setPreferredWrites(1)
                                                .setRequiredWrites(1)
                                                .build();
    File tempStoreXml = new File(TestUtils.createTempDir(), "stores.xml");
    FileUtils.writeStringToFile(tempStoreXml,
                                new StoreDefinitionsMapper().writeStoreList(Lists.newArrayList(storeDef)));
    this.servers = new VoldemortServer[3];
    for (int nodeId = 0; nodeId < 3; nodeId++) {
        this.servers[nodeId] = startServer(nodeId, tempStoreXml.getAbsolutePath(), currentCluster);
    }
    // Populate the cluster with random entries through a store client
    HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(100);
    SocketStoreClientFactory factory =
            new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(Lists.newArrayList("tcp://"
                    + currentCluster.getNodeById(0).getHost() + ":"
                    + currentCluster.getNodeById(0).getSocketPort())));
    StoreClient<Object, Object> storeClient = factory.getStoreClient("test");
    this.primaryEntriesMoved = Maps.newHashMap();
    this.secondaryEntriesMoved = Maps.newHashMap();
    this.proxyPutTestPrimaryEntries = Maps.newHashMap();
    this.proxyPutTestSecondaryEntries = Maps.newHashMap();
    RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, currentCluster);
    for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) {
        storeClient.put(new String(entry.getKey().get()), new String(entry.getValue()));
        List<Integer> pList = strategy.getPartitionList(entry.getKey().get());
        if (primaryPartitionsMoved.contains(pList.get(0))) {
            primaryEntriesMoved.put(entry.getKey(), entry.getValue());
        } else if (secondaryPartitionsMoved.contains(pList.get(0))) {
            secondaryEntriesMoved.put(entry.getKey(), entry.getValue());
        }
    }
    // Sleep a while for the puts to go through...
    // Hope the 'God of perfect timing' is on our side
    Thread.sleep(500);
    // Steal a few primary key-value pairs for testing proxy put logic
    int cnt = 0;
    for (Entry<ByteArray, byte[]> entry : primaryEntriesMoved.entrySet()) {
        if (cnt > 3) {
            break;
        }
        this.proxyPutTestPrimaryEntries.put(entry.getKey(), entry.getValue());
        cnt++;
    }
    for (ByteArray key : this.proxyPutTestPrimaryEntries.keySet()) {
        this.primaryEntriesMoved.remove(key);
    }
    assertTrue("Not enough primary entries", primaryEntriesMoved.size() > 1);
    // Steal a few secondary key-value pairs for testing proxy put logic
    cnt = 0;
    for (Entry<ByteArray, byte[]> entry : secondaryEntriesMoved.entrySet()) {
        if (cnt > 3) {
            break;
        }
        this.proxyPutTestSecondaryEntries.put(entry.getKey(), entry.getValue());
        cnt++;
    }
    for (ByteArray key : this.proxyPutTestSecondaryEntries.keySet()) {
        this.secondaryEntriesMoved.remove(key);
    }
    assertTrue("Not enough secondary entries", secondaryEntriesMoved.size() > 1);
    RebalanceBatchPlan rebalanceBatchPlan =
            new RebalanceBatchPlan(currentCluster, targetCluster, Lists.newArrayList(storeDef));
    List<RebalanceTaskInfo> plans = Lists.newArrayList(rebalanceBatchPlan.getBatchPlan());
    // Set the stealer nodes into rebalancing state
    for (RebalanceTaskInfo partitionPlan : plans) {
        MetadataStore metadataStore = servers[partitionPlan.getStealerId()].getMetadataStore();
        metadataStore.put(MetadataStore.SERVER_STATE_KEY,
                          MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
        metadataStore.put(MetadataStore.REBALANCING_STEAL_INFO,
                          new RebalancerState(Lists.newArrayList(partitionPlan)));
        metadataStore.put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML, currentCluster);
        // Preserve the original store definitions
        metadataStore.put(MetadataStore.REBALANCING_SOURCE_STORES_XML, Lists.newArrayList(storeDef));
    }
    // Update the cluster metadata on all three nodes
    for (VoldemortServer server : servers) {
        server.getMetadataStore().put(MetadataStore.CLUSTER_KEY, targetCluster);
    }
}
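Since writeStoreList and readStoreList are inverses, a quick round-trip is a cheap sanity check for a definition built with StoreDefinitionBuilder, like the one above. A minimal sketch, assuming only the classes already used in this setUp:

import java.io.StringReader;
import java.util.List;

import com.google.common.collect.Lists;

import voldemort.store.StoreDefinition;
import voldemort.xml.StoreDefinitionsMapper;

public class StoreXmlRoundTripSketch {

    // Serializes a store definition to XML and parses it back again
    public static StoreDefinition roundTrip(StoreDefinition storeDef) {
        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        String xml = mapper.writeStoreList(Lists.newArrayList(storeDef));
        List<StoreDefinition> parsed = mapper.readStoreList(new StringReader(xml));
        // StoreDefinition compares by value, so roundTrip(def).equals(def) should hold
        return parsed.get(0);
    }
}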
Use of voldemort.xml.StoreDefinitionsMapper in project voldemort by voldemort.
From the class AbstractActionTest, method setUp.
@Before
public void setUp() throws Exception {
    cluster = VoldemortTestConstants.getThreeNodeCluster();
    failureDetector = new BannagePeriodFailureDetector(new FailureDetectorConfig().setCluster(cluster));
    clusterWithZones = VoldemortTestConstants.getFourNodeClusterWithZones();
    failureDetectorWithZones =
            new BannagePeriodFailureDetector(new FailureDetectorConfig().setCluster(clusterWithZones));
    storeDef = new StoreDefinitionsMapper()
            .readStoreList(new StringReader(VoldemortTestConstants.getSingleStoreWithZonesXml()))
            .get(0);
}
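A parsed definition is usually handed straight to other components; pairing it with a Cluster to build a RoutingStrategy, as RedirectingStoreTest does above, is a typical next step. A minimal sketch, with import paths assumed from the voldemort source tree:

import java.io.StringReader;

import voldemort.cluster.Cluster;
import voldemort.routing.RoutingStrategy;
import voldemort.routing.RoutingStrategyFactory;
import voldemort.store.StoreDefinition;
import voldemort.xml.StoreDefinitionsMapper;

public class RoutingFromXmlSketch {

    // Builds a routing strategy for the first store defined in the given stores.xml text
    public static RoutingStrategy strategyFor(String storesXml, Cluster cluster) {
        StoreDefinition storeDef = new StoreDefinitionsMapper().readStoreList(new StringReader(storesXml))
                                                               .get(0);
        return new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
    }
}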
Use of voldemort.xml.StoreDefinitionsMapper in project voldemort by voldemort.
From the class AvroAddStoreTest, method testUpdateAvroSchema.
@Test
public void testUpdateAvroSchema() throws Exception {
    for (VoldemortServer vs : vservers.values()) {
        assertNull(vs.getStoreRepository().getLocalStore("test"));
    }
    logger.info("Now inserting store with a backward-compatible schema. Should not see an exception");
    adminClient.storeMgmtOps.addStore(new StoreDefinitionsMapper().readStore(new StringReader(storeXmlWithBackwardCompatibleSchema)));
    try {
        logger.info("Now updating store with a non-backward-compatible schema. Should see an exception");
        List<StoreDefinition> stores = new ArrayList<StoreDefinition>();
        stores.add(new StoreDefinitionsMapper().readStore(new StringReader(storeXmlWithBackwardIncompatibleSchema)));
        adminClient.metadataMgmtOps.updateRemoteStoreDefList(stores);
        Assert.fail("Did not throw exception");
    } catch (VoldemortException e) {
        // Expected: the incompatible schema must be rejected
    }
    for (VoldemortServer vs : vservers.values()) {
        assertNotNull(vs.getStoreRepository().getLocalStore("test"));
    }
}
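Note the two parsing entry points in play here: readStoreList handles a whole stores document, while readStore parses a single store element, which is why the test can hand its result directly to addStore. A minimal sketch of that single-store path, assuming the AdminClient import path from the voldemort source tree:

import java.io.StringReader;

import voldemort.client.protocol.admin.AdminClient;
import voldemort.store.StoreDefinition;
import voldemort.xml.StoreDefinitionsMapper;

public class AddStoreFromXmlSketch {

    // Parses a single store XML element and registers it on the cluster
    public static void addStoreFromXml(AdminClient adminClient, String storeXml) {
        StoreDefinition storeDef = new StoreDefinitionsMapper().readStore(new StringReader(storeXml));
        adminClient.storeMgmtOps.addStore(storeDef);
    }
}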
Use of voldemort.xml.StoreDefinitionsMapper in project voldemort by voldemort.
From the class MetaOperationsTest, method testMetaGet.
@Test
public void testMetaGet() throws Exception {
    Integer nodeId = 0;
    String tempDir = System.getProperty("java.io.tmpdir") + "temp" + System.currentTimeMillis();
    // Fetch metadata and write it to files
    AdminCommand.executeCommand(new String[] { "meta", "get",
            MetadataStore.CLUSTER_KEY + "," + MetadataStore.STORES_KEY,
            "-u", bsURL, "-n", nodeId.toString(), "-d", tempDir });
    // Load the metadata files
    String clusterFile = tempDir + "/" + MetadataStore.CLUSTER_KEY + "_" + nodeId;
    String storesFile = tempDir + "/" + MetadataStore.STORES_KEY + "_" + nodeId;
    assertTrue(Utils.isReadableFile(clusterFile));
    assertTrue(Utils.isReadableFile(storesFile));
    ClusterMapper clusterMapper = new ClusterMapper();
    StoreDefinitionsMapper storeDefsMapper = new StoreDefinitionsMapper();
    Cluster newCluster = clusterMapper.readCluster(new File(clusterFile));
    List<StoreDefinition> newStores = storeDefsMapper.readStoreList(new File(storesFile));
    // Compare the fetched metadata against the originals
    assertTrue(newCluster.equals(cluster));
    assertTrue(newStores.size() == stores.size());
    for (StoreDefinition store : stores) {
        assertTrue(newStores.contains(store));
    }
}
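This test exercises the File-based overloads rather than the Reader-based ones used in the earlier snippets; the final comparisons work because Cluster and StoreDefinition both compare by value. A condensed sketch of the load step, with import paths assumed from the voldemort source tree:

import java.io.File;
import java.io.IOException;
import java.util.List;

import voldemort.cluster.Cluster;
import voldemort.store.StoreDefinition;
import voldemort.xml.ClusterMapper;
import voldemort.xml.StoreDefinitionsMapper;

public class MetadataFileLoadSketch {

    // Reads the cluster.xml and stores.xml files dumped by "meta get" back into objects
    public static List<StoreDefinition> load(File clusterFile, File storesFile) throws IOException {
        Cluster cluster = new ClusterMapper().readCluster(clusterFile);
        List<StoreDefinition> stores = new StoreDefinitionsMapper().readStoreList(storesFile);
        System.out.println("Loaded cluster " + cluster.getName() + " with " + stores.size() + " stores");
        return stores;
    }
}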