use of voldemort.server.VoldemortConfig in project voldemort by voldemort.
the class VoldemortUtilsTest method testModifyURLByConfig.
public void testModifyURLByConfig() {
String url = "swebhdfs://localhost:50470/" + PATH;
Properties properties = new Properties();
properties.setProperty(VoldemortConfig.NODE_ID, "1");
properties.setProperty(VoldemortConfig.VOLDEMORT_HOME, "/test");
VoldemortConfig config = new VoldemortConfig(properties);
// Default configuration. Should not modify url.
String newUrl = VoldemortUtils.modifyURL(url, config);
assertEquals(url, newUrl);
// Enable the modify feature. The URL should be rewritten to webhdfs with port 50070.
properties.setProperty(VoldemortConfig.READONLY_MODIFY_PROTOCOL, "webhdfs");
properties.setProperty(VoldemortConfig.READONLY_MODIFY_PORT, "50070");
config = new VoldemortConfig(properties);
newUrl = VoldemortUtils.modifyURL(url, config);
assertEquals("webhdfs://localhost:50070/" + PATH, newUrl);
// No modified protocol assigned. Should not modify URL.
properties.remove(VoldemortConfig.READONLY_MODIFY_PROTOCOL);
config = new VoldemortConfig(properties);
newUrl = VoldemortUtils.modifyURL(url, config);
assertEquals(url, newUrl);
// No modified port assigned. Should not modify URL.
properties.remove(VoldemortConfig.READONLY_MODIFY_PORT);
properties.setProperty(VoldemortConfig.READONLY_MODIFY_PROTOCOL, "testprotocol");
config = new VoldemortConfig(properties);
newUrl = VoldemortUtils.modifyURL(url, config);
assertEquals(url, newUrl);
// Omit port set to true should remove the port from the URI
String expectedUrl = "testprotocol://localhost/" + PATH;
properties.setProperty(VoldemortConfig.READONLY_MODIFY_PROTOCOL, "testprotocol");
properties.setProperty(VoldemortConfig.READONLY_OMIT_PORT, "true");
config = new VoldemortConfig(properties);
newUrl = VoldemortUtils.modifyURL(url, config);
assertEquals(expectedUrl, newUrl);
// Local path. Should throw IAE because it's not a valid URL.
url = "/testpath/file";
properties.setProperty(VoldemortConfig.READONLY_MODIFY_PROTOCOL, "webhdfs");
properties.setProperty(VoldemortConfig.READONLY_MODIFY_PORT, "50070");
config = new VoldemortConfig(properties);
try {
VoldemortUtils.modifyURL(url, config);
} catch (IllegalArgumentException iae) {
return;
} catch (Exception e) {
fail("Should met IAE. URL is not valid.");
}
fail("Should met IAE. URL is not valid.");
}
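The contract exercised above (swap the protocol and port of an absolute URL, optionally drop the port, and reject plain paths with an IllegalArgumentException) can be approximated with java.net.URI. The following is a minimal sketch under those assumptions; it is not the actual VoldemortUtils.modifyURL implementation, and the class, method, and parameter names are hypothetical.

import java.net.URI;
import java.net.URISyntaxException;

public final class UrlRewriteSketch {

    // Rewrites e.g. swebhdfs://localhost:50470/path to webhdfs://localhost:50070/path.
    // Returns the URL unchanged when no target protocol/port is configured.
    public static String modifyUrl(String url, String newProtocol, int newPort, boolean omitPort) {
        try {
            URI uri = new URI(url);
            if (uri.getScheme() == null || uri.getHost() == null) {
                // plain local paths like "/testpath/file" end up here
                throw new IllegalArgumentException("Not a valid URL: " + url);
            }
            if (newProtocol == null || (newPort <= 0 && !omitPort)) {
                return url; // nothing to rewrite
            }
            URI rewritten = new URI(newProtocol,
                                    uri.getUserInfo(),
                                    uri.getHost(),
                                    omitPort ? -1 : newPort,
                                    uri.getPath(),
                                    uri.getQuery(),
                                    uri.getFragment());
            return rewritten.toString();
        } catch (URISyntaxException e) {
            throw new IllegalArgumentException("Not a valid URL: " + url, e);
        }
    }

    public static void main(String[] args) {
        // prints webhdfs://localhost:50070/path
        System.out.println(modifyUrl("swebhdfs://localhost:50470/path", "webhdfs", 50070, false));
        // prints testprotocol://localhost/path (port omitted)
        System.out.println(modifyUrl("swebhdfs://localhost:50470/path", "testprotocol", -1, true));
    }
}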
use of voldemort.server.VoldemortConfig in project voldemort by voldemort.
the class RepairJobTest method setUp.
public void setUp() {
File temp = TestUtils.createTempDir();
VoldemortConfig config = new VoldemortConfig(0, temp.getAbsolutePath());
new File(config.getMetadataDirectory()).mkdir();
this.serverMap = new HashMap<Integer, VoldemortServer>();
this.scheduler = new SchedulerService(1, new MockTime());
this.cluster = VoldemortTestConstants.getNineNodeCluster();
StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
this.storeDefs = mapper.readStoreList(new StringReader(VoldemortTestConstants.getSingleStore322Xml()));
this.storeRepository = new StoreRepository();
this.metadataStore = ServerTestUtils.createMetadataStore(cluster, storeDefs);
storage = new StorageService(storeRepository, metadataStore, scheduler, config);
// Start the storage service
storage.start();
this.socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 32 * 1024);
String storeDefsString = mapper.writeStoreList(storeDefs);
File file = null;
try {
file = ServerTestUtils.createTempFile("single-store-", ".xml");
FileUtils.writeStringToFile(file, storeDefsString);
String storeDefFile = file.getAbsolutePath();
List<Integer> nodesToStart = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7, 8);
// Start the servers
startServers(cluster, storeDefFile, nodesToStart, null);
} catch (Exception e) {
e.printStackTrace();
}
}
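The listing does not include the matching tearDown for this setUp. A plausible counterpart, assuming the standard stop()/close() lifecycle methods on the services created above, would look roughly like this (illustrative sketch, not part of the original test):

public void tearDown() {
    // stop every node started in setUp
    for (VoldemortServer server : serverMap.values()) {
        server.stop();
    }
    // stop the locally created services and release client connections
    storage.stop();
    scheduler.stop();
    socketStoreFactory.close();
}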
use of voldemort.server.VoldemortConfig in project voldemort by voldemort.
the class RepairJobTest method startServers.
private Cluster startServers(Cluster cluster, String storeXmlFile, List<Integer> nodeToStart, Map<String, String> configProps) throws Exception {
for (int node : nodeToStart) {
Properties properties = new Properties();
if (null != configProps) {
for (Entry<String, String> property : configProps.entrySet()) {
properties.put(property.getKey(), property.getValue());
}
}
VoldemortConfig config = ServerTestUtils.createServerConfig(true, node, TestUtils.createTempDir().getAbsolutePath(), null, storeXmlFile, properties);
VoldemortServer server = ServerTestUtils.startVoldemortServer(socketStoreFactory, config, cluster);
serverMap.put(node, server);
}
return cluster;
}
use of voldemort.server.VoldemortConfig in project voldemort by voldemort.
the class BdbCachePartitioningTest method testStaticPrivateCaches.
/**
* Tests that, given that no store's data set completely fits in memory
* (realistic production conditions), stores will stay within their cache
* limits no matter how much disproportionate traffic is thrown at them.
*/
@Test
public void testStaticPrivateCaches() {
// total cache size
int totalCache = 20 * ByteUtils.BYTES_PER_MB;
// A reserves 10MB
int shareA = 10 * ByteUtils.BYTES_PER_MB;
// B reserves 5MB
int shareB = 5 * ByteUtils.BYTES_PER_MB;
// the rest, 5 MB
int shareC = totalCache - shareA - shareB;
int numRecords = 40;
BdbStorageEngine storeA = null, storeB = null, storeC = null;
try {
// let's use all the default values.
Props props = new Props();
props.put("node.id", 1);
props.put("voldemort.home", "test/common/voldemort/config");
VoldemortConfig voldemortConfig = new VoldemortConfig(props);
voldemortConfig.setBdbCacheSize(totalCache);
voldemortConfig.setBdbOneEnvPerStore(true);
voldemortConfig.setBdbDataDirectory(bdbMasterDir.toURI().getPath());
voldemortConfig.setBdbPrefixKeysWithPartitionId(prefixPartitionId);
bdbStorage = new BdbStorageConfiguration(voldemortConfig);
StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", shareA / (ByteUtils.BYTES_PER_MB));
storeA = (BdbStorageEngine) bdbStorage.getStore(defA, TestUtils.makeSingleNodeRoutingStrategy());
StoreDefinition defB = TestUtils.makeStoreDefinition("storeB", shareB / (ByteUtils.BYTES_PER_MB));
storeB = (BdbStorageEngine) bdbStorage.getStore(defB, TestUtils.makeSingleNodeRoutingStrategy());
StoreDefinition defC = TestUtils.makeStoreDefinition("storeC");
storeC = (BdbStorageEngine) bdbStorage.getStore(defC, TestUtils.makeSingleNodeRoutingStrategy());
// before any traffic, the cache will not have grown
assertTrue("Store A grew without traffic", Math.abs(shareA - getCacheSize(defA)) > ByteUtils.BYTES_PER_MB);
assertTrue("Store B grew without traffic", Math.abs(shareB - getCacheSize(defB)) > ByteUtils.BYTES_PER_MB);
// sharedCacheSize reading 0 confirms that the store has a private
// cache
assertEquals("Store A has non zero shared cache", 0, getStats(bdbStorage.getEnvironment(defA)).getSharedCacheTotalBytes());
assertEquals("Store B has non zero shared cache", 0, getStats(bdbStorage.getEnvironment(defB)).getSharedCacheTotalBytes());
// load data into the stores; each store is guaranteed to be ~ 40MB.
// Data won't fit in memory
byte[] value = new byte[ByteUtils.BYTES_PER_MB];
for (int i = 0; i < numRecords; i++) {
storeA.put(TestUtils.toByteArray("testKey" + i), new Versioned<byte[]>(value), null);
storeB.put(TestUtils.toByteArray("testKey" + i), new Versioned<byte[]>(value), null);
storeC.put(TestUtils.toByteArray("testKey" + i), new Versioned<byte[]>(value), null);
}
// we will bring all of that data into the cache, by doing a
// keywalk.
// This should expand the cache as much as possible
long cacheSizeA = Long.MIN_VALUE;
long cacheSizeB = Long.MIN_VALUE;
long cacheSizeC = Long.MIN_VALUE;
for (int cycle = 0; cycle < 10; cycle++) {
for (int i = 0; i < numRecords; i++) {
long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
long cycleCacheSizeC = getAndCheckCacheSize(storeC, defC, "testKey" + i);
// record the maximum cache size each store ever grew to
cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
cacheSizeC = (cycleCacheSizeC > cacheSizeC) ? cycleCacheSizeC : cacheSizeC;
}
}
// check that they stay within the expected limits. Small overflows are okay,
// but should not be more than 1MB.
assertTrue("Store A not within limits", cacheSizeA <= (shareA + ByteUtils.BYTES_PER_MB));
assertTrue("Store B not within limits", cacheSizeB <= (shareB + ByteUtils.BYTES_PER_MB));
assertTrue("Store C not within limits", cacheSizeC <= (shareC + ByteUtils.BYTES_PER_MB));
// try doing reads on store C alone, for which we have no
// reservations.
// This simulates a spike on one store
long cacheSizeCNow = Long.MIN_VALUE;
for (int cycle = 0; cycle < 10; cycle++) {
for (int i = 0; i < numRecords; i++) {
long cycleCacheSizeCNow = getAndCheckCacheSize(storeC, defC, "testkey" + i);
// record the maximum cache size the store ever grew to
cacheSizeCNow = (cycleCacheSizeCNow > cacheSizeCNow) ? cycleCacheSizeCNow : cacheSizeCNow;
}
}
assertTrue("Store C not within limits after spike", cacheSizeCNow <= (shareC + ByteUtils.BYTES_PER_MB));
} finally {
if (storeA != null)
storeA.close();
if (storeB != null)
storeB.close();
if (storeC != null)
storeC.close();
bdbStorage.close();
}
}
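The helpers getCacheSize, getStats, and getAndCheckCacheSize are used above but not shown in this listing. Conceptually they touch a key and then read the BDB-JE environment statistics for that store. A rough sketch, assuming the com.sleepycat.je statistics API and that bdbStorage.getEnvironment(def) returns the store's Environment (the method bodies here are illustrative, not the original test code):

import com.sleepycat.je.Environment;
import com.sleepycat.je.EnvironmentStats;
import com.sleepycat.je.StatsConfig;

private EnvironmentStats getStats(Environment environment) {
    StatsConfig config = new StatsConfig();
    config.setFast(true); // cheap, approximate statistics are sufficient here
    return environment.getStats(config);
}

private long getCacheSize(StoreDefinition def) {
    return getStats(bdbStorage.getEnvironment(def)).getCacheTotalBytes();
}

private long getAndCheckCacheSize(BdbStorageEngine store, StoreDefinition def, String key) {
    // touch the key so its nodes are pulled into the cache, then report cache usage
    store.get(TestUtils.toByteArray(key), null);
    return getCacheSize(def);
}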
use of voldemort.server.VoldemortConfig in project voldemort by voldemort.
the class BdbCachePartitioningTest method testDynamicReservations.
@Test
public void testDynamicReservations() {
// total cache size
int totalCache = 20 * ByteUtils.BYTES_PER_MB;
// A reserves 10MB
int shareA = 10 * ByteUtils.BYTES_PER_MB;
int shareB = totalCache - shareA;
int numRecords = 40;
// let's use all the default values.
Props props = new Props();
props.put("node.id", 1);
props.put("voldemort.home", "test/common/voldemort/config");
VoldemortConfig voldemortConfig = new VoldemortConfig(props);
voldemortConfig.setBdbCacheSize(totalCache);
voldemortConfig.setBdbOneEnvPerStore(true);
voldemortConfig.setBdbDataDirectory(bdbMasterDir.toURI().getPath());
voldemortConfig.setBdbMinimumSharedCache(5 * ByteUtils.BYTES_PER_MB);
voldemortConfig.setBdbPrefixKeysWithPartitionId(prefixPartitionId);
bdbStorage = new BdbStorageConfiguration(voldemortConfig);
StoreDefinition defA = TestUtils.makeStoreDefinition("storeA", shareA / (1024 * 1024));
BdbStorageEngine storeA = (BdbStorageEngine) bdbStorage.getStore(defA, TestUtils.makeSingleNodeRoutingStrategy());
StoreDefinition defB = TestUtils.makeStoreDefinition("storeB");
BdbStorageEngine storeB = (BdbStorageEngine) bdbStorage.getStore(defB, TestUtils.makeSingleNodeRoutingStrategy());
// load data into the stores; each store is guaranteed to be ~ 40MB.
// Data won't fit in memory
byte[] value = new byte[ByteUtils.BYTES_PER_MB];
for (int i = 0; i < numRecords; i++) {
storeA.put(TestUtils.toByteArray("testKey" + i), new Versioned<byte[]>(value), null);
storeB.put(TestUtils.toByteArray("testKey" + i), new Versioned<byte[]>(value), null);
}
// 1. start with 10MB reserved cache for A and the rest 10MB for B
long cacheSizeA = Long.MIN_VALUE;
long cacheSizeB = Long.MIN_VALUE;
for (int cycle = 0; cycle < 10; cycle++) {
for (int i = 0; i < numRecords; i++) {
long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
// record the maximum cache size each store ever grew to
cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
}
}
assertTrue("Store A not within limits ", cacheSizeA <= (shareA + ByteUtils.BYTES_PER_MB));
assertTrue("Store B not within limits", cacheSizeB <= (shareB + ByteUtils.BYTES_PER_MB));
// 2. dynamically grow the cache to 15MB and watch B shrink.
shareA = 15 * ByteUtils.BYTES_PER_MB;
shareB = totalCache - shareA;
defA = TestUtils.makeStoreDefinition("storeA", shareA / (1024 * 1024));
bdbStorage.update(defA);
cacheSizeA = Long.MIN_VALUE;
cacheSizeB = Long.MIN_VALUE;
for (int cycle = 0; cycle < 10; cycle++) {
for (int i = 0; i < numRecords; i++) {
long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
// record the maximum cache size each store ever grew to
cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
}
}
assertTrue("Store A not within limits ", cacheSizeA <= (shareA + ByteUtils.BYTES_PER_MB));
assertTrue("Store B not within limits ", cacheSizeB <= (shareB + ByteUtils.BYTES_PER_MB));
// 3. dynamically shrink it back to 10MB and watch B expand again.
shareA = 10 * ByteUtils.BYTES_PER_MB;
shareB = totalCache - shareA;
defA = TestUtils.makeStoreDefinition("storeA", shareA / (1024 * 1024));
bdbStorage.update(defA);
cacheSizeA = Long.MIN_VALUE;
cacheSizeB = Long.MIN_VALUE;
for (int cycle = 0; cycle < 10; cycle++) {
for (int i = 0; i < numRecords; i++) {
long cycleCacheSizeA = getAndCheckCacheSize(storeA, defA, "testKey" + i);
long cycleCacheSizeB = getAndCheckCacheSize(storeB, defB, "testKey" + i);
// record the maximum cache size each store ever grew to
cacheSizeA = (cycleCacheSizeA > cacheSizeA) ? cycleCacheSizeA : cacheSizeA;
cacheSizeB = (cycleCacheSizeB > cacheSizeB) ? cycleCacheSizeB : cacheSizeB;
}
}
// check that they do not exceed their limits by much. Small overflows are expected,
// but should not be more than 1MB.
assertTrue("Store A not within limits ", cacheSizeA <= (shareA + ByteUtils.BYTES_PER_MB));
assertTrue("Store B not within limits ", cacheSizeB <= (shareB + ByteUtils.BYTES_PER_MB));
storeA.close();
storeB.close();
bdbStorage.close();
}