Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class AdminServiceRequestHandler, method handleAddStore:
public VAdminProto.AddStoreResponse handleAddStore(VAdminProto.AddStoreRequest request) {
    VAdminProto.AddStoreResponse.Builder response = VAdminProto.AddStoreResponse.newBuilder();
    // don't try to add a store when not in normal or offline state
    if (!metadataStore.getServerStateUnlocked().equals(MetadataStore.VoldemortState.NORMAL_SERVER)
        && !metadataStore.getServerStateUnlocked().equals(MetadataStore.VoldemortState.OFFLINE_SERVER)) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper,
                                                 new VoldemortException("Voldemort server is neither in normal state nor in offline state")));
        return response.build();
    }
    AdminClient adminClient = null;
    try {
        // adding a store requires decoding the passed in store string
        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        StoreDefinition def = mapper.readStore(new StringReader(request.getStoreDefinition()));
        adminClient = new AdminClient(metadataStore.getCluster());
        synchronized (lock) {
            if (!storeRepository.hasLocalStore(def.getName())) {
                if (def.getReplicationFactor() > metadataStore.getCluster().getNumberOfNodes()) {
                    throw new StoreOperationFailureException("Cannot add a store whose replication factor ( "
                                                             + def.getReplicationFactor()
                                                             + " ) is greater than the number of nodes ( "
                                                             + metadataStore.getCluster().getNumberOfNodes() + " )");
                }
                logger.info("Adding new store '" + def.getName() + "'");
                // open the store
                StorageEngine<ByteArray, byte[], byte[]> engine = storageService.openStore(def);
                // register the new store definition with the metadata store (this also has the
                // effect of updating the stores.xml file)
                try {
                    metadataStore.addStoreDefinition(def);
                    long defaultQuota = voldemortConfig.getDefaultStorageSpaceQuotaInKB();
                    QuotaUtils.setQuota(def.getName(),
                                        QuotaType.STORAGE_SPACE,
                                        storeRepository,
                                        metadataStore.getCluster().getNodeIds(),
                                        defaultQuota);
                } catch (Exception e) {
                    // rollback open store operation
                    boolean isReadOnly = ReadOnlyStorageConfiguration.TYPE_NAME.equals(def.getType());
                    storageService.removeEngine(engine, isReadOnly, def.getType(), true);
                    throw new VoldemortException(e);
                }
                logger.info("Successfully added new store '" + def.getName() + "'");
            } else {
                logger.error("Failure to add a store with the same name '" + def.getName() + "'");
                throw new StoreOperationFailureException(String.format("Store '%s' already exists on this server",
                                                                       def.getName()));
            }
        }
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleAddStore failed for request(" + request.toString() + ")", e);
    } finally {
        if (adminClient != null) {
            IOUtils.closeQuietly(adminClient);
        }
    }
    return response.build();
}
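The handler expects the AddStoreRequest to carry the store definition as an XML fragment in the same format as stores.xml, which it decodes with StoreDefinitionsMapper. A minimal client-side sketch of producing and round-tripping that payload is shown below; the store name, serializers, and replication numbers are illustrative assumptions, not values taken from the source.

// Hedged sketch: build a StoreDefinition, serialize it to the XML string an
// AddStoreRequest would carry, and parse it back exactly as handleAddStore does.
// The concrete store settings below are assumptions for illustration only.
import java.io.StringReader;

import voldemort.client.RoutingTier;
import voldemort.routing.RoutingStrategyType;
import voldemort.serialization.SerializerDefinition;
import voldemort.store.StoreDefinition;
import voldemort.store.StoreDefinitionBuilder;
import voldemort.xml.StoreDefinitionsMapper;

public class AddStoreRequestSketch {

    public static void main(String[] args) {
        StoreDefinition def = new StoreDefinitionBuilder().setName("example-store") // assumed name
                                                          .setType("bdb") // assumed engine type
                                                          .setKeySerializer(new SerializerDefinition("string"))
                                                          .setValueSerializer(new SerializerDefinition("string"))
                                                          .setRoutingPolicy(RoutingTier.CLIENT)
                                                          .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                          .setReplicationFactor(1)
                                                          .setRequiredReads(1)
                                                          .setRequiredWrites(1)
                                                          .build();

        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        // writeStore produces the XML string that would be placed in the request;
        // readStore is the inverse operation used by the server-side handler above.
        String xml = mapper.writeStore(def);
        StoreDefinition roundTripped = mapper.readStore(new StringReader(xml));
        System.out.println(roundTripped.getName());
    }
}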
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class ReadOnlyStorePerformanceTest, method main:
public static void main(String[] args) throws FileNotFoundException, IOException {
    OptionParser parser = new OptionParser();
    parser.accepts("help", "print usage information");
    parser.accepts("threads", "number of threads").withRequiredArg().ofType(Integer.class);
    parser.accepts("requests", "[REQUIRED] number of requests").withRequiredArg().ofType(Integer.class);
    parser.accepts("store-dir", "[REQUIRED] store directory").withRequiredArg().describedAs("directory");
    parser.accepts("cluster-xml", "Path to cluster.xml").withRequiredArg().describedAs("path");
    parser.accepts("node-id", "Id of node").withRequiredArg().ofType(Integer.class).describedAs("node-id");
    parser.accepts("search-strategy", "class of the search strategy to use").withRequiredArg().describedAs("class_name");
    parser.accepts("build", "If present, first build the data");
    parser.accepts("num-values", "The number of values in the store").withRequiredArg().describedAs("count").ofType(Integer.class);
    parser.accepts("num-chunks", "The number of chunks per partition").withRequiredArg().describedAs("chunks").ofType(Integer.class);
    parser.accepts("internal-sort-size", "The number of items to sort in memory at a time").withRequiredArg().describedAs("size").ofType(Integer.class);
    parser.accepts("value-size", "The size of the values in the store").withRequiredArg().describedAs("size").ofType(Integer.class);
    parser.accepts("working-dir", "The directory in which to store temporary data").withRequiredArg().describedAs("dir");
    parser.accepts("gzip", "Compress the intermediate temp files used in building the store");
    parser.accepts("request-file", "file get request ids from").withRequiredArg();
    parser.accepts("version", "Version of read-only store [" + ReadOnlyStorageFormat.READONLY_V0 + ","
                              + ReadOnlyStorageFormat.READONLY_V1 + "," + ReadOnlyStorageFormat.READONLY_V2
                              + " (default)]").withRequiredArg().describedAs("version");
    parser.accepts("test-gz", "Path to gzip containing data. Works with --build only").withRequiredArg().describedAs("path");
    OptionSet options = parser.parse(args);
    if (options.has("help")) {
        parser.printHelpOn(System.out);
        System.exit(0);
    }
    CmdUtils.croakIfMissing(parser, options, "requests", "store-dir");
    final int numThreads = CmdUtils.valueOf(options, "threads", 10);
    final int numRequests = (Integer) options.valueOf("requests");
    final int internalSortSize = CmdUtils.valueOf(options, "internal-sort-size", 500000);
    int numValues = numRequests;
    final String inputFile = (String) options.valueOf("request-file");
    final String searcherClass = CmdUtils.valueOf(options, "search-strategy", BinarySearchStrategy.class.getName()).trim();
    final boolean gzipIntermediate = options.has("gzip");
    final SearchStrategy searcher = (SearchStrategy) ReflectUtils.callConstructor(ReflectUtils.loadClass(searcherClass));
    final File workingDir = new File(CmdUtils.valueOf(options, "working-dir", System.getProperty("java.io.tmpdir")));
    String storeDir = (String) options.valueOf("store-dir");
    ReadOnlyStorageFormat format = ReadOnlyStorageFormat.fromCode(CmdUtils.valueOf(options, "version", ReadOnlyStorageFormat.READONLY_V2.toString()));
    Cluster cluster = null;
    int nodeId = 0;
    SerializerDefinition sdef = new SerializerDefinition("json", "'string'");
    StoreDefinition storeDef = new StoreDefinitionBuilder().setName("test")
                                                           .setKeySerializer(sdef)
                                                           .setValueSerializer(sdef)
                                                           .setRequiredReads(1)
                                                           .setReplicationFactor(1)
                                                           .setRequiredWrites(1)
                                                           .setType("read-only")
                                                           .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                           .setRoutingPolicy(RoutingTier.CLIENT)
                                                           .build();
    if (options.has("build")) {
        CmdUtils.croakIfMissing(parser, options, "num-values", "value-size");
        numValues = (Integer) options.valueOf("num-values");
        int numChunks = 1;
        if (options.has("num-chunks"))
            numChunks = (Integer) options.valueOf("num-chunks");
        int valueSize = (Integer) options.valueOf("value-size");
        // generate test data
        File temp = null;
        if (options.has("test-gz")) {
            temp = new File((String) options.valueOf("test-gz"));
        } else {
            temp = File.createTempFile("json-data", ".txt.gz", workingDir);
            temp.deleteOnExit();
            System.out.println("Generating test data in " + temp);
            OutputStream outputStream = new GZIPOutputStream(new FileOutputStream(temp));
            Writer writer = new BufferedWriter(new OutputStreamWriter(outputStream), 10 * 1024 * 1024);
            String value = TestUtils.randomLetters(valueSize);
            for (int i = 0; i < numValues; i++) {
                writer.write("\"");
                writer.write(Integer.toString(i));
                writer.write("\" \"");
                writer.write(value);
                writer.write("\"");
                writer.write("\n");
            }
            writer.close();
            writer = null;
        }
        System.out.println("Building store.");
        InputStream inputStream = new GZIPInputStream(new FileInputStream(temp));
        Reader r = new BufferedReader(new InputStreamReader(inputStream), 1 * 1024 * 1024);
        File output = TestUtils.createTempDir(workingDir);
        File tempDir = TestUtils.createTempDir(workingDir);
        cluster = ServerTestUtils.getLocalCluster(1);
        nodeId = 0;
        JsonStoreBuilder builder = new JsonStoreBuilder(new JsonReader(r),
                                                        cluster,
                                                        storeDef,
                                                        new ConsistentRoutingStrategy(cluster, 1),
                                                        output,
                                                        tempDir,
                                                        internalSortSize,
                                                        2,
                                                        numChunks,
                                                        64 * 1024,
                                                        gzipIntermediate);
        builder.build(format);
        // copy to store dir
        File dir = new File(storeDir);
        Utils.rm(dir);
        dir.mkdirs();
        System.out.println("Moving store data from " + output + " to " + dir);
        boolean copyWorked = new File(output, "node-0").renameTo(new File(dir, "version-0"));
        if (!copyWorked)
            Utils.croak("Copy of data from " + output + " to " + dir + " failed.");
    } else {
        CmdUtils.croakIfMissing(parser, options, "cluster-xml", "node-id");
        String clusterXmlPath = (String) options.valueOf("cluster-xml");
        nodeId = (Integer) options.valueOf("node-id");
        File clusterXml = new File(clusterXmlPath);
        if (!clusterXml.exists()) {
            Utils.croak("Cluster.xml does not exist");
        }
        cluster = new ClusterMapper().readCluster(clusterXml);
    }
    final Store<ByteArray, byte[], byte[]> store = new ReadOnlyStorageEngine("test",
                                                                             searcher,
                                                                             new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster),
                                                                             nodeId,
                                                                             new File(storeDir),
                                                                             0);
    final AtomicInteger obsoletes = new AtomicInteger(0);
    final AtomicInteger nullResults = new AtomicInteger(0);
    final AtomicInteger totalResults = new AtomicInteger(0);
    final BlockingQueue<String> requestIds = new ArrayBlockingQueue<String>(20000);
    final Executor executor = Executors.newFixedThreadPool(1);
    // if they have given us a file make a request generator that reads from
    // it, otherwise just generate random values
    final int numVals = numValues;
    Runnable requestGenerator;
    if (inputFile == null) {
        requestGenerator = new Runnable() {

            public void run() {
                System.out.println("Generating random requests.");
                Random random = new Random();
                try {
                    while (true)
                        requestIds.put(Integer.toString(random.nextInt(numRequests) % numVals));
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
    } else {
        requestGenerator = new Runnable() {

            public void run() {
                try {
                    System.out.println("Using request file to generate requests.");
                    BufferedReader reader = new BufferedReader(new FileReader(inputFile), 1000000);
                    while (true) {
                        String line = reader.readLine();
                        if (line == null)
                            return;
                        requestIds.put(line.trim());
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        };
    }
    executor.execute(requestGenerator);
    final Serializer<Object> keySerializer = new JsonTypeSerializer(JsonTypeDefinition.fromJson("'string'"), true);
    final AtomicInteger current = new AtomicInteger();
    final int progressIncrement = numRequests / 5;
    PerformanceTest readWriteTest = new PerformanceTest() {

        @Override
        public void doOperation(int index) throws Exception {
            try {
                totalResults.incrementAndGet();
                int curr = current.getAndIncrement();
                List<Versioned<byte[]>> results = store.get(new ByteArray(keySerializer.toBytes(requestIds.take())), null);
                if (curr % progressIncrement == 0)
                    System.out.println(curr);
                if (results.size() == 0)
                    nullResults.incrementAndGet();
            } catch (ObsoleteVersionException e) {
                obsoletes.incrementAndGet();
            }
        }
    };
    System.out.println("Running test...");
    readWriteTest.run(numRequests, numThreads);
    System.out.println("Random Access Read Only store Results:");
    System.out.println("Null reads ratio:" + (nullResults.doubleValue()) / totalResults.doubleValue());
    readWriteTest.printStats();
    System.exit(0);
}
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class StoreRoutingPlanPerf, method getStoreDef:
// Construct zone-appropriate 3/2/2 store def.
public StoreDefinition getStoreDef(int numZones) {
    HashMap<Integer, Integer> zoneRep = new HashMap<Integer, Integer>();
    for (int zoneId = 0; zoneId < numZones; zoneId++) {
        zoneRep.put(zoneId, 3);
    }
    int repFactor = numZones * 3;
    StoreDefinition storeDef = new StoreDefinitionBuilder().setName("ZZ322")
                                                           .setType(BdbStorageConfiguration.TYPE_NAME)
                                                           .setRoutingPolicy(RoutingTier.CLIENT)
                                                           .setRoutingStrategyType(RoutingStrategyType.ZONE_STRATEGY)
                                                           .setKeySerializer(new SerializerDefinition("string"))
                                                           .setValueSerializer(new SerializerDefinition("string"))
                                                           .setReplicationFactor(repFactor)
                                                           .setZoneReplicationFactor(zoneRep)
                                                           .setRequiredReads(2)
                                                           .setRequiredWrites(2)
                                                           .setZoneCountReads(0)
                                                           .setZoneCountWrites(0)
                                                           .build();
    return storeDef;
}
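The "3/2/2" naming refers to three replicas per zone with two required reads and two required writes; the total replication factor is numZones * 3. A small hedged sketch of inspecting the definition returned above is shown below; it assumes StoreRoutingPlanPerf has a no-argument constructor, which is an assumption for illustration.

// Hedged sketch: print the per-zone and total replication settings of the 3/2/2
// store def for two zones. The instantiation of StoreRoutingPlanPerf is assumed.
import java.util.Map;

import voldemort.store.StoreDefinition;

public class StoreRoutingPlanPerfSketch {

    public static void main(String[] args) {
        StoreRoutingPlanPerf perf = new StoreRoutingPlanPerf(); // assumed no-arg constructor
        StoreDefinition def = perf.getStoreDef(2);
        Map<Integer, Integer> zoneRep = def.getZoneReplicationFactor();
        System.out.println("zone replication: " + zoneRep); // {0=3, 1=3}
        System.out.println("total replication: " + def.getReplicationFactor()); // 6
        System.out.println("required reads/writes: " + def.getRequiredReads()
                           + "/" + def.getRequiredWrites()); // 2/2
    }
}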
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class Benchmark, method initializeStore:
@SuppressWarnings("unchecked")
public void initializeStore(Props benchmarkProps) throws Exception {
    this.numThreads = benchmarkProps.getInt(THREADS, MAX_WORKERS);
    this.numConnectionsPerNode = benchmarkProps.getInt(NUM_CONNECTIONS_PER_NODE, MAX_CONNECTIONS_PER_NODE);
    this.numIterations = benchmarkProps.getInt(ITERATIONS, 1);
    this.statusIntervalSec = benchmarkProps.getInt(INTERVAL, 0);
    this.verbose = benchmarkProps.getBoolean(VERBOSE, false);
    this.verifyRead = benchmarkProps.getBoolean(VERIFY, false);
    this.ignoreNulls = benchmarkProps.getBoolean(IGNORE_NULLS, false);
    int clientZoneId = benchmarkProps.getInt(CLIENT_ZONE_ID, -1);
    if (benchmarkProps.containsKey(URL)) {
        // Remote benchmark
        if (!benchmarkProps.containsKey(STORE_NAME)) {
            throw new VoldemortException("Missing storename");
        }
        String socketUrl = benchmarkProps.getString(URL);
        String storeName = benchmarkProps.getString(STORE_NAME);
        ClientConfig clientConfig = new ClientConfig().setMaxThreads(numThreads)
                                                      .setMaxTotalConnections(numThreads)
                                                      .setMaxConnectionsPerNode(numConnectionsPerNode)
                                                      .setRoutingTimeout(1500, TimeUnit.MILLISECONDS)
                                                      .setSocketTimeout(1500, TimeUnit.MILLISECONDS)
                                                      .setConnectionTimeout(500, TimeUnit.MILLISECONDS)
                                                      .setRequestFormatType(RequestFormatType.VOLDEMORT_V3)
                                                      .setBootstrapUrls(socketUrl);
        if (clientZoneId >= 0) {
            clientConfig.setClientZoneId(clientZoneId);
        }
        SocketStoreClientFactory socketFactory = new SocketStoreClientFactory(clientConfig);
        this.storeClient = socketFactory.getStoreClient(storeName);
        StoreDefinition storeDef = getStoreDefinition(socketFactory, storeName);
        this.keyType = findKeyType(storeDef);
        benchmarkProps.put(Benchmark.KEY_TYPE, this.keyType);
        this.factory = socketFactory;
    } else {
        // Local benchmark
        localMode = true;
        String storageEngineClass = benchmarkProps.getString(STORAGE_CONFIGURATION_CLASS);
        this.keyType = benchmarkProps.getString(KEY_TYPE, STRING_KEY_TYPE);
        Serializer serializer = findKeyType(this.keyType);
        Store<Object, Object, Object> store = null;
        VoldemortConfig voldemortConfig;
        if (benchmarkProps.containsKey(LOCAL_SERVER_PROPERTIES)) {
            File homeDir = TestUtils.createTempDir();
            File configDir = new File(homeDir, "config");
            configDir.mkdir();
            FileUtils.copyFile(new File(benchmarkProps.get(LOCAL_SERVER_PROPERTIES)),
                               new File(configDir, "server.properties"));
            voldemortConfig = VoldemortConfig.loadFromVoldemortHome(homeDir.getAbsolutePath());
        } else {
            voldemortConfig = ServerTestUtils.getVoldemortConfig();
        }
        StorageConfiguration conf = (StorageConfiguration) ReflectUtils.callConstructor(ReflectUtils.loadClass(storageEngineClass),
                                                                                        new Object[] { voldemortConfig });
        StorageEngine<ByteArray, byte[], byte[]> engine = conf.getStore(TestUtils.makeStoreDefinition(DUMMY_DB),
                                                                        TestUtils.makeSingleNodeRoutingStrategy());
        if (conf.getType().compareTo(ViewStorageConfiguration.TYPE_NAME) == 0) {
            engine = new ViewStorageEngine(STORE_NAME,
                                           engine,
                                           new StringSerializer(),
                                           new StringSerializer(),
                                           serializer,
                                           new StringSerializer(),
                                           null,
                                           BenchmarkViews.loadTransformation(benchmarkProps.getString(VIEW_CLASS).trim()));
        }
        store = SerializingStore.wrap(engine, serializer, new StringSerializer(), new IdentitySerializer());
        this.factory = new StaticStoreClientFactory(store);
        this.storeClient = factory.getStoreClient(store.getName());
    }
    this.storeInitialized = true;
}
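The remote branch above is essentially the standard way a Voldemort client connects: configure a ClientConfig, create a SocketStoreClientFactory, and ask it for a StoreClient by store name. A minimal hedged sketch of that path outside the benchmark harness follows; the bootstrap URL and store name are assumptions, and a reachable cluster defining that store is presumed.

// Hedged sketch of the remote-client path, stripped to the settings needed to
// obtain and use a StoreClient. The URL and store name below are assumptions.
import voldemort.client.ClientConfig;
import voldemort.client.SocketStoreClientFactory;
import voldemort.client.StoreClient;

public class RemoteClientSketch {

    public static void main(String[] args) {
        ClientConfig config = new ClientConfig().setBootstrapUrls("tcp://localhost:6666") // assumed URL
                                                .setMaxConnectionsPerNode(5);
        SocketStoreClientFactory factory = new SocketStoreClientFactory(config);
        StoreClient<String, String> client = factory.getStoreClient("test"); // assumed store name
        client.put("hello", "world");
        System.out.println(client.getValue("hello"));
        factory.close();
    }
}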
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class Benchmark, method getStoreDefinition:
private StoreDefinition getStoreDefinition(AbstractStoreClientFactory factory, String storeName) {
    String storesXml = factory.bootstrapMetadataWithRetries(MetadataStore.STORES_KEY);
    StoreDefinitionsMapper storeMapper = new StoreDefinitionsMapper();
    List<StoreDefinition> storeDefinitionList = storeMapper.readStoreList(new StringReader(storesXml));
    StoreDefinition storeDef = null;
    for (StoreDefinition storeDefinition : storeDefinitionList) {
        if (storeName.equals(storeDefinition.getName())) {
            storeDef = storeDefinition;
        }
    }
    return storeDef;
}
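Note that this method returns null when no store in the bootstrapped stores.xml matches the requested name. A caller that requires the store to exist might wrap it with a guard such as the hypothetical helper below (not part of the Benchmark source), which fails fast instead of passing a null StoreDefinition on to findKeyType.

// Hypothetical companion helper (not in the source): fail fast when the
// requested store is absent from the bootstrapped metadata.
private StoreDefinition requireStoreDefinition(AbstractStoreClientFactory factory, String storeName) {
    StoreDefinition storeDef = getStoreDefinition(factory, storeName);
    if (storeDef == null) {
        throw new VoldemortException("Store '" + storeName + "' not found in bootstrapped stores.xml");
    }
    return storeDef;
}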