Use of voldemort.store.compress.CompressingStore in project voldemort by voldemort.
The class RESTClientFactory, method getRawStore:
@Override
@SuppressWarnings("unchecked")
public <K, V, T> Store<K, V, T> getRawStore(String storeName,
                                            InconsistencyResolver<Versioned<V>> resolver) {
    Store<K, V, T> clientStore = null;

    // The lowest layer: transports the request to the coordinator
    R2Store r2store = null;
    this.d2Client = restClientFactoryConfig.getD2Client();
    if (this.d2Client == null) {
        logger.info("Using transportclient since d2client is not available");
        Map<String, String> properties = new HashMap<String, String>();
        properties.put(HttpClientFactory.HTTP_POOL_SIZE,
                       Integer.toString(this.config.getMaxR2ConnectionPoolSize()));
        transportClient = _clientFactory.getClient(properties);
        r2store = new R2Store(storeName,
                              this.config.getHttpBootstrapURL(),
                              this.transportClient,
                              this.config);
    } else {
        logger.info("Using d2client");
        r2store = new R2Store(storeName,
                              this.config.getHttpBootstrapURL(),
                              this.d2Client,
                              this.config);
    }
    this.rawStoreList.add(r2store);

    // Bootstrap from the coordinator and obtain all the serialization information.
    String serializerInfoXml = r2store.getSerializerInfoXml();
    SerializerDefinition keySerializerDefinition = RestUtils.parseKeySerializerDefinition(serializerInfoXml);
    SerializerDefinition valueSerializerDefinition = RestUtils.parseValueSerializerDefinition(serializerInfoXml);
    synchronized(this) {
        keySerializerMap.put(storeName, keySerializerDefinition);
        valueSerializerMap.put(storeName, valueSerializerDefinition);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Bootstrapping for " + storeName + ": Key serializer " + keySerializerDefinition);
        logger.debug("Bootstrapping for " + storeName + ": Value serializer " + valueSerializerDefinition);
    }

    // Start building the stack: first, the transport layer
    Store<ByteArray, byte[], byte[]> store = r2store;

    // TODO: Add identifierString to the Mbean name
    if (this.config.isEnableJmx()) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.stats);
        store = statStore;
        JmxUtils.registerMbean(new StoreStatsJmx(statStore.getStats()),
                               JmxUtils.createObjectName(JmxUtils.getPackageName(store.getClass()),
                                                         store.getName()));
    }

    // Add the compression layer
    if (keySerializerDefinition.hasCompression() || valueSerializerDefinition.hasCompression()) {
        store = new CompressingStore(store,
                                     new CompressionStrategyFactory().get(keySerializerDefinition.getCompression()),
                                     new CompressionStrategyFactory().get(valueSerializerDefinition.getCompression()));
    }

    // Add the serialization layer
    Serializer<K> keySerializer = (Serializer<K>) serializerFactory.getSerializer(keySerializerDefinition);
    Serializer<V> valueSerializer = (Serializer<V>) serializerFactory.getSerializer(valueSerializerDefinition);
    clientStore = SerializingStore.wrap(store, keySerializer, valueSerializer, null);

    // Add the inconsistency-resolving layer
    InconsistencyResolver<Versioned<V>> secondaryResolver = resolver == null ? new TimeBasedInconsistencyResolver<V>()
                                                                             : resolver;
    clientStore = new InconsistencyResolvingStore<K, V, T>(clientStore,
                                                           new ChainedResolver<Versioned<V>>(new VectorClockInconsistencyResolver<V>(),
                                                                                             secondaryResolver));
    return clientStore;
}
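The compression layer above is added only when the bootstrapped serializer definitions declare compression. A minimal sketch of that composition in isolation, using only the CompressingStore and CompressionStrategyFactory calls seen above (the helper name wrapWithCompression is hypothetical, not part of the Voldemort source):

// Hypothetical helper mirroring the conditional compression wrapping in
// getRawStore: wrap only if either key or value declares compression.
private static Store<ByteArray, byte[], byte[]> wrapWithCompression(Store<ByteArray, byte[], byte[]> inner,
                                                                    SerializerDefinition keyDef,
                                                                    SerializerDefinition valueDef) {
    if (keyDef.hasCompression() || valueDef.hasCompression()) {
        CompressionStrategyFactory factory = new CompressionStrategyFactory();
        return new CompressingStore(inner,
                                    factory.get(keyDef.getCompression()),
                                    factory.get(valueDef.getCompression()));
    }
    // Neither key nor value declares compression: leave the store undecorated
    return inner;
}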
Use of voldemort.store.compress.CompressingStore in project voldemort by voldemort.
The class ReadOnlyStorageEngineTestInstance, method create:
public static ReadOnlyStorageEngineTestInstance create(SearchStrategy strategy,
                                                       File baseDir,
                                                       int testSize,
                                                       int numNodes,
                                                       int repFactor,
                                                       SerializerDefinition keySerDef,
                                                       SerializerDefinition valueSerDef,
                                                       ReadOnlyStorageFormat type,
                                                       int[][] partitionMap) throws Exception {
    // Create some test data
    Map<String, String> data = createTestData(testSize);
    JsonReader reader = makeTestDataReader(data, baseDir);

    // Set up definitions for the cluster and store
    List<Node> nodes = new ArrayList<Node>();
    for (int i = 0; i < numNodes; i++) {
        List<Integer> partitions = new ArrayList<Integer>(partitionMap[i].length);
        for (int p : partitionMap[i]) {
            partitions.add(p);
        }
        nodes.add(new Node(i, "localhost", 8080 + i, 6666 + i, 7000 + i, partitions));
    }
    Cluster cluster = new Cluster("test", nodes);
    StoreDefinition storeDef = new StoreDefinitionBuilder().setName("test")
                                                           .setType(ReadOnlyStorageConfiguration.TYPE_NAME)
                                                           .setKeySerializer(keySerDef)
                                                           .setValueSerializer(valueSerDef)
                                                           .setRoutingPolicy(RoutingTier.CLIENT)
                                                           .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                           .setReplicationFactor(repFactor)
                                                           .setPreferredReads(1)
                                                           .setRequiredReads(1)
                                                           .setPreferredWrites(1)
                                                           .setRequiredWrites(1)
                                                           .build();
    RoutingStrategy router = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);

    // Build the store files in outputDir
    File outputDir = TestUtils.createTempDir(baseDir);
    JsonStoreBuilder storeBuilder = new JsonStoreBuilder(reader, cluster, storeDef, router, outputDir, null, testSize / 5, 1, 2, 10000, false);
    storeBuilder.build(type);

    File nodeDir = TestUtils.createTempDir(baseDir);
    @SuppressWarnings("unchecked")
    Serializer<String> keySerializer = (Serializer<String>) new DefaultSerializerFactory().getSerializer(keySerDef);
    @SuppressWarnings("unchecked")
    Serializer<String> valueSerializer = (Serializer<String>) new DefaultSerializerFactory().getSerializer(valueSerDef);
    Serializer<String> transSerializer = new StringSerializer();
    Map<Integer, Store<String, String, String>> nodeStores = Maps.newHashMap();
    Map<Integer, ReadOnlyStorageEngine> readOnlyStores = Maps.newHashMap();
    for (int i = 0; i < numNodes; i++) {
        File currNode = new File(nodeDir, Integer.toString(i));
        currNode.mkdirs();
        currNode.deleteOnExit();
        Utils.move(new File(outputDir, "node-" + Integer.toString(i)), new File(currNode, "version-0"));

        // Wrap each node's read-only engine with compression and
        // serialization layers
        CompressionStrategyFactory compressionStrategyFactory = new CompressionStrategyFactory();
        CompressionStrategy keyCompressionStrat = compressionStrategyFactory.get(keySerDef.getCompression());
        CompressionStrategy valueCompressionStrat = compressionStrategyFactory.get(valueSerDef.getCompression());
        ReadOnlyStorageEngine readOnlyStorageEngine = new ReadOnlyStorageEngine("test", strategy, router, i, currNode, 1);
        readOnlyStores.put(i, readOnlyStorageEngine);
        Store<ByteArray, byte[], byte[]> innerStore = new CompressingStore(readOnlyStorageEngine,
                                                                           keyCompressionStrat,
                                                                           valueCompressionStrat);
        nodeStores.put(i, SerializingStore.wrap(innerStore, keySerializer, valueSerializer, transSerializer));
    }
    return new ReadOnlyStorageEngineTestInstance(data, baseDir, readOnlyStores, nodeStores, router, keySerializer);
}
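Unlike the RESTClientFactory example, this test wires the compression strategies unconditionally, so CompressionStrategyFactory.get is presumably expected to fall back to a pass-through strategy when the serializer definition declares no compression. A sketch of what CompressingStore does with a strategy on each call, assuming (an assumption, not confirmed by the snippets above) that CompressionStrategy exposes deflate/inflate methods that may throw IOException:

// Sketch only: deflate/inflate method names are assumed. CompressingStore
// applies deflate to bytes on the way into the inner store and inflate on
// the way back out.
CompressionStrategy valueStrategy = new CompressionStrategyFactory().get(valueSerDef.getCompression());
byte[] raw = "some value".getBytes("UTF-8");
byte[] compressed = valueStrategy.deflate(raw);           // applied before the inner store sees the bytes
byte[] roundTripped = valueStrategy.inflate(compressed);  // applied to bytes read back out
assert Arrays.equals(raw, roundTripped);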
Use of voldemort.store.compress.CompressingStore in project voldemort by voldemort.
The class AbstractStoreClientFactory, method getRawStore:
@SuppressWarnings("unchecked")
public <K, V, T> Store<K, V, T> getRawStore(String storeName,
                                            InconsistencyResolver<Versioned<V>> resolver,
                                            String customStoresXml,
                                            String clusterXmlString,
                                            FailureDetector fd) {
    logger.info("Client zone-id [" + this.routedStoreConfig.getClientZoneId()
                + "] Attempting to get raw store [" + storeName + "] ");
    if (logger.isDebugEnabled()) {
        for (URI uri : bootstrapUrls) {
            logger.debug("Client Bootstrap url [" + uri + "]");
        }
    }

    // Get cluster and store metadata
    String clusterXml = clusterXmlString;
    if (clusterXml == null) {
        logger.debug("Fetching cluster.xml ...");
        clusterXml = bootstrapMetadataWithRetries(MetadataStore.CLUSTER_KEY, bootstrapUrls);
    }
    this.cluster = clusterMapper.readCluster(new StringReader(clusterXml), false);
    String storesXml = customStoresXml;
    if (storesXml == null) {
        String storesKey = storeName;
        if (config.isFetchAllStoresXmlInBootstrap()) {
            storesKey = MetadataStore.STORES_KEY;
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Fetching store definition for Store " + storeName + " key " + storesKey);
        }
        storesXml = bootstrapMetadataWithRetries(storesKey, bootstrapUrls);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Obtained cluster metadata xml: " + clusterXml);
        logger.debug("Obtained stores metadata xml: " + storesXml);
    }
    storeDefs = storeMapper.readStoreList(new StringReader(storesXml), false);
    StoreDefinition storeDef = null;
    for (StoreDefinition d : storeDefs) {
        if (d.getName().equals(storeName)) {
            storeDef = d;
        }
    }
    if (storeDef == null) {
        logger.error("Bootstrap - unknown store: " + storeName);
        throw new BootstrapFailureException("Unknown store '" + storeName + "'.");
    }
    if (logger.isDebugEnabled()) {
        logger.debug(this.cluster.toString(true));
        logger.debug(storeDef.toString());
    }
    boolean repairReads = !storeDef.isView();

    // Construct the per-node store mappings
    Map<Integer, Store<ByteArray, byte[], byte[]>> clientMapping = Maps.newHashMap();
    Map<Integer, NonblockingStore> nonblockingStores = Maps.newHashMap();
    Map<Integer, NonblockingStore> nonblockingSlopStores = Maps.newHashMap();
    Map<Integer, Store<ByteArray, Slop, byte[]>> slopStores = null;
    if (storeDef.hasHintedHandoffStrategyType()) {
        slopStores = Maps.newHashMap();
    }
    for (Node node : this.cluster.getNodes()) {
        Store<ByteArray, byte[], byte[]> store = getStore(storeDef.getName(),
                                                          node.getHost(),
                                                          getPort(node),
                                                          this.requestFormatType);
        clientMapping.put(node.getId(), store);
        NonblockingStore nonblockingStore = routedStoreFactory.toNonblockingStore(store);
        nonblockingStores.put(node.getId(), nonblockingStore);
        if (slopStores != null) {
            Store<ByteArray, byte[], byte[]> rawSlopStore = getStore("slop",
                                                                     node.getHost(),
                                                                     getPort(node),
                                                                     this.requestFormatType);
            Store<ByteArray, Slop, byte[]> slopStore = SerializingStore.wrap(rawSlopStore,
                                                                             slopKeySerializer,
                                                                             slopValueSerializer,
                                                                             new IdentitySerializer());
            slopStores.put(node.getId(), slopStore);
            nonblockingSlopStores.put(node.getId(), routedStoreFactory.toNonblockingStore(rawSlopStore));
        }
    }

    /*
     * Check whether we need to retrieve a reference to the failure detector.
     * For system stores, the FD reference is passed in.
     */
    FailureDetector failureDetectorRef = fd;
    if (failureDetectorRef == null) {
        failureDetectorRef = getFailureDetector();
    } else {
        logger.debug("Using existing failure detector.");
    }
    this.routedStoreConfig.setRepairReads(repairReads);

    Store<ByteArray, byte[], byte[]> store = routedStoreFactory.create(this.cluster,
                                                                       storeDef,
                                                                       clientMapping,
                                                                       nonblockingStores,
                                                                       slopStores,
                                                                       nonblockingSlopStores,
                                                                       failureDetectorRef,
                                                                       this.routedStoreConfig);
    store = new LoggingStore(store);
    if (isJmxEnabled) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.aggregateStats, this.cachedStoreStats);
        statStore.getStats().registerJmx(identifierString);
        store = statStore;
    }
    if (this.config.isEnableCompressionLayer()) {
        if (storeDef.getKeySerializer().hasCompression() || storeDef.getValueSerializer().hasCompression()) {
            store = new CompressingStore(store,
                                         getCompressionStrategy(storeDef.getKeySerializer()),
                                         getCompressionStrategy(storeDef.getValueSerializer()));
        }
    }

    /*
     * Initialize the finalStore object only once the store object itself has
     * been wrapped by a StatTrackingStore. (It seems like the finalStore
     * object is redundant?)
     */
    Store<K, V, T> finalStore = (Store<K, V, T>) store;
    if (this.config.isEnableSerializationLayer()) {
        Serializer<K> keySerializer = (Serializer<K>) serializerFactory.getSerializer(storeDef.getKeySerializer());
        Serializer<V> valueSerializer = (Serializer<V>) serializerFactory.getSerializer(storeDef.getValueSerializer());
        if (storeDef.isView() && (storeDef.getTransformsSerializer() == null)) {
            throw new SerializationException("Transforms serializer must be specified with a view ");
        }
        Serializer<T> transformsSerializer = (Serializer<T>) serializerFactory.getSerializer(storeDef.getTransformsSerializer() != null ? storeDef.getTransformsSerializer()
                                                                                                                                        : new SerializerDefinition("identity"));
        finalStore = SerializingStore.wrap(store, keySerializer, valueSerializer, transformsSerializer);
    }

    // Add the inconsistency-resolving decorator, using the caller's resolver
    // (if one was given)
    if (this.config.isEnableInconsistencyResolvingLayer()) {
        InconsistencyResolver<Versioned<V>> secondaryResolver = resolver == null ? new TimeBasedInconsistencyResolver<V>()
                                                                                 : resolver;
        finalStore = new InconsistencyResolvingStore<K, V, T>(finalStore,
                                                              new ChainedResolver<Versioned<V>>(new VectorClockInconsistencyResolver<V>(),
                                                                                                secondaryResolver));
    }
    return finalStore;
}
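For reference, the decorator stack assembled above runs, from innermost to outermost: the per-node socket stores behind the routed store, then LoggingStore, StatTrackingStore (when JMX is enabled), CompressingStore (when either serializer declares compression), SerializingStore, and finally InconsistencyResolvingStore. A hypothetical caller might reach this method through a concrete factory; the bootstrap URL and store name below are placeholders:

// Placeholder bootstrap URL and store name; SocketStoreClientFactory is one
// concrete subclass of AbstractStoreClientFactory.
StoreClientFactory factory = new SocketStoreClientFactory(
        new ClientConfig().setBootstrapUrls("tcp://localhost:6666"));
// Passing a null resolver selects the default TimeBasedInconsistencyResolver,
// as in the method above.
Store<String, String, byte[]> rawStore = factory.getRawStore("test", null);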