use of voldemort.store.StoreDefinition in project voldemort by voldemort.
the class AvroStoreBuilderMapper method configure.
@Override
public void configure(JobConf conf) {
    super.setConf(conf);
    this.mapper.configure(conf);
    // Exactly one store definition is expected in the serialized stores.xml.
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new StringReader(conf.get("stores.xml")));
    if (storeDefs.size() != 1)
        throw new IllegalStateException("Expected to find only a single store, but found multiple!");
    this.storeDef = storeDefs.get(0);

    keySerializerDefinition = getStoreDef().getKeySerializer();
    valueSerializerDefinition = getStoreDef().getValueSerializer();

    try {
        keyField = conf.get(VoldemortBuildAndPushJob.AVRO_KEY_FIELD);
        valField = conf.get(VoldemortBuildAndPushJob.AVRO_VALUE_FIELD);
        keySchema = conf.get(AVRO_KEY_SCHEMA);
        valSchema = conf.get(AVRO_VALUE_SCHEMA);

        if (keySerializerDefinition.getName().equals(DefaultSerializerFactory.AVRO_GENERIC_TYPE_NAME)) {
            // Plain (unversioned) generic Avro serialization.
            keySerializer = new AvroGenericSerializer(keySchema);
            valueSerializer = new AvroGenericSerializer(valSchema);
        } else {
            // Versioned generic Avro serialization: pass all known schema versions
            // if present, otherwise fall back to the current schema only.
            if (keySerializerDefinition.hasVersion()) {
                Map<Integer, String> versions = new HashMap<Integer, String>();
                for (Map.Entry<Integer, String> entry : keySerializerDefinition.getAllSchemaInfoVersions().entrySet())
                    versions.put(entry.getKey(), entry.getValue());
                keySerializer = new AvroVersionedGenericSerializer(versions);
            } else {
                keySerializer = new AvroVersionedGenericSerializer(keySerializerDefinition.getCurrentSchemaInfo());
            }
            if (valueSerializerDefinition.hasVersion()) {
                Map<Integer, String> versions = new HashMap<Integer, String>();
                for (Map.Entry<Integer, String> entry : valueSerializerDefinition.getAllSchemaInfoVersions().entrySet())
                    versions.put(entry.getKey(), entry.getValue());
                valueSerializer = new AvroVersionedGenericSerializer(versions);
            } else {
                valueSerializer = new AvroVersionedGenericSerializer(valueSerializerDefinition.getCurrentSchemaInfo());
            }
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
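Before this mapper runs, the build driver has to place the properties read above into the JobConf. A minimal, hedged sketch of that driver-side setup, assuming the stores.xml content is already available as a String; the Avro field names "member_id" and "profile" are made-up placeholders, and the schema properties are omitted:

JobConf configureForAvroBuild(String storesXml) {
    JobConf conf = new JobConf();
    conf.set("stores.xml", storesXml); // must contain exactly one <store> definition
    conf.set(VoldemortBuildAndPushJob.AVRO_KEY_FIELD, "member_id");  // hypothetical key field of the input Avro records
    conf.set(VoldemortBuildAndPushJob.AVRO_VALUE_FIELD, "profile");  // hypothetical value field of the input Avro records
    // The key/value schema strings (read via AVRO_KEY_SCHEMA / AVRO_VALUE_SCHEMA above)
    // are expected under the same constants the mapper uses; omitted here for brevity.
    return conf;
}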
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
the class AbstractStoreBuilderConfigurable method configure.
public void configure(JobConf conf) {
    this.cluster = new ClusterMapper().readCluster(new StringReader(conf.get("cluster.xml")));
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new StringReader(conf.get("stores.xml")));
    if (storeDefs.size() != 1)
        throw new IllegalStateException("Expected to find only a single store, but found multiple!");
    this.storeDef = storeDefs.get(0);

    this.numChunks = conf.getInt(NUM_CHUNKS, -1);
    if (this.numChunks < 1) {
        // NUM_CHUNKS is always set by the build driver itself and cannot
        // be overridden by the user.
        throw new VoldemortException(NUM_CHUNKS + " not specified in the MapReduce JobConf (should NEVER happen)");
    }

    this.saveKeys = conf.getBoolean(VoldemortBuildAndPushJob.SAVE_KEYS, true);
    this.reducerPerBucket = conf.getBoolean(VoldemortBuildAndPushJob.REDUCER_PER_BUCKET, true);
    this.buildPrimaryReplicasOnly = conf.getBoolean(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY, false);
    if (buildPrimaryReplicasOnly && !saveKeys) {
        throw new IllegalStateException(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY + " can only be true if "
                                        + VoldemortBuildAndPushJob.SAVE_KEYS + " is also true.");
    }
}
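Similarly, a hedged sketch of the JobConf state this configure() expects (NUM_CHUNKS refers to the same constant used above; the integer value is an arbitrary example):

void configureStoreBuilder(JobConf conf, String clusterXml, String storesXml) {
    conf.set("cluster.xml", clusterXml); // serialized cluster metadata
    conf.set("stores.xml", storesXml);   // exactly one store definition
    conf.setInt(NUM_CHUNKS, 2);          // must be >= 1; set by the build driver, not by the user
    conf.setBoolean(VoldemortBuildAndPushJob.SAVE_KEYS, true);
    conf.setBoolean(VoldemortBuildAndPushJob.REDUCER_PER_BUCKET, true);
    conf.setBoolean(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY, true); // only legal when SAVE_KEYS is true
}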
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
the class RedirectingStore method redirectingPut.
/**
 * This is slightly different from the other redirecting* methods in that it
 * also updates the remote proxy node with this put request, so we can
 * switch back to the old cluster topology if needed.
 *
 * @param key
 * @param value
 * @param transforms
 * @throws VoldemortException
 */
private void redirectingPut(ByteArray key, Versioned<byte[]> value, byte[] transforms) throws VoldemortException {
    Cluster currentCluster = metadata.getCluster();
    // TODO:refactor O(n) linear lookup of storedef here. Ideally should be
    // a hash lookup.
    StoreDefinition storeDef = metadata.getStoreDef(getName());
    /*
     * Defensively, error out if this is a read-only store and someone is
     * doing puts against it. We don't want to do extra work and fill the
     * log with errors in that case.
     */
    if (storeDef.getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) == 0) {
        throw new UnsupportedOperationException("put() not supported on read-only store");
    }
    BaseStoreRoutingPlan currentRoutingPlan = new BaseStoreRoutingPlan(currentCluster, storeDef);
    Integer redirectNode = getProxyNode(currentRoutingPlan, storeDef, key.get());
    /*
     * If this node is rebalancing for this key and does not have the key
     * yet, do a remote get() and put it locally first to obtain the correct
     * version, ignoring any {@link ObsoleteVersionException}.
     */
    if (redirectNode != null) {
        /*
         * First check if the key exists locally. If so, it has already been
         * moved over (either by a proxy fetch or a background fetch) and we
         * can simply issue the put on top of that.
         */
        List<Versioned<byte[]>> vals = getInnerStore().get(key, transforms);
        if (vals.isEmpty()) {
            // If not, then go proxy fetch it.
            if (logger.isTraceEnabled()) {
                logger.trace("Proxying GET (before PUT) on stealer:" + metadata.getNodeId() + " for key "
                             + ByteUtils.toHexString(key.get()) + " to node:" + redirectNode);
            }
            proxyGetAndLocalPut(key, redirectNode, transforms);
        }
    }
    // Here we are sure that the current node has caught up with the proxy
    // for this key. Moving on to the put logic.
    // Put the data locally; if this step fails, there will be no proxy puts.
    getInnerStore().put(key, value, transforms);
    // If the redirect node is also a replica for this key in the current
    // topology, normal replication already covers it and a proxy put could
    // simply fail with an ObsoleteVersionException (OVE). So do not send
    // proxy puts in those cases.
    if (redirectNode != null && !currentRoutingPlan.getReplicationNodeList(key.get()).contains(redirectNode)) {
        AsyncProxyPutTask asyncProxyPutTask = new AsyncProxyPutTask(this, key, value, transforms, redirectNode);
        proxyPutStats.reportProxyPutSubmission();
        proxyPutWorkerPool.submit(asyncProxyPutTask);
    }
}
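The "put it locally first, ignoring any ObsoleteVersionException" step mentioned in the comments above comes down to a pattern like this hedged sketch (not the literal body of proxyGetAndLocalPut, whose implementation is not shown here; proxiedValue is a stand-in for a value fetched from the donor node):

try {
    // Write the proxied value locally so this node catches up to the donor's version.
    getInnerStore().put(key, proxiedValue, transforms);
} catch (ObsoleteVersionException ove) {
    // The local copy is already at least as recent; safe to ignore during catch-up.
}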
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
the class RedirectingStore method getProxyNode.
/**
 * Checks if the server has to do any proxying of gets/puts to another
 * server, as part of an ongoing rebalance operation.
 *
 * Basic idea: any given node which is a stealer of a partition, as the ith
 * replica of a given zone, will proxy to the old ith replica of the
 * partition in the given zone, as per the source cluster metadata.
 * Exception: if this amounts to proxying to itself.
 *
 * Note on zone expansion: for zone expansion, there will be no proxying
 * within the new zone. This is a practical assumption since if we fail, we
 * fall back to a cluster topology without the new zone. As a result, reads
 * from the new zone are not guaranteed to return some values during the
 * course of zone expansion. This is also reasonable since any organization
 * undertaking such an effort would need to have the data in place in the
 * new zone before the client apps are moved over.
 *
 * TODO:refactor Add helper methods to StoreRoutingPlan to simplify this
 * code
 *
 * @param currentRoutingPlan routing plan object based on the cluster's
 *        current topology
 * @param storeDef definition of the store being redirected
 * @param key key to decide where to proxy to
 * @return null if no proxying is required, else the node id of the server
 *         to proxy to
 */
private Integer getProxyNode(BaseStoreRoutingPlan currentRoutingPlan, StoreDefinition storeDef, byte[] key) {
    // Get out if redirecting is disabled.
    if (!isRedirectingStoreEnabled.get()) {
        return null;
    }
    // TODO a better design would be to get these state changes from
    // metadata listener callbacks, so we need not allocate these objects
    // all the time. This has been implemented, but not integration tested,
    // on the following branch:
    // https://github.com/voldemort/voldemort/compare/proxy-put-metadata-listener
    Cluster sourceCluster = metadata.getRebalancingSourceCluster();
    // Logic to get the old storedef
    List<StoreDefinition> sourceStoreDefs = metadata.getRebalancingSourceStores();
    if (sourceCluster == null) {
        if (logger.isTraceEnabled()) {
            logger.trace("Old Cluster is null.. bail");
        }
        return null;
    }
    if (sourceStoreDefs == null) {
        if (logger.isTraceEnabled()) {
            logger.trace("Old stores def is null.. bail");
        }
        return null;
    }
    StoreDefinition sourceStoreDef = StoreUtils.getStoreDef(sourceStoreDefs, storeDef.getName());
    Integer nodeId = metadata.getNodeId();
    Integer zoneId = currentRoutingPlan.getCluster().getNodeById(nodeId).getZoneId();
    // Use the old store definition to get the routing object.
    BaseStoreRoutingPlan oldRoutingPlan = new BaseStoreRoutingPlan(sourceCluster, sourceStoreDef);
    // Check the current node's relationship to the key.
    int zoneNAry = currentRoutingPlan.getZoneNAry(zoneId, nodeId, key);
    // Determine which node held the key with the same relationship in the
    // old cluster. That is the proxy target.
    Integer redirectNodeId;
    try {
        redirectNodeId = oldRoutingPlan.getNodeIdForZoneNary(zoneId, zoneNAry, key);
    } catch (VoldemortException ve) {
        /*
         * If the zone does not exist, as in the case of zone expansion,
         * there will be no proxy bridges built. The only other time an
         * exception can be thrown here is when the replicaType is invalid.
         * But that would mean changing, say, a 2/1/1 store to 3/2/2, which
         * Voldemort currently does not support anyway.
         */
        return null;
    }
    // Unless it is the same as this node (in which case proxying would be
    // meaningless effort). Compare values, not Integer references.
    if (redirectNodeId.intValue() == nodeId.intValue()) {
        return null;
    }
    return redirectNodeId;
}
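To make the zone n-ary mapping concrete, a hedged illustration with made-up node and zone ids, using the same two routing-plan calls as above:

// Suppose this node (id 4) sits in zone 1 and, under the rebalanced topology, is the
// secondary (zone n-ary 1) for the key:
int zoneNAry = currentRoutingPlan.getZoneNAry(1, 4, key);                 // -> 1
// It then proxies to whichever node held zone n-ary 1 for that key in zone 1 under the
// source (pre-rebalance) topology, say node 2:
Integer redirectNodeId = oldRoutingPlan.getNodeIdForZoneNary(1, 1, key);  // -> 2
// Unless that node is this node itself, in which case no proxying is needed.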
use of voldemort.store.StoreDefinition in project voldemort by voldemort.
the class RoutedStoreTest method testGetAndPutTimeoutZZZ.
@Test
public void testGetAndPutTimeoutZZZ() throws Exception {
    int timeout = 50;
    // Set replication factors for a 3 zone cluster.
    HashMap<Integer, Integer> zoneReplicationFactor = Maps.newHashMap();
    zoneReplicationFactor.put(0, 1);
    zoneReplicationFactor.put(1, 1);
    zoneReplicationFactor.put(2, 1);
    // Create a store with RF=3, required reads = 3 and zone count reads = 2.
    // This ensures that a GET operation requires a response from all 3
    // nodes (from the respective 3 zones).
    StoreDefinition definition = new StoreDefinitionBuilder().setName("test")
                                                             .setType("foo")
                                                             .setKeySerializer(new SerializerDefinition("test"))
                                                             .setValueSerializer(new SerializerDefinition("test"))
                                                             .setRoutingPolicy(RoutingTier.CLIENT)
                                                             .setRoutingStrategyType(RoutingStrategyType.ZONE_STRATEGY)
                                                             .setHintedHandoffStrategy(HintedHandoffStrategyType.PROXIMITY_STRATEGY)
                                                             .setReplicationFactor(3)
                                                             .setPreferredReads(3)
                                                             .setRequiredReads(3)
                                                             .setPreferredWrites(3)
                                                             .setRequiredWrites(3)
                                                             .setZoneCountReads(2)
                                                             .setZoneCountWrites(2)
                                                             .setZoneReplicationFactor(zoneReplicationFactor)
                                                             .build();
    Map<Integer, Store<ByteArray, byte[], byte[]>> stores = new HashMap<Integer, Store<ByteArray, byte[], byte[]>>();
    List<Node> nodes = new ArrayList<Node>();
    int totalDelay = 0;
    for (int i = 0; i < 3; i++) {
        int delay = 4 + i * timeout;
        totalDelay += delay;
        Store<ByteArray, byte[], byte[]> store = new SleepyStore<ByteArray, byte[], byte[]>(delay,
                new InMemoryStorageEngine<ByteArray, byte[], byte[]>("test"));
        stores.put(i, store);
        List<Integer> partitions = Arrays.asList(i);
        nodes.add(new Node(i, "none", 0, 0, 0, i, partitions));
    }
    setFailureDetector(stores);
    routedStoreThreadPool = Executors.newFixedThreadPool(3);
    RoutedStoreFactory routedStoreFactory = createFactory();
    List<Zone> zones = Lists.newArrayList();
    for (int i = 0; i < 3; i++) {
        LinkedList<Integer> zoneProximityList = Lists.newLinkedList();
        Set<Integer> zoneIds = Sets.newHashSet(0, 1, 2);
        zoneIds.remove(i);
        zoneProximityList.addAll(zoneIds);
        zones.add(new Zone(i, zoneProximityList));
    }
    RoutedStore routedStore = routedStoreFactory.create(new Cluster("test", nodes, zones),
                                                        definition,
                                                        stores,
                                                        failureDetector,
                                                        createConfig(timeout));
    long start = System.nanoTime();
    try {
        routedStore.get(new ByteArray("test".getBytes()), null);
        fail("Should have thrown");
    } catch (InsufficientOperationalNodesException e) {
        long elapsed = (System.nanoTime() - start) / Time.NS_PER_MS;
        assertTrue(elapsed + " < " + totalDelay, elapsed < totalDelay);
    }
    start = System.nanoTime();
    try {
        routedStore.put(new ByteArray("test".getBytes()), new Versioned<byte[]>(new byte[] { 1 }), null);
        fail("Should have thrown");
    } catch (InsufficientOperationalNodesException e) {
        long elapsed = (System.nanoTime() - start) / Time.NS_PER_MS;
        assertTrue(elapsed + " < " + totalDelay, elapsed < totalDelay);
    }
}
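For reference, the timing the assertions rely on: with timeout = 50, the per-node delays from the loop are 4 ms, 54 ms and 104 ms, so totalDelay = 162 ms. Since required reads and writes are 3 and each routed request is bounded by the 50 ms timeout, both operations fail with InsufficientOperationalNodesException well before all three sleepy stores could have responded, which is exactly what the elapsed < totalDelay assertions check.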