Use of voldemort.routing.RoutingStrategyFactory in project voldemort: class AdminFetchTest, method setUp.
@Before
public void setUp() throws IOException {
    partitionToKeysMap = new HashMap<Integer, Set<String>>();
    SocketStoreFactory socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 32 * 1024);
    final int numServers = 2;
    servers = new VoldemortServer[numServers];
    int[][] partitionMap = { { 0, 1, 2, 3 }, { 4, 5, 6, 7 } };
    cluster = ServerTestUtils.startVoldemortCluster(numServers,
                                                    servers,
                                                    partitionMap,
                                                    socketStoreFactory,
                                                    this.useNio,
                                                    null,
                                                    storesXmlfile,
                                                    properties);
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new File(storesXmlfile));
    for (StoreDefinition storeDef : storeDefs)
        if (storeDef.getName().equals(testStoreName))
            testStoreDef = storeDef;
    routingStrategy = new RoutingStrategyFactory().updateRoutingStrategy(testStoreDef, cluster);
    adminClient = ServerTestUtils.getAdminClient(cluster);

    // load data into the servers
    Node firstServer = cluster.getNodes().iterator().next();
    String bootstrapUrl = "tcp://" + firstServer.getHost() + ":" + firstServer.getSocketPort();
    StoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(bootstrapUrl)
                                                                                .setSelectors(2));
    // create a client that executes operations on a single store
    StoreClient<String, String> voldClient = factory.getStoreClient("users");
    for (int i = 0; i < TEST_STREAM_KEYS_SIZE; i++) {
        String key = "key" + i;
        byte[] bkey = key.getBytes("UTF-8");
        int partition = routingStrategy.getPartitionList(bkey).get(0);
        if (!partitionToKeysMap.containsKey(partition))
            partitionToKeysMap.put(partition, new HashSet<String>());
        partitionToKeysMap.get(partition).add(key);
        voldClient.put(key, "value" + i);
    }
}
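Every snippet on this page boils down to the same call: build a RoutingStrategy from a store definition plus the current cluster, then ask it where a key lives. The following is a minimal, hypothetical sketch of that lookup in isolation; the helper name describeKeyPlacement and its parameters are illustrative and are not part of the test above.

// Illustrative helper, not from the Voldemort sources: given a store definition and
// cluster metadata, resolve the master partition and the replica nodes for one key.
private static void describeKeyPlacement(StoreDefinition storeDef, Cluster cluster, String key)
        throws UnsupportedEncodingException {
    RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
    byte[] keyBytes = key.getBytes("UTF-8");
    int masterPartition = strategy.getPartitionList(keyBytes).get(0); // master partition for the key
    List<Node> replicaNodes = strategy.routeRequest(keyBytes);        // nodes that hold a replica
    System.out.println(key + " -> partition " + masterPartition + ", nodes " + replicaNodes);
}

setUp() above relies on exactly this mapping when it records, per partition, which generated keys a later fetch is expected to return.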
Use of voldemort.routing.RoutingStrategyFactory in project voldemort: class StorageService, method startInner.
@Override
protected void startInner() {
    registerInternalEngine(metadata, false, "metadata");

    /* Initialize storage configurations */
    for (String configClassName : voldemortConfig.getStorageConfigurations())
        initStorageConfig(configClassName);

    /* Initialize view storage configuration */
    storageConfigs.put(ViewStorageConfiguration.TYPE_NAME,
                       new ViewStorageConfiguration(voldemortConfig,
                                                    metadata.getStoreDefList(),
                                                    storeRepository));

    /* Initialize system stores */
    initSystemStores();

    /* Register slop store */
    if (voldemortConfig.isSlopEnabled()) {
        logger.info("Initializing the slop store using " + voldemortConfig.getSlopStoreType());
        StorageConfiguration config = storageConfigs.get(voldemortConfig.getSlopStoreType());
        if (config == null)
            throw new ConfigurationException("Attempt to open store " + SlopStorageEngine.SLOP_STORE_NAME
                                             + " but " + voldemortConfig.getSlopStoreType()
                                             + " storage engine has not been enabled.");

        // make a dummy store definition object
        StoreDefinition slopStoreDefinition = new StoreDefinition(SlopStorageEngine.SLOP_STORE_NAME,
                                                                  null, null, null, null, null, null,
                                                                  RoutingStrategyType.CONSISTENT_STRATEGY,
                                                                  0, null, 0, null, 0, null, null, null,
                                                                  null, null, null, null, null, null,
                                                                  null, null, null, null, 0);
        SlopStorageEngine slopEngine = new SlopStorageEngine(config.getStore(slopStoreDefinition,
                                                                             new RoutingStrategyFactory().updateRoutingStrategy(slopStoreDefinition,
                                                                                                                                metadata.getCluster())),
                                                             metadata.getCluster());
        registerInternalEngine(slopEngine, false, "slop");
        storeRepository.setSlopStore(slopEngine);

        if (voldemortConfig.isSlopPusherJobEnabled()) {
            // Now initialize the pusher job after some time
            GregorianCalendar cal = new GregorianCalendar();
            cal.add(Calendar.SECOND, (int) (voldemortConfig.getSlopFrequencyMs() / Time.MS_PER_SECOND));
            Date nextRun = cal.getTime();
            logger.info("Initializing slop pusher job type " + voldemortConfig.getPusherType()
                        + " at " + nextRun);
            scheduler.schedule("slop",
                               (voldemortConfig.getPusherType().compareTo(BlockingSlopPusherJob.TYPE_NAME) == 0)
                                       ? new BlockingSlopPusherJob(storeRepository, metadata, failureDetector,
                                                                   voldemortConfig, scanPermitWrapper)
                                       : new StreamingSlopPusherJob(storeRepository, metadata,
                                                                    slopStreamingFailureDetector,
                                                                    voldemortConfig, scanPermitWrapper),
                               nextRun,
                               voldemortConfig.getSlopFrequencyMs());
        }

        // Create a SlopPurgeJob object and register it
        if (voldemortConfig.isSlopPurgeJobEnabled()) {
            logger.info("Initializing Slop Purge job");
            SlopPurgeJob job = new SlopPurgeJob(storeRepository, metadata, scanPermitWrapper,
                                                voldemortConfig.getSlopPurgeJobMaxKeysScannedPerSec());
            JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
            storeRepository.registerSlopPurgeJob(job);
        }
    }

    // Create a repair job object and register it with Store repository
    if (voldemortConfig.isRepairEnabled()) {
        logger.info("Initializing repair job.");
        RepairJob job = new RepairJob(storeRepository, metadata, scanPermitWrapper,
                                      voldemortConfig.getRepairJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerRepairJob(job);
    }

    // Create a prune job object and register it
    if (voldemortConfig.isPruneJobEnabled()) {
        logger.info("Initializing prune job");
        VersionedPutPruneJob job = new VersionedPutPruneJob(storeRepository, metadata, scanPermitWrapper,
                                                            voldemortConfig.getPruneJobMaxKeysScannedPerSec());
        JmxUtils.registerMbean(job, JmxUtils.createObjectName(job.getClass()));
        storeRepository.registerPruneJob(job);
    }

    List<StoreDefinition> storeDefs = new ArrayList<StoreDefinition>(this.metadata.getStoreDefList());
    logger.info("Initializing stores:");

    logger.info("Validating schemas:");
    StoreDefinitionUtils.validateSchemasAsNeeded(storeDefs);

    // first initialize non-view stores
    for (StoreDefinition def : storeDefs)
        if (!def.isView())
            openStore(def);

    // now that all the base stores are open, initialize the views pointing at
    // those stores
    for (StoreDefinition def : storeDefs) {
        if (def.isView())
            openStore(def);
    }

    initializeMetadataVersions(storeDefs);

    // enable aggregate jmx statistics
    if (voldemortConfig.isStatTrackingEnabled())
        if (this.voldemortConfig.isEnableJmxClusterName())
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName(metadata.getCluster().getName()
                                                                     + ".voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));
        else
            JmxUtils.registerMbean(new StoreStatsJmx(this.storeStats),
                                   JmxUtils.createObjectName("voldemort.store.stats.aggregate",
                                                             "aggregate-perf"));

    List<StorageEngine> listOfDisabledStores = Lists.newArrayList();
    for (StorageEngine storageEngine : storeRepository.getAllStorageEngines()) {
        try {
            StoreVersionManager storeVersionManager =
                    (StoreVersionManager) storageEngine.getCapability(StoreCapabilityType.DISABLE_STORE_VERSION);
            if (storeVersionManager.hasAnyDisabledVersion()) {
                listOfDisabledStores.add(storageEngine);
                logger.warn("The following store is marked as disabled: " + storageEngine.getName());
                // Must put server in offline mode.
            }
        } catch (NoSuchCapabilityException e) {
            // Not a read-only store: no-op
        }
    }
    if (listOfDisabledStores.isEmpty()) {
        logger.info("All stores initialized.");
    } else {
        throw new DisabledStoreException("All stores initialized, but the server needs to go "
                                         + "in offline mode because some store(s) are disabled.");
    }
}
Use of voldemort.routing.RoutingStrategyFactory in project voldemort: class StorageService, method openStore.
public StorageEngine<ByteArray, byte[], byte[]> openStore(StoreDefinition storeDef) {
    logger.info("Opening store '" + storeDef.getName() + "' (" + storeDef.getType() + ").");

    StorageConfiguration config = storageConfigs.get(storeDef.getType());
    if (config == null)
        throw new ConfigurationException("Attempt to open store " + storeDef.getName() + " but "
                                         + storeDef.getType() + " storage engine has not been enabled.");

    boolean isReadOnly = storeDef.getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) == 0;

    final RoutingStrategy routingStrategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef,
                                                                                               metadata.getCluster());
    final StorageEngine<ByteArray, byte[], byte[]> engine = config.getStore(storeDef, routingStrategy);

    // Update the routing strategy + add listener to metadata
    if (storeDef.getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) == 0) {
        metadata.addMetadataStoreListener(storeDef.getName(), new MetadataStoreListener() {

            public void updateRoutingStrategy(RoutingStrategy updatedRoutingStrategy) {
                ((ReadOnlyStorageEngine) engine).setRoutingStrategy(updatedRoutingStrategy);
            }

            public void updateStoreDefinition(StoreDefinition storeDef) {
                return;
            }
        });
    }

    // openStore() should have atomic semantics
    try {
        registerEngine(engine, isReadOnly, storeDef.getType(), storeDef);

        if (voldemortConfig.isServerRoutingEnabled())
            registerNodeStores(storeDef, metadata.getCluster(), voldemortConfig.getNodeId());

        if (storeDef.hasRetentionPeriod())
            scheduleCleanupJob(storeDef, engine);
    } catch (Exception e) {
        removeEngine(engine, isReadOnly, storeDef.getType(), false);
        throw new VoldemortException(e);
    }
    return engine;
}
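The MetadataStoreListener registered above exists so that a read-only engine can be re-pointed at a fresh routing strategy when the cluster metadata changes, for example after a rebalance. Below is a minimal sketch of what that update amounts to; the method name onClusterChanged and the newCluster parameter are assumptions for illustration, not the MetadataStore internals.

// Illustrative sketch, not from StorageService: what the listener callback boils down to.
// newCluster stands in for the cluster object produced by a metadata update.
private static void onClusterChanged(ReadOnlyStorageEngine engine,
                                     StoreDefinition storeDef,
                                     Cluster newCluster) {
    RoutingStrategy updated = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, newCluster);
    engine.setRoutingStrategy(updated); // same call the listener above makes
}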
Use of voldemort.routing.RoutingStrategyFactory in project voldemort: class AbstractNonZonedRebalanceTest, method populateData.
protected void populateData(Cluster cluster,
                            StoreDefinition storeDef,
                            AdminClient adminClient,
                            boolean isReadOnly) throws Exception {
    // Populate read-write stores
    if (!isReadOnly) {
        // Create SocketStores for each Node first
        Map<Integer, Store<ByteArray, byte[], byte[]>> storeMap = new HashMap<Integer, Store<ByteArray, byte[], byte[]>>();
        for (Node node : cluster.getNodes()) {
            storeMap.put(node.getId(),
                         getSocketStore(storeDef.getName(), node.getHost(), node.getSocketPort()));
        }

        BaseStoreRoutingPlan storeInstance = new BaseStoreRoutingPlan(cluster, storeDef);
        for (Entry<String, String> entry : testEntries.entrySet()) {
            ByteArray keyBytes = new ByteArray(ByteUtils.getBytes(entry.getKey(), "UTF-8"));
            List<Integer> preferenceNodes = storeInstance.getReplicationNodeList(keyBytes.get());

            // Go over every node
            for (int nodeId : preferenceNodes) {
                try {
                    storeMap.get(nodeId)
                            .put(keyBytes,
                                 new Versioned<byte[]>(ByteUtils.getBytes(entry.getValue(), "UTF-8")),
                                 null);
                } catch (ObsoleteVersionException e) {
                    logger.info("Why are we seeing this at all here ?? ");
                    e.printStackTrace();
                }
            }
        }

        // close all socket stores
        for (Store<ByteArray, byte[], byte[]> store : storeMap.values()) {
            store.close();
        }
    } else {
        // Populate read-only stores
        File baseDir = TestUtils.createTempDir();
        JsonReader reader = ReadOnlyStorageEngineTestInstance.makeTestDataReader(testEntries, baseDir);
        RoutingStrategy router = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
        File outputDir = TestUtils.createTempDir(baseDir);
        JsonStoreBuilder storeBuilder = new JsonStoreBuilder(reader,
                                                             cluster,
                                                             storeDef,
                                                             router,
                                                             outputDir,
                                                             null,
                                                             testEntries.size() / 5,
                                                             1,
                                                             NUM_RO_CHUNKS_PER_BUCKET,
                                                             10000,
                                                             false);
        storeBuilder.build(ReadOnlyStorageFormat.READONLY_V2);
        AdminStoreSwapper swapper = new AdminStoreSwapper(Executors.newFixedThreadPool(cluster.getNumberOfNodes()),
                                                          adminClient,
                                                          100000);
        swapper.fetchAndSwapStoreData(testStoreNameRO, outputDir.getAbsolutePath(), 1L);
    }
}
Use of voldemort.routing.RoutingStrategyFactory in project voldemort: class ROTestUtils, method getNodeIdToAllPartitions.
/**
 * For a particular cluster, creates a mapping from node id to its
 * corresponding set of [ replicaType, partition ] tuples.
 *
 * @param cluster The cluster metadata
 * @param storeDef The store definition
 * @param includePrimary Include the primary partition?
 * @return Map of node id to set of [ replicaType, partition ] tuples
 */
public static Map<Integer, Set<Pair<Integer, Integer>>> getNodeIdToAllPartitions(final Cluster cluster,
                                                                                 final StoreDefinition storeDef,
                                                                                 boolean includePrimary) {
    final RoutingStrategy routingStrategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef,
                                                                                               cluster);

    final Map<Integer, Set<Pair<Integer, Integer>>> nodeIdToReplicas = new HashMap<Integer, Set<Pair<Integer, Integer>>>();
    final Map<Integer, Integer> partitionToNodeIdMap = cluster.getPartitionIdToNodeIdMap();

    // Map initialization.
    for (Node node : cluster.getNodes()) {
        nodeIdToReplicas.put(node.getId(), new HashSet<Pair<Integer, Integer>>());
    }

    // Track how many zones actually have partitions (and so replica types)
    // in them.
    int zonesWithPartitions = 0;
    for (Integer zoneId : cluster.getZoneIds()) {
        if (cluster.getNumberOfPartitionsInZone(zoneId) > 0) {
            zonesWithPartitions++;
        }
    }

    // Loops through all nodes
    for (Node node : cluster.getNodes()) {

        // Gets the partitions that this node was configured with.
        for (Integer primary : node.getPartitionIds()) {

            // Gets the list of replicating partitions.
            List<Integer> replicaPartitionList = routingStrategy.getReplicatingPartitionList(primary);

            if ((replicaPartitionList.size() % zonesWithPartitions != 0)
                || ((replicaPartitionList.size() / zonesWithPartitions) != (storeDef.getReplicationFactor() / cluster.getNumberOfZones()))) {

                // For zone expansion & shrinking, this warning is expected
                // in some cases. For other use cases (shuffling, cluster
                // expansion), this warning indicates that something
                // is wrong between the clusters and store defs.
                logger.warn("Number of replicas returned (" + replicaPartitionList.size()
                            + ") does not make sense given the replication factor ("
                            + storeDef.getReplicationFactor() + ") and that there are "
                            + cluster.getNumberOfZones() + " zones of which " + zonesWithPartitions
                            + " have partitions (and of which "
                            + (cluster.getNumberOfZones() - zonesWithPartitions) + " are empty).");
            }

            int replicaType = 0;

            if (!includePrimary) {
                replicaPartitionList.remove(primary);
                replicaType = 1;
            }

            // Get the node that this replicating partition belongs to.
            for (Integer replicaPartition : replicaPartitionList) {
                Integer replicaNodeId = partitionToNodeIdMap.get(replicaPartition);

                // The replicating node will have a copy of primary.
                nodeIdToReplicas.get(replicaNodeId).add(Pair.create(replicaType, primary));

                replicaType++;
            }
        }
    }
    return nodeIdToReplicas;
}
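A hedged example of consuming the map this method returns, assuming cluster and storeDef fixtures are already available and that Pair exposes getFirst()/getSecond() accessors; the loop itself is illustrative and is not part of ROTestUtils.

// Illustrative usage: print, for every node, which partitions it replicates and as which replica type.
Map<Integer, Set<Pair<Integer, Integer>>> nodeIdToReplicas =
        ROTestUtils.getNodeIdToAllPartitions(cluster, storeDef, true);
for (Map.Entry<Integer, Set<Pair<Integer, Integer>>> entry : nodeIdToReplicas.entrySet()) {
    for (Pair<Integer, Integer> replica : entry.getValue()) {
        System.out.println("node " + entry.getKey() + " holds partition " + replica.getSecond()
                           + " as replica type " + replica.getFirst());
    }
}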