Use of voldemort.xml.ClusterMapper in project voldemort by voldemort.
The class RebalanceTaskInfo, method create.
// TODO : Need to think through which of these "SerDe" methods are needed for on-the-wire and
// which are needed for on-disk. Neither seems like standard "SerDe". And then come up with a
// standard way to perform Serialization/De-serialization
public static RebalanceTaskInfo create(Map<?, ?> map) {
    int stealerId = (Integer) map.get("stealerId");
    int donorId = (Integer) map.get("donorId");
    List<String> unbalancedStoreList = Utils.uncheckedCast(map.get("unbalancedStores"));
    Cluster initialCluster = new ClusterMapper().readCluster(new StringReader((String) map.get("initialCluster")));
    HashMap<String, List<Integer>> storeToPartitionIds = Maps.newHashMap();
    for (String unbalancedStore : unbalancedStoreList) {
        List<Integer> partitionList = Utils.uncheckedCast(map.get(unbalancedStore + "partitionList"));
        if (partitionList.size() > 0)
            storeToPartitionIds.put(unbalancedStore, partitionList);
    }
    return new RebalanceTaskInfo(stealerId, donorId, storeToPartitionIds, initialCluster);
}
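Below is a minimal sketch, not taken from the Voldemort sources, of how a map consumed by RebalanceTaskInfo.create(...) could be assembled. The keys mirror the ones read above; the store name and partition ids are placeholders, and the initial cluster travels as the cluster.xml string produced by ClusterMapper.writeCluster.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import voldemort.cluster.Cluster;
import voldemort.xml.ClusterMapper;

public class RebalanceTaskInfoMapSketch {

    // Assembles the map shape that RebalanceTaskInfo.create(...) expects.
    public static Map<String, Object> toMap(int stealerId, int donorId, Cluster cluster) {
        Map<String, Object> map = new HashMap<String, Object>();
        map.put("stealerId", stealerId);
        map.put("donorId", donorId);
        // The initial cluster is serialized to its cluster.xml form.
        map.put("initialCluster", new ClusterMapper().writeCluster(cluster));
        map.put("unbalancedStores", Arrays.asList("test-store"));
        // One partition list per unbalanced store, keyed as <store name> + "partitionList".
        map.put("test-store" + "partitionList", Arrays.asList(0, 1, 2));
        return map;
    }
}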
Use of voldemort.xml.ClusterMapper in project voldemort by voldemort.
The class AdminServiceRequestHandler, method handleFetchAndUpdate.
public VAdminProto.AsyncOperationStatusResponse handleFetchAndUpdate(VAdminProto.InitiateFetchAndUpdateRequest request) {
    final int nodeId = request.getNodeId();
    final List<Integer> partitionIds = request.getPartitionIdsList();
    final VoldemortFilter filter = request.hasFilter() ? getFilterFromRequest(request.getFilter(), voldemortConfig, networkClassLoader)
                                                       : new DefaultVoldemortFilter();
    final String storeName = request.getStore();
    final Cluster initialCluster = request.hasInitialCluster() ? new ClusterMapper().readCluster(new StringReader(request.getInitialCluster()))
                                                               : null;
    int requestId = asyncService.getUniqueRequestId();
    VAdminProto.AsyncOperationStatusResponse.Builder response = VAdminProto.AsyncOperationStatusResponse.newBuilder()
                                                                                                         .setRequestId(requestId)
                                                                                                         .setComplete(false)
                                                                                                         .setDescription("Fetch and update")
                                                                                                         .setStatus("Started");
    final StoreDefinition storeDef = metadataStore.getStoreDef(storeName);
    final boolean isReadOnlyStore = storeDef.getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) == 0;
    final StreamingStats streamingStats = voldemortConfig.isJmxEnabled() ? storeRepository.getStreamingStats(storeName) : null;
    try {
        asyncService.submitOperation(requestId, new AsyncOperation(requestId, "Fetch and Update") {

            private final AtomicBoolean running = new AtomicBoolean(true);

            @Override
            public void stop() {
                running.set(false);
                logger.info("Stopping fetch and update for store " + storeName + " from node " + nodeId + "( " + partitionIds + " )");
            }

            @Override
            public void operate() {
                AdminClient adminClient = AdminClient.createTempAdminClient(voldemortConfig,
                                                                            metadataStore.getCluster(),
                                                                            voldemortConfig.getClientMaxConnectionsPerNode());
                try {
                    StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository, storeName);
                    EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxWriteBytesPerSec());
                    if (isReadOnlyStore) {
                        ReadOnlyStorageEngine readOnlyStorageEngine = ((ReadOnlyStorageEngine) storageEngine);
                        String destinationDir = readOnlyStorageEngine.getCurrentDirPath();
                        logger.info("Fetching files for RO store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        updateStatus("Fetching files for RO store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        adminClient.readonlyOps.fetchPartitionFiles(nodeId,
                                                                    storeName,
                                                                    partitionIds,
                                                                    destinationDir,
                                                                    readOnlyStorageEngine.getChunkedFileSet().getChunkIdToNumChunks().keySet(),
                                                                    running);
                    } else {
                        logger.info("Fetching entries for RW store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        updateStatus("Fetching entries for RW store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " ) ");
                        if (partitionIds.size() > 0) {
                            Iterator<Pair<ByteArray, Versioned<byte[]>>> entriesIterator = adminClient.bulkFetchOps.fetchEntries(nodeId,
                                                                                                                                 storeName,
                                                                                                                                 partitionIds,
                                                                                                                                 filter,
                                                                                                                                 false,
                                                                                                                                 initialCluster,
                                                                                                                                 0);
                            long numTuples = 0;
                            long startTime = System.currentTimeMillis();
                            long startNs = System.nanoTime();
                            while (running.get() && entriesIterator.hasNext()) {
                                Pair<ByteArray, Versioned<byte[]>> entry = entriesIterator.next();
                                if (streamingStats != null) {
                                    streamingStats.reportNetworkTime(Operation.UPDATE_ENTRIES,
                                                                     Utils.elapsedTimeNs(startNs, System.nanoTime()));
                                }
                                ByteArray key = entry.getFirst();
                                Versioned<byte[]> value = entry.getSecond();
                                startNs = System.nanoTime();
                                try {
                                    /**
                                     * TODO This also needs to be fixed to
                                     * use the atomic multi version puts
                                     */
                                    storageEngine.put(key, value, null);
                                } catch (ObsoleteVersionException e) {
                                    // log and ignore
                                    if (logger.isDebugEnabled()) {
                                        logger.debug("Fetch and update threw Obsolete version exception. Ignoring");
                                    }
                                } finally {
                                    if (streamingStats != null) {
                                        streamingStats.reportStreamingPut(Operation.UPDATE_ENTRIES);
                                        streamingStats.reportStorageTime(Operation.UPDATE_ENTRIES,
                                                                         Utils.elapsedTimeNs(startNs, System.nanoTime()));
                                    }
                                }
                                long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                                throttler.maybeThrottle(key.length() + valueSize(value));
                                if ((numTuples % 100000) == 0 && numTuples > 0) {
                                    logger.info(numTuples + " entries copied from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                                    updateStatus(numTuples + " entries copied from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                                }
                                numTuples++;
                            }
                            long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                            if (running.get()) {
                                logger.info("Completed fetching " + numTuples + " entries from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                            } else {
                                logger.info("Fetch and update stopped after fetching " + numTuples + " entries for node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                            }
                        } else {
                            logger.info("No entries to fetch from node " + nodeId + " for store '" + storeName + "'");
                        }
                    }
                } finally {
                    adminClient.close();
                }
            }
        });
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleFetchAndUpdate failed for request(" + request.toString() + ")", e);
    }
    return response.build();
}
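For context, here is a hedged sketch of the client side of this handler: the optional initial cluster is shipped as a cluster.xml string produced by ClusterMapper. The protobuf builder setters (setNodeId, setStore, addAllPartitionIds, setInitialCluster) are assumed from the getters used above, and the node id, store name, and partition ids are placeholders.

import java.util.Arrays;

import voldemort.client.protocol.pb.VAdminProto;
import voldemort.cluster.Cluster;
import voldemort.xml.ClusterMapper;

public class FetchAndUpdateRequestSketch {

    // Builds an InitiateFetchAndUpdateRequest carrying the initial cluster as XML.
    public static VAdminProto.InitiateFetchAndUpdateRequest build(int donorNodeId,
                                                                  String storeName,
                                                                  Cluster initialCluster) {
        return VAdminProto.InitiateFetchAndUpdateRequest.newBuilder()
                          .setNodeId(donorNodeId)
                          .setStore(storeName)
                          .addAllPartitionIds(Arrays.asList(0, 1))
                          .setInitialCluster(new ClusterMapper().writeCluster(initialCluster))
                          .build();
    }
}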
Use of voldemort.xml.ClusterMapper in project voldemort by voldemort.
The class AdminServiceRequestHandler, method handleDeletePartitionEntries.
// TODO : Add ability to use partition scans
public VAdminProto.DeletePartitionEntriesResponse handleDeletePartitionEntries(VAdminProto.DeletePartitionEntriesRequest request) {
    VAdminProto.DeletePartitionEntriesResponse.Builder response = VAdminProto.DeletePartitionEntriesResponse.newBuilder();
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iterator = null;
    try {
        String storeName = request.getStore();
        final List<Integer> partitionsIds = request.getPartitionIdsList();
        final boolean isReadWriteStore = metadataStore.getStoreDef(storeName).getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) != 0;
        if (!isReadWriteStore) {
            throw new VoldemortException("Cannot delete partitions for store " + storeName + " on node " + metadataStore.getNodeId() + " since it is not a RW store");
        }
        StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository, storeName);
        VoldemortFilter filter = (request.hasFilter()) ? getFilterFromRequest(request.getFilter(), voldemortConfig, networkClassLoader)
                                                       : new DefaultVoldemortFilter();
        EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxReadBytesPerSec());
        iterator = storageEngine.entries();
        long deleteSuccess = 0;
        logger.info("Deleting entries for RW store " + storeName + " from node " + metadataStore.getNodeId() + " ( " + partitionsIds + " )");
        while (iterator.hasNext()) {
            Pair<ByteArray, Versioned<byte[]>> entry = iterator.next();
            ByteArray key = entry.getFirst();
            Versioned<byte[]> value = entry.getSecond();
            throttler.maybeThrottle(key.length() + valueSize(value));
            if (StoreRoutingPlan.checkKeyBelongsToNode(key.get(),
                                                       metadataStore.getNodeId(),
                                                       request.hasInitialCluster() ? new ClusterMapper().readCluster(new StringReader(request.getInitialCluster()))
                                                                                   : metadataStore.getCluster(),
                                                       metadataStore.getStoreDef(storeName))
                && filter.accept(key, value)) {
                if (storageEngine.delete(key, value.getVersion())) {
                    deleteSuccess++;
                    if ((deleteSuccess % 10000) == 0) {
                        logger.info(deleteSuccess + " entries deleted from node " + metadataStore.getNodeId() + " for store " + storeName);
                    }
                }
            }
        }
        logger.info("Completed deletion of entries for RW store " + storeName + " from node " + metadataStore.getNodeId() + " ( " + partitionsIds + " )");
        response.setCount(deleteSuccess);
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleDeletePartitionEntries failed for request(" + request.toString() + ")", e);
    } finally {
        if (null != iterator)
            iterator.close();
    }
    return response.build();
}
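A minimal standalone sketch of the per-key check this handler relies on: the cluster topology arrives as XML, is parsed with ClusterMapper, and each key is tested against the local node before deletion. Package paths for StoreRoutingPlan and StoreDefinition are assumed, and the inputs are placeholders.

import java.io.StringReader;

import voldemort.cluster.Cluster;
import voldemort.routing.StoreRoutingPlan;
import voldemort.store.StoreDefinition;
import voldemort.xml.ClusterMapper;

public class KeyOwnershipSketch {

    // Returns true if the key is owned by the given node under the supplied cluster.xml.
    public static boolean ownedByNode(byte[] keyBytes, int nodeId, String clusterXml, StoreDefinition storeDef) {
        Cluster cluster = new ClusterMapper().readCluster(new StringReader(clusterXml));
        return StoreRoutingPlan.checkKeyBelongsToNode(keyBytes, nodeId, cluster, storeDef);
    }
}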
Use of voldemort.xml.ClusterMapper in project voldemort by voldemort.
The class ReplaceNodeCLI, method updateClusterXML.
private String updateClusterXML() {
    Node nodeToAdd = newCluster.getNodeById(newNodeId);
    List<Node> nodes = new ArrayList<Node>(cluster.getNodes());
    List<Zone> zones = new ArrayList<Zone>(cluster.getZones());
    Node nodeToRemove = cluster.getNodeById(nodeId);
    Node newNode = new Node(nodeId,
                            nodeToAdd.getHost(),
                            nodeToAdd.getHttpPort(),
                            nodeToAdd.getSocketPort(),
                            nodeToAdd.getAdminPort(),
                            nodeToRemove.getZoneId(),
                            nodeToRemove.getPartitionIds(),
                            nodeToAdd.getRestPort());
    boolean isInserted = false;
    for (int i = 0; i < nodes.size(); i++) {
        if (nodes.get(i).getId() == nodeId) {
            nodes.remove(i);
            nodes.add(i, newNode);
            isInserted = true;
            break;
        }
    }
    if (isInserted == false) {
        logger.error("Unable to insert the new node, something odd happened");
        throw new VoldemortApplicationException("Unable to insert the new node, something odd happened");
    }
    Cluster updatedCluster = new Cluster(cluster.getName(), nodes, zones);
    return new ClusterMapper().writeCluster(updatedCluster);
}
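Since updateClusterXML() returns plain cluster.xml, a caller can round-trip it through ClusterMapper as a cheap sanity check before pushing it out. A minimal sketch, with the XML string assumed to come from the method above:

import java.io.StringReader;

import voldemort.cluster.Cluster;
import voldemort.xml.ClusterMapper;

public class ClusterXmlRoundTripSketch {

    // Parses the generated XML back into a Cluster; throws if it is malformed.
    public static Cluster parse(String clusterXml) {
        return new ClusterMapper().readCluster(new StringReader(clusterXml));
    }
}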
Use of voldemort.xml.ClusterMapper in project voldemort by voldemort.
The class ZoneClipperCLI, method main.
public static void main(String[] args) throws Exception {
    setupParser();
    OptionSet options = getValidOptions(args);
    int dropZoneId = CmdUtils.valueOf(options, "drop-zoneid", Zone.UNSET_ZONE_ID);
    String outputDir = null;
    if (options.has("output-dir")) {
        outputDir = (String) options.valueOf("output-dir");
    }
    /*
     * A. Generate the clipped cluster.xml
     */
    String initialClusterXML = (String) options.valueOf("current-cluster");
    Cluster initialCluster = new ClusterMapper().readCluster(new File(initialClusterXML));
    // Create a set of current partition ids. We will use this set to
    // compare partition ids in the final cluster
    Set<Integer> originalPartitions = new HashSet<Integer>();
    for (Integer zoneId : initialCluster.getZoneIds()) {
        originalPartitions.addAll(initialCluster.getPartitionIdsInZone(zoneId));
    }
    // Get an intermediate cluster where partitions that belong to the zone
    // that is being dropped have been moved to the existing zones
    Cluster intermediateCluster = RebalanceUtils.vacateZone(initialCluster, dropZoneId);
    Cluster finalCluster = RebalanceUtils.dropZone(intermediateCluster, dropZoneId);
    // Make sure everything is fine
    if (initialCluster.getNumberOfPartitions() != finalCluster.getNumberOfPartitions()) {
        logger.error("The number of partitions in the initial and the final cluster is not equal \n");
    }
    Set<Integer> finalPartitions = new HashSet<Integer>();
    for (Integer zoneId : finalCluster.getZoneIds()) {
        finalPartitions.addAll(finalCluster.getPartitionIdsInZone(zoneId));
    }
    // Compare to the original partition id set
    if (!originalPartitions.equals(finalPartitions)) {
        logger.error("The list of partition ids in the initial and the final cluster doesn't match \n ");
    }
    // Finally write the final cluster to an xml file
    RebalanceUtils.dumpClusterToFile(outputDir, RebalanceUtils.finalClusterFileName, finalCluster);
    /*
     * B. Generate the clipped stores.xml
     */
    logger.info("Generating the adjusted stores.xml..");
    String initialStoresXML = (String) options.valueOf("current-stores");
    List<StoreDefinition> initialStoreDefs = new StoreDefinitionsMapper().readStoreList(new File(initialStoresXML));
    List<StoreDefinition> finalStoreDefs = RebalanceUtils.dropZone(initialStoreDefs, dropZoneId);
    RebalanceUtils.dumpStoreDefsToFile(outputDir, RebalanceUtils.finalStoresFileName, finalStoreDefs);
}
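A hedged sketch of driving ZoneClipperCLI from code. The long-option names are assumed from the options read in main() above ("current-cluster", "current-stores", "drop-zoneid", "output-dir"); the file paths and zone id are placeholders, and the package path of ZoneClipperCLI is assumed.

public class ZoneClipperCliSketch {

    public static void main(String[] args) throws Exception {
        // Drops zone 2 from the given cluster/stores definitions and writes the
        // adjusted cluster.xml and stores.xml into the output directory.
        voldemort.tools.ZoneClipperCLI.main(new String[] {
                "--current-cluster", "config/cluster.xml",
                "--current-stores", "config/stores.xml",
                "--drop-zoneid", "2",
                "--output-dir", "output/"
        });
    }
}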