use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class PerformParallelGetAllRequests, method execute.
@SuppressWarnings("unchecked")
public void execute(final Pipeline pipeline) {
    int attempts = pipelineData.getNodeToKeysMap().size();
    final Map<Integer, Response<Iterable<ByteArray>, Object>> responses = new ConcurrentHashMap<Integer, Response<Iterable<ByteArray>, Object>>();
    final CountDownLatch latch = new CountDownLatch(attempts);
    if (logger.isTraceEnabled())
        logger.trace("Attempting " + attempts + " " + pipeline.getOperation().getSimpleName() + " operations in parallel");
    Map<ByteArray, byte[]> transforms = pipelineData.getTransforms();
    final AtomicBoolean isResponseProcessed = new AtomicBoolean(false);
    for (Map.Entry<Node, List<ByteArray>> entry : pipelineData.getNodeToKeysMap().entrySet()) {
        final Node node = entry.getKey();
        final Collection<ByteArray> keys = entry.getValue();
        NonblockingStoreCallback callback = new NonblockingStoreCallback() {

            public void requestComplete(Object result, long requestTime) {
                if (logger.isTraceEnabled())
                    logger.trace(pipeline.getOperation().getSimpleName() + " response received (" + requestTime + " ms.) from node " + node.getId());
                Response<Iterable<ByteArray>, Object> response = new Response<Iterable<ByteArray>, Object>(node, keys, result, requestTime);
                responses.put(node.getId(), response);
                latch.countDown();
                // This reduces the window where an exception is lost
                if (isResponseProcessed.get() && response.getValue() instanceof Exception) {
                    if (response.getValue() instanceof InvalidMetadataException) {
                        pipelineData.reportException((InvalidMetadataException) response.getValue());
                        logger.warn("Received invalid metadata problem after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "'");
                    } else {
                        handleResponseError(response, pipeline, failureDetector);
                    }
                }
            }
        };
        if (logger.isTraceEnabled())
            logger.trace("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId());
        NonblockingStore store = nonblockingStores.get(node.getId());
        store.submitGetAllRequest(keys, transforms, callback, timeoutMs);
    }
    try {
        latch.await(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        if (logger.isEnabledFor(Level.WARN))
            logger.warn(e, e);
    }
    for (Response<Iterable<ByteArray>, Object> response : responses.values()) {
        if (response.getValue() instanceof Exception) {
            if (handleResponseError(response, pipeline, failureDetector))
                return;
        } else {
            Map<ByteArray, List<Versioned<byte[]>>> values = (Map<ByteArray, List<Versioned<byte[]>>>) response.getValue();
            for (ByteArray key : response.getKey()) {
                MutableInt successCount = pipelineData.getSuccessCount(key);
                successCount.increment();
                List<Versioned<byte[]>> retrieved = values.get(key);
                /*
                 * retrieved can be null if there are no values for the key
                 * provided
                 */
                if (retrieved != null) {
                    List<Versioned<byte[]>> existing = pipelineData.getResult().get(key);
                    if (existing == null)
                        pipelineData.getResult().put(key, Lists.newArrayList(retrieved));
                    else
                        existing.addAll(retrieved);
                }
                HashSet<Integer> zoneResponses = null;
                if (pipelineData.getKeyToZoneResponse().containsKey(key)) {
                    zoneResponses = pipelineData.getKeyToZoneResponse().get(key);
                } else {
                    zoneResponses = new HashSet<Integer>();
                    pipelineData.getKeyToZoneResponse().put(key, zoneResponses);
                }
                zoneResponses.add(response.getNode().getZoneId());
            }
            pipelineData.getResponses().add(new Response<Iterable<ByteArray>, Map<ByteArray, List<Versioned<byte[]>>>>(response.getNode(), response.getKey(), values, response.getRequestTime()));
            failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
        }
    }
    isResponseProcessed.set(true);
    pipeline.addEvent(completeEvent);
}
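The Versioned-specific work above is the per-key merge of each node's getAll result into the pipeline's accumulated result map. A minimal sketch of just that merge step, with the accumulator passed in explicitly (the class and method names here are illustrative, not part of Voldemort):

import java.util.List;
import java.util.Map;

import com.google.common.collect.Lists;

import voldemort.utils.ByteArray;
import voldemort.versioning.Versioned;

class GetAllMergeSketch {

    // Fold one node's getAll() result into an accumulator keyed by ByteArray,
    // keeping every Versioned<byte[]> so later stages can resolve conflicts.
    static void mergeNodeResult(Map<ByteArray, List<Versioned<byte[]>>> accumulator,
                                Iterable<ByteArray> requestedKeys,
                                Map<ByteArray, List<Versioned<byte[]>>> nodeResult) {
        for (ByteArray key : requestedKeys) {
            List<Versioned<byte[]>> retrieved = nodeResult.get(key);
            // retrieved is null when the node holds no versions for this key
            if (retrieved == null)
                continue;
            List<Versioned<byte[]>> existing = accumulator.get(key);
            if (existing == null)
                accumulator.put(key, Lists.newArrayList(retrieved));
            else
                existing.addAll(retrieved);
        }
    }
}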
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class PerformSerialPutRequests, method execute.
public void execute(Pipeline pipeline) {
    int currentNode = 0;
    List<Node> nodes = pipelineData.getNodes();
    long startMasterMs = -1;
    long startMasterNs = -1;
    if (logger.isDebugEnabled()) {
        startMasterMs = System.currentTimeMillis();
        startMasterNs = System.nanoTime();
    }
    if (logger.isDebugEnabled())
        logger.debug("Performing serial put requests to determine master");
    Node node = null;
    for (; currentNode < nodes.size(); currentNode++) {
        node = nodes.get(currentNode);
        pipelineData.incrementNodeIndex();
        VectorClock versionedClock = (VectorClock) versioned.getVersion();
        final Versioned<byte[]> versionedCopy = new Versioned<byte[]>(versioned.getValue(), versionedClock.incremented(node.getId(), time.getMilliseconds()));
        if (logger.isDebugEnabled())
            logger.debug("Attempt #" + (currentNode + 1) + " to perform put (node " + node.getId() + ")");
        long start = System.nanoTime();
        try {
            stores.get(node.getId()).put(key, versionedCopy, transforms);
            long requestTime = (System.nanoTime() - start) / Time.NS_PER_MS;
            pipelineData.incrementSuccesses();
            failureDetector.recordSuccess(node, requestTime);
            if (logger.isDebugEnabled())
                logger.debug("Put on node " + node.getId() + " succeeded, using as master");
            pipelineData.setMaster(node);
            pipelineData.setVersionedCopy(versionedCopy);
            pipelineData.getZoneResponses().add(node.getZoneId());
            currentNode++;
            break;
        } catch (Exception e) {
            long requestTime = (System.nanoTime() - start) / Time.NS_PER_MS;
            if (logger.isDebugEnabled())
                logger.debug("Master PUT at node " + currentNode + "(" + node.getHost() + ")" + " failed (" + e.getMessage() + ") in " + (System.nanoTime() - start) + " ns" + " (keyRef: " + System.identityHashCode(key) + ")");
            if (PipelineRoutedStore.isSlopableFailure(e)) {
                pipelineData.getSynchronizer().tryDelegateSlop(node);
            }
            if (handleResponseError(e, node, requestTime, pipeline, failureDetector))
                return;
        }
    }
    if (logger.isTraceEnabled()) {
        logger.trace("PUT {key:" + key + "} currentNode=" + currentNode + " nodes.size()=" + nodes.size());
    }
    if (pipelineData.getSuccesses() < 1) {
        List<Exception> failures = pipelineData.getFailures();
        pipelineData.setFatalError(new InsufficientOperationalNodesException("No master node succeeded!", failures.size() > 0 ? failures.get(0) : null));
        pipeline.abort();
        return;
    }
    // There aren't any more requests to make...
    if (currentNode == nodes.size()) {
        if (pipelineData.getSuccesses() < required) {
            pipelineData.setFatalError(new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures()));
            pipeline.abort();
        } else {
            if (pipelineData.getZonesRequired() != null) {
                int zonesSatisfied = pipelineData.getZoneResponses().size();
                if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
                    pipeline.addEvent(completeEvent);
                } else {
                    pipelineData.setFatalError(new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + zonesSatisfied + " succeeded"));
                    pipeline.abort();
                }
            } else {
                if (logger.isDebugEnabled())
                    logger.debug("Finished master PUT for key " + ByteUtils.toHexString(key.get()) + " (keyRef: " + System.identityHashCode(key) + "); started at " + startMasterMs + " took " + (System.nanoTime() - startMasterNs) + " ns on node " + (node == null ? "NULL" : node.getId()) + "(" + (node == null ? "NULL" : node.getHost()) + "); now complete");
                pipeline.addEvent(completeEvent);
            }
        }
    } else {
        if (logger.isDebugEnabled())
            logger.debug("Finished master PUT for key " + ByteUtils.toHexString(key.get()) + " (keyRef: " + System.identityHashCode(key) + "); started at " + startMasterMs + " took " + (System.nanoTime() - startMasterNs) + " ns on node " + (node == null ? "NULL" : node.getId()) + "(" + (node == null ? "NULL" : node.getHost()) + ")");
        pipeline.addEvent(masterDeterminedEvent);
    }
}
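The Versioned-specific step above is building versionedCopy: the value is kept as-is while the VectorClock is incremented for the prospective master node before the put. A minimal standalone sketch of that step (class and method names are illustrative; timestamps use System.currentTimeMillis() instead of the pipeline's Time abstraction):

import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

class VersionedCopySketch {

    // Same value, but the vector clock gains an increment for the master node id.
    static Versioned<byte[]> versionedCopyFor(Versioned<byte[]> original, int masterNodeId) {
        VectorClock clock = (VectorClock) original.getVersion();
        return new Versioned<byte[]>(original.getValue(),
                                     clock.incremented(masterNodeId, System.currentTimeMillis()));
    }

    public static void main(String[] args) {
        Versioned<byte[]> value = new Versioned<byte[]>("hello".getBytes(), new VectorClock());
        Versioned<byte[]> copy = versionedCopyFor(value, 0);
        // The copy's clock now carries an entry for node 0; the original is untouched.
        System.out.println(copy.getVersion());
    }
}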
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class ExportBDBToTextDump, method main.
public static void main(String[] argv) throws Exception {
    OptionParser parser = getParser();
    OptionSet options = parser.parse(argv);
    validateOptions(options);
    // bdb_folder output_folder
    String storeBdbFolderPath = (String) options.valueOf("bdb");
    String outputFolderPath = (String) options.valueOf("output");
    File storeBdbFolder = new File(storeBdbFolderPath);
    File outputFolder = new File(outputFolderPath);
    final String storeName = storeBdbFolder.getName();
    Properties properties = new Properties();
    properties.put("node.id", "0");
    properties.put("voldemort.home", storeBdbFolder.getParent());
    VoldemortConfig voldemortConfig = new VoldemortConfig(properties);
    voldemortConfig.setBdbDataDirectory(storeBdbFolder.getParent());
    voldemortConfig.setEnableJmx(false);
    voldemortConfig.setBdbOneEnvPerStore(true);
    BdbStorageConfiguration bdbConfiguration = new BdbStorageConfiguration(voldemortConfig);

    class MockStoreDefinition extends StoreDefinition {

        public MockStoreDefinition() {
            super(storeName, null, null, null, null, null, null, null, 0, null, 0, null, 0, null, null, null, null, null, null, null, null, null, null, null, null, 0);
        }

        @Override
        public boolean hasMemoryFootprint() {
            return false;
        }
    }

    StoreDefinition storeDef = new MockStoreDefinition();
    StorageEngine<ByteArray, byte[], byte[]> engine = bdbConfiguration.getStore(storeDef, null);
    long reportIntervalMs = 10000L;
    long lastCount = 0;
    Reporter<Boolean> rp = new Reporter<Boolean>(reportIntervalMs);
    long count = 0;
    BufferedWriter splitFileWriter = null;
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entries = engine.entries();
    while (entries.hasNext()) {
        if (splitFileWriter == null) {
            long splitId = count / SPLIT_SIZE;
            File splitFile = new File(outputFolder, makeSplitFileName(splitId));
            splitFileWriter = new BufferedWriter(new FileWriter(splitFile), WRITER_BUFFER_SIZE);
        }
        Pair<ByteArray, Versioned<byte[]>> pair = entries.next();
        String line = makeLine(pair);
        splitFileWriter.write(line);
        if ((count + 1) % SPLIT_SIZE == 0) {
            splitFileWriter.close();
            splitFileWriter = null;
        }
        count++;
        final Long countObject = count;
        Boolean reported = rp.tryReport(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                System.out.print(String.format("Exported %15d entries", countObject));
                return true;
            }
        });
        if (reported != null) {
            System.out.println(String.format("; Speed: %8d/s", (count - lastCount) / (reportIntervalMs / 1000)));
            lastCount = count;
        }
    }
    entries.close();
    if (splitFileWriter != null) {
        splitFileWriter.close();
    }
    System.out.println(String.format("Finished exporting %d entries", count));
}
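Each exported entry is a Pair of ByteArray key and Versioned<byte[]> value, and makeLine(pair) turns it into one text line. Its exact format is defined elsewhere in the tool, but a hypothetical serializer along the same lines could hex-encode the key, the VectorClock, and the value (the tab-separated layout below and the helper name are illustrative only, and VectorClock.toBytes() is assumed available):

import voldemort.utils.ByteArray;
import voldemort.utils.ByteUtils;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

class ExportLineSketch {

    // Illustrative line layout: hex(key) TAB hex(clock) TAB hex(value), newline-terminated.
    static String toLine(ByteArray key, Versioned<byte[]> versioned) {
        VectorClock clock = (VectorClock) versioned.getVersion();
        return ByteUtils.toHexString(key.get()) + "\t"
               + ByteUtils.toHexString(clock.toBytes()) + "\t"
               + ByteUtils.toHexString(versioned.getValue()) + "\n";
    }
}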
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class ImportTextDumpToBDB, method main.
public static void main(String[] argv) throws Exception {
    OptionParser parser = getParser();
    OptionSet options = parser.parse(argv);
    validateOptions(options);
    String inputPath = (String) options.valueOf("input");
    String storeBdbFolderPath = (String) options.valueOf("bdb");
    String clusterXmlPath = (String) options.valueOf("cluster-xml");
    String storesXmlPath = (String) options.valueOf("stores-xml");
    Integer nodeId = (Integer) options.valueOf("node-id");
    File input = new File(inputPath);
    List<File> dataFiles = new ArrayList<File>();
    if (input.isDirectory()) {
        File[] files = input.listFiles();
        if (files != null)
            Collections.addAll(dataFiles, files);
    } else if (input.isFile()) {
        dataFiles.add(input);
    } else {
        System.err.println(inputPath + " is not a file or directory");
    }
    File storeBdbFolder = new File(storeBdbFolderPath);
    final String storeName = storeBdbFolder.getName();
    Cluster cluster = new ClusterMapper().readCluster(new File(clusterXmlPath));
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new File(storesXmlPath));
    StoreDefinition storeDef = null;
    for (StoreDefinition sd : storeDefs) {
        if (sd.getName() != null && sd.getName().equals(storeName)) {
            storeDef = sd;
        }
    }
    if (storeDef == null) {
        throw new VoldemortException("StoreNotfound: " + storeName);
    }
    RoutingStrategy routingStrategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
    Properties properties = new Properties();
    properties.put("node.id", "0");
    properties.put("voldemort.home", storeBdbFolder.getParent());
    VoldemortConfig voldemortConfig = new VoldemortConfig(properties);
    voldemortConfig.setBdbDataDirectory(storeBdbFolder.getParent());
    voldemortConfig.setEnableJmx(false);
    voldemortConfig.setBdbOneEnvPerStore(true);
    BdbStorageConfiguration bdbConfiguration = new BdbStorageConfiguration(voldemortConfig);

    class MockStoreDefinition extends StoreDefinition {

        public MockStoreDefinition() {
            super(storeName, null, null, null, null, null, null, null, 0, null, 0, null, 0, null, null, null, null, null, null, null, null, null, null, null, null, 0);
        }

        @Override
        public boolean hasMemoryFootprint() {
            return false;
        }
    }

    StoreDefinition mockStoreDef = new MockStoreDefinition();
    StorageEngine<ByteArray, byte[], byte[]> engine = bdbConfiguration.getStore(mockStoreDef, routingStrategy);
    long reportIntervalMs = 10000L;
    long lastCount = 0;
    long lastInserted = 0;
    Reporter<Boolean> rp = new Reporter<Boolean>(reportIntervalMs);
    long count = 0;
    long inserted = 0;
    for (File f : dataFiles) {
        try {
            BufferedReader bufferedReader = new BufferedReader(new FileReader(f), READER_BUFFER_SIZE);
            engine.beginBatchModifications();
            while (true) {
                String line = bufferedReader.readLine();
                if (line == null) {
                    break;
                }
                Pair<ByteArray, Versioned<byte[]>> entry;
                try {
                    entry = lineToEntry(line);
                } catch (Exception e) {
                    System.err.println("Skipping line: " + line);
                    e.printStackTrace();
                    continue;
                }
                ByteArray key = entry.getFirst();
                List<Node> nodeList = routingStrategy.routeRequest(key.get());
                for (Node node : nodeList) {
                    if (nodeId == node.getId()) {
                        try {
                            engine.put(key, entry.getSecond(), null);
                            inserted++;
                        } catch (ObsoleteVersionException e) {
                            e.printStackTrace();
                        }
                        break;
                    }
                }
                count++;
                final Long countObject = count;
                final Long insertedObject = inserted;
                Boolean reported = rp.tryReport(new Callable<Boolean>() {

                    @Override
                    public Boolean call() throws Exception {
                        System.out.print(String.format("Imported %15d entries; Inserted %15d entries", countObject, insertedObject));
                        return true;
                    }
                });
                if (reported != null) {
                    long importSpeed = (count - lastCount) / (reportIntervalMs / 1000);
                    long insertSpeed = (inserted - lastInserted) / (reportIntervalMs / 1000);
                    System.out.println(String.format("; ImportSpeed: %8d/s; InsertSpeed: %8d/s ", importSpeed, insertSpeed));
                    lastCount = count;
                    lastInserted = inserted;
                }
            }
            bufferedReader.close();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            engine.endBatchModifications();
        }
    }
    engine.close();
    System.out.println(String.format("Finished importing %d entries (%d inserted, rest discarded)", count, inserted));
}
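The import only writes an entry when the target node appears in the key's routing list, and it treats ObsoleteVersionException as "the engine already holds an equal or newer version" rather than as a failure. A minimal sketch of that write step (class and method names are illustrative):

import voldemort.store.StorageEngine;
import voldemort.utils.ByteArray;
import voldemort.versioning.ObsoleteVersionException;
import voldemort.versioning.Versioned;

class ImportPutSketch {

    // Returns true if the entry was inserted, false if the engine already had
    // an equal or newer version for this key.
    static boolean putIfNewer(StorageEngine<ByteArray, byte[], byte[]> engine,
                              ByteArray key,
                              Versioned<byte[]> value) {
        try {
            engine.put(key, value, null);
            return true;
        } catch (ObsoleteVersionException e) {
            return false;
        }
    }
}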
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class StreamingSlopPusherTest, method testNormalPushBothWays.
@Test
public void testNormalPushBothWays() throws InterruptedException, IOException {
    startServers(0, 1);
    // Get both slop stores
    StorageEngine<ByteArray, Slop, byte[]> slopStoreNode0 = getVoldemortServer(0).getStoreRepository().getSlopStore().asSlopStore();
    StorageEngine<ByteArray, Slop, byte[]> slopStoreNode1 = getVoldemortServer(1).getStoreRepository().getSlopStore().asSlopStore();
    // Generate slops for 0
    final List<Versioned<Slop>> entrySetNode0 = ServerTestUtils.createRandomSlops(1, 100, "test-readrepair-memory", "test-consistent", "test-consistent-with-pref-list");
    final List<Versioned<Slop>> entrySetNode1 = ServerTestUtils.createRandomSlops(0, 100, "test-replication-memory", "users", "test-replication-persistent");
    // Populate the slop stores
    populateSlops(0, slopStoreNode0, entrySetNode0);
    populateSlops(1, slopStoreNode1, entrySetNode1);
    StreamingSlopPusherJob pusher0 = new StreamingSlopPusherJob(getVoldemortServer(0).getStoreRepository(),
                                                                getVoldemortServer(0).getMetadataStore(),
                                                                new BannagePeriodFailureDetector(new FailureDetectorConfig().setCluster(cluster).setConnectionVerifier(new ServerStoreConnectionVerifier(socketStoreFactory, metadataStore, configs[0]))),
                                                                configs[0],
                                                                new ScanPermitWrapper(1));
    StreamingSlopPusherJob pusher1 = new StreamingSlopPusherJob(getVoldemortServer(1).getStoreRepository(),
                                                                getVoldemortServer(1).getMetadataStore(),
                                                                new BannagePeriodFailureDetector(new FailureDetectorConfig().setCluster(cluster).setConnectionVerifier(new ServerStoreConnectionVerifier(socketStoreFactory, metadataStore, configs[1]))),
                                                                configs[1],
                                                                new ScanPermitWrapper(1));
    pusher0.run();
    pusher1.run();
    // Give some time for the slops to go over
    Thread.sleep(2000);
    // Now check if the slops worked
    Iterator<Versioned<Slop>> entryIterator0 = entrySetNode0.listIterator();
    while (entryIterator0.hasNext()) {
        Versioned<Slop> versionedSlop = entryIterator0.next();
        Slop nextSlop = versionedSlop.getValue();
        StorageEngine<ByteArray, byte[], byte[]> store = getVoldemortServer(1).getStoreRepository().getStorageEngine(nextSlop.getStoreName());
        if (nextSlop.getOperation().equals(Slop.Operation.PUT)) {
            assertNotSame("entry should be present at store", 0, store.get(nextSlop.getKey(), null).size());
            assertEquals("entry value should match", new String(nextSlop.getValue()), new String(store.get(nextSlop.getKey(), null).get(0).getValue()));
        } else if (nextSlop.getOperation().equals(Slop.Operation.DELETE)) {
            assertEquals("entry value should match", 0, store.get(nextSlop.getKey(), null).size());
        }
        // Did the slop itself get deleted correctly?
        assertEquals("slop should have gone", 0, slopStoreNode0.get(nextSlop.makeKey(), null).size());
    }
    Iterator<Versioned<Slop>> entryIterator1 = entrySetNode1.listIterator();
    while (entryIterator1.hasNext()) {
        Versioned<Slop> versionedSlop = entryIterator1.next();
        Slop nextSlop = versionedSlop.getValue();
        StorageEngine<ByteArray, byte[], byte[]> store = getVoldemortServer(0).getStoreRepository().getStorageEngine(nextSlop.getStoreName());
        if (nextSlop.getOperation().equals(Slop.Operation.PUT)) {
            assertNotSame("entry should be present at store", 0, store.get(nextSlop.getKey(), null).size());
            assertEquals("entry value should match", new String(nextSlop.getValue()), new String(store.get(nextSlop.getKey(), null).get(0).getValue()));
        } else if (nextSlop.getOperation().equals(Slop.Operation.DELETE)) {
            assertEquals("entry value should match", 0, store.get(nextSlop.getKey(), null).size());
        }
        // Did the slop itself get deleted correctly?
        assertEquals("slop should have gone", 0, slopStoreNode1.get(nextSlop.makeKey(), null).size());
    }
    // Check counts
    SlopStorageEngine slopEngine = getVoldemortServer(0).getStoreRepository().getSlopStore();
    assertEquals(slopEngine.getOutstandingTotal(), 0);
    assertEquals(slopEngine.getOutstandingByNode().get(1), new Long(0));
    slopEngine = getVoldemortServer(1).getStoreRepository().getSlopStore();
    assertEquals(slopEngine.getOutstandingTotal(), 0);
    assertEquals(slopEngine.getOutstandingByNode().get(0), new Long(0));
    stopServers(0, 1);
}
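The assertions above boil down to: for every pushed Versioned<Slop>, a PUT slop must now be readable on the destination store with a matching value, and a DELETE slop must leave no versions behind. A condensed sketch of that check (an illustrative helper, not part of the test class):

import java.util.List;

import voldemort.store.StorageEngine;
import voldemort.store.slop.Slop;
import voldemort.utils.ByteArray;
import voldemort.versioning.Versioned;

class SlopCheckSketch {

    // True if the slop's effect is visible on the destination store.
    static boolean slopApplied(StorageEngine<ByteArray, byte[], byte[]> destStore,
                               Versioned<Slop> versionedSlop) {
        Slop slop = versionedSlop.getValue();
        List<Versioned<byte[]>> found = destStore.get(slop.getKey(), null);
        if (slop.getOperation().equals(Slop.Operation.PUT))
            return !found.isEmpty()
                   && new String(found.get(0).getValue()).equals(new String(slop.getValue()));
        if (slop.getOperation().equals(Slop.Operation.DELETE))
            return found.isEmpty();
        return false;
    }
}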