Usage of voldemort.store.slop.Slop in the voldemort project: class SlopPurgeJob, method operate().
/**
 * Scans every entry in the slop store and deletes each slop that matches at
 * least one of the configured purge filters: destination node ids
 * ({@code nodesToPurge}), a destination zone ({@code zoneToPurge}), or store
 * names ({@code storesToPurge}). Progress counters are updated as the scan
 * proceeds and a summary is logged on completion.
 */
@Override
public void operate() throws Exception {
    logger.info("Purging slops that match any of the following. {Nodes:" + nodesToPurge + "} {Zone:" + zoneToPurge + "} {Stores:" + storesToPurge + "}");
    SlopStorageEngine slopEngine = storeRepo.getSlopStore();
    StorageEngine<ByteArray, Slop, byte[]> slopView = slopEngine.asSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> entries = slopView.entries();
    // When a zone filter is configured, resolve it up front to the concrete
    // set of node ids belonging to that zone; null means "no zone filter".
    Set<Integer> zoneNodeIds = (zoneToPurge != Zone.UNSET_ZONE_ID)
            ? metadataStore.getCluster().getNodeIdsInZone(zoneToPurge)
            : null;
    try {
        while (entries.hasNext()) {
            Pair<ByteArray, Versioned<Slop>> entry = entries.next();
            Versioned<Slop> versionedSlop = entry.getSecond();
            Slop slop = versionedSlop.getValue();
            // A slop qualifies for purging if any single filter matches.
            boolean matchesFilter = nodesToPurge.contains(slop.getNodeId())
                    || (zoneNodeIds != null && zoneNodeIds.contains(slop.getNodeId()))
                    || storesToPurge.contains(slop.getStoreName());
            if (matchesFilter) {
                numKeysUpdatedThisRun.incrementAndGet();
                slopEngine.delete(entry.getFirst(), versionedSlop.getVersion());
            }
            numKeysScannedThisRun.incrementAndGet();
            throttler.maybeThrottle(1);
            // Periodic progress logging every STAT_RECORDS_INTERVAL scans.
            if (numKeysScannedThisRun.get() % STAT_RECORDS_INTERVAL == 0) {
                logger.info("#Scanned:" + numKeysScannedThisRun + " #PurgedSlops:" + numKeysUpdatedThisRun);
            }
        }
    } catch (Exception e) {
        // Best-effort job: log and fall through so the iterator is closed
        // and the completion summary is still emitted.
        logger.error("Error while purging slops", e);
    } finally {
        entries.close();
    }
    logger.info("Completed purging slops. " + "#Scanned:" + numKeysScannedThisRun + " #PurgedSlops:" + numKeysUpdatedThisRun);
}
Usage of voldemort.store.slop.Slop in the voldemort project: class StreamingSlopPusherJob, method run().
/**
 * Executes one pass of the streaming slop pusher: iterates over every slop
 * in the slop store and hands each one to a per-destination-node consumer
 * (via a {@link SynchronousQueue}) which pushes it to the remote node.
 *
 * The run is skipped entirely while this server is a rebalancing master,
 * and terminates early when at least
 * {@code voldemortConfig.getSlopZonesDownToTerminate()} zones have no
 * available nodes. Per-node attempted/succeeded counters are reset at the
 * start of each run and, if the run completes normally, the outstanding
 * counts are folded back into the slop storage engine's statistics.
 */
public void run() {
    // load the metadata before each run, in case the cluster is changed
    loadMetadata();
    // don't try to run slop pusher job when rebalancing
    if (metadataStore.getServerStateUnlocked().equals(MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER)) {
        logger.error("Cannot run slop pusher job since Voldemort server is rebalancing");
        return;
    }
    boolean terminatedEarly = false;
    Date startTime = new Date();
    logger.info("Started streaming slop pusher job at " + startTime);
    SlopStorageEngine slopStorageEngine = storeRepo.getSlopStore();
    ClosableIterator<Pair<ByteArray, Versioned<Slop>>> iterator = null;
    // Lazily create the admin client; limited to one connection per node.
    if (adminClient == null) {
        adminClient = new AdminClient(cluster, new AdminClientConfig().setMaxConnectionsPerNode(1));
    }
    if (voldemortConfig.getSlopZonesDownToTerminate() > 0) {
        // Populating the zone mapping (zone id -> available node ids) used
        // for the early-termination check below.
        zoneMapping.clear();
        for (Node n : cluster.getNodes()) {
            if (failureDetector.isAvailable(n)) {
                Set<Integer> nodes = zoneMapping.get(n.getZoneId());
                if (nodes == null) {
                    nodes = Sets.newHashSet();
                    zoneMapping.put(n.getZoneId(), nodes);
                }
                nodes.add(n.getId());
            }
        }
        // Check how many zones have no available nodes at all
        int zonesDown = 0;
        for (Zone zone : cluster.getZones()) {
            if (zoneMapping.get(zone.getId()) == null || zoneMapping.get(zone.getId()).size() == 0)
                zonesDown++;
        }
        // Terminate early when the configured number of zones is down
        if (voldemortConfig.getSlopZonesDownToTerminate() <= zoneMapping.size() && zonesDown >= voldemortConfig.getSlopZonesDownToTerminate()) {
            logger.info("Completed streaming slop pusher job at " + startTime + " early because " + zonesDown + " zones are down");
            stopAdminClient();
            return;
        }
    }
    // Clearing the per-run statistics
    AtomicLong attemptedPushes = new AtomicLong(0);
    for (Node node : cluster.getNodes()) {
        attemptedByNode.put(node.getId(), 0L);
        succeededByNode.put(node.getId(), 0L);
    }
    Set<String> storeNames = StoreDefinitionUtils.getStoreNamesSet(metadataStore.getStoreDefList());
    acquireRepairPermit();
    try {
        StorageEngine<ByteArray, Slop, byte[]> slopStore = slopStorageEngine.asSlopStore();
        iterator = slopStore.entries();
        while (iterator.hasNext()) {
            Pair<ByteArray, Versioned<Slop>> keyAndVal;
            try {
                keyAndVal = iterator.next();
                Versioned<Slop> versioned = keyAndVal.getSecond();
                // Track the scan progress
                if (this.streamStats != null) {
                    this.streamStats.reportStreamingSlopScan();
                }
                // Retrieve the destination node of this slop
                int nodeId = versioned.getValue().getNodeId();
                // check for dead slops; these are handled separately and
                // skipped by the normal push path
                if (isSlopDead(cluster, storeNames, versioned.getValue())) {
                    handleDeadSlop(slopStorageEngine, keyAndVal);
                    // ignore it.
                    continue;
                }
                Node node = cluster.getNodeById(nodeId);
                attemptedPushes.incrementAndGet();
                // NOTE(review): assumes every slop's nodeId is present in
                // attemptedByNode (populated above from cluster.getNodes());
                // a slop for an unknown node would NPE here — but
                // cluster.getNodeById(nodeId) above would already have failed.
                Long attempted = attemptedByNode.get(nodeId);
                attemptedByNode.put(nodeId, attempted + 1L);
                if (attemptedPushes.get() % 10000 == 0)
                    logger.info("Attempted pushing " + attemptedPushes + " slops");
                if (logger.isTraceEnabled())
                    logger.trace("Pushing slop for " + versioned.getValue().getNodeId() + " and store " + versioned.getValue().getStoreName() + " of key: " + versioned.getValue().getKey());
                if (failureDetector.isAvailable(node)) {
                    SynchronousQueue<Versioned<Slop>> slopQueue = slopQueues.get(nodeId);
                    if (slopQueue == null) {
                        // No previous slop queue, add one and start a consumer
                        slopQueue = new SynchronousQueue<Versioned<Slop>>();
                        slopQueues.put(nodeId, slopQueue);
                        consumerResults.add(consumerExecutor.submit(new SlopConsumer(nodeId, slopQueue, slopStorageEngine)));
                    }
                    boolean offered = slopQueue.offer(versioned, voldemortConfig.getClientRoutingTimeoutMs(), TimeUnit.MILLISECONDS);
                    if (!offered) {
                        if (logger.isDebugEnabled())
                            // Fixed: report the routing timeout actually used by
                            // offer() above, not the (unrelated) connection timeout.
                            logger.debug("No consumer appeared for slop in " + voldemortConfig.getClientRoutingTimeoutMs() + " ms");
                    }
                    readThrottler.maybeThrottle(nBytesRead(keyAndVal));
                } else {
                    logger.trace(node + " declared down, won't push slop");
                }
            } catch (RejectedExecutionException e) {
                throw new VoldemortException("Ran out of threads in executor", e);
            }
        }
    } catch (InterruptedException e) {
        // Deliberately NOT re-interrupting here: the cleanup below must still
        // deliver poison pills and join consumers, which a set interrupt flag
        // would abort.
        logger.warn("Interrupted exception", e);
        terminatedEarly = true;
    } catch (Exception e) {
        logger.error(e, e);
        terminatedEarly = true;
    } finally {
        try {
            if (iterator != null)
                iterator.close();
        } catch (Exception e) {
            logger.warn("Failed to close iterator cleanly as database might be closed", e);
        }
        // Adding the poison pill so each consumer drains and exits
        for (SynchronousQueue<Versioned<Slop>> slopQueue : slopQueues.values()) {
            try {
                slopQueue.put(END);
            } catch (InterruptedException e) {
                logger.warn("Error putting poison pill", e);
            }
        }
        // Wait for all consumers to finish
        for (Future result : consumerResults) {
            try {
                result.get();
            } catch (Exception e) {
                logger.warn("Exception in consumer", e);
            }
        }
        // Only if exception didn't take place do we update the counts
        if (!terminatedEarly) {
            Map<Integer, Long> outstanding = Maps.newHashMapWithExpectedSize(cluster.getNumberOfNodes());
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - " + succeededByNode.get(nodeId) + " - Attempted - " + attemptedByNode.get(nodeId));
                outstanding.put(nodeId, attemptedByNode.get(nodeId) - succeededByNode.get(nodeId));
            }
            slopStorageEngine.resetStats(outstanding);
            logger.info("Completed streaming slop pusher job which started at " + startTime);
        } else {
            for (int nodeId : succeededByNode.keySet()) {
                logger.info("Slops to node " + nodeId + " - Succeeded - " + succeededByNode.get(nodeId) + " - Attempted - " + attemptedByNode.get(nodeId));
            }
            logger.info("Completed early streaming slop pusher job which started at " + startTime);
        }
        // Shut down admin client as not to waste connections
        consumerResults.clear();
        slopQueues.clear();
        stopAdminClient();
        this.repairPermits.release(this.getClass().getCanonicalName());
    }
}
Usage of voldemort.store.slop.Slop in the voldemort project: class StreamingSlopPusherJob, method slopSize().
/**
 * Returns the approximate size of slop to help in throttling
 *
 * @param slopVersioned The versioned slop whose size we want
 * @return Size in bytes
 */
private int slopSize(Versioned<Slop> slopVersioned) {
    Slop slop = slopVersioned.getValue();
    // Every slop carries its key plus the serialized vector-clock version.
    int sizeInBytes = slop.getKey().length() + ((VectorClock) slopVersioned.getVersion()).sizeInBytes();
    Slop.Operation operation = slop.getOperation();
    if (operation == Slop.Operation.PUT) {
        // PUT slops additionally carry the value payload.
        sizeInBytes += slop.getValue().length;
    } else if (operation != Slop.Operation.DELETE) {
        // DELETE has no payload; anything else is unexpected.
        logger.error("Unknown slop operation: " + operation);
    }
    return sizeInBytes;
}
Usage of voldemort.store.slop.Slop in the voldemort project: class HintedHandoffFailureTest, method testSlopViaSerialHint_3_2_2().
/**
 * Test to ensure that when an asynchronous put completes (with a failure)
 * after the pipeline completes, a slop is still registered (via a serial
 * hint).
 *
 * This is for the 3-2-2 configuration
 */
@Test
public void testSlopViaSerialHint_3_2_2() {
    String key = "testSlopViaSerialHint_3_2_2";
    String val = "xyz";
    Versioned<byte[]> versionedVal = new Versioned<byte[]>(val.getBytes());
    ByteArray keyByteArray = new ByteArray(key.getBytes());
    List<Integer> failingNodeIdList = null;
    try {
        failingNodeIdList = customSetup(keyByteArray, get322Replica(), 0);
    } catch (Exception e) {
        logger.info(e.getMessage());
        fail("Error in setup.");
    }
    this.store.put(keyByteArray, versionedVal, null);
    // Give enough time for the serial hint to work.
    try {
        // Fixed: the log message previously claimed 5 seconds while the
        // actual sleep is 1 second.
        logger.info("Sleeping for 1 second to wait for the serial hint to finish");
        Thread.sleep(1000);
    } catch (InterruptedException ignored) {
        // Best-effort wait; an interrupted sleep simply shortens it.
    }
    // Check the slop stores for the hint registered against the failed nodes.
    Set<ByteArray> slopKeys = makeSlopKeys(keyByteArray, failingNodeIdList);
    Set<Slop> registeredSlops = getAllSlops(slopKeys);
    if (registeredSlops.size() == 0) {
        fail("Should have seen some slops. But could not find any.");
    } else if (registeredSlops.size() != 1) {
        fail("Number of slops registered != 1");
    }
}
Usage of voldemort.store.slop.Slop in the voldemort project: class HintedHandoffFailureTest, method testSlopOnDelayedFailingAsyncPut_2_1_1().
/**
 * Test to ensure that when an asynchronous put completes (with a failure)
 * after PerformParallelPut has finished processing the responses and before
 * the hinted handoff actually begins, a slop is still registered for the
 * same.
 *
 * This is for the 2-1-1 configuration.
 */
@Test
public void testSlopOnDelayedFailingAsyncPut_2_1_1() throws Exception {
    String key = "testSlopOnDelayedFailingAsyncPut_2_1_1";
    String val = "xyz";
    Versioned<byte[]> versionedVal = new Versioned<byte[]>(val.getBytes());
    ByteArray keyByteArray = new ByteArray(key.getBytes());
    List<Integer> failingNodeIdList = null;
    try {
        failingNodeIdList = customSetup(keyByteArray, get211Replica(), HINT_DELAY_TIME_MS);
    } catch (Exception e) {
        logger.info(e.getMessage());
        fail("Error in setup.");
    }
    this.store.put(keyByteArray, versionedVal, null);
    // Wait past the injected hint delay so the delayed async failure and
    // the resulting hint have time to register.
    Thread.sleep(HINT_DELAY_TIME_MS + 100);
    // Check the slop stores for the hint registered against the failed nodes.
    // (Removed an unused local 'failedKeys' set that was built but never read.)
    Set<ByteArray> slopKeys = makeSlopKeys(keyByteArray, failingNodeIdList);
    Set<Slop> registeredSlops = getAllSlops(slopKeys);
    if (registeredSlops.size() == 0) {
        fail("Should have seen some slops. But could not find any.");
    } else if (registeredSlops.size() != 1) {
        fail("Number of slops registered != 1");
    }
}
Aggregations