Use of voldemort.utils.ByteArray in project voldemort by voldemort.
The class UpdateSlopEntriesRequestHandler, method handleRequest:
public StreamRequestHandlerState handleRequest(DataInputStream inputStream, DataOutputStream outputStream) throws IOException {
if (!metadataStore.getSlopStreamingEnabledUnlocked()) {
throw new SlopStreamingDisabledException("Slop streaming is disabled on node " + metadataStore.getNodeId() + " under " + metadataStore.getServerStateUnlocked() + " state.");
}
long startNs = System.nanoTime();
if (request == null) {
int size = 0;
try {
size = inputStream.readInt();
} catch (EOFException e) {
if (logger.isTraceEnabled())
logger.trace("Incomplete read for message size");
networkTimeNs += System.nanoTime() - startNs;
return StreamRequestHandlerState.INCOMPLETE_READ;
}
if (size == -1) {
if (logger.isTraceEnabled())
logger.trace("Message size -1, completed slop update");
return StreamRequestHandlerState.COMPLETE;
}
if (logger.isTraceEnabled())
logger.trace("UpdateSlopEntriesRequest message size: " + size);
byte[] input = new byte[size];
try {
ByteUtils.read(inputStream, input);
networkTimeNs += Utils.elapsedTimeNs(startNs, System.nanoTime());
} catch (EOFException e) {
if (logger.isTraceEnabled())
logger.trace("Incomplete read for message");
return StreamRequestHandlerState.INCOMPLETE_READ;
}
VAdminProto.UpdateSlopEntriesRequest.Builder builder = VAdminProto.UpdateSlopEntriesRequest.newBuilder();
builder.mergeFrom(input);
request = builder.build();
}
StorageEngine<ByteArray, byte[], byte[]> storageEngine = AdminServiceRequestHandler.getStorageEngine(storeRepository, request.getStore());
StreamingStats streamStats = null;
if (isJmxEnabled) {
streamStats = storeRepository.getStreamingStats(storageEngine.getName());
streamStats.reportNetworkTime(Operation.SLOP_UPDATE, networkTimeNs);
}
networkTimeNs = 0;
ByteArray key = ProtoUtils.decodeBytes(request.getKey());
VectorClock vectorClock = ProtoUtils.decodeClock(request.getVersion());
switch(request.getRequestType()) {
case PUT:
try {
// Retrieve the transforms if they exist
byte[] transforms = null;
if (request.hasTransform()) {
transforms = ProtoUtils.decodeBytes(request.getTransform()).get();
}
// Retrieve the value
byte[] value = ProtoUtils.decodeBytes(request.getValue()).get();
startNs = System.nanoTime();
storageEngine.put(key, Versioned.value(value, vectorClock), transforms);
if (isJmxEnabled)
streamStats.reportStorageTime(Operation.SLOP_UPDATE, Utils.elapsedTimeNs(startNs, System.nanoTime()));
if (logger.isTraceEnabled())
logger.trace("updateSlopEntries (Streaming put) successful on key:" + key + " of store: " + request.getStore());
} catch (ObsoleteVersionException e) {
// log and ignore
if (logger.isDebugEnabled())
logger.debug("updateSlopEntries (Streaming put) threw ObsoleteVersionException, Ignoring.");
}
break;
case DELETE:
try {
startNs = System.nanoTime();
storageEngine.delete(key, vectorClock);
if (isJmxEnabled)
streamStats.reportStorageTime(Operation.SLOP_UPDATE, System.nanoTime() - startNs);
if (logger.isTraceEnabled())
logger.trace("updateSlopEntries (Streaming delete) successful");
} catch (ObsoleteVersionException e) {
// log and ignore
if (logger.isDebugEnabled())
logger.debug("updateSlopEntries (Streaming delete) threw ObsoleteVersionException, Ignoring.");
}
break;
default:
throw new VoldemortException("Unsupported operation ");
}
// log progress
counter++;
if (isJmxEnabled)
streamStats.reportStreamingPut(Operation.SLOP_UPDATE);
if (0 == counter % 100000) {
long totalTime = (System.currentTimeMillis() - startTime) / 1000;
if (logger.isDebugEnabled())
logger.debug("updateSlopEntries() updated " + counter + " entries in " + totalTime + " s");
}
request = null;
return StreamRequestHandlerState.READING;
}
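Note on the key type: the StorageEngine above is parameterized as StorageEngine<ByteArray, byte[], byte[]> because a raw byte[] only has identity-based equals/hashCode and so cannot serve as a hash key. Below is a minimal sketch of that property, assuming only the ByteArray(byte[]) constructor and get() accessor used in the snippets on this page, plus content-based equality (which is what its use as a store key implies):

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import voldemort.utils.ByteArray;

public class ByteArrayKeySketch {

    public static void main(String[] args) {
        // Two keys built from equal byte contents but distinct arrays.
        ByteArray k1 = new ByteArray("user:42".getBytes(StandardCharsets.UTF_8));
        ByteArray k2 = new ByteArray("user:42".getBytes(StandardCharsets.UTF_8));

        // Content-based equality lets ByteArray key hash-based structures,
        // unlike byte[], whose equals() is reference equality.
        System.out.println(k1.equals(k2));        // expected: true
        System.out.println(k1.get() == k2.get()); // false: different underlying arrays

        Map<ByteArray, String> index = new HashMap<ByteArray, String>();
        index.put(k1, "value");
        System.out.println(index.get(k2));        // expected: value
    }
}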
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
The class VersionedPutPruneJob, method operate:
@Override
public void operate() throws Exception {
StoreDefinition storeDef = StoreDefinitionUtils.getStoreDefinitionWithName(metadataStore.getStoreDefList(), storeName);
if (storeDef == null) {
throw new VoldemortException("Unknown store " + storeName);
}
if (isWritableStore(storeDef)) {
// Let's generate the routing strategy for this storage engine
StoreRoutingPlan routingPlan = new StoreRoutingPlan(metadataStore.getCluster(), storeDef);
logger.info("Pruning store " + storeDef.getName());
StorageEngine<ByteArray, byte[], byte[]> engine = storeRepo.getStorageEngine(storeDef.getName());
iterator = engine.keys();
long itemsScanned = 0;
long numPrunedKeys = 0;
while (iterator.hasNext()) {
ByteArray key = iterator.next();
KeyLockHandle<byte[]> lockHandle = null;
try {
lockHandle = engine.getAndLock(key);
List<Versioned<byte[]>> vals = lockHandle.getValues();
List<Integer> keyReplicas = routingPlan.getReplicationNodeList(routingPlan.getMasterPartitionId(key.get()));
MutableBoolean didPrune = new MutableBoolean(false);
List<Versioned<byte[]>> prunedVals = pruneNonReplicaEntries(vals, keyReplicas, didPrune);
// Only write back if some pruning actually happened. Optimization to reduce load on storage
if (didPrune.booleanValue()) {
List<Versioned<byte[]>> resolvedVals = VectorClockUtils.resolveVersions(prunedVals);
// TODO this is only implemented for BDB for now
lockHandle.setValues(resolvedVals);
engine.putAndUnlock(key, lockHandle);
numPrunedKeys = this.numKeysUpdatedThisRun.incrementAndGet();
} else {
// if we did not prune, still need to let go of the lock
engine.releaseLock(lockHandle);
}
itemsScanned = this.numKeysScannedThisRun.incrementAndGet();
throttler.maybeThrottle(1);
if (itemsScanned % STAT_RECORDS_INTERVAL == 0) {
logger.info("#Scanned:" + itemsScanned + " #Pruned:" + numPrunedKeys);
}
} catch (Exception e) {
throw e;
} finally {
if (lockHandle != null && !lockHandle.isClosed()) {
engine.releaseLock(lockHandle);
}
}
}
logger.info("Completed store " + storeDef.getName() + " #Scanned:" + itemsScanned + " #Pruned:" + numPrunedKeys);
}
}
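The pruning decision above hinges on the per-key replica set: routingPlan.getMasterPartitionId(key.get()) maps the raw key bytes to their master partition, and getReplicationNodeList(...) expands that to the node ids that should currently hold the key, which is the list pruneNonReplicaEntries (not shown here) checks against. Below is a small sketch of just that lookup, reusing the two StoreRoutingPlan calls from the job; the isReplicaFor helper itself is illustrative:

import java.util.List;

import voldemort.routing.StoreRoutingPlan;
import voldemort.utils.ByteArray;

public class ReplicaCheckSketch {

    /**
     * True if nodeId is one of the nodes that should hold this key, using the
     * same two StoreRoutingPlan calls as VersionedPutPruneJob.operate().
     */
    public static boolean isReplicaFor(StoreRoutingPlan routingPlan, ByteArray key, int nodeId) {
        int masterPartition = routingPlan.getMasterPartitionId(key.get());
        List<Integer> replicaNodes = routingPlan.getReplicationNodeList(masterPartition);
        return replicaNodes.contains(nodeId);
    }
}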
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
The class AbstractNonZonedRebalanceTest, method testRebalanceCleanPrimary:
@Test(timeout = 600000)
public void testRebalanceCleanPrimary() throws Exception {
logger.info("Starting testRebalanceCleanPrimary");
try {
Cluster currentCluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0 }, { 1, 3 }, { 2 } });
Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Lists.newArrayList(3));
// start servers 0, 1, 2
Map<String, String> configProps = new HashMap<String, String>();
configProps.put("enable.repair", "true");
List<Integer> serverList = Arrays.asList(0, 1, 2);
currentCluster = startServers(currentCluster, rwStoreDefFileWithReplication, serverList, configProps);
String bootstrapUrl = getBootstrapUrl(currentCluster, 0);
final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, finalCluster);
try {
AdminClient adminClient = rebalanceKit.controller.getAdminClient();
populateData(currentCluster, rwStoreDefWithReplication, adminClient, false);
// Figure out the positive keys to check
List<ByteArray> positiveTestKeyList = sampleKeysFromPartition(adminClient, 1, rwStoreDefWithReplication.getName(), Arrays.asList(1), 20);
rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, Arrays.asList(0, 1, 2));
checkConsistentMetadata(finalCluster, serverList);
// Do the cleanup operation
for (int i = 0; i < 3; i++) {
adminClient.storeMntOps.repairJob(i);
}
// wait for the repairs to complete
for (int i = 0; i < 3; i++) {
ServerTestUtils.waitForAsyncOperationOnServer(serverMap.get(i), "Repair", 5000);
}
// do the positive tests
checkForKeyExistence(adminClient, 1, rwStoreDefWithReplication.getName(), positiveTestKeyList);
logger.info("[Primary] Successful clean after Rebalancing");
} finally {
// stop servers
stopServer(serverList);
}
} catch (AssertionError ae) {
logger.error("Assertion broken in testRebalanceCleanPrimary ", ae);
throw ae;
}
}
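checkForKeyExistence and sampleKeysFromPartition are test helpers whose bodies are not shown here; conceptually the positive test asserts that every sampled ByteArray key still resolves to at least one version after the repair job. Below is a minimal, hypothetical version of that check against a plain Store, using only the get(key, null) call pattern seen elsewhere in these tests:

import java.util.List;

import voldemort.store.Store;
import voldemort.utils.ByteArray;
import voldemort.versioning.Versioned;

public class KeyExistenceSketch {

    /** True only if every key still has at least one version in the store. */
    public static boolean allKeysPresent(Store<ByteArray, byte[], byte[]> store, List<ByteArray> keys) {
        for (ByteArray key : keys) {
            List<Versioned<byte[]>> versions = store.get(key, null);
            if (versions == null || versions.isEmpty()) {
                return false;
            }
        }
        return true;
    }
}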
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
The class AbstractNonZonedRebalanceTest, method populateData:
protected void populateData(Cluster cluster, StoreDefinition storeDef, AdminClient adminClient, boolean isReadOnly) throws Exception {
// Populate Read write stores
if (!isReadOnly) {
// Create SocketStores for each Node first
Map<Integer, Store<ByteArray, byte[], byte[]>> storeMap = new HashMap<Integer, Store<ByteArray, byte[], byte[]>>();
for (Node node : cluster.getNodes()) {
storeMap.put(node.getId(), getSocketStore(storeDef.getName(), node.getHost(), node.getSocketPort()));
}
BaseStoreRoutingPlan storeInstance = new BaseStoreRoutingPlan(cluster, storeDef);
for (Entry<String, String> entry : testEntries.entrySet()) {
ByteArray keyBytes = new ByteArray(ByteUtils.getBytes(entry.getKey(), "UTF-8"));
List<Integer> preferenceNodes = storeInstance.getReplicationNodeList(keyBytes.get());
// Go over every node
for (int nodeId : preferenceNodes) {
try {
storeMap.get(nodeId).put(keyBytes, new Versioned<byte[]>(ByteUtils.getBytes(entry.getValue(), "UTF-8")), null);
} catch (ObsoleteVersionException e) {
logger.info("Why are we seeing this at all here ?? ");
e.printStackTrace();
}
}
}
// close all socket stores
for (Store<ByteArray, byte[], byte[]> store : storeMap.values()) {
store.close();
}
} else {
// Populate Read only stores
File baseDir = TestUtils.createTempDir();
JsonReader reader = ReadOnlyStorageEngineTestInstance.makeTestDataReader(testEntries, baseDir);
RoutingStrategy router = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
File outputDir = TestUtils.createTempDir(baseDir);
JsonStoreBuilder storeBuilder = new JsonStoreBuilder(reader, cluster, storeDef, router, outputDir, null, testEntries.size() / 5, 1, NUM_RO_CHUNKS_PER_BUCKET, 10000, false);
storeBuilder.build(ReadOnlyStorageFormat.READONLY_V2);
AdminStoreSwapper swapper = new AdminStoreSwapper(Executors.newFixedThreadPool(cluster.getNumberOfNodes()), adminClient, 100000);
swapper.fetchAndSwapStoreData(testStoreNameRO, outputDir.getAbsolutePath(), 1L);
}
}
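The read-write branch above encodes every String test entry with ByteUtils.getBytes(..., "UTF-8") before wrapping it in a ByteArray key and a Versioned<byte[]> value; the read-side assertions in the test below decode with ByteUtils.getString. Below is a minimal round-trip sketch of that convention, using only calls that already appear in these snippets:

import voldemort.utils.ByteArray;
import voldemort.utils.ByteUtils;
import voldemort.versioning.Versioned;

public class EncodingRoundTripSketch {

    public static void main(String[] args) {
        // Encode a test entry the way populateData does.
        ByteArray key = new ByteArray(ByteUtils.getBytes("some-key", "UTF-8"));
        Versioned<byte[]> value = new Versioned<byte[]>(ByteUtils.getBytes("some-value", "UTF-8"));

        // Decode it the way the read-side assertions do.
        String decodedKey = ByteUtils.getString(key.get(), "UTF-8");
        String decodedValue = ByteUtils.getString(value.getValue(), "UTF-8");

        System.out.println(decodedKey + " -> " + decodedValue); // some-key -> some-value
    }
}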
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
The class AbstractNonZonedRebalanceTest, method testServerSideRouting:
@Test(timeout = 600000)
public void testServerSideRouting() throws Exception {
logger.info("Starting testServerSideRouting");
try {
final Cluster currentCluster = ServerTestUtils.getLocalCluster(2, new int[][] { { 0, 1, 2, 3, 4, 5, 6 }, { 7, 8 } });
final Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 1, Lists.newArrayList(2, 3));
final List<Integer> serverList = Arrays.asList(0, 1);
Map<String, String> configProps = new HashMap<String, String>();
configProps.put("admin.max.threads", "50");
configProps.put("enable.server.routing", "true");
final Cluster updatedCurrentCluster = startServers(currentCluster, storeDefFileWithReplication, serverList, configProps);
ExecutorService executors = Executors.newFixedThreadPool(2);
final AtomicBoolean rebalancingToken = new AtomicBoolean(false);
final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());
String bootstrapUrl = getBootstrapUrl(currentCluster, 0);
int maxParallel = 2;
final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, maxParallel, finalCluster);
// Populate the two stores
populateData(updatedCurrentCluster, roStoreDefWithReplication, rebalanceKit.controller.getAdminClient(), true);
populateData(updatedCurrentCluster, rwStoreDefWithReplication, rebalanceKit.controller.getAdminClient(), false);
Node node = updatedCurrentCluster.getNodeById(1);
final Store<ByteArray, byte[], byte[]> serverSideRoutingStoreRW = getSocketStore(testStoreNameRW, node.getHost(), node.getSocketPort(), true);
final Store<ByteArray, byte[], byte[]> serverSideRoutingStoreRO = getSocketStore(testStoreNameRO, node.getHost(), node.getSocketPort(), true);
final CountDownLatch latch = new CountDownLatch(1);
// start get operation.
executors.execute(new Runnable() {
@Override
public void run() {
try {
List<String> keys = new ArrayList<String>(testEntries.keySet());
while (!rebalancingToken.get()) {
// should always be able to get values.
int index = (int) (Math.random() * keys.size());
// should get a valid value
try {
List<Versioned<byte[]>> values = serverSideRoutingStoreRW.get(new ByteArray(ByteUtils.getBytes(keys.get(index), "UTF-8")), null);
assertEquals("serverSideRoutingStore should return value.", 1, values.size());
assertEquals("Value returned should be good", new Versioned<String>(testEntries.get(keys.get(index))), new Versioned<String>(ByteUtils.getString(values.get(0).getValue(), "UTF-8"), values.get(0).getVersion()));
values = serverSideRoutingStoreRO.get(new ByteArray(ByteUtils.getBytes(keys.get(index), "UTF-8")), null);
assertEquals("serverSideRoutingStore should return value.", 1, values.size());
assertEquals("Value returned should be good", new Versioned<String>(testEntries.get(keys.get(index))), new Versioned<String>(ByteUtils.getString(values.get(0).getValue(), "UTF-8"), values.get(0).getVersion()));
} catch (UnreachableStoreException e) {
// ignore
} catch (Exception e) {
exceptions.add(e);
}
}
latch.countDown();
} catch (Exception e) {
exceptions.add(e);
}
}
});
executors.execute(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(500);
rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, Arrays.asList(0, 1));
Thread.sleep(500);
rebalancingToken.set(true);
checkConsistentMetadata(finalCluster, serverList);
} catch (Exception e) {
exceptions.add(e);
} finally {
// wait for the reader thread to exit its loop, then stop servers
try {
latch.await(300, TimeUnit.SECONDS);
stopServer(serverList);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
});
executors.shutdown();
executors.awaitTermination(300, TimeUnit.SECONDS);
// check No Exception
if (exceptions.size() > 0) {
for (Exception e : exceptions) {
e.printStackTrace();
}
fail("Should not see any exceptions !!");
}
} catch (AssertionError ae) {
logger.error("Assertion broken in testServerSideRouting ", ae);
throw ae;
}
}
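Stripped of the Voldemort-specific calls, the test above is a two-thread coordination pattern: a reader loops on an AtomicBoolean until the rebalance thread flips it, and a CountDownLatch lets the rebalance thread wait for the reader to exit before tearing the servers down. Below is a self-contained sketch of just that skeleton, with the store and rebalance calls replaced by placeholder comments:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

public class ReaderRebalanceCoordinationSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executors = Executors.newFixedThreadPool(2);
        final AtomicBoolean rebalancingToken = new AtomicBoolean(false);
        final CountDownLatch latch = new CountDownLatch(1);

        // Reader: keep issuing reads until the rebalance thread flips the token,
        // then count down so teardown can proceed safely.
        executors.execute(new Runnable() {
            @Override
            public void run() {
                while (!rebalancingToken.get()) {
                    // placeholder for serverSideRoutingStore.get(...) + assertions
                }
                latch.countDown();
            }
        });

        // Rebalancer: do the work, signal the reader, then wait for it to finish
        // before stopping servers (placeholders here).
        executors.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    // placeholder for rebalanceAndCheck(...) + metadata checks
                    rebalancingToken.set(true);
                    latch.await(300, TimeUnit.SECONDS);
                    // placeholder for stopServer(serverList)
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        });

        executors.shutdown();
        executors.awaitTermination(300, TimeUnit.SECONDS);
    }
}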