Search in sources:

Example 1 with NonblockingStore

Use of voldemort.store.nonblockingstore.NonblockingStore in project voldemort by voldemort.

The class AbstractReadRepair, method execute:

public void execute(Pipeline pipeline) {
    insertNodeValues();
    long startTimeNs = -1;
    if (logger.isDebugEnabled())
        startTimeNs = System.nanoTime();
    if (nodeValues.size() > 1 && preferred > 1) {
        List<NodeValue<ByteArray, byte[]>> toReadRepair = Lists.newArrayList();
        /*
         * We clone after computing read repairs on the assumption that the
         * output will be smaller than the input. Note that we clone the
         * version, but not the key or value, as the latter two are not
         * mutated.
         */
        for (NodeValue<ByteArray, byte[]> v : readRepairer.getRepairs(nodeValues)) {
            Versioned<byte[]> versioned = Versioned.value(v.getVersioned().getValue(), ((VectorClock) v.getVersion()).clone());
            toReadRepair.add(new NodeValue<ByteArray, byte[]>(v.getNodeId(), v.getKey(), versioned));
        }
        for (NodeValue<ByteArray, byte[]> v : toReadRepair) {
            try {
                if (logger.isDebugEnabled())
                    logger.debug("Doing read repair on node " + v.getNodeId() + " for key '" + ByteUtils.toHexString(v.getKey().get()) + "' with version " + v.getVersion() + ".");
                NonblockingStore store = nonblockingStores.get(v.getNodeId());
                store.submitPutRequest(v.getKey(), v.getVersioned(), null, null, timeoutMs);
            } catch (VoldemortApplicationException e) {
                if (logger.isDebugEnabled())
                    logger.debug("Read repair cancelled due to application level exception on node " + v.getNodeId() + " for key '" + ByteUtils.toHexString(v.getKey().get()) + "' with version " + v.getVersion() + ": " + e.getMessage());
            } catch (Exception e) {
                logger.debug("Read repair failed: ", e);
            }
        }
        if (logger.isDebugEnabled()) {
            String logStr = "Repaired (node, key, version): (";
            for (NodeValue<ByteArray, byte[]> v : toReadRepair) {
                logStr += "(" + v.getNodeId() + ", " + v.getKey() + "," + v.getVersion() + ") ";
            }
            logStr += "in " + (System.nanoTime() - startTimeNs) + " ns";
            logger.debug(logStr);
        }
    }
    pipeline.addEvent(completeEvent);
}
Also used: NodeValue(voldemort.store.routed.NodeValue) NonblockingStore(voldemort.store.nonblockingstore.NonblockingStore) VoldemortApplicationException(voldemort.VoldemortApplicationException) ByteArray(voldemort.utils.ByteArray)
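
For reference, the NonblockingStore entry points exercised across these examples can be summarized in one place. The sketch below is illustrative only: the method signatures are inferred from the call sites shown on this page, and the interface name and parameter names are placeholders rather than copies of the Voldemort source.

import java.util.Map;

import voldemort.store.nonblockingstore.NonblockingStoreCallback;
import voldemort.utils.ByteArray;
import voldemort.versioning.Version;
import voldemort.versioning.Versioned;

// Sketch of the submit* calls used in the examples; not the real interface declaration.
public interface NonblockingStoreSketch {

    // Used by AbstractReadRepair and PerformParallelPutRequests; the callback
    // may be null for fire-and-forget read repair.
    void submitPutRequest(ByteArray key,
                          Versioned<byte[]> value,
                          byte[] transforms,
                          NonblockingStoreCallback callback,
                          long timeoutMs);

    // Used by PerformParallelDeleteRequests.
    void submitDeleteRequest(ByteArray key,
                             Version version,
                             NonblockingStoreCallback callback,
                             long timeoutMs);

    // Used by PerformParallelGetAllRequests.
    void submitGetAllRequest(Iterable<ByteArray> keys,
                             Map<ByteArray, byte[]> transforms,
                             NonblockingStoreCallback callback,
                             long timeoutMs);
}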

Example 2 with NonblockingStore

Use of voldemort.store.nonblockingstore.NonblockingStore in project voldemort by voldemort.

The class AbstractStoreClientFactory, method getRawStore:

@SuppressWarnings("unchecked")
public <K, V, T> Store<K, V, T> getRawStore(String storeName, InconsistencyResolver<Versioned<V>> resolver, String customStoresXml, String clusterXmlString, FailureDetector fd) {
    logger.info("Client zone-id [" + this.routedStoreConfig.getClientZoneId() + "] Attempting to get raw store [" + storeName + "] ");
    if (logger.isDebugEnabled()) {
        for (URI uri : bootstrapUrls) {
            logger.debug("Client Bootstrap url [" + uri + "]");
        }
    }
    // Get cluster and store metadata
    String clusterXml = clusterXmlString;
    if (clusterXml == null) {
        logger.debug("Fetching cluster.xml ...");
        clusterXml = bootstrapMetadataWithRetries(MetadataStore.CLUSTER_KEY, bootstrapUrls);
    }
    this.cluster = clusterMapper.readCluster(new StringReader(clusterXml), false);
    String storesXml = customStoresXml;
    if (storesXml == null) {
        String storesKey = storeName;
        if (config.isFetchAllStoresXmlInBootstrap()) {
            storesKey = MetadataStore.STORES_KEY;
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Fetching store definition for Store " + storeName + " key " + storesKey);
        }
        storesXml = bootstrapMetadataWithRetries(storesKey, bootstrapUrls);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Obtained cluster metadata xml" + clusterXml);
        logger.debug("Obtained stores  metadata xml" + storesXml);
    }
    storeDefs = storeMapper.readStoreList(new StringReader(storesXml), false);
    StoreDefinition storeDef = null;
    for (StoreDefinition d : storeDefs)
        if (d.getName().equals(storeName))
            storeDef = d;
    if (storeDef == null) {
        logger.error("Bootstrap - unknown store: " + storeName);
        throw new BootstrapFailureException("Unknown store '" + storeName + "'.");
    }
    if (logger.isDebugEnabled()) {
        logger.debug(this.cluster.toString(true));
        logger.debug(storeDef.toString());
    }
    boolean repairReads = !storeDef.isView();
    // construct mapping
    Map<Integer, Store<ByteArray, byte[], byte[]>> clientMapping = Maps.newHashMap();
    Map<Integer, NonblockingStore> nonblockingStores = Maps.newHashMap();
    Map<Integer, NonblockingStore> nonblockingSlopStores = Maps.newHashMap();
    Map<Integer, Store<ByteArray, Slop, byte[]>> slopStores = null;
    if (storeDef.hasHintedHandoffStrategyType())
        slopStores = Maps.newHashMap();
    for (Node node : this.cluster.getNodes()) {
        Store<ByteArray, byte[], byte[]> store = getStore(storeDef.getName(), node.getHost(), getPort(node), this.requestFormatType);
        clientMapping.put(node.getId(), store);
        NonblockingStore nonblockingStore = routedStoreFactory.toNonblockingStore(store);
        nonblockingStores.put(node.getId(), nonblockingStore);
        if (slopStores != null) {
            Store<ByteArray, byte[], byte[]> rawSlopStore = getStore("slop", node.getHost(), getPort(node), this.requestFormatType);
            Store<ByteArray, Slop, byte[]> slopStore = SerializingStore.wrap(rawSlopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
            slopStores.put(node.getId(), slopStore);
            nonblockingSlopStores.put(node.getId(), routedStoreFactory.toNonblockingStore(rawSlopStore));
        }
    }
    /*
     * Check if we need to retrieve a reference to the failure detector. For
     * system stores, the FD reference would be passed in.
     */
    FailureDetector failureDetectorRef = fd;
    if (failureDetectorRef == null) {
        failureDetectorRef = getFailureDetector();
    } else {
        logger.debug("Using existing failure detector.");
    }
    this.routedStoreConfig.setRepairReads(repairReads);
    Store<ByteArray, byte[], byte[]> store = routedStoreFactory.create(this.cluster, storeDef, clientMapping, nonblockingStores, slopStores, nonblockingSlopStores, failureDetectorRef, this.routedStoreConfig);
    store = new LoggingStore(store);
    if (isJmxEnabled) {
        StatTrackingStore statStore = new StatTrackingStore(store, this.aggregateStats, this.cachedStoreStats);
        statStore.getStats().registerJmx(identifierString);
        store = statStore;
    }
    if (this.config.isEnableCompressionLayer()) {
        if (storeDef.getKeySerializer().hasCompression() || storeDef.getValueSerializer().hasCompression()) {
            store = new CompressingStore(store, getCompressionStrategy(storeDef.getKeySerializer()), getCompressionStrategy(storeDef.getValueSerializer()));
        }
    }
    /*
     * Initialize the finalStore object only once the store object itself is
     * wrapped by a StatTrackingStore. It seems like the finalStore object is
     * redundant?
     */
    Store<K, V, T> finalStore = (Store<K, V, T>) store;
    if (this.config.isEnableSerializationLayer()) {
        Serializer<K> keySerializer = (Serializer<K>) serializerFactory.getSerializer(storeDef.getKeySerializer());
        Serializer<V> valueSerializer = (Serializer<V>) serializerFactory.getSerializer(storeDef.getValueSerializer());
        if (storeDef.isView() && (storeDef.getTransformsSerializer() == null))
            throw new SerializationException("Transforms serializer must be specified with a view ");
        Serializer<T> transformsSerializer = (Serializer<T>) serializerFactory.getSerializer(storeDef.getTransformsSerializer() != null ? storeDef.getTransformsSerializer() : new SerializerDefinition("identity"));
        finalStore = SerializingStore.wrap(store, keySerializer, valueSerializer, transformsSerializer);
    }
    // Add the inconsistency resolving decorator, using their resolver (if they gave us one)
    if (this.config.isEnableInconsistencyResolvingLayer()) {
        InconsistencyResolver<Versioned<V>> secondaryResolver = resolver == null ? new TimeBasedInconsistencyResolver() : resolver;
        finalStore = new InconsistencyResolvingStore<K, V, T>(finalStore, new ChainedResolver<Versioned<V>>(new VectorClockInconsistencyResolver(), secondaryResolver));
    }
    return finalStore;
}
Also used: Versioned(voldemort.versioning.Versioned) Node(voldemort.cluster.Node) Store(voldemort.store.Store) LoggingStore(voldemort.store.logging.LoggingStore) SerializingStore(voldemort.store.serialized.SerializingStore) StatTrackingStore(voldemort.store.stats.StatTrackingStore) MetadataStore(voldemort.store.metadata.MetadataStore) CompressingStore(voldemort.store.compress.CompressingStore) InconsistencyResolvingStore(voldemort.store.versioned.InconsistencyResolvingStore) NonblockingStore(voldemort.store.nonblockingstore.NonblockingStore) URI(java.net.URI) StoreDefinition(voldemort.store.StoreDefinition) FailureDetector(voldemort.cluster.failuredetector.FailureDetector) StringReader(java.io.StringReader) ByteArray(voldemort.utils.ByteArray) IdentitySerializer(voldemort.serialization.IdentitySerializer) Serializer(voldemort.serialization.Serializer) SlopSerializer(voldemort.serialization.SlopSerializer) ByteArraySerializer(voldemort.serialization.ByteArraySerializer) StringSerializer(voldemort.serialization.StringSerializer) ChainedResolver(voldemort.versioning.ChainedResolver) SerializationException(voldemort.serialization.SerializationException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TimeBasedInconsistencyResolver(voldemort.versioning.TimeBasedInconsistencyResolver) Slop(voldemort.store.slop.Slop) SerializerDefinition(voldemort.serialization.SerializerDefinition) VectorClockInconsistencyResolver(voldemort.versioning.VectorClockInconsistencyResolver)
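
For context, application code normally reaches this bootstrap and store-wrapping path indirectly through a StoreClientFactory rather than by calling getRawStore itself. A minimal, hedged usage sketch follows; the bootstrap URL and the store name "test" are placeholders for your own cluster.

import voldemort.client.ClientConfig;
import voldemort.client.SocketStoreClientFactory;
import voldemort.client.StoreClient;
import voldemort.client.StoreClientFactory;

public class ClientBootstrapSketch {

    public static void main(String[] args) {
        // Placeholder bootstrap URL and store name; adjust for your deployment.
        StoreClientFactory factory = new SocketStoreClientFactory(
                new ClientConfig().setBootstrapUrls("tcp://localhost:6666"));
        // getStoreClient() drives the bootstrap and layering shown in getRawStore above.
        StoreClient<String, String> client = factory.getStoreClient("test");

        client.put("some-key", "some-value");
        System.out.println(client.getValue("some-key"));
    }
}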

Example 3 with NonblockingStore

Use of voldemort.store.nonblockingstore.NonblockingStore in project voldemort by voldemort.

The class PerformParallelDeleteRequests, method executeInternal:

private void executeInternal(final Pipeline pipeline) {
    List<Node> nodes = pipelineData.getNodes();
    final Map<Integer, Response<ByteArray, Object>> responses = new ConcurrentHashMap<Integer, Response<ByteArray, Object>>();
    int attempts = nodes.size();
    int blocks = Math.min(preferred, attempts);
    final CountDownLatch attemptsLatch = new CountDownLatch(attempts);
    final CountDownLatch blocksLatch = new CountDownLatch(blocks);
    if (logger.isTraceEnabled())
        logger.trace("Attempting " + attempts + " " + pipeline.getOperation().getSimpleName() + " operations in parallel");
    long beginTime = System.nanoTime();
    for (int i = 0; i < attempts; i++) {
        final Node node = nodes.get(i);
        pipelineData.incrementNodeIndex();
        NonblockingStoreCallback callback = new NonblockingStoreCallback() {

            public void requestComplete(Object result, long requestTime) {
                if (logger.isTraceEnabled())
                    logger.trace(pipeline.getOperation().getSimpleName() + " response received (" + requestTime + " ms.) from node " + node.getId());
                Response<ByteArray, Object> response = new Response<ByteArray, Object>(node, key, result, requestTime);
                if (logger.isTraceEnabled()) {
                    logger.trace(attemptsLatch.getCount() + " attempts remaining. Will block " + " for " + blocksLatch.getCount() + " more ");
                }
                responses.put(node.getId(), response);
                if (response.getValue() instanceof Exception && isOperationCompleted.get()) {
                    handleException(response, pipeline);
                }
                attemptsLatch.countDown();
                blocksLatch.countDown();
            }
        };
        if (logger.isTraceEnabled())
            logger.trace("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId());
        NonblockingStore store = nonblockingStores.get(node.getId());
        store.submitDeleteRequest(key, version, callback, timeoutMs);
    }
    try {
        long elapsedNs = System.nanoTime() - beginTime;
        long remainingNs = (timeoutMs * Time.NS_PER_MS) - elapsedNs;
        if (remainingNs > 0) {
            blocksLatch.await(remainingNs, TimeUnit.NANOSECONDS);
        }
    } catch (InterruptedException e) {
        if (logger.isEnabledFor(Level.WARN))
            logger.warn(e, e);
    }
    if (processResponses(responses, pipeline))
        return;
    // wait for more responses in case we did not have enough successful
    // responses to achieve the required count
    boolean quorumSatisfied = true;
    if (pipelineData.getSuccesses() < required) {
        long elapsedNs = System.nanoTime() - beginTime;
        long remainingNs = (timeoutMs * Time.NS_PER_MS) - elapsedNs;
        if (remainingNs > 0) {
            try {
                attemptsLatch.await(remainingNs, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                if (logger.isEnabledFor(Level.WARN))
                    logger.warn(e, e);
            }
            if (processResponses(responses, pipeline))
                return;
        }
        if (pipelineData.getSuccesses() < required) {
            pipelineData.setFatalError(new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures()));
            abortPipeline(pipeline);
            quorumSatisfied = false;
        }
    }
    if (quorumSatisfied) {
        if (pipelineData.getZonesRequired() != null) {
            int zonesSatisfied = pipelineData.getZoneResponses().size();
            if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
                completePipeline(pipeline);
            } else {
                long timeMs = (System.nanoTime() - beginTime) / Time.NS_PER_MS;
                if ((timeoutMs - timeMs) > 0) {
                    try {
                        attemptsLatch.await(timeoutMs - timeMs, TimeUnit.MILLISECONDS);
                    } catch (InterruptedException e) {
                        if (logger.isEnabledFor(Level.WARN))
                            logger.warn(e, e);
                    }
                    if (processResponses(responses, pipeline))
                        return;
                }
                if (pipelineData.getZoneResponses().size() >= (pipelineData.getZonesRequired() + 1)) {
                    completePipeline(pipeline);
                } else {
                    pipelineData.setFatalError(new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + zonesSatisfied + " succeeded"));
                    abortPipeline(pipeline);
                }
            }
        } else {
            completePipeline(pipeline);
        }
    }
}
Also used: NonblockingStore(voldemort.store.nonblockingstore.NonblockingStore) Node(voldemort.cluster.Node) CountDownLatch(java.util.concurrent.CountDownLatch) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) InsufficientOperationalNodesException(voldemort.store.InsufficientOperationalNodesException) InsufficientZoneResponsesException(voldemort.store.InsufficientZoneResponsesException) UnreachableStoreException(voldemort.store.UnreachableStoreException) InvalidMetadataException(voldemort.store.InvalidMetadataException) QuotaExceededException(voldemort.store.quota.QuotaExceededException) Response(voldemort.store.routed.Response) NonblockingStoreCallback(voldemort.store.nonblockingstore.NonblockingStoreCallback) ByteArray(voldemort.utils.ByteArray) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
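
The coordination pattern in executeInternal (submit every request asynchronously, count a latch down from the callback, then await only for whatever remains of the timeout budget) can be distilled into the standalone sketch below. Everything here is illustrative; a scheduled task stands in for the NonblockingStore submit call and its callback.

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScatterAwaitSketch {

    // Stand-in for a non-blocking submit: the "response" arrives later on another thread.
    static void submitAsync(ScheduledExecutorService pool, int nodeId,
                            Map<Integer, Boolean> responses, CountDownLatch latch) {
        pool.schedule(() -> {
            responses.put(nodeId, Boolean.TRUE); // record the response, as the callback does
            latch.countDown();                   // mirror the callback's countDown()
        }, 10, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        List<Integer> nodes = List.of(0, 1, 2);
        long timeoutMs = 100;
        Map<Integer, Boolean> responses = new ConcurrentHashMap<>();
        CountDownLatch latch = new CountDownLatch(nodes.size());
        ScheduledExecutorService pool = Executors.newScheduledThreadPool(nodes.size());

        long beginNs = System.nanoTime();
        for (int nodeId : nodes)
            submitAsync(pool, nodeId, responses, latch);

        // Await only for what is left of the overall budget, as executeInternal does.
        long remainingNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs) - (System.nanoTime() - beginNs);
        if (remainingNs > 0)
            latch.await(remainingNs, TimeUnit.NANOSECONDS);

        System.out.println("Responses received within budget: " + responses.size());
        pool.shutdown();
    }
}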

Example 4 with NonblockingStore

Use of voldemort.store.nonblockingstore.NonblockingStore in project voldemort by voldemort.

The class PerformParallelGetAllRequests, method execute:

@SuppressWarnings("unchecked")
public void execute(final Pipeline pipeline) {
    int attempts = pipelineData.getNodeToKeysMap().size();
    final Map<Integer, Response<Iterable<ByteArray>, Object>> responses = new ConcurrentHashMap<Integer, Response<Iterable<ByteArray>, Object>>();
    final CountDownLatch latch = new CountDownLatch(attempts);
    if (logger.isTraceEnabled())
        logger.trace("Attempting " + attempts + " " + pipeline.getOperation().getSimpleName() + " operations in parallel");
    Map<ByteArray, byte[]> transforms = pipelineData.getTransforms();
    final AtomicBoolean isResponseProcessed = new AtomicBoolean(false);
    for (Map.Entry<Node, List<ByteArray>> entry : pipelineData.getNodeToKeysMap().entrySet()) {
        final Node node = entry.getKey();
        final Collection<ByteArray> keys = entry.getValue();
        NonblockingStoreCallback callback = new NonblockingStoreCallback() {

            public void requestComplete(Object result, long requestTime) {
                if (logger.isTraceEnabled())
                    logger.trace(pipeline.getOperation().getSimpleName() + " response received (" + requestTime + " ms.) from node " + node.getId());
                Response<Iterable<ByteArray>, Object> response = new Response<Iterable<ByteArray>, Object>(node, keys, result, requestTime);
                responses.put(node.getId(), response);
                latch.countDown();
                // This reduces the window where an exception is lost
                if (isResponseProcessed.get() && response.getValue() instanceof Exception)
                    if (response.getValue() instanceof InvalidMetadataException) {
                        pipelineData.reportException((InvalidMetadataException) response.getValue());
                        logger.warn("Received invalid metadata problem after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "'");
                    } else {
                        handleResponseError(response, pipeline, failureDetector);
                    }
            }
        };
        if (logger.isTraceEnabled())
            logger.trace("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId());
        NonblockingStore store = nonblockingStores.get(node.getId());
        store.submitGetAllRequest(keys, transforms, callback, timeoutMs);
    }
    try {
        latch.await(timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        if (logger.isEnabledFor(Level.WARN))
            logger.warn(e, e);
    }
    for (Response<Iterable<ByteArray>, Object> response : responses.values()) {
        if (response.getValue() instanceof Exception) {
            if (handleResponseError(response, pipeline, failureDetector))
                return;
        } else {
            Map<ByteArray, List<Versioned<byte[]>>> values = (Map<ByteArray, List<Versioned<byte[]>>>) response.getValue();
            for (ByteArray key : response.getKey()) {
                MutableInt successCount = pipelineData.getSuccessCount(key);
                successCount.increment();
                List<Versioned<byte[]>> retrieved = values.get(key);
                /*
                 * retrieved can be null if there are no values for the key
                 * provided
                 */
                if (retrieved != null) {
                    List<Versioned<byte[]>> existing = pipelineData.getResult().get(key);
                    if (existing == null)
                        pipelineData.getResult().put(key, Lists.newArrayList(retrieved));
                    else
                        existing.addAll(retrieved);
                }
                HashSet<Integer> zoneResponses = null;
                if (pipelineData.getKeyToZoneResponse().containsKey(key)) {
                    zoneResponses = pipelineData.getKeyToZoneResponse().get(key);
                } else {
                    zoneResponses = new HashSet<Integer>();
                    pipelineData.getKeyToZoneResponse().put(key, zoneResponses);
                }
                zoneResponses.add(response.getNode().getZoneId());
            }
            pipelineData.getResponses().add(new Response<Iterable<ByteArray>, Map<ByteArray, List<Versioned<byte[]>>>>(response.getNode(), response.getKey(), values, response.getRequestTime()));
            failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
        }
    }
    isResponseProcessed.set(true);
    pipeline.addEvent(completeEvent);
}
Also used: Versioned(voldemort.versioning.Versioned) Node(voldemort.cluster.Node) InvalidMetadataException(voldemort.store.InvalidMetadataException) NonblockingStoreCallback(voldemort.store.nonblockingstore.NonblockingStoreCallback) ByteArray(voldemort.utils.ByteArray) List(java.util.List) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) NonblockingStore(voldemort.store.nonblockingstore.NonblockingStore) CountDownLatch(java.util.concurrent.CountDownLatch) Response(voldemort.store.routed.Response) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) MutableInt(org.apache.commons.lang.mutable.MutableInt) Map(java.util.Map)
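
The per-key merge in the loop above (accumulate each node's getAll values into one result map, creating the list the first time a key is seen and appending afterwards) reduces to the sketch below. The types are simplified to plain strings so the snippet stands alone; it is not the Voldemort code itself.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GetAllMergeSketch {

    // Merge one node's getAll response into the aggregate result, mirroring the loop above.
    static void mergeNodeResponse(Map<String, List<String>> result,
                                  Map<String, List<String>> nodeResponse) {
        for (Map.Entry<String, List<String>> entry : nodeResponse.entrySet()) {
            List<String> retrieved = entry.getValue();
            if (retrieved == null)
                continue; // a node may return no values for a key it was asked about
            List<String> existing = result.get(entry.getKey());
            if (existing == null)
                result.put(entry.getKey(), new ArrayList<>(retrieved));
            else
                existing.addAll(retrieved);
        }
    }

    public static void main(String[] args) {
        Map<String, List<String>> result = new HashMap<>();
        mergeNodeResponse(result, Map.of("k1", List.of("v1")));
        mergeNodeResponse(result, Map.of("k1", List.of("v1-other-replica"), "k2", List.of("v2")));
        System.out.println(result); // e.g. {k1=[v1, v1-other-replica], k2=[v2]}
    }
}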

Example 5 with NonblockingStore

Use of voldemort.store.nonblockingstore.NonblockingStore in project voldemort by voldemort.

The class PerformParallelPutRequests, method execute:

@Override
public void execute(final Pipeline pipeline) {
    final Node masterNode = pipelineData.getMaster();
    final List<Node> nodes = pipelineData.getNodes();
    final Versioned<byte[]> versionedCopy = pipelineData.getVersionedCopy();
    final Integer numNodesTouchedInSerialPut = nodes.indexOf(masterNode) + 1;
    numNodesPendingResponse = nodes.size() - numNodesTouchedInSerialPut;
    if (logger.isDebugEnabled())
        logger.debug("PUT {key:" + key + "} MasterNode={id:" + masterNode.getId() + "} totalNodesToAsyncPut=" + numNodesPendingResponse);
    // initiate parallel puts
    for (int i = numNodesTouchedInSerialPut; i < nodes.size(); i++) {
        final Node node = nodes.get(i);
        pipelineData.incrementNodeIndex();
        NonblockingStoreCallback callback = new NonblockingStoreCallback() {

            @Override
            public void requestComplete(Object result, long requestTime) {
                boolean responseHandledByMaster = false;
                if (logger.isDebugEnabled())
                    logger.debug("PUT {key:" + key + "} response received from node={id:" + node.getId() + "} in " + requestTime + " ms)");
                Response<ByteArray, Object> response;
                response = new Response<ByteArray, Object>(node, key, result, requestTime);
                if (logger.isDebugEnabled()) {
                    logger.debug("PUT {key:" + key + "} Parallel put thread trying to return result to main thread");
                }
                responseHandledByMaster = pipelineData.getSynchronizer().tryDelegateResponseHandling(response);
                if (logger.isDebugEnabled()) {
                    logger.debug("PUT {key:" + key + "} Master thread accepted the response: " + responseHandledByMaster);
                }
                if (!responseHandledByMaster) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("PUT {key:" + key + "} Master thread did not accept the response: will handle in worker thread");
                    }
                    if (PipelineRoutedStore.isSlopableFailure(response.getValue()) || response.getValue() instanceof QuotaExceededException) {
                        if (logger.isDebugEnabled())
                            logger.debug("PUT {key:" + key + "} failed on node={id:" + node.getId() + ",host:" + node.getHost() + "}");
                        if (isHintedHandoffEnabled()) {
                            boolean triedDelegateSlop = pipelineData.getSynchronizer().tryDelegateSlop(node);
                            if (logger.isDebugEnabled()) {
                                logger.debug("PUT {key:" + key + "} triedDelegateSlop: " + triedDelegateSlop);
                            }
                            if (!triedDelegateSlop) {
                                Slop slop = new Slop(pipelineData.getStoreName(), Slop.Operation.PUT, key, versionedCopy.getValue(), transforms, node.getId(), new Date());
                                pipelineData.addFailedNode(node);
                                if (logger.isDebugEnabled())
                                    logger.debug("PUT {key:" + key + "} Start registering Slop(node:" + node.getId() + ",host:" + node.getHost() + ")");
                                hintedHandoff.sendHintParallel(node, versionedCopy.getVersion(), slop);
                                if (logger.isDebugEnabled())
                                    logger.debug("PUT {key:" + key + "} Sent out request to register Slop(node: " + node.getId() + ",host:" + node.getHost() + ")");
                            }
                        }
                    } else {
                        // the exception is ignorable
                        if (logger.isDebugEnabled()) {
                            if (result instanceof Exception) {
                                logger.debug("PUT {key:" + key + "} will not send hint. Response is ignorable exception: " + result.getClass().toString());
                            } else {
                                logger.debug("PUT {key:" + key + "} will not send hint. Response is success");
                            }
                        }
                    }
                    if (result instanceof Exception && !(result instanceof ObsoleteVersionException)) {
                        if (response.getValue() instanceof InvalidMetadataException) {
                            pipelineData.reportException((InvalidMetadataException) response.getValue());
                            logger.warn("Received invalid metadata problem after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "'");
                        } else if (response.getValue() instanceof QuotaExceededException) {
                            /*
                             * TODO Not sure if we need to count this
                             * Exception for stats or silently ignore and
                             * just log a warning. While
                             * QuotaExceededException thrown from other
                             * places means the operation failed, this one
                             * does not fail the operation but instead
                             * stores slops. Introduce a new Exception on
                             * the client side to just monitor how many
                             * async writes fail on exceeding quota?
                             */
                            logger.warn("Received QuotaExceededException after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "', master-node '" + masterNode.getId() + "'");
                        } else {
                            handleResponseError(response, pipeline, failureDetector);
                        }
                    }
                }
            }
        };
        if (logger.isTraceEnabled())
            logger.trace("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId() + " for key " + key);
        NonblockingStore store = nonblockingStores.get(node.getId());
        store.submitPutRequest(key, versionedCopy, transforms, callback, timeoutMs);
    }
    try {
        boolean preferredSatisfied = false;
        while (true) {
            long elapsedNs = System.nanoTime() - pipelineData.getStartTimeNs();
            long remainingNs = (timeoutMs * Time.NS_PER_MS) - elapsedNs;
            remainingNs = Math.max(0, remainingNs);
            // preferred check
            if (numResponsesGot >= preferred - 1) {
                preferredSatisfied = true;
            }
            quorumSatisfied = isQuorumSatisfied();
            zonesSatisfied = isZonesSatisfied();
            if (quorumSatisfied && zonesSatisfied && preferredSatisfied || remainingNs <= 0 || numNodesPendingResponse <= 0) {
                pipelineData.getSynchronizer().cutoffHandling();
                break;
            } else {
                if (logger.isTraceEnabled()) {
                    logger.trace("PUT {key:" + key + "} trying to poll from queue");
                }
                Response<ByteArray, Object> response = pipelineData.getSynchronizer().responseQueuePoll(remainingNs, TimeUnit.NANOSECONDS);
                processResponse(response, pipeline);
                if (logger.isTraceEnabled()) {
                    logger.trace("PUT {key:" + key + "} tried to poll from queue. Null?: " + (response == null) + " numResponsesGot:" + numResponsesGot + " parallelResponseToWait: " + numNodesPendingResponse + "; preferred-1: " + (preferred - 1) + "; preferredOK: " + preferredSatisfied + " quorumOK: " + quorumSatisfied + "; zoneOK: " + zonesSatisfied);
                }
            }
        }
        // Drain any responses still left over in the queue
        while (!pipelineData.getSynchronizer().responseQueueIsEmpty()) {
            Response<ByteArray, Object> response = pipelineData.getSynchronizer().responseQueuePoll(0, TimeUnit.NANOSECONDS);
            processResponse(response, pipeline);
        }
        quorumSatisfied = isQuorumSatisfied();
        zonesSatisfied = isZonesSatisfied();
        if (quorumSatisfied && zonesSatisfied) {
            if (logger.isDebugEnabled()) {
                logger.debug("PUT {key:" + key + "} succeeded at parallel put stage");
            }
            pipelineData.getSynchronizer().disallowDelegateSlop();
            pipeline.addEvent(completeEvent);
        } else {
            VoldemortException fatalError;
            if (!quorumSatisfied) {
                if (logger.isDebugEnabled()) {
                    logger.debug("PUT {key:" + key + "} failed due to insufficient nodes. required=" + required + " success=" + pipelineData.getSuccesses());
                }
                fatalError = new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures());
                pipelineData.setFatalError(fatalError);
            } else if (!zonesSatisfied) {
                if (logger.isDebugEnabled()) {
                    logger.debug("PUT {key:" + key + "} failed due to insufficient zones. required=" + pipelineData.getZonesRequired() + 1 + " success=" + pipelineData.getZoneResponses().size());
                }
                fatalError = new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + (pipelineData.getZoneResponses().size()) + " succeeded. Failing nodes : " + pipelineData.getFailedNodes());
                pipelineData.setFatalError(fatalError);
            }
            pipeline.abort();
        }
    } catch (InterruptedException e) {
        if (logger.isEnabledFor(Level.WARN))
            logger.warn(e, e);
    } catch (NoSuchElementException e) {
        if (logger.isEnabledFor(Level.ERROR))
            logger.error("Response Queue is empty. There may be a bug in PerformParallelPutRequest", e);
    } finally {
        if (logger.isDebugEnabled()) {
            logger.debug("PUT {key:" + key + "} marking parallel put stage finished");
        }
    }
}
Also used: QuotaExceededException(voldemort.store.quota.QuotaExceededException) NonblockingStore(voldemort.store.nonblockingstore.NonblockingStore) Node(voldemort.cluster.Node) InvalidMetadataException(voldemort.store.InvalidMetadataException) VoldemortException(voldemort.VoldemortException) Date(java.util.Date) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) InsufficientOperationalNodesException(voldemort.store.InsufficientOperationalNodesException) InsufficientZoneResponsesException(voldemort.store.InsufficientZoneResponsesException) NoSuchElementException(java.util.NoSuchElementException) NonblockingStoreCallback(voldemort.store.nonblockingstore.NonblockingStoreCallback) ByteArray(voldemort.utils.ByteArray) Slop(voldemort.store.slop.Slop)
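
The waiting loop inside the try block follows a recurring idiom in these pipeline actions: recompute the remaining nanosecond budget from a fixed start time on every pass, poll a response queue with that budget, and stop once the quorum conditions hold or the budget is exhausted. A minimal standalone sketch of the idiom, with a plain BlockingQueue standing in for the pipeline's synchronizer, is shown below.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class DeadlinePollSketch {

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> responses = new LinkedBlockingQueue<>();
        long timeoutMs = 50;
        int required = 2;
        int successes = 0;

        long startNs = System.nanoTime();
        while (successes < required) {
            // Recompute what is left of the overall budget on every iteration.
            long remainingNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs)
                    - (System.nanoTime() - startNs);
            if (remainingNs <= 0)
                break; // budget exhausted: the real code records a fatal error and aborts

            String response = responses.poll(remainingNs, TimeUnit.NANOSECONDS);
            if (response != null)
                successes++; // processResponse(...) would update quorum/zone state here
        }

        System.out.println(successes >= required ? "quorum satisfied" : "quorum not satisfied");
    }
}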

Aggregations

NonblockingStore (voldemort.store.nonblockingstore.NonblockingStore): 10
ByteArray (voldemort.utils.ByteArray): 9
Node (voldemort.cluster.Node): 8
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 5
NonblockingStoreCallback (voldemort.store.nonblockingstore.NonblockingStoreCallback): 5
VoldemortException (voldemort.VoldemortException): 4
InvalidMetadataException (voldemort.store.InvalidMetadataException): 4
Response (voldemort.store.routed.Response): 4
CountDownLatch (java.util.concurrent.CountDownLatch): 3
InsufficientOperationalNodesException (voldemort.store.InsufficientOperationalNodesException): 3
InsufficientZoneResponsesException (voldemort.store.InsufficientZoneResponsesException): 3
Store (voldemort.store.Store): 3
UnreachableStoreException (voldemort.store.UnreachableStoreException): 3
LoggingStore (voldemort.store.logging.LoggingStore): 3
Slop (voldemort.store.slop.Slop): 3
ObsoleteVersionException (voldemort.versioning.ObsoleteVersionException): 3
Map (java.util.Map): 2
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2
MetadataStore (voldemort.store.metadata.MetadataStore): 2
QuotaExceededException (voldemort.store.quota.QuotaExceededException): 2