Search in sources:

Example 41 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

The class VoldemortAdminTool, method executeFetchEntries.

/**
 * Fetch entries (key plus versioned value) for the requested stores from the
 * given node and write them out, one store at a time.
 *
 * Output goes either to per-store "<store>.entries" files under outputDir, or
 * to stdout when outputDir is null. Entries are written in ASCII (JSON-style)
 * form when useAscii is true, otherwise as comma-separated hex strings.
 *
 * @param nodeId          id of the node to fetch from
 * @param adminClient     admin client used to stream entries
 * @param partitionIdList partitions to fetch; null means every partition in the cluster
 * @param outputDir       directory for output files; null writes to System.out
 * @param storeNames      stores to fetch; null means all user-defined stores
 *                        (system stores are only fetchable when named explicitly)
 * @param useAscii        true for ASCII/JSON output, false for binary hex output
 * @param fetchOrphaned   true to fetch orphaned entries instead of partition entries
 * @throws IOException if writing the output fails
 */
private static void executeFetchEntries(Integer nodeId, AdminClient adminClient, List<Integer> partitionIdList, String outputDir, List<String> storeNames, boolean useAscii, boolean fetchOrphaned) throws IOException {
    // Index the node's store definitions by name for quick lookup below.
    List<StoreDefinition> storeDefinitionList = getStoreDefinitions(adminClient, nodeId);
    HashMap<String, StoreDefinition> storeDefinitionMap = Maps.newHashMap();
    for (StoreDefinition storeDefinition : storeDefinitionList) {
        storeDefinitionMap.put(storeDefinition.getName(), storeDefinition);
    }
    File directory = null;
    if (outputDir != null) {
        directory = new File(outputDir);
        if (!(directory.exists() || directory.mkdir())) {
            Utils.croak("Can't find or create directory " + outputDir);
        }
    }
    List<String> stores = storeNames;
    if (stores == null) {
        // when no stores specified, all user defined store will be fetched,
        // but not system stores.
        stores = Lists.newArrayList();
        stores.addAll(storeDefinitionMap.keySet());
    } else {
        // add system stores to the map so they can be fetched when
        // specified explicitly
        storeDefinitionMap.putAll(getSystemStoreDefs());
    }
    // When no partitions are specified, fetch every partition in the cluster.
    if (partitionIdList == null) {
        partitionIdList = Lists.newArrayList();
        for (Node node : adminClient.getAdminClientCluster().getNodes()) {
            partitionIdList.addAll(node.getPartitionIds());
        }
    }
    StoreDefinition storeDefinition = null;
    for (String store : stores) {
        storeDefinition = storeDefinitionMap.get(store);
        if (null == storeDefinition) {
            System.out.println("No store found under the name \'" + store + "\'");
            continue;
        }
        Iterator<Pair<ByteArray, Versioned<byte[]>>> entriesIteratorRef = null;
        if (fetchOrphaned) {
            System.out.println("Fetching orphaned entries of " + store);
            entriesIteratorRef = adminClient.bulkFetchOps.fetchOrphanedEntries(nodeId, store);
        } else {
            System.out.println("Fetching entries in partitions " + Joiner.on(", ").join(partitionIdList) + " of " + store);
            entriesIteratorRef = adminClient.bulkFetchOps.fetchEntries(nodeId, store, partitionIdList, null, false);
        }
        // Effectively-final alias so the iterator can be captured by the
        // anonymous Writable/Printable below.
        final Iterator<Pair<ByteArray, Versioned<byte[]>>> entriesIterator = entriesIteratorRef;
        File outputFile = null;
        if (directory != null) {
            outputFile = new File(directory, store + ".entries");
        }
        if (useAscii) {
            // k-v serializer
            SerializerDefinition keySerializerDef = storeDefinition.getKeySerializer();
            SerializerDefinition valueSerializerDef = storeDefinition.getValueSerializer();
            SerializerFactory serializerFactory = new DefaultSerializerFactory();
            @SuppressWarnings("unchecked") final Serializer<Object> keySerializer = (Serializer<Object>) serializerFactory.getSerializer(keySerializerDef);
            @SuppressWarnings("unchecked") final Serializer<Object> valueSerializer = (Serializer<Object>) serializerFactory.getSerializer(valueSerializerDef);
            // compression strategy (null when the serializer has no compression)
            final CompressionStrategy keyCompressionStrategy;
            final CompressionStrategy valueCompressionStrategy;
            if (keySerializerDef != null && keySerializerDef.hasCompression()) {
                keyCompressionStrategy = new CompressionStrategyFactory().get(keySerializerDef.getCompression());
            } else {
                keyCompressionStrategy = null;
            }
            if (valueSerializerDef != null && valueSerializerDef.hasCompression()) {
                valueCompressionStrategy = new CompressionStrategyFactory().get(valueSerializerDef.getCompression());
            } else {
                valueCompressionStrategy = null;
            }
            writeAscii(outputFile, new Writable() {

                @Override
                public void writeTo(BufferedWriter out) throws IOException {
                    // Create the JSON generator once for the whole stream; the
                    // previous code allocated a new JsonFactory/ObjectMapper/
                    // JsonGenerator on every iteration of the loop.
                    final JsonGenerator generator = new JsonFactory(new ObjectMapper()).createJsonGenerator(out);
                    while (entriesIterator.hasNext()) {
                        Pair<ByteArray, Versioned<byte[]>> kvPair = entriesIterator.next();
                        byte[] keyBytes = kvPair.getFirst().get();
                        byte[] valueBytes = kvPair.getSecond().getValue();
                        VectorClock version = (VectorClock) kvPair.getSecond().getVersion();
                        // Inflate before deserializing when compression is configured.
                        Object keyObject = keySerializer.toObject((null == keyCompressionStrategy) ? keyBytes : keyCompressionStrategy.inflate(keyBytes));
                        Object valueObject = valueSerializer.toObject((null == valueCompressionStrategy) ? valueBytes : valueCompressionStrategy.inflate(valueBytes));
                        // Avro records print themselves; everything else goes
                        // through the JSON generator.
                        if (keyObject instanceof GenericRecord) {
                            out.write(keyObject.toString());
                        } else {
                            generator.writeObject(keyObject);
                        }
                        out.write(' ' + version.toString() + ' ');
                        if (valueObject instanceof GenericRecord) {
                            out.write(valueObject.toString());
                        } else {
                            generator.writeObject(valueObject);
                        }
                        out.write('\n');
                    }
                }
            });
        } else {
            writeBinary(outputFile, new Printable() {

                @Override
                public void printTo(DataOutputStream out) throws IOException {
                    // Binary form: hex(key),vector-clock,hex(value) per line.
                    while (entriesIterator.hasNext()) {
                        Pair<ByteArray, Versioned<byte[]>> kvPair = entriesIterator.next();
                        byte[] keyBytes = kvPair.getFirst().get();
                        VectorClock clock = ((VectorClock) kvPair.getSecond().getVersion());
                        byte[] valueBytes = kvPair.getSecond().getValue();
                        out.writeChars(ByteUtils.toHexString(keyBytes));
                        out.writeChars(",");
                        out.writeChars(clock.toString());
                        out.writeChars(",");
                        out.writeChars(ByteUtils.toHexString(valueBytes));
                        out.writeChars("\n");
                    }
                }
            });
        }
        if (outputFile != null)
            System.out.println("Fetched entries from " + store + " to " + outputFile);
    }
}
Also used : DataOutputStream(java.io.DataOutputStream) Node(voldemort.cluster.Node) JsonFactory(org.codehaus.jackson.JsonFactory) CompressionStrategy(voldemort.store.compress.CompressionStrategy) CompressionStrategyFactory(voldemort.store.compress.CompressionStrategyFactory) BufferedWriter(java.io.BufferedWriter) StoreDefinition(voldemort.store.StoreDefinition) JsonGenerator(org.codehaus.jackson.JsonGenerator) GenericRecord(org.apache.avro.generic.GenericRecord) ObjectMapper(org.codehaus.jackson.map.ObjectMapper) Pair(voldemort.utils.Pair) Serializer(voldemort.serialization.Serializer) StringSerializer(voldemort.serialization.StringSerializer) DefaultSerializerFactory(voldemort.serialization.DefaultSerializerFactory) SerializerFactory(voldemort.serialization.SerializerFactory) VectorClock(voldemort.versioning.VectorClock) IOException(java.io.IOException) DefaultSerializerFactory(voldemort.serialization.DefaultSerializerFactory) File(java.io.File) SerializerDefinition(voldemort.serialization.SerializerDefinition)

Example 42 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

The class VoldemortClientShell, method evaluateCommand.

// useful as this separates the repeated prompt from the evaluation
// using no modifier as no sub-class will have access but all classes within
// package will
/**
 * Parses and executes a single shell command line.
 *
 * Dispatches on the command prefix (put, getall, getmetadata, get, delete,
 * preflist, fetchkeys, fetch, help, quit/exit). All exceptions are caught and
 * reported to {@code errorStream} so the shell loop keeps running.
 *
 * Fix over the previous revision: the fetchkeys/fetch branches opened a
 * file-backed BufferedWriter when an output file was given but never closed
 * it, leaking the file handle. The writer is now closed in a finally block
 * when (and only when) it is file-backed; the wrapper around commandOutput is
 * deliberately left open since closing it would close the shared stream.
 *
 * @param line          the raw command line entered by the user
 * @param printCommands echo flag passed in by the caller; not consulted in
 *                      this method (NOTE(review): appears unused here —
 *                      possibly consumed by sub-commands or dead)
 * @return false when the command was unrecognized, true otherwise
 */
// useful as this separates the repeated prompt from the evaluation
// using no modifier as no sub-class will have access but all classes within
// package will
boolean evaluateCommand(String line, boolean printCommands) {
    try {
        if (line.toLowerCase().startsWith("put")) {
            processPut(line.substring("put".length()));
        } else if (line.toLowerCase().startsWith("getall")) {
            processGetAll(line.substring("getall".length()));
        } else if (line.toLowerCase().startsWith("getmetadata")) {
            String[] args = line.substring("getmetadata".length() + 1).split("\\s+");
            int remoteNodeId = Integer.valueOf(args[0]);
            String key = args[1];
            Versioned<String> versioned = adminClient.metadataMgmtOps.getRemoteMetadata(remoteNodeId, key);
            if (versioned == null) {
                commandOutput.println("null");
            } else {
                commandOutput.println(versioned.getVersion());
                commandOutput.print(": ");
                commandOutput.println(versioned.getValue());
                commandOutput.println();
            }
        } else if (line.toLowerCase().startsWith("get")) {
            // NOTE: must come after "getall"/"getmetadata" since "get" is a prefix of both.
            processGet(line.substring("get".length()));
        } else if (line.toLowerCase().startsWith("delete")) {
            processDelete(line.substring("delete".length()));
        } else if (line.startsWith("preflist")) {
            processPreflist(line.substring("preflist".length()));
        } else if (line.toLowerCase().startsWith("fetchkeys")) {
            String[] args = line.substring("fetchkeys".length() + 1).split("\\s+");
            int remoteNodeId = Integer.valueOf(args[0]);
            String storeName = args[1];
            List<Integer> partititionList = parseCsv(args[2]);
            Iterator<ByteArray> partitionKeys = adminClient.bulkFetchOps.fetchKeys(remoteNodeId, storeName, partititionList, null, false);
            // Optional fourth argument is an output file; otherwise echo to
            // the shell's output stream.
            boolean toFile = args.length > 3;
            BufferedWriter writer = null;
            try {
                if (toFile) {
                    writer = new BufferedWriter(new FileWriter(new File(args[3])));
                } else
                    writer = new BufferedWriter(new OutputStreamWriter(commandOutput));
            } catch (IOException e) {
                errorStream.println("Failed to open the output stream");
                e.printStackTrace(errorStream);
            }
            if (writer != null) {
                try {
                    while (partitionKeys.hasNext()) {
                        ByteArray keyByteArray = partitionKeys.next();
                        StringBuilder lineBuilder = new StringBuilder();
                        lineBuilder.append(ByteUtils.getString(keyByteArray.get(), "UTF-8"));
                        lineBuilder.append("\n");
                        writer.write(lineBuilder.toString());
                    }
                    writer.flush();
                } finally {
                    // Close only the file-backed writer; closing the wrapper
                    // around commandOutput would close the shared stream.
                    if (toFile)
                        writer.close();
                }
            }
        } else if (line.toLowerCase().startsWith("fetch")) {
            String[] args = line.substring("fetch".length() + 1).split("\\s+");
            int remoteNodeId = Integer.valueOf(args[0]);
            String storeName = args[1];
            List<Integer> partititionList = parseCsv(args[2]);
            Iterator<Pair<ByteArray, Versioned<byte[]>>> partitionEntries = adminClient.bulkFetchOps.fetchEntries(remoteNodeId, storeName, partititionList, null, false);
            boolean toFile = args.length > 3;
            BufferedWriter writer = null;
            try {
                if (toFile) {
                    writer = new BufferedWriter(new FileWriter(new File(args[3])));
                } else
                    writer = new BufferedWriter(new OutputStreamWriter(commandOutput));
            } catch (IOException e) {
                errorStream.println("Failed to open the output stream");
                e.printStackTrace(errorStream);
            }
            if (writer != null) {
                try {
                    // Tab-separated: key, vector clock, value — one entry per line.
                    while (partitionEntries.hasNext()) {
                        Pair<ByteArray, Versioned<byte[]>> pair = partitionEntries.next();
                        ByteArray keyByteArray = pair.getFirst();
                        Versioned<byte[]> versioned = pair.getSecond();
                        StringBuilder lineBuilder = new StringBuilder();
                        lineBuilder.append(ByteUtils.getString(keyByteArray.get(), "UTF-8"));
                        lineBuilder.append("\t");
                        lineBuilder.append(versioned.getVersion());
                        lineBuilder.append("\t");
                        lineBuilder.append(ByteUtils.getString(versioned.getValue(), "UTF-8"));
                        lineBuilder.append("\n");
                        writer.write(lineBuilder.toString());
                    }
                    writer.flush();
                } finally {
                    // Same ownership rule as fetchkeys above.
                    if (toFile)
                        writer.close();
                }
            }
        } else if (line.startsWith("help")) {
            commandOutput.println();
            commandOutput.println("Commands:");
            commandOutput.println(PROMPT + "put key value --- Associate the given value with the key.");
            commandOutput.println(PROMPT + "get key --- Retrieve the value associated with the key.");
            commandOutput.println(PROMPT + "getall key1 [key2...] --- Retrieve the value(s) associated with the key(s).");
            commandOutput.println(PROMPT + "delete key --- Remove all values associated with the key.");
            commandOutput.println(PROMPT + "preflist key --- Get node preference list for given key.");
            String metaKeyValues = voldemort.store.metadata.MetadataStore.METADATA_KEYS.toString();
            commandOutput.println(PROMPT + "getmetadata node_id meta_key --- Get store metadata associated " + "with meta_key from node_id. meta_key may be one of " + metaKeyValues.substring(1, metaKeyValues.length() - 1) + ".");
            commandOutput.println(PROMPT + "fetchkeys node_id store_name partitions <file_name> --- Fetch all keys " + "from given partitions (a comma separated list) of store_name on " + "node_id. Optionally, write to file_name. " + "Use getmetadata to determine appropriate values for store_name and partitions");
            commandOutput.println(PROMPT + "fetch node_id store_name partitions <file_name> --- Fetch all entries " + "from given partitions (a comma separated list) of store_name on " + "node_id. Optionally, write to file_name. " + "Use getmetadata to determine appropriate values for store_name and partitions");
            commandOutput.println(PROMPT + "help --- Print this message.");
            commandOutput.println(PROMPT + "exit --- Exit from this shell.");
            commandOutput.println();
            commandOutput.println("Avro usage:");
            commandOutput.println("For avro keys or values, ensure that the entire json string is enclosed within single quotes (').");
            commandOutput.println("Also, the field names and strings should STRICTLY be enclosed by double quotes(\")");
            commandOutput.println("eg: > put '{\"id\":1,\"name\":\"Vinoth Chandar\"}' '[{\"skill\":\"java\", \"score\":90.27, \"isendorsed\": true}]'");
        } else if (line.equals("quit") || line.equals("exit")) {
            commandOutput.println("bye.");
            System.exit(0);
        } else {
            errorStream.println("Invalid command. (Try 'help' for usage.)");
            return false;
        }
    } catch (EndOfFileException e) {
        errorStream.println("Expected additional token.");
    } catch (SerializationException e) {
        errorStream.print("Error serializing values: ");
        e.printStackTrace(errorStream);
    } catch (VoldemortException e) {
        errorStream.println("Exception thrown during operation.");
        e.printStackTrace(errorStream);
    } catch (ArrayIndexOutOfBoundsException e) {
        errorStream.println("Invalid command. (Try 'help' for usage.)");
    } catch (Exception e) {
        errorStream.println("Unexpected error:");
        e.printStackTrace(errorStream);
    }
    return true;
}
Also used : SerializationException(voldemort.serialization.SerializationException) Versioned(voldemort.versioning.Versioned) EndOfFileException(voldemort.serialization.json.EndOfFileException) FileWriter(java.io.FileWriter) IOException(java.io.IOException) SerializationException(voldemort.serialization.SerializationException) EndOfFileException(voldemort.serialization.json.EndOfFileException) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) Iterator(java.util.Iterator) ByteArray(voldemort.utils.ByteArray) OutputStreamWriter(java.io.OutputStreamWriter) List(java.util.List) ArrayList(java.util.ArrayList) File(java.io.File) Pair(voldemort.utils.Pair)

Example 43 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

The class AbstractStorageEngineTest, method testEntryIteration.

@Test
public void testEntryIteration() {
    // Populate the engine with a known number of key/value pairs, then verify
    // that entries() iterates over exactly that many entries.
    final int entryCount = 10000;
    final StorageEngine<ByteArray, byte[], byte[]> engine = getStorageEngine();
    for (int i = 0; i < entryCount; i++) {
        String key = "key-" + i;
        String value = "Value for " + key;
        engine.put(new ByteArray(key.getBytes()), new Versioned<byte[]>(value.getBytes()), null);
    }
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iterator = null;
    int observed = 0;
    try {
        iterator = engine.entries();
        while (iterator.hasNext()) {
            iterator.next();
            observed++;
        }
    } finally {
        // Always release the iterator, even if iteration throws.
        if (iterator != null) {
            iterator.close();
        }
    }
    assertEquals("Iterator returned by the call to entries() did not contain the expected number of values", entryCount, observed);
}
Also used : ByteArray(voldemort.utils.ByteArray) Pair(voldemort.utils.Pair) Test(org.junit.Test)

Example 44 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

The class AdminServiceRequestHandler, method handleFetchAndUpdate.

/**
 * Starts an asynchronous "fetch and update": streams data for the requested
 * partitions of one store from a remote node into this node's local store.
 *
 * Read-only stores have their partition files copied directly; read-write
 * stores have individual entries streamed and put one at a time. The work is
 * submitted to {@code asyncService} and this method returns immediately with
 * a status response carrying the async request id.
 *
 * @param request the protobuf request (node id, partition ids, store name,
 *                optional filter and optional initial-cluster XML)
 * @return a response with the async request id; on submission failure the
 *         error is encoded into the response instead of being thrown
 */
public VAdminProto.AsyncOperationStatusResponse handleFetchAndUpdate(VAdminProto.InitiateFetchAndUpdateRequest request) {
    final int nodeId = request.getNodeId();
    final List<Integer> partitionIds = request.getPartitionIdsList();
    // Absent filter means "accept everything".
    final VoldemortFilter filter = request.hasFilter() ? getFilterFromRequest(request.getFilter(), voldemortConfig, networkClassLoader) : new DefaultVoldemortFilter();
    final String storeName = request.getStore();
    // Optional snapshot of the cluster topology the fetch should be based on
    // (used during rebalance); null means the current cluster.
    final Cluster initialCluster = request.hasInitialCluster() ? new ClusterMapper().readCluster(new StringReader(request.getInitialCluster())) : null;
    int requestId = asyncService.getUniqueRequestId();
    VAdminProto.AsyncOperationStatusResponse.Builder response = VAdminProto.AsyncOperationStatusResponse.newBuilder().setRequestId(requestId).setComplete(false).setDescription("Fetch and update").setStatus("Started");
    final StoreDefinition storeDef = metadataStore.getStoreDef(storeName);
    final boolean isReadOnlyStore = storeDef.getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) == 0;
    // Streaming stats are only collected when JMX is enabled.
    final StreamingStats streamingStats = voldemortConfig.isJmxEnabled() ? storeRepository.getStreamingStats(storeName) : null;
    try {
        asyncService.submitOperation(requestId, new AsyncOperation(requestId, "Fetch and Update") {

            // Cooperative cancellation flag checked by the streaming loop below.
            private final AtomicBoolean running = new AtomicBoolean(true);

            @Override
            public void stop() {
                running.set(false);
                logger.info("Stopping fetch and update for store " + storeName + " from node " + nodeId + "( " + partitionIds + " )");
            }

            @Override
            public void operate() {
                // Temporary admin client for the duration of this operation;
                // closed in the finally block below.
                AdminClient adminClient = AdminClient.createTempAdminClient(voldemortConfig, metadataStore.getCluster(), voldemortConfig.getClientMaxConnectionsPerNode());
                try {
                    StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository, storeName);
                    EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxWriteBytesPerSec());
                    if (isReadOnlyStore) {
                        // RO stores: fetch the prebuilt partition files straight
                        // into the store's current data directory.
                        ReadOnlyStorageEngine readOnlyStorageEngine = ((ReadOnlyStorageEngine) storageEngine);
                        String destinationDir = readOnlyStorageEngine.getCurrentDirPath();
                        logger.info("Fetching files for RO store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        updateStatus("Fetching files for RO store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        adminClient.readonlyOps.fetchPartitionFiles(nodeId, storeName, partitionIds, destinationDir, readOnlyStorageEngine.getChunkedFileSet().getChunkIdToNumChunks().keySet(), running);
                    } else {
                        // RW stores: stream entries one at a time and put them
                        // into the local engine.
                        logger.info("Fetching entries for RW store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        updateStatus("Fetching entries for RW store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " ) ");
                        if (partitionIds.size() > 0) {
                            Iterator<Pair<ByteArray, Versioned<byte[]>>> entriesIterator = adminClient.bulkFetchOps.fetchEntries(nodeId, storeName, partitionIds, filter, false, initialCluster, 0);
                            long numTuples = 0;
                            long startTime = System.currentTimeMillis();
                            // startNs is re-stamped around each phase so network
                            // time and storage time are attributed separately.
                            long startNs = System.nanoTime();
                            while (running.get() && entriesIterator.hasNext()) {
                                Pair<ByteArray, Versioned<byte[]>> entry = entriesIterator.next();
                                if (streamingStats != null) {
                                    streamingStats.reportNetworkTime(Operation.UPDATE_ENTRIES, Utils.elapsedTimeNs(startNs, System.nanoTime()));
                                }
                                ByteArray key = entry.getFirst();
                                Versioned<byte[]> value = entry.getSecond();
                                startNs = System.nanoTime();
                                try {
                                    /**
                                         * TODO This also needs to be fixed to
                                         * use the atomic multi version puts
                                         */
                                    storageEngine.put(key, value, null);
                                } catch (ObsoleteVersionException e) {
                                    // log and ignore
                                    if (logger.isDebugEnabled()) {
                                        logger.debug("Fetch and update threw Obsolete version exception. Ignoring");
                                    }
                                } finally {
                                    if (streamingStats != null) {
                                        streamingStats.reportStreamingPut(Operation.UPDATE_ENTRIES);
                                        streamingStats.reportStorageTime(Operation.UPDATE_ENTRIES, Utils.elapsedTimeNs(startNs, System.nanoTime()));
                                    }
                                }
                                long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                                // Throttle on bytes moved to respect the configured
                                // max write rate.
                                throttler.maybeThrottle(key.length() + valueSize(value));
                                if ((numTuples % 100000) == 0 && numTuples > 0) {
                                    logger.info(numTuples + " entries copied from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                                    updateStatus(numTuples + " entries copied from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                                }
                                numTuples++;
                            }
                            long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                            if (running.get()) {
                                logger.info("Completed fetching " + numTuples + " entries from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                            } else {
                                logger.info("Fetch and update stopped after fetching " + numTuples + " entries for node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                            }
                        } else {
                            logger.info("No entries to fetch from node " + nodeId + " for store '" + storeName + "'");
                        }
                    }
                } finally {
                    adminClient.close();
                }
            }
        });
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleFetchAndUpdate failed for request(" + request.toString() + ")", e);
    }
    return response.build();
}
Also used : Versioned(voldemort.versioning.Versioned) MysqlStorageEngine(voldemort.store.mysql.MysqlStorageEngine) StorageEngine(voldemort.store.StorageEngine) SlopStorageEngine(voldemort.store.slop.SlopStorageEngine) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) DefaultVoldemortFilter(voldemort.client.protocol.admin.filter.DefaultVoldemortFilter) VoldemortFilter(voldemort.client.protocol.VoldemortFilter) VoldemortException(voldemort.VoldemortException) StreamingStats(voldemort.store.stats.StreamingStats) StoreDefinition(voldemort.store.StoreDefinition) StringReader(java.io.StringReader) ClosableIterator(voldemort.utils.ClosableIterator) Iterator(java.util.Iterator) ByteArray(voldemort.utils.ByteArray) Pair(voldemort.utils.Pair) EventThrottler(voldemort.utils.EventThrottler) Cluster(voldemort.cluster.Cluster) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) ClusterMapper(voldemort.xml.ClusterMapper) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) DefaultVoldemortFilter(voldemort.client.protocol.admin.filter.DefaultVoldemortFilter) AdminClient(voldemort.client.protocol.admin.AdminClient)

Example 45 with Pair

use of voldemort.utils.Pair in project voldemort by voldemort.

The class AdminServiceRequestHandler, method handleDeletePartitionEntries.

// TODO : Add ability to use partition scans
/**
 * Deletes entries from a read-write store on this node, applying the optional
 * request filter and keeping only entries whose keys belong to this node under
 * the (optionally supplied) initial cluster topology.
 *
 * Note: this walks every entry in the engine (no partition scan yet — see the
 * TODO above); the requested partition ids are currently only echoed in the
 * completion log message.
 *
 * @param request the protobuf request (store, partition ids, optional filter,
 *                optional initial-cluster XML)
 * @return response carrying the number of deleted entries, or an encoded error
 */
public VAdminProto.DeletePartitionEntriesResponse handleDeletePartitionEntries(VAdminProto.DeletePartitionEntriesRequest request) {
    VAdminProto.DeletePartitionEntriesResponse.Builder response = VAdminProto.DeletePartitionEntriesResponse.newBuilder();
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> iterator = null;
    try {
        String storeName = request.getStore();
        final List<Integer> partitionsIds = request.getPartitionIdsList();
        // Deletion is only meaningful for RW stores; RO stores are immutable.
        final boolean isReadWriteStore = metadataStore.getStoreDef(storeName).getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) != 0;
        if (!isReadWriteStore) {
            throw new VoldemortException("Cannot delete partitions for store " + storeName + " on node " + metadataStore.getNodeId() + " since it is not a RW store");
        }
        StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository, storeName);
        // Absent filter means "accept everything".
        VoldemortFilter filter = (request.hasFilter()) ? getFilterFromRequest(request.getFilter(), voldemortConfig, networkClassLoader) : new DefaultVoldemortFilter();
        EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxReadBytesPerSec());
        iterator = storageEngine.entries();
        long deleteSuccess = 0;
        // Fixed: the start-of-deletion log previously printed the store name
        // twice; it now reports the requested partition ids like the
        // completion message below.
        logger.info("Deleting entries for RW store " + storeName + " from node " + metadataStore.getNodeId() + " ( " + partitionsIds + " )");
        while (iterator.hasNext()) {
            Pair<ByteArray, Versioned<byte[]>> entry = iterator.next();
            ByteArray key = entry.getFirst();
            Versioned<byte[]> value = entry.getSecond();
            // Throttle on bytes scanned to respect the configured max read rate.
            throttler.maybeThrottle(key.length() + valueSize(value));
            // Delete only keys owned by this node (under the initial cluster
            // when one was supplied) that also pass the filter.
            if (StoreRoutingPlan.checkKeyBelongsToNode(key.get(), metadataStore.getNodeId(), request.hasInitialCluster() ? new ClusterMapper().readCluster(new StringReader(request.getInitialCluster())) : metadataStore.getCluster(), metadataStore.getStoreDef(storeName)) && filter.accept(key, value)) {
                if (storageEngine.delete(key, value.getVersion())) {
                    deleteSuccess++;
                    if ((deleteSuccess % 10000) == 0) {
                        logger.info(deleteSuccess + " entries deleted from node " + metadataStore.getNodeId() + " for store " + storeName);
                    }
                }
            }
        }
        logger.info("Completed deletion of entries for RW store " + storeName + " from node " + metadataStore.getNodeId() + " ( " + partitionsIds + " )");
        response.setCount(deleteSuccess);
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleDeletePartitionEntries failed for request(" + request.toString() + ")", e);
    } finally {
        // Always release the engine iterator, even on failure.
        if (null != iterator)
            iterator.close();
    }
    return response.build();
}
Also used : Versioned(voldemort.versioning.Versioned) EventThrottler(voldemort.utils.EventThrottler) ClusterMapper(voldemort.xml.ClusterMapper) VoldemortException(voldemort.VoldemortException) DefaultVoldemortFilter(voldemort.client.protocol.admin.filter.DefaultVoldemortFilter) VoldemortFilter(voldemort.client.protocol.VoldemortFilter) StringReader(java.io.StringReader) ByteArray(voldemort.utils.ByteArray) DefaultVoldemortFilter(voldemort.client.protocol.admin.filter.DefaultVoldemortFilter) Pair(voldemort.utils.Pair)

Aggregations

Pair (voldemort.utils.Pair)45 ByteArray (voldemort.utils.ByteArray)28 Versioned (voldemort.versioning.Versioned)25 VoldemortException (voldemort.VoldemortException)15 Node (voldemort.cluster.Node)15 IOException (java.io.IOException)14 StoreDefinition (voldemort.store.StoreDefinition)13 Test (org.junit.Test)11 File (java.io.File)10 VectorClock (voldemort.versioning.VectorClock)10 ArrayList (java.util.ArrayList)8 HashMap (java.util.HashMap)8 RoutingStrategyFactory (voldemort.routing.RoutingStrategyFactory)7 Cluster (voldemort.cluster.Cluster)6 DataOutputStream (java.io.DataOutputStream)5 FileNotFoundException (java.io.FileNotFoundException)5 Map (java.util.Map)5 ExecutionException (java.util.concurrent.ExecutionException)5 VoldemortFilter (voldemort.client.protocol.VoldemortFilter)5 DataInputStream (java.io.DataInputStream)4