Example 21 with Pair

Use of voldemort.utils.Pair in project voldemort by voldemort.

Class VoldemortAdminTool, method executeUpdateEntries.

private static void executeUpdateEntries(Integer nodeId, AdminClient adminClient, List<String> storeNames, String inputDirPath) throws IOException {
    List<StoreDefinition> storeDefinitionList = getStoreDefinitions(adminClient, nodeId);
    Map<String, StoreDefinition> storeDefinitionMap = Maps.newHashMap();
    for (StoreDefinition storeDefinition : storeDefinitionList) {
        storeDefinitionMap.put(storeDefinition.getName(), storeDefinition);
    }
    File inputDir = new File(inputDirPath);
    if (!inputDir.exists()) {
        throw new FileNotFoundException("input directory " + inputDirPath + " doesn't exist");
    }
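    // No stores specified: derive the store names from "*.entries" files in the input directory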
    if (storeNames == null) {
        storeNames = Lists.newArrayList();
        for (File storeFile : inputDir.listFiles()) {
            String fileName = storeFile.getName();
            if (fileName.endsWith(".entries")) {
                int extPosition = fileName.lastIndexOf(".entries");
                storeNames.add(fileName.substring(0, extPosition));
            }
        }
    }
    for (String storeName : storeNames) {
        Iterator<Pair<ByteArray, Versioned<byte[]>>> iterator = readEntriesBinary(inputDir, storeName);
        adminClient.streamingOps.updateEntries(nodeId, storeName, iterator, null);
    }
}
Also used: StoreDefinition(voldemort.store.StoreDefinition) FileNotFoundException(java.io.FileNotFoundException) File(java.io.File) Pair(voldemort.utils.Pair)
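
For context, the streaming call above accepts any Iterator<Pair<ByteArray, Versioned<byte[]>>>, not just one read back from disk. Below is a minimal sketch of hand-building a single entry and pushing it through the same API; the helper name, store name, and payload are illustrative placeholders:

import java.util.Collections;
import java.util.Iterator;
import voldemort.client.protocol.admin.AdminClient;
import voldemort.utils.ByteArray;
import voldemort.utils.Pair;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

// Hypothetical helper: stream one key/value entry to a node.
private static void pushOneEntry(AdminClient adminClient, int nodeId, String storeName) {
    ByteArray key = new ByteArray("hello".getBytes());
    Versioned<byte[]> value = new Versioned<byte[]>("world".getBytes(), new VectorClock());
    Iterator<Pair<ByteArray, Versioned<byte[]>>> entries = Collections.singletonList(Pair.create(key, value)).iterator();
    // A null filter means the entries are streamed without server-side filtering.
    adminClient.streamingOps.updateEntries(nodeId, storeName, entries, null);
}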

Example 22 with Pair

Use of voldemort.utils.Pair in project voldemort by voldemort.

Class ExportBDBToTextDump, method main.

public static void main(String[] argv) throws Exception {
    OptionParser parser = getParser();
    OptionSet options = parser.parse(argv);
    validateOptions(options);
    // Read the BDB store folder and output folder paths from the parsed options
    String storeBdbFolderPath = (String) options.valueOf("bdb");
    String outputFolderPath = (String) options.valueOf("output");
    File storeBdbFolder = new File(storeBdbFolderPath);
    File outputFolder = new File(outputFolderPath);
    final String storeName = storeBdbFolder.getName();
    Properties properties = new Properties();
    properties.put("node.id", "0");
    properties.put("voldemort.home", storeBdbFolder.getParent());
    VoldemortConfig voldemortConfig = new VoldemortConfig(properties);
    voldemortConfig.setBdbDataDirectory(storeBdbFolder.getParent());
    voldemortConfig.setEnableJmx(false);
    voldemortConfig.setBdbOneEnvPerStore(true);
    BdbStorageConfiguration bdbConfiguration = new BdbStorageConfiguration(voldemortConfig);
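    // Minimal stub carrying just the store name; the other definition fields are unused here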
    class MockStoreDefinition extends StoreDefinition {

        public MockStoreDefinition() {
            super(storeName, null, null, null, null, null, null, null, 0, null, 0, null, 0, null, null, null, null, null, null, null, null, null, null, null, null, 0);
        }

        @Override
        public boolean hasMemoryFootprint() {
            return false;
        }
    }
    StoreDefinition storeDef = new MockStoreDefinition();
    StorageEngine<ByteArray, byte[], byte[]> engine = bdbConfiguration.getStore(storeDef, null);
    long reportIntervalMs = 10000L;
    long lastCount = 0;
    Reporter<Boolean> rp = new Reporter<Boolean>(reportIntervalMs);
    long count = 0;
    BufferedWriter splitFileWriter = null;
    ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entries = engine.entries();
    while (entries.hasNext()) {
        if (splitFileWriter == null) {
            long splitId = count / SPLIT_SIZE;
            File splitFile = new File(outputFolder, makeSplitFileName(splitId));
            splitFileWriter = new BufferedWriter(new FileWriter(splitFile), WRITER_BUFFER_SIZE);
        }
        Pair<ByteArray, Versioned<byte[]>> pair = entries.next();
        String line = makeLine(pair);
        splitFileWriter.write(line);
        if ((count + 1) % SPLIT_SIZE == 0) {
            splitFileWriter.close();
            splitFileWriter = null;
        }
        count++;
        final Long countObject = count;
        Boolean reported = rp.tryReport(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                System.out.print(String.format("Exported %15d entries", countObject));
                return true;
            }
        });
        if (reported != null) {
            System.out.println(String.format("; Speed: %8d/s", (count - lastCount) / (reportIntervalMs / 1000)));
            lastCount = count;
        }
    }
    entries.close();
    if (splitFileWriter != null) {
        splitFileWriter.close();
    }
    System.out.println(String.format("Finished exporting %d entries", count));
}
Also used: Versioned(voldemort.versioning.Versioned) Properties(java.util.Properties) OptionParser(joptsimple.OptionParser) VoldemortConfig(voldemort.server.VoldemortConfig) StoreDefinition(voldemort.store.StoreDefinition) ByteArray(voldemort.utils.ByteArray) BdbStorageConfiguration(voldemort.store.bdb.BdbStorageConfiguration) Pair(voldemort.utils.Pair) OptionSet(joptsimple.OptionSet)
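
The makeLine helper referenced above is not shown in this snippet; its job is just to unpack each Pair into one text line. A hypothetical version along these lines (the hex encoding and space separator are assumptions about the format, and it relies on voldemort.utils.ByteUtils.toHexString):

// Hypothetical formatter: unpack the key and the versioned value from the Pair.
private static String makeLine(Pair<ByteArray, Versioned<byte[]>> pair) {
    ByteArray key = pair.getFirst();
    Versioned<byte[]> versionedValue = pair.getSecond();
    return ByteUtils.toHexString(key.get()) + " " + ByteUtils.toHexString(versionedValue.getValue()) + "\n";
}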

Example 23 with Pair

Use of voldemort.utils.Pair in project voldemort by voldemort.

Class ExceededQuotaSlopTest, method setGetPutQuotasForEachServer.

public void setGetPutQuotasForEachServer() throws Exception {
    Properties adminProperties = new Properties();
    adminProperties.setProperty("max_connections", "2");
    adminClient = new AdminClient(cluster, new AdminClientConfig().setMaxConnectionsPerNode(2));
    Map<Pair<Integer, QuotaType>, Integer> throughPutMap = new HashMap<Pair<Integer, QuotaType>, Integer>();
    // Set Node0 Quota
    throughPutMap.put(new Pair<Integer, QuotaType>(0, QuotaType.PUT_THROUGHPUT), 5);
    throughPutMap.put(new Pair<Integer, QuotaType>(0, QuotaType.GET_THROUGHPUT), 20);
    // Set Node1 Quota
    throughPutMap.put(new Pair<Integer, QuotaType>(1, QuotaType.PUT_THROUGHPUT), 2);
    throughPutMap.put(new Pair<Integer, QuotaType>(1, QuotaType.GET_THROUGHPUT), 20);
    for (Entry<Pair<Integer, QuotaType>, Integer> throughPut : throughPutMap.entrySet()) {
        int nodeId = throughPut.getKey().getFirst();
        QuotaType type = throughPut.getKey().getSecond();
        int value = throughPut.getValue();
        VectorClock clock = VectorClockUtils.makeClockWithCurrentTime(cluster.getNodeIds());
        NodeValue<ByteArray, byte[]> operationValue = new NodeValue<ByteArray, byte[]>(nodeId, new ByteArray(getKeyBytes(type)), new Versioned<byte[]>(ByteUtils.getBytes(Integer.toString(value), encodingType), clock));
        try {
            adminClient.storeOps.putNodeKeyValue(quotaStoreName, operationValue);
        } catch (Exception e) {
            throw new Exception("Exception when setting put quota for node " + nodeId + " Operation " + type + "." + e.getMessage());
        }
    }
}
Also used: NodeValue(voldemort.store.routed.NodeValue) AdminClientConfig(voldemort.client.protocol.admin.AdminClientConfig) HashMap(java.util.HashMap) QuotaType(voldemort.store.quota.QuotaType) VectorClock(voldemort.versioning.VectorClock) Properties(java.util.Properties) VoldemortException(voldemort.VoldemortException) IOException(java.io.IOException) ByteArray(voldemort.utils.ByteArray) AdminClient(voldemort.client.protocol.admin.AdminClient) Pair(voldemort.utils.Pair)
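
The Pair usage to note here is the composite map key: (nodeId, quota type) pairs index the target throughput values, which relies on Pair providing value-based equals and hashCode. A minimal standalone sketch of that keying, with illustrative quota values and helper name:

import java.util.HashMap;
import java.util.Map;
import voldemort.store.quota.QuotaType;
import voldemort.utils.Pair;

// Pair.create and the explicit constructor produce equal keys, so either form works for lookups.
private static int lookupNode0PutQuota() {
    Map<Pair<Integer, QuotaType>, Integer> quotas = new HashMap<Pair<Integer, QuotaType>, Integer>();
    quotas.put(Pair.create(0, QuotaType.PUT_THROUGHPUT), 5);
    return quotas.get(new Pair<Integer, QuotaType>(0, QuotaType.PUT_THROUGHPUT)); // returns 5
}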

Example 24 with Pair

Use of voldemort.utils.Pair in project voldemort by voldemort.

Class JsonStoreBuilder, method buildVersion2.

public void buildVersion2() throws IOException {
    logger.info("Building store " + storeDefinition.getName() + " for " + cluster.getNumberOfPartitions() + " partitions, " + storeDefinition.getReplicationFactor() + " replica types, " + numChunks + " chunks per partitions per replica type and type " + ReadOnlyStorageFormat.READONLY_V2);
    // Initialize files
    DataOutputStream[][] indexes = new DataOutputStream[cluster.getNumberOfPartitions()][];
    DataOutputStream[][] datas = new DataOutputStream[cluster.getNumberOfPartitions()][];
    int[][] positions = new int[cluster.getNumberOfPartitions()][];
    File tempDirectory = new File(Utils.notNull(System.getProperty("java.io.tmpdir")), "tempDir-" + Integer.toString(new Random().nextInt()));
    Utils.mkdirs(tempDirectory);
    for (int partitionId = 0; partitionId < cluster.getNumberOfPartitions(); partitionId++) {
        indexes[partitionId] = new DataOutputStream[storeDefinition.getReplicationFactor() * numChunks];
        datas[partitionId] = new DataOutputStream[storeDefinition.getReplicationFactor() * numChunks];
        positions[partitionId] = new int[storeDefinition.getReplicationFactor() * numChunks];
        int globalChunkId = 0;
        for (int repType = 0; repType < storeDefinition.getReplicationFactor(); repType++) {
            for (int chunk = 0; chunk < numChunks; chunk++) {
                File indexFile = new File(tempDirectory, Integer.toString(partitionId) + "_" + Integer.toString(repType) + "_" + Integer.toString(chunk) + ".index");
                File dataFile = new File(tempDirectory, Integer.toString(partitionId) + "_" + Integer.toString(repType) + "_" + Integer.toString(chunk) + ".data");
                positions[partitionId][globalChunkId] = 0;
                indexes[partitionId][globalChunkId] = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(indexFile), ioBufferSize));
                datas[partitionId][globalChunkId] = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(dataFile), ioBufferSize));
                globalChunkId++;
            }
        }
    }
    logger.info("Reading items...");
    ExternalSorter<KeyValuePair> sorter = new ExternalSorter<KeyValuePair>(new KeyValuePairSerializer(), new KeyMd5Comparator(), internalSortSize, tempDir.getAbsolutePath(), ioBufferSize, numThreads, gzipIntermediate);
    JsonObjectIterator iter = new JsonObjectIterator(reader, storeDefinition);
    int count = 0;
    HashMap<Pair<Integer, Integer>, Pair<byte[], byte[]>> previousElements = Maps.newHashMap();
    for (KeyValuePair currentElement : sorter.sorted(iter)) {
        List<Integer> partitionIds = this.routingStrategy.getPartitionList(currentElement.getKey());
        int masterPartition = partitionIds.get(0);
        int localChunkId = ReadOnlyUtils.chunk(currentElement.getKeyMd5(), numChunks);
        for (int replicaType = 0; replicaType < partitionIds.size(); replicaType++) {
            int globalChunkId = (replicaType * numChunks) + localChunkId;
            Pair<Integer, Integer> key = Pair.create(masterPartition, globalChunkId);
            if (!previousElements.containsKey(key)) {
                // First element: write it to the map
                previousElements.put(key, Pair.create(ByteUtils.copy(currentElement.getKeyMd5(), 0, 2 * ByteUtils.SIZE_OF_INT), generateFirstElement(currentElement)));
            } else {
                Pair<byte[], byte[]> previousElement = previousElements.get(key);
                // If the key MD5 prefix matches the previous element, append this tuple to it...
                if (ByteUtils.compare(previousElement.getFirst(), currentElement.getKeyMd5(), 0, 2 * ByteUtils.SIZE_OF_INT) == 0) {
                    short numKeys = ByteUtils.readShort(previousElement.getSecond(), 0);
                    ByteArrayOutputStream stream = new ByteArrayOutputStream();
                    DataOutputStream valueStream = new DataOutputStream(stream);
                    valueStream.writeShort(numKeys + 1);
                    // Append the previous tuples
                    valueStream.write(ByteUtils.copy(previousElement.getSecond(), ByteUtils.SIZE_OF_SHORT, previousElement.getSecond().length));
                    valueStream.writeInt(currentElement.getKey().length);
                    valueStream.writeInt(currentElement.getValue().length);
                    valueStream.write(currentElement.getKey());
                    valueStream.write(currentElement.getValue());
                    valueStream.flush();
                    previousElements.put(key, Pair.create(previousElement.getFirst(), stream.toByteArray()));
                } else {
                    // ...else, flush the previous element to disk
                    indexes[masterPartition][globalChunkId].write(previousElement.getFirst());
                    indexes[masterPartition][globalChunkId].writeInt(positions[masterPartition][globalChunkId]);
                    datas[masterPartition][globalChunkId].write(previousElement.getSecond());
                    positions[masterPartition][globalChunkId] += previousElement.getSecond().length;
                    // ...and add current element as previous element
                    previousElements.put(key, Pair.create(ByteUtils.copy(currentElement.getKeyMd5(), 0, 2 * ByteUtils.SIZE_OF_INT), generateFirstElement(currentElement)));
                }
            }
        }
        count++;
    }
    logger.info(count + " items read.");
    // Flush any remaining buffered elements to their index and data files
    for (Entry<Pair<Integer, Integer>, Pair<byte[], byte[]>> entry : previousElements.entrySet()) {
        int partitionId = entry.getKey().getFirst();
        int globalChunkId = entry.getKey().getSecond();
        byte[] keyMd5 = entry.getValue().getFirst();
        byte[] value = entry.getValue().getSecond();
        indexes[partitionId][globalChunkId].write(keyMd5);
        indexes[partitionId][globalChunkId].writeInt(positions[partitionId][globalChunkId]);
        datas[partitionId][globalChunkId].write(value);
    }
    // Create node folders
    Map<Integer, File> nodeDirs = new HashMap<Integer, File>(cluster.getNumberOfNodes());
    for (Node node : cluster.getNodes()) {
        int nodeId = node.getId();
        // Create data directory
        File nodeDir = new File(outputDir, "node-" + Integer.toString(nodeId));
        nodeDir.mkdirs();
        // Add the data directory to the array
        nodeDirs.put(node.getId(), nodeDir);
        // Create metadata file
        BufferedWriter writer = new BufferedWriter(new FileWriter(new File(nodeDir, ".metadata")));
        ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata();
        metadata.add(ReadOnlyStorageMetadata.FORMAT, ReadOnlyStorageFormat.READONLY_V2.getCode());
        writer.write(metadata.toJsonString());
        writer.close();
    }
    // Close everything
    logger.info("Closing all store files.");
    for (int partitionId = 0; partitionId < cluster.getNumberOfPartitions(); partitionId++) {
        for (int chunk = 0; chunk < numChunks * storeDefinition.getReplicationFactor(); chunk++) {
            indexes[partitionId][chunk].close();
            datas[partitionId][chunk].close();
        }
    }
    // Start moving files over to their correct node
    RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDefinition, cluster);
    Map<Integer, Integer> replicaMapping = cluster.getPartitionIdToNodeIdMap();
    for (File file : tempDirectory.listFiles()) {
        String fileName = file.getName();
        if (fileName.matches("^[\\d]+_[\\d]+_[\\d]+\\.(data|index)")) {
            String[] props = fileName.split("_");
            int partitionId = Integer.parseInt(props[0]);
            int replicaType = Integer.parseInt(props[1]);
            int nodeId = replicaMapping.get(strategy.getReplicatingPartitionList(partitionId).get(replicaType));
            Utils.move(file, new File(nodeDirs.get(nodeId), fileName));
        }
    }
}
Also used: HashMap(java.util.HashMap) DataOutputStream(java.io.DataOutputStream) RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) Node(voldemort.cluster.Node) FileWriter(java.io.FileWriter) BufferedWriter(java.io.BufferedWriter) Random(java.util.Random) RoutingStrategy(voldemort.routing.RoutingStrategy) BufferedOutputStream(java.io.BufferedOutputStream) Pair(voldemort.utils.Pair) ByteArrayOutputStream(java.io.ByteArrayOutputStream) FileOutputStream(java.io.FileOutputStream) File(java.io.File)
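
Two Pair roles sit side by side in this builder: a (masterPartition, globalChunkId) key that selects the output chunk, and a (key MD5 prefix, serialized tuples) payload that buffers colliding entries until they are flushed. A short sketch of the accessors involved, with illustrative variable names:

// Pair.create infers the type parameters; the explicit constructor is equivalent.
Pair<Integer, Integer> chunkKey = Pair.create(masterPartition, globalChunkId);
Pair<byte[], byte[]> buffered = Pair.create(keyMd5Prefix, serializedTuples);
byte[] md5Prefix = buffered.getFirst(); // first component of the pair
byte[] tuples = buffered.getSecond(); // second component of the pair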

Example 25 with Pair

Use of voldemort.utils.Pair in project voldemort by voldemort.

Class VoldemortAdminTool, method getMetadataVersionsForNode.

private static String getMetadataVersionsForNode(AdminClient adminClient, int nodeId) {
    List<Integer> partitionIdList = Lists.newArrayList();
    for (Node node : adminClient.getAdminClientCluster().getNodes()) {
        partitionIdList.addAll(node.getPartitionIds());
    }
    Iterator<Pair<ByteArray, Versioned<byte[]>>> entriesIterator = adminClient.bulkFetchOps.fetchEntries(nodeId, SystemStoreConstants.SystemStoreName.voldsys$_metadata_version_persistence.name(), partitionIdList, null, true);
    Serializer<String> serializer = new StringSerializer("UTF8");
    String keyObject = null;
    String valueObject = null;
    while (entriesIterator.hasNext()) {
        try {
            Pair<ByteArray, Versioned<byte[]>> kvPair = entriesIterator.next();
            byte[] keyBytes = kvPair.getFirst().get();
            byte[] valueBytes = kvPair.getSecond().getValue();
            keyObject = serializer.toObject(keyBytes);
            if (!keyObject.equals(SystemStoreConstants.VERSIONS_METADATA_KEY)) {
                continue;
            }
            valueObject = serializer.toObject(valueBytes);
        } catch (Exception e) {
            System.err.println("Error while retrieving Metadata versions from node : " + nodeId + ". Exception = \n");
            e.printStackTrace();
            System.exit(-1);
        }
    }
    return valueObject;
}
Also used: Versioned(voldemort.versioning.Versioned) Node(voldemort.cluster.Node) StoreNotFoundException(com.sleepycat.persist.StoreNotFoundException) DecoderException(org.apache.commons.codec.DecoderException) EOFException(java.io.EOFException) SerializationException(voldemort.serialization.SerializationException) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) InvalidMetadataException(voldemort.store.InvalidMetadataException) ByteArray(voldemort.utils.ByteArray) StringSerializer(voldemort.serialization.StringSerializer) Pair(voldemort.utils.Pair)
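
The fetchEntries/Pair iteration above generalizes to any store. A minimal sketch that simply counts the entries one node streams back; the helper name and arguments are placeholders, and the final boolean requests master entries as in the call above:

import java.util.Iterator;
import java.util.List;
import voldemort.client.protocol.admin.AdminClient;
import voldemort.utils.ByteArray;
import voldemort.utils.Pair;
import voldemort.versioning.Versioned;

// Hypothetical helper: count the entries streamed from one node for a store.
private static long countEntries(AdminClient adminClient, int nodeId, String storeName, List<Integer> partitionIds) {
    Iterator<Pair<ByteArray, Versioned<byte[]>>> it = adminClient.bulkFetchOps.fetchEntries(nodeId, storeName, partitionIds, null, true);
    long count = 0;
    while (it.hasNext()) {
        it.next();
        count++;
    }
    return count;
}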

Aggregations

Pair (voldemort.utils.Pair): 45
ByteArray (voldemort.utils.ByteArray): 28
Versioned (voldemort.versioning.Versioned): 25
VoldemortException (voldemort.VoldemortException): 15
Node (voldemort.cluster.Node): 15
IOException (java.io.IOException): 14
StoreDefinition (voldemort.store.StoreDefinition): 13
Test (org.junit.Test): 11
File (java.io.File): 10
VectorClock (voldemort.versioning.VectorClock): 10
ArrayList (java.util.ArrayList): 8
HashMap (java.util.HashMap): 8
RoutingStrategyFactory (voldemort.routing.RoutingStrategyFactory): 7
Cluster (voldemort.cluster.Cluster): 6
DataOutputStream (java.io.DataOutputStream): 5
FileNotFoundException (java.io.FileNotFoundException): 5
Map (java.util.Map): 5
ExecutionException (java.util.concurrent.ExecutionException): 5
VoldemortFilter (voldemort.client.protocol.VoldemortFilter): 5
DataInputStream (java.io.DataInputStream): 4