Example 36 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

From class AdminServiceBasicTest, method testReplicationMapping.

@Test
public void testReplicationMapping() {
    List<Zone> zones = ServerTestUtils.getZones(2);
    List<Node> nodes = Lists.newArrayList();
    nodes.add(new Node(0, "localhost", 1, 2, 3, 0, Lists.newArrayList(0, 4, 8)));
    nodes.add(new Node(1, "localhost", 1, 2, 3, 0, Lists.newArrayList(1, 5, 9)));
    nodes.add(new Node(2, "localhost", 1, 2, 3, 1, Lists.newArrayList(2, 6, 10)));
    nodes.add(new Node(3, "localhost", 1, 2, 3, 1, Lists.newArrayList(3, 7, 11)));
    // Test 0 - With rep-factor 1
    StoreDefinition storeDef = ServerTestUtils.getStoreDef("consistent", 1, 1, 1, 1, 1, RoutingStrategyType.CONSISTENT_STRATEGY);
    Cluster newCluster = new Cluster("single_zone_cluster", nodes, zones);
    try {
        adminClient.replicaOps.getReplicationMapping(0, newCluster, storeDef);
        fail("Should have thrown an exception since rep-factor = 1");
    } catch (VoldemortException e) {
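        // Expected: a replication factor of 1 leaves nothing to replicate to.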
    }
    // Test 1 - With consistent routing strategy
    storeDef = ServerTestUtils.getStoreDef("consistent", 2, 1, 1, 1, 1, RoutingStrategyType.CONSISTENT_STRATEGY);
    // On node 0
    Map<Integer, List<Integer>> replicationMapping = adminClient.replicaOps.getReplicationMapping(0, newCluster, storeDef);
    {
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(1, Lists.newArrayList(0, 4, 8));
        expectedMapping.put(3, Lists.newArrayList(3, 7, 11));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        // On node 1
        replicationMapping = adminClient.replicaOps.getReplicationMapping(1, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(0, 4, 8));
        expectedMapping.put(2, Lists.newArrayList(1, 5, 9));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        // On node 2
        replicationMapping = adminClient.replicaOps.getReplicationMapping(2, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(1, Lists.newArrayList(1, 5, 9));
        expectedMapping.put(3, Lists.newArrayList(2, 6, 10));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        // On node 3
        replicationMapping = adminClient.replicaOps.getReplicationMapping(3, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(3, 7, 11));
        expectedMapping.put(2, Lists.newArrayList(2, 6, 10));
        assertEquals(expectedMapping, replicationMapping);
    }
    // Test 2 - With zone routing strategy
    HashMap<Integer, Integer> zoneReplicationFactors = Maps.newHashMap();
    for (int zoneIds = 0; zoneIds < 2; zoneIds++) {
        zoneReplicationFactors.put(zoneIds, 1);
    }
    storeDef = ServerTestUtils.getStoreDef("zone", 2, 1, 1, 1, 0, 0, zoneReplicationFactors, HintedHandoffStrategyType.PROXIMITY_STRATEGY, RoutingStrategyType.ZONE_STRATEGY);
    newCluster = new Cluster("multi_zone_cluster", nodes, zones);
    {
        // On node 0
        replicationMapping = adminClient.replicaOps.getReplicationMapping(0, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(2, Lists.newArrayList(0, 4, 8, 2, 6, 10));
        expectedMapping.put(3, Lists.newArrayList(3, 7, 11));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        // On node 1
        replicationMapping = adminClient.replicaOps.getReplicationMapping(1, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(2, Lists.newArrayList(1, 5, 9));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        // On node 2
        replicationMapping = adminClient.replicaOps.getReplicationMapping(2, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(0, 4, 8, 2, 6, 10));
        expectedMapping.put(1, Lists.newArrayList(1, 5, 9));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        // On node 3
        replicationMapping = adminClient.replicaOps.getReplicationMapping(3, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(3, 7, 11));
        assertEquals(expectedMapping, replicationMapping);
    }
    // Test 3 - Consistent with rep factor 3
    storeDef = ServerTestUtils.getStoreDef("consistent", 3, 1, 1, 1, 1, RoutingStrategyType.CONSISTENT_STRATEGY);
    newCluster = new Cluster("single_zone_cluster", nodes, zones);
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(0, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(1, Lists.newArrayList(0, 4, 8));
        expectedMapping.put(3, Lists.newArrayList(3, 7, 11));
        expectedMapping.put(2, Lists.newArrayList(2, 6, 10));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(1, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(0, 4, 8));
        expectedMapping.put(3, Lists.newArrayList(3, 7, 11));
        expectedMapping.put(2, Lists.newArrayList(1, 5, 9));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(2, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(0, 4, 8));
        expectedMapping.put(1, Lists.newArrayList(1, 5, 9));
        expectedMapping.put(3, Lists.newArrayList(2, 6, 10));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(3, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(3, 7, 11));
        expectedMapping.put(1, Lists.newArrayList(1, 5, 9));
        expectedMapping.put(2, Lists.newArrayList(2, 6, 10));
        assertEquals(expectedMapping, replicationMapping);
    }
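    // Test 4 - With zone routing strategy and a zone replication factor of 2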
    zoneReplicationFactors = Maps.newHashMap();
    for (int zoneIds = 0; zoneIds < 2; zoneIds++) {
        zoneReplicationFactors.put(zoneIds, 2);
    }
    storeDef = ServerTestUtils.getStoreDef("zone", 1, 1, 1, 1, 0, 0, zoneReplicationFactors, HintedHandoffStrategyType.PROXIMITY_STRATEGY, RoutingStrategyType.ZONE_STRATEGY);
    newCluster = new Cluster("multi_zone_cluster", nodes, zones);
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(0, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(1, Lists.newArrayList(0, 4, 8, 1, 5, 9));
        expectedMapping.put(2, Lists.newArrayList(2, 6, 10));
        expectedMapping.put(3, Lists.newArrayList(3, 7, 11));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(1, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(0, 4, 8));
        expectedMapping.put(2, Lists.newArrayList(1, 5, 9, 2, 6, 10));
        expectedMapping.put(3, Lists.newArrayList(3, 7, 11));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(2, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(0, 4, 8));
        expectedMapping.put(1, Lists.newArrayList(1, 5, 9));
        expectedMapping.put(3, Lists.newArrayList(2, 6, 10, 3, 7, 11));
        assertEquals(expectedMapping, replicationMapping);
    }
    {
        replicationMapping = adminClient.replicaOps.getReplicationMapping(3, newCluster, storeDef);
        HashMap<Integer, List<Integer>> expectedMapping = Maps.newHashMap();
        expectedMapping.put(0, Lists.newArrayList(0, 4, 8, 3, 7, 11));
        expectedMapping.put(1, Lists.newArrayList(1, 5, 9));
        expectedMapping.put(2, Lists.newArrayList(2, 6, 10));
        assertEquals(expectedMapping, replicationMapping);
    }
}
Also used : HashMap(java.util.HashMap) Zone(voldemort.cluster.Zone) Node(voldemort.cluster.Node) Cluster(voldemort.cluster.Cluster) VoldemortException(voldemort.VoldemortException) StoreDefinition(voldemort.store.StoreDefinition) List(java.util.List) ArrayList(java.util.ArrayList) Test(org.junit.Test)
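
The try/fail/catch pattern in Test 0 is the classic pre-JUnit-4.13 way of asserting that a call throws. As a point of comparison only, here is a minimal sketch of the same rep-factor-1 check written with assertThrows, assuming JUnit 4.13+ and Java 8+ are available (the original test does not require either):

// Sketch only: the same assertion as Test 0 above, expressed with org.junit.Assert.assertThrows.
// adminClient, newCluster and storeDef are the fixtures built earlier in the test.
VoldemortException thrown = assertThrows(VoldemortException.class,
        () -> adminClient.replicaOps.getReplicationMapping(0, newCluster, storeDef));
// The call must fail because a replication factor of 1 leaves nothing to replicate to.
assertNotNull(thrown);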

Example 37 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

From class HdfsFetcher, method fetchFromSource.

private File fetchFromSource(String sourceFileUrl, String destinationFile, AsyncOperationStatus status, String storeName, long pushVersion, Long diskQuotaSizeInKB, MetadataStore metadataStore) throws Exception {
    ObjectName jmxName = null;
    HdfsCopyStats stats = null;
    FileSystem fs = null;
    sourceFileUrl = VoldemortUtils.modifyURL(sourceFileUrl, voldemortConfig);
    // Flag to indicate whether the fetch is complete or not
    boolean isCompleteFetch = false;
    try {
        // Record as one store fetch
        HdfsCopyStats.storeFetch();
        fs = HadoopUtils.getHadoopFileSystem(voldemortConfig, sourceFileUrl);
        final Path rootPath = new Path(sourceFileUrl);
        File destination = new File(destinationFile);
        if (destination.exists()) {
            throw new VoldemortException("Version directory " + destination.getAbsolutePath() + " already exists");
        }
        boolean isFile = isFile(fs, rootPath);
        // The stats file is initially disabled, in order to fetch just the first metadata file
        stats = new HdfsCopyStats(sourceFileUrl, destination, false, maxVersionsStatsFile, isFile, null);
        jmxName = JmxUtils.registerMbean("hdfs-copy-" + copyCount.getAndIncrement(), stats);
        logger.info("Starting fetch for : " + sourceFileUrl);
        FetchStrategy fetchStrategy = new BasicFetchStrategy(this, fs, stats, status, bufferSize);
        if (!isFile) {
            // We are asked to fetch a directory
            Utils.mkdirs(destination);
            HdfsDirectory rootDirectory = new HdfsDirectory(fs, rootPath, this.voldemortConfig);
            List<HdfsDirectory> directoriesToFetch = Lists.newArrayList();
            HdfsFile metadataFile = rootDirectory.getMetadataFile();
            Long expectedDiskSize = -1L;
            if (metadataFile != null) {
                File copyLocation = new File(destination, metadataFile.getPath().getName());
                fetchStrategy.fetch(metadataFile, copyLocation, null);
                rootDirectory.initializeMetadata(copyLocation);
                if (metadataFile.getDiskFileName().equals(ReadOnlyUtils.FULL_STORE_METADATA_FILE)) {
                    // Then we are in build.primary.replica.only mode, and we need to determine which
                    // partition sub-directories to download
                    Set<Integer> partitions = getPartitionsForCurrentNode(metadataStore, storeName);
                    for (int partitionId : partitions) {
                        String partitionKey = ReadOnlyUtils.PARTITION_DIRECTORY_PREFIX + partitionId;
                        ReadOnlyStorageMetadata partitionMetadata = rootDirectory.getMetadata().getNestedMetadata(partitionKey);
                        String diskSizeInBytes = (String) partitionMetadata.get(ReadOnlyStorageMetadata.DISK_SIZE_IN_BYTES);
                        if (diskSizeInBytes != null && !diskSizeInBytes.isEmpty()) {
                            logger.debug("Partition " + partitionId + " is served by this node and is not empty, so it will be downloaded.");
                            if (expectedDiskSize == -1) {
                                expectedDiskSize = Long.parseLong(diskSizeInBytes);
                            } else {
                                expectedDiskSize += Long.parseLong(diskSizeInBytes);
                            }
                            HdfsDirectory partitionDirectory = new HdfsDirectory(fs, new Path(rootPath, partitionKey), this.voldemortConfig);
                            partitionDirectory.initializeMetadata(partitionMetadata);
                            directoriesToFetch.add(partitionDirectory);
                        } else {
                            logger.debug("Partition " + partitionId + " is served by this node but it is empty, so it will be skipped.");
                        }
                    }
                } else {
                    // Then we are not in build.primary.replica.only mode (old behavior), and we
                    // need to download the entire node directory we're currently in.
                    String diskSizeInBytes = (String) rootDirectory.getMetadata().get(ReadOnlyStorageMetadata.DISK_SIZE_IN_BYTES);
                    if (diskSizeInBytes != null && !diskSizeInBytes.isEmpty()) {
                        expectedDiskSize = Long.parseLong(diskSizeInBytes);
                    }
                    directoriesToFetch.add(rootDirectory);
                }
            }
            checkIfQuotaExceeded(diskQuotaSizeInKB, storeName, destination, expectedDiskSize);
            stats = new HdfsCopyStats(sourceFileUrl, destination, enableStatsFile, maxVersionsStatsFile, isFile, new HdfsPathInfo(directoriesToFetch));
            fetchStrategy = new BasicFetchStrategy(this, fs, stats, status, bufferSize);
            logger.debug("directoriesToFetch for store '" + storeName + "': " + Arrays.toString(directoriesToFetch.toArray()));
            for (HdfsDirectory directoryToFetch : directoriesToFetch) {
                Map<HdfsFile, byte[]> fileCheckSumMap = fetchStrategy.fetch(directoryToFetch, destination);
                if (directoryToFetch.validateCheckSum(fileCheckSumMap)) {
                    logger.info("Completed fetch: " + sourceFileUrl);
                } else {
                    stats.checkSumFailed();
                    logger.error("Checksum did not match for " + directoryToFetch.toString() + " !");
                    return null;
                }
            }
            isCompleteFetch = true;
            return destination;
        } else if (allowFetchingOfSingleFile) {
            /** This code path is only used by {@link #main(String[])} */
            Utils.mkdirs(destination);
            HdfsFile file = new HdfsFile(fs.getFileStatus(rootPath));
            String fileName = file.getDiskFileName();
            File copyLocation = new File(destination, fileName);
            fetchStrategy.fetch(file, copyLocation, CheckSumType.NONE);
            logger.info("Completed fetch : " + sourceFileUrl);
            isCompleteFetch = true;
            return destination;
        } else {
            logger.error("Source " + rootPath.toString() + " should be a directory");
            return null;
        }
    } catch (Exception e) {
        if (stats != null) {
            stats.reportError("File fetcher failed for destination " + destinationFile, e);
        }
        // Since AuthenticationException may happen before stats object initialization (HadoopUtils.getHadoopFileSystem),
        // we use the static method to capture all the exceptions here.
        HdfsCopyStats.reportExceptionForStats(e);
        if (e instanceof VoldemortException) {
            throw e;
        } else {
            throw new VoldemortException("Error thrown while trying to get data from Hadoop filesystem: " + e.getMessage(), e);
        }
    } finally {
        if (jmxName != null)
            JmxUtils.unregisterMbean(jmxName);
        if (stats != null) {
            stats.complete();
        }
        if (!isCompleteFetch) {
            HdfsCopyStats.incompleteFetch();
        }
        if (fs != null) {
            try {
                fs.close();
            } catch (Exception e) {
                String errorMessage = "Caught " + e.getClass().getSimpleName() + " while trying to close the filesystem instance (harmless).";
                if (stats != null) {
                    stats.reportError(errorMessage, e);
                }
                logger.debug(errorMessage, e);
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) VoldemortException(voldemort.VoldemortException) UnauthorizedStoreException(voldemort.store.readonly.UnauthorizedStoreException) IOException(java.io.IOException) QuotaExceededException(voldemort.store.quota.QuotaExceededException) ObjectName(javax.management.ObjectName) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ReadOnlyStorageMetadata(voldemort.store.readonly.ReadOnlyStorageMetadata) FileSystem(org.apache.hadoop.fs.FileSystem) File(java.io.File)
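
The catch block near the end of fetchFromSource illustrates a pattern that recurs throughout the fetcher code: a VoldemortException is rethrown untouched, while anything else is wrapped so callers only ever have to handle one exception type. A minimal standalone sketch of that idea follows; the helper name is illustrative and not part of the Voldemort API:

// Illustrative helper (not part of Voldemort): run a task, rethrow VoldemortException
// unchanged, and wrap any other failure so the caller deals with a single exception type.
static <T> T runWrapped(java.util.concurrent.Callable<T> task, String context) {
    try {
        return task.call();
    } catch (VoldemortException e) {
        // Already the exception type the caller expects; preserve it as-is.
        throw e;
    } catch (Exception e) {
        throw new VoldemortException(context + ": " + e.getMessage(), e);
    }
}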

Example 38 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

From class AbstractStoreBuilderConfigurable, method getPartition.

/**
     * This function computes which reduce task to shuffle a record to.
     */
public int getPartition(byte[] key, byte[] value, int numReduceTasks) {
    try {
        /**
             * {@link partitionId} is the Voldemort primary partition that this
             * record belongs to.
             */
        int partitionId = ByteUtils.readInt(value, ByteUtils.SIZE_OF_INT);
        /**
             * This is the base number we will ultimately mod by {@link numReduceTasks}
             * to determine which reduce task to shuffle to.
             */
        int magicNumber = partitionId;
        if (getSaveKeys() && !buildPrimaryReplicasOnly) {
            /**
                 * When saveKeys is enabled (which also implies we are generating
                 * READ_ONLY_V2 format files), then we are generating files with
                 * a replica type, with one file per replica.
                 *
                 * Each replica is sent to a different reducer, and thus the
                 * {@link magicNumber} is scaled accordingly.
                 *
                 * The downside to this is that it is pretty wasteful. The files
                 * generated for each replica are identical to one another, so
                 * there's no point in generating them independently in many
                 * reducers.
                 *
                 * This is one of the reasons why buildPrimaryReplicasOnly was
                 * written. In this mode, we only generate the files for the
                 * primary replica, which means the number of reducers is
                 * minimized and {@link magicNumber} does not need to be scaled.
                 */
            int replicaType = (int) ByteUtils.readBytes(value, 2 * ByteUtils.SIZE_OF_INT, ByteUtils.SIZE_OF_BYTE);
            magicNumber = magicNumber * getStoreDef().getReplicationFactor() + replicaType;
        }
        if (!getReducerPerBucket()) {
            /**
                 * Partition files can be split in many chunks in order to limit the
                 * maximum file size downloaded and handled by Voldemort servers.
                 *
             * {@link chunkId} represents which chunk of the partition the current
                 * record belongs to.
                 */
            int chunkId = ReadOnlyUtils.chunk(key, getNumChunks());
            /**
             * When reducerPerBucket is disabled, each chunk is sent to a
                 * different reducer. This increases parallelism at the expense
                 * of adding more load on Hadoop.
                 *
                 * {@link magicNumber} is thus scaled accordingly, in order to
                 * leverage the extra reducers available to us.
                 */
            magicNumber = magicNumber * getNumChunks() + chunkId;
        }
        /**
             * Finally, we mod {@link magicNumber} by {@link numReduceTasks},
             * since the MapReduce framework expects the return of this function
             * to be bounded by the number of reduce tasks running in the job.
             */
        return magicNumber % numReduceTasks;
    } catch (Exception e) {
        throw new VoldemortException("Caught exception in getPartition()!" + " key: " + ByteUtils.toHexString(key) + ", value: " + ByteUtils.toHexString(value) + ", numReduceTasks: " + numReduceTasks, e);
    }
}
Also used : VoldemortException(voldemort.VoldemortException) IOException(java.io.IOException)
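
To make the scaling in getPartition() concrete, here is a small worked sketch of the same arithmetic with made-up numbers; the replication factor, chunk count, and reduce-task count below are illustrative and do not come from the code above:

// Worked example of the getPartition() arithmetic, with hypothetical values.
int partitionId = 5;          // primary Voldemort partition of the record
int replicationFactor = 2;    // from the store definition
int numChunks = 3;            // chunks per partition
int numReduceTasks = 12;      // reduce tasks in the job
int replicaType = 1;          // secondary replica (saveKeys on, buildPrimaryReplicasOnly off)
int chunkId = 2;              // third chunk of this partition

int magicNumber = partitionId * replicationFactor + replicaType;  // 5 * 2 + 1 = 11
magicNumber = magicNumber * numChunks + chunkId;                  // 11 * 3 + 2 = 35
int reduceTask = magicNumber % numReduceTasks;                    // 35 % 12 = 11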

Example 39 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

From class AbstractStoreBuilderConfigurable, method configure.

public void configure(JobConf conf) {
    this.cluster = new ClusterMapper().readCluster(new StringReader(conf.get("cluster.xml")));
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new StringReader(conf.get("stores.xml")));
    if (storeDefs.size() != 1)
        throw new IllegalStateException("Expected to find exactly one store definition, but found " + storeDefs.size());
    this.storeDef = storeDefs.get(0);
    this.numChunks = conf.getInt(NUM_CHUNKS, -1);
    if (this.numChunks < 1) {
        // NUM_CHUNKS is computed and set by the build driver and is not meant to
        // be overridden by the user, so a missing or invalid value indicates a driver bug.
        throw new VoldemortException(NUM_CHUNKS + " not specified in the MapReduce JobConf (should NEVER happen)");
    }
    this.saveKeys = conf.getBoolean(VoldemortBuildAndPushJob.SAVE_KEYS, true);
    this.reducerPerBucket = conf.getBoolean(VoldemortBuildAndPushJob.REDUCER_PER_BUCKET, true);
    this.buildPrimaryReplicasOnly = conf.getBoolean(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY, false);
    if (buildPrimaryReplicasOnly && !saveKeys) {
        throw new IllegalStateException(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY + " can only be true if " + VoldemortBuildAndPushJob.SAVE_KEYS + " is also true.");
    }
}
Also used : StoreDefinition(voldemort.store.StoreDefinition) StringReader(java.io.StringReader) StoreDefinitionsMapper(voldemort.xml.StoreDefinitionsMapper) ClusterMapper(voldemort.xml.ClusterMapper) VoldemortException(voldemort.VoldemortException)
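
For orientation, here is a minimal sketch of the JobConf entries that configure() reads. The XML payloads and chunk count are placeholders, NUM_CHUNKS stands for the same constant the class above references, and the snippet assumes the same imports as the method it accompanies:

// Sketch only: the configuration keys configure() expects to find.
// clusterXml and storesXml are placeholder strings holding the serialized cluster
// topology and a store list containing exactly one store definition.
JobConf conf = new JobConf();
conf.set("cluster.xml", clusterXml);
conf.set("stores.xml", storesXml);
// NUM_CHUNKS is normally computed and set by the build driver, never by hand.
conf.setInt(NUM_CHUNKS, 4);
conf.setBoolean(VoldemortBuildAndPushJob.SAVE_KEYS, true);
conf.setBoolean(VoldemortBuildAndPushJob.REDUCER_PER_BUCKET, true);
conf.setBoolean(VoldemortBuildAndPushJob.BUILD_PRIMARY_REPLICAS_ONLY, false);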

Example 40 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

From class AvroStoreBuilderMapper, method map.

/**
     * Create the Voldemort key and value from the input Avro record by
     * extracting the key and value fields, and emit them for each of the
     * responsible Voldemort nodes.
     *
     * The output value is the node_id and partition_id of the responsible
     * node, followed by the serialized value.
     */
@Override
public void map(GenericData.Record record, AvroCollector<Pair<ByteBuffer, ByteBuffer>> collector, Reporter reporter) throws IOException {
    byte[] keyBytes = null;
    byte[] valBytes = null;
    Object keyRecord = null;
    Object valRecord = null;
    try {
        keyRecord = record.get(keyField);
        valRecord = record.get(valField);
        keyBytes = keySerializer.toBytes(keyRecord);
        valBytes = valueSerializer.toBytes(valRecord);
        this.collectorWrapper.setCollector(collector);
        this.mapper.map(keyBytes, valBytes, this.collectorWrapper);
        recordCounter++;
    } catch (OutOfMemoryError oom) {
        logger.error(oomErrorMessage(reporter));
        if (keyBytes == null) {
            logger.error("keyRecord caused OOM!");
        } else {
            logger.error("keyRecord: " + keyRecord);
            logger.error("valRecord: " + (valBytes == null ? "caused OOM" : valRecord));
        }
        throw new VoldemortException(oomErrorMessage(reporter), oom);
    }
}
Also used : VoldemortException(voldemort.VoldemortException)

Aggregations

VoldemortException (voldemort.VoldemortException)247 IOException (java.io.IOException)63 ByteArray (voldemort.utils.ByteArray)52 File (java.io.File)46 Node (voldemort.cluster.Node)42 StoreDefinition (voldemort.store.StoreDefinition)39 Versioned (voldemort.versioning.Versioned)38 ArrayList (java.util.ArrayList)34 Test (org.junit.Test)30 ObsoleteVersionException (voldemort.versioning.ObsoleteVersionException)26 List (java.util.List)21 HashMap (java.util.HashMap)20 Cluster (voldemort.cluster.Cluster)20 VectorClock (voldemort.versioning.VectorClock)16 NoSuchCapabilityException (voldemort.store.NoSuchCapabilityException)15 ReadOnlyStorageEngine (voldemort.store.readonly.ReadOnlyStorageEngine)14 ExecutionException (java.util.concurrent.ExecutionException)13 StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper)13 Map (java.util.Map)12 Path (org.apache.hadoop.fs.Path)12