Example 11 with ReadOnlyStorageEngine

Use of voldemort.store.readonly.ReadOnlyStorageEngine in project voldemort by voldemort.

In class ReadOnlyStoreManagementServlet, method getReadOnlyStores.

private List<ReadOnlyStorageEngine> getReadOnlyStores(VoldemortServer server) {
    StorageService storage = (StorageService) Utils.notNull(server).getService(ServiceType.STORAGE);
    List<ReadOnlyStorageEngine> l = Lists.newArrayList();
    for (StorageEngine<ByteArray, byte[], byte[]> engine : storage.getStoreRepository().getStorageEnginesByClass(ReadOnlyStorageEngine.class)) {
        l.add((ReadOnlyStorageEngine) engine);
    }
    return l;
}
Also used : ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) ByteArray(voldemort.utils.ByteArray) StorageService(voldemort.server.storage.StorageService)
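
For context, here is a minimal usage sketch (not taken from the source) of how such a helper could be consumed inside the servlet, for example to list the RO store names and their current version ids. The doGet signature, the response writing, and the myVoldemortServer field are assumptions made for illustration; getName() and getCurrentVersionId() are existing methods on the engine.

// Hypothetical usage sketch, not part of ReadOnlyStoreManagementServlet as shipped.
// Assumes javax.servlet.http.* and java.io.PrintWriter are available and that the
// servlet keeps a VoldemortServer reference in a (hypothetical) myVoldemortServer field.
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException {
    PrintWriter writer = response.getWriter();
    for (ReadOnlyStorageEngine store : getReadOnlyStores(myVoldemortServer)) {
        // getName() comes from the Store interface; getCurrentVersionId() is RO-specific
        writer.println(store.getName() + " -> version " + store.getCurrentVersionId());
    }
}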

Example 12 with ReadOnlyStorageEngine

Use of voldemort.store.readonly.ReadOnlyStorageEngine in project voldemort by voldemort.

In class HadoopStoreBuilderTest, method testHadoopBuild.

@Test
public void testHadoopBuild() throws Exception {
    // create test data
    Map<String, String> values = new HashMap<String, String>();
    File testDir = TestUtils.createTempDir();
    File tempDir = new File(testDir, "temp"), tempDir2 = new File(testDir, "temp2");
    File outputDir = new File(testDir, "output"), outputDir2 = new File(testDir, "output2");
    File storeDir = TestUtils.createTempDir(testDir);
    for (int i = 0; i < 200; i++) values.put(Integer.toString(i), Integer.toBinaryString(i));
    // write test data to text file
    File inputFile = File.createTempFile("input", ".txt", testDir);
    inputFile.deleteOnExit();
    StringBuilder contents = new StringBuilder();
    for (Map.Entry<String, String> entry : values.entrySet()) contents.append(entry.getKey() + "\t" + entry.getValue() + "\n");
    FileUtils.writeStringToFile(inputFile, contents.toString());
    String storeName = "test";
    SerializerDefinition serDef = new SerializerDefinition("string");
    Cluster cluster = ServerTestUtils.getLocalCluster(1);
    // Test backwards compatibility
    StoreDefinition def = new StoreDefinitionBuilder().setName(storeName).setType(ReadOnlyStorageConfiguration.TYPE_NAME).setKeySerializer(serDef).setValueSerializer(serDef).setRoutingPolicy(RoutingTier.CLIENT).setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY).setReplicationFactor(1).setPreferredReads(1).setRequiredReads(1).setPreferredWrites(1).setRequiredWrites(1).build();
    HadoopStoreBuilder builder = new HadoopStoreBuilder("testHadoopBuild", new Props(), new JobConf(), TextStoreMapper.class, TextInputFormat.class, cluster, def, new Path(tempDir2.getAbsolutePath()), new Path(outputDir2.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, saveKeys, false, 64 * 1024, false, null, false);
    builder.build();
    builder = new HadoopStoreBuilder("testHadoopBuild", new Props(), new JobConf(), TextStoreMapper.class, TextInputFormat.class, cluster, def, new Path(tempDir.getAbsolutePath()), new Path(outputDir.getAbsolutePath()), new Path(inputFile.getAbsolutePath()), CheckSumType.MD5, saveKeys, false, 64 * 1024, false, null, false);
    builder.build();
    // Check if checkSum is generated in outputDir
    File nodeFile = new File(outputDir, "node-0");
    // Check if metadata file exists
    File metadataFile = new File(nodeFile, ".metadata");
    Assert.assertTrue("Metadata file should exist!", metadataFile.exists());
    ReadOnlyStorageMetadata metadata = new ReadOnlyStorageMetadata(metadataFile);
    if (saveKeys)
        Assert.assertEquals("In saveKeys mode, the metadata format should be READONLY_V2!", metadata.get(ReadOnlyStorageMetadata.FORMAT), ReadOnlyStorageFormat.READONLY_V2.getCode());
    else
        Assert.assertEquals("In legacy mode (saveKeys==false), the metadata format should be READONLY_V1!", metadata.get(ReadOnlyStorageMetadata.FORMAT), ReadOnlyStorageFormat.READONLY_V1.getCode());
    Assert.assertEquals("Checksum type should be MD5!", metadata.get(ReadOnlyStorageMetadata.CHECKSUM_TYPE), CheckSum.toString(CheckSumType.MD5));
    // Check contents of checkSum file
    byte[] md5 = Hex.decodeHex(((String) metadata.get(ReadOnlyStorageMetadata.CHECKSUM)).toCharArray());
    byte[] checkSumBytes = CheckSumTests.calculateCheckSum(nodeFile.listFiles(), CheckSumType.MD5);
    Assert.assertEquals("Checksum is not as excepted!", 0, ByteUtils.compare(checkSumBytes, md5));
    // check if fetching works
    HdfsFetcher fetcher = new HdfsFetcher();
    // Fetch to version directory
    File versionDir = new File(storeDir, "version-0");
    fetcher.fetch(nodeFile.getAbsolutePath(), versionDir.getAbsolutePath());
    Assert.assertTrue("Version directory should exist!", versionDir.exists());
    // open store
    @SuppressWarnings("unchecked") Serializer<Object> serializer = (Serializer<Object>) new DefaultSerializerFactory().getSerializer(serDef);
    ReadOnlyStorageEngine engine = new ReadOnlyStorageEngine(storeName, searchStrategy, new RoutingStrategyFactory().updateRoutingStrategy(def, cluster), 0, storeDir, 1);
    Store<Object, Object, Object> store = SerializingStore.wrap(engine, serializer, serializer, serializer);
    // check values
    for (Map.Entry<String, String> entry : values.entrySet()) {
        String key = entry.getKey();
        try {
            List<Versioned<Object>> found = store.get(key, null);
            Assert.assertEquals("Incorrect number of results", 1, found.size());
            Assert.assertEquals(entry.getValue(), found.get(0).getValue());
        } catch (VoldemortException e) {
            throw new VoldemortException("Got an exception while trying to get key '" + key + "'.", e);
        }
    }
    // also check the iterator - first key iterator...
    try {
        ClosableIterator<ByteArray> keyIterator = engine.keys();
        if (!saveKeys) {
            fail("Should have thrown an exception since this RO format does not support iterators");
        }
        int numElements = 0;
        while (keyIterator.hasNext()) {
            Assert.assertTrue(values.containsKey(serializer.toObject(keyIterator.next().get())));
            numElements++;
        }
        Assert.assertEquals(numElements, values.size());
    } catch (UnsupportedOperationException e) {
        if (saveKeys) {
            fail("Should not have thrown an exception since this RO format does support iterators");
        }
    }
    // ... and entry iterator
    try {
        ClosableIterator<Pair<ByteArray, Versioned<byte[]>>> entryIterator = engine.entries();
        if (!saveKeys) {
            fail("Should have thrown an exception since this RO format does not support iterators");
        }
        int numElements = 0;
        while (entryIterator.hasNext()) {
            Pair<ByteArray, Versioned<byte[]>> entry = entryIterator.next();
            Assert.assertEquals(values.get(serializer.toObject(entry.getFirst().get())), serializer.toObject(entry.getSecond().getValue()));
            numElements++;
        }
        Assert.assertEquals(numElements, values.size());
    } catch (UnsupportedOperationException e) {
        if (saveKeys) {
            fail("Should not have thrown an exception since this RO format does support iterators");
        }
    }
}
Also used : Versioned(voldemort.versioning.Versioned) HashMap(java.util.HashMap) RoutingStrategyFactory(voldemort.routing.RoutingStrategyFactory) Props(voldemort.utils.Props) VoldemortException(voldemort.VoldemortException) ReadOnlyStorageMetadata(voldemort.store.readonly.ReadOnlyStorageMetadata) StoreDefinition(voldemort.store.StoreDefinition) ByteArray(voldemort.utils.ByteArray) JobConf(org.apache.hadoop.mapred.JobConf) Serializer(voldemort.serialization.Serializer) Pair(voldemort.utils.Pair) StoreDefinitionBuilder(voldemort.store.StoreDefinitionBuilder) Path(org.apache.hadoop.fs.Path) Cluster(voldemort.cluster.Cluster) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) DefaultSerializerFactory(voldemort.serialization.DefaultSerializerFactory) HdfsFetcher(voldemort.store.readonly.fetcher.HdfsFetcher) TextInputFormat(org.apache.hadoop.mapred.TextInputFormat) File(java.io.File) Map(java.util.Map) SerializerDefinition(voldemort.serialization.SerializerDefinition) Test(org.junit.Test)
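
The test body refers to saveKeys and searchStrategy fields that are defined outside this snippet. Below is a minimal sketch of what such a parameterized fixture could look like; the exact parameter wiring and the choice of BinarySearchStrategy are assumptions for illustration, not copied from HadoopStoreBuilderTest.

// Hypothetical sketch of the parameterized fixture the test above relies on.
// Assumes JUnit 4's Parameterized runner plus java.util.Arrays/Collection and
// voldemort.store.readonly.SearchStrategy/BinarySearchStrategy; the real class
// may declare additional parameters or a different search strategy.
@RunWith(Parameterized.class)
public class HadoopStoreBuilderTest {

    private final boolean saveKeys;
    private final SearchStrategy searchStrategy = new BinarySearchStrategy();

    public HadoopStoreBuilderTest(boolean saveKeys) {
        this.saveKeys = saveKeys;
    }

    @Parameterized.Parameters
    public static Collection<Object[]> configs() {
        // run the build once in saveKeys mode (READONLY_V2) and once in legacy mode (READONLY_V1)
        return Arrays.asList(new Object[][] { { true }, { false } });
    }

    // ... testHadoopBuild() as shown above ...
}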

Example 13 with ReadOnlyStorageEngine

Use of voldemort.store.readonly.ReadOnlyStorageEngine in project voldemort by voldemort.

In class AdminRebalanceTest, method checkRO.

private void checkRO(Cluster cluster) {
    for (StoreDefinition storeDef : Lists.newArrayList(storeDef1, storeDef2)) {
        Map<Integer, Set<Pair<Integer, Integer>>> nodeToPartitions = ROTestUtils.getNodeIdToAllPartitions(cluster, storeDef, true);
        for (Map.Entry<Integer, Set<Pair<Integer, Integer>>> entry : nodeToPartitions.entrySet()) {
            int nodeId = entry.getKey();
            Set<Pair<Integer, Integer>> buckets = entry.getValue();
            assertEquals(servers[nodeId].getMetadataStore().getCluster(), cluster);
            ReadOnlyStorageEngine engine = (ReadOnlyStorageEngine) servers[nodeId].getStoreRepository().getStorageEngine(storeDef.getName());
            HashMap<Object, Integer> storeBuckets = engine.getChunkedFileSet().getChunkIdToNumChunks();
            for (Pair<Integer, Integer> bucket : buckets) {
                if (bucket.getFirst() < storeDef.getReplicationFactor())
                    assertEquals(storeBuckets.containsKey(Pair.create(bucket.getSecond(), bucket.getFirst())), true);
            }
        }
    }
}
Also used : Set(java.util.Set) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) StoreDefinition(voldemort.store.StoreDefinition) Map(java.util.Map) HashMap(java.util.HashMap) Pair(voldemort.utils.Pair)
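
The argument swap in Pair.create(bucket.getSecond(), bucket.getFirst()) bridges two key orders: the buckets returned by getNodeIdToAllPartitions read here as (replica type, partition id), while the chunk map appears to be keyed the other way around. A tiny illustration under that assumption:

// Assumption for illustration: bucket = (replicaType, partitionId), while the keys of
// getChunkIdToNumChunks() are (partitionId, replicaType); the swap aligns the two.
Pair<Integer, Integer> bucket = Pair.create(1, 7);   // replica type 1, partition 7
Pair<Integer, Integer> chunkKey = Pair.create(bucket.getSecond(), bucket.getFirst());
// chunkKey is now (7, 1), the form expected by storeBuckets.containsKey(...)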

Example 14 with ReadOnlyStorageEngine

Use of voldemort.store.readonly.ReadOnlyStorageEngine in project voldemort by voldemort.

In class AdminServiceRequestHandler, method handleFailedROFetch.

public VAdminProto.FailedFetchStoreResponse handleFailedROFetch(VAdminProto.FailedFetchStoreRequest request) {
    final String storeDir = request.getStoreDir();
    final String storeName = request.getStoreName();
    VAdminProto.FailedFetchStoreResponse.Builder response = VAdminProto.FailedFetchStoreResponse.newBuilder();
    try {
        if (!Utils.isReadableDir(storeDir))
            throw new VoldemortException("Could not read folder " + storeDir + " correctly to delete it");
        final ReadOnlyStorageEngine store = getReadOnlyStorageEngine(metadataStore, storeRepository, storeName);
        if (store.getCurrentVersionId() == ReadOnlyUtils.getVersionId(new File(storeDir))) {
            logger.warn("Cannot delete " + storeDir + " for " + storeName + " since it is the current dir");
            return response.build();
        }
        logger.info("Deleting data from failed fetch for RO store '" + storeName + "' and directory '" + storeDir + "'");
        // Lets delete the folder
        Utils.rm(new File(storeDir));
        logger.info("Successfully deleted data from failed fetch for RO store '" + storeName + "' and directory '" + storeDir + "'");
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleFailedFetch failed for request(" + request.toString() + ")", e);
    }
    return response.build();
}
Also used : ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) VoldemortException(voldemort.VoldemortException) File(java.io.File)
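
The early return above protects the store's live data: RO stores keep each pushed data set in a directory named version-<id>, and the handler refuses to delete a directory whose id matches the engine's current version. An illustrative parse of such a directory name follows; it is not the actual ReadOnlyUtils code, which does the real parsing in Voldemort.

// Illustrative only; ReadOnlyUtils.getVersionId() performs the real parsing in Voldemort.
private static long parseVersionId(File versionDir) {
    String name = versionDir.getName();                  // e.g. "version-12"
    if (!name.startsWith("version-")) {
        return -1;                                       // not a version directory
    }
    return Long.parseLong(name.substring("version-".length()));
}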

Example 15 with ReadOnlyStorageEngine

Use of voldemort.store.readonly.ReadOnlyStorageEngine in project voldemort by voldemort.

In class AdminServiceRequestHandler, method handleFetchAndUpdate.

public VAdminProto.AsyncOperationStatusResponse handleFetchAndUpdate(VAdminProto.InitiateFetchAndUpdateRequest request) {
    final int nodeId = request.getNodeId();
    final List<Integer> partitionIds = request.getPartitionIdsList();
    final VoldemortFilter filter = request.hasFilter() ? getFilterFromRequest(request.getFilter(), voldemortConfig, networkClassLoader) : new DefaultVoldemortFilter();
    final String storeName = request.getStore();
    final Cluster initialCluster = request.hasInitialCluster() ? new ClusterMapper().readCluster(new StringReader(request.getInitialCluster())) : null;
    int requestId = asyncService.getUniqueRequestId();
    VAdminProto.AsyncOperationStatusResponse.Builder response = VAdminProto.AsyncOperationStatusResponse.newBuilder().setRequestId(requestId).setComplete(false).setDescription("Fetch and update").setStatus("Started");
    final StoreDefinition storeDef = metadataStore.getStoreDef(storeName);
    final boolean isReadOnlyStore = storeDef.getType().compareTo(ReadOnlyStorageConfiguration.TYPE_NAME) == 0;
    final StreamingStats streamingStats = voldemortConfig.isJmxEnabled() ? storeRepository.getStreamingStats(storeName) : null;
    try {
        asyncService.submitOperation(requestId, new AsyncOperation(requestId, "Fetch and Update") {

            private final AtomicBoolean running = new AtomicBoolean(true);

            @Override
            public void stop() {
                running.set(false);
                logger.info("Stopping fetch and update for store " + storeName + " from node " + nodeId + "( " + partitionIds + " )");
            }

            @Override
            public void operate() {
                AdminClient adminClient = AdminClient.createTempAdminClient(voldemortConfig, metadataStore.getCluster(), voldemortConfig.getClientMaxConnectionsPerNode());
                try {
                    StorageEngine<ByteArray, byte[], byte[]> storageEngine = getStorageEngine(storeRepository, storeName);
                    EventThrottler throttler = new EventThrottler(voldemortConfig.getStreamMaxWriteBytesPerSec());
                    if (isReadOnlyStore) {
                        ReadOnlyStorageEngine readOnlyStorageEngine = ((ReadOnlyStorageEngine) storageEngine);
                        String destinationDir = readOnlyStorageEngine.getCurrentDirPath();
                        logger.info("Fetching files for RO store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        updateStatus("Fetching files for RO store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        adminClient.readonlyOps.fetchPartitionFiles(nodeId, storeName, partitionIds, destinationDir, readOnlyStorageEngine.getChunkedFileSet().getChunkIdToNumChunks().keySet(), running);
                    } else {
                        logger.info("Fetching entries for RW store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " )");
                        updateStatus("Fetching entries for RW store '" + storeName + "' from node " + nodeId + " ( " + partitionIds + " ) ");
                        if (partitionIds.size() > 0) {
                            Iterator<Pair<ByteArray, Versioned<byte[]>>> entriesIterator = adminClient.bulkFetchOps.fetchEntries(nodeId, storeName, partitionIds, filter, false, initialCluster, 0);
                            long numTuples = 0;
                            long startTime = System.currentTimeMillis();
                            long startNs = System.nanoTime();
                            while (running.get() && entriesIterator.hasNext()) {
                                Pair<ByteArray, Versioned<byte[]>> entry = entriesIterator.next();
                                if (streamingStats != null) {
                                    streamingStats.reportNetworkTime(Operation.UPDATE_ENTRIES, Utils.elapsedTimeNs(startNs, System.nanoTime()));
                                }
                                ByteArray key = entry.getFirst();
                                Versioned<byte[]> value = entry.getSecond();
                                startNs = System.nanoTime();
                                try {
                                    /**
                                     * TODO This also needs to be fixed to
                                     * use the atomic multi version puts
                                     */
                                    storageEngine.put(key, value, null);
                                } catch (ObsoleteVersionException e) {
                                    // log and ignore
                                    if (logger.isDebugEnabled()) {
                                        logger.debug("Fetch and update threw Obsolete version exception. Ignoring");
                                    }
                                } finally {
                                    if (streamingStats != null) {
                                        streamingStats.reportStreamingPut(Operation.UPDATE_ENTRIES);
                                        streamingStats.reportStorageTime(Operation.UPDATE_ENTRIES, Utils.elapsedTimeNs(startNs, System.nanoTime()));
                                    }
                                }
                                long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                                throttler.maybeThrottle(key.length() + valueSize(value));
                                if ((numTuples % 100000) == 0 && numTuples > 0) {
                                    logger.info(numTuples + " entries copied from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                                    updateStatus(numTuples + " entries copied from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                                }
                                numTuples++;
                            }
                            long totalTime = (System.currentTimeMillis() - startTime) / 1000;
                            if (running.get()) {
                                logger.info("Completed fetching " + numTuples + " entries from node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                            } else {
                                logger.info("Fetch and update stopped after fetching " + numTuples + " entries for node " + nodeId + " for store '" + storeName + "' in " + totalTime + " seconds");
                            }
                        } else {
                            logger.info("No entries to fetch from node " + nodeId + " for store '" + storeName + "'");
                        }
                    }
                } finally {
                    adminClient.close();
                }
            }
        });
    } catch (VoldemortException e) {
        response.setError(ProtoUtils.encodeError(errorCodeMapper, e));
        logger.error("handleFetchAndUpdate failed for request(" + request.toString() + ")", e);
    }
    return response.build();
}
Also used : Versioned(voldemort.versioning.Versioned) MysqlStorageEngine(voldemort.store.mysql.MysqlStorageEngine) StorageEngine(voldemort.store.StorageEngine) SlopStorageEngine(voldemort.store.slop.SlopStorageEngine) ReadOnlyStorageEngine(voldemort.store.readonly.ReadOnlyStorageEngine) DefaultVoldemortFilter(voldemort.client.protocol.admin.filter.DefaultVoldemortFilter) VoldemortFilter(voldemort.client.protocol.VoldemortFilter) VoldemortException(voldemort.VoldemortException) StreamingStats(voldemort.store.stats.StreamingStats) StoreDefinition(voldemort.store.StoreDefinition) StringReader(java.io.StringReader) ClosableIterator(voldemort.utils.ClosableIterator) Iterator(java.util.Iterator) ByteArray(voldemort.utils.ByteArray) Pair(voldemort.utils.Pair) EventThrottler(voldemort.utils.EventThrottler) Cluster(voldemort.cluster.Cluster) ClusterMapper(voldemort.xml.ClusterMapper) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ObsoleteVersionException(voldemort.versioning.ObsoleteVersionException) AdminClient(voldemort.client.protocol.admin.AdminClient)
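
The throttler call in the loop charges each streamed tuple by key length plus valueSize(value), a helper of the handler that is not shown in this snippet. A rough, hypothetical stand-in for what such an estimate could compute is sketched below; the real helper may weigh the version metadata differently.

// Hypothetical size estimate, only to illustrate the throttling input; this is
// not the actual valueSize() implementation in AdminServiceRequestHandler.
// Assumes voldemort.versioning.VectorClock is imported.
private static int approximateValueSize(Versioned<byte[]> value) {
    int size = value.getValue().length;                            // payload bytes
    if (value.getVersion() instanceof VectorClock) {
        size += ((VectorClock) value.getVersion()).sizeInBytes();  // vector clock bytes
    }
    return size;
}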

Aggregations

ReadOnlyStorageEngine (voldemort.store.readonly.ReadOnlyStorageEngine): 21 usages
VoldemortException (voldemort.VoldemortException): 15 usages
File (java.io.File): 12 usages
StoreDefinition (voldemort.store.StoreDefinition): 6 usages
ByteArray (voldemort.utils.ByteArray): 5 usages
IOException (java.io.IOException): 4 usages
ServletException (javax.servlet.ServletException): 4 usages
Cluster (voldemort.cluster.Cluster): 4 usages
Pair (voldemort.utils.Pair): 4 usages
Versioned (voldemort.versioning.Versioned): 4 usages
HashMap (java.util.HashMap): 3 usages
Map (java.util.Map): 3 usages
VAdminProto (voldemort.client.protocol.pb.VAdminProto): 3 usages
RoutingStrategyFactory (voldemort.routing.RoutingStrategyFactory): 3 usages
SerializerDefinition (voldemort.serialization.SerializerDefinition): 3 usages
StoreDefinitionBuilder (voldemort.store.StoreDefinitionBuilder): 3 usages
Set (java.util.Set): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
JobConf (org.apache.hadoop.mapred.JobConf): 2 usages
Test (org.junit.Test): 2 usages