Example 41 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

The class HadoopStoreBuilderUtils, method getDataFileChunkSet:

/**
     * Converts an array of FileStatus data files to a DataFileChunkSet. The
     * input is generally the output of the getChunkFiles function.
     * 
     * Works only for {@link ReadOnlyStorageFormat#READONLY_V2}.
     * 
     * @param fs Filesystem used
     * @param files List of data chunk files
     * @return The corresponding data chunk set
     * @throws IOException if the chunk files cannot be accessed
     */
public static DataFileChunkSet getDataFileChunkSet(FileSystem fs, FileStatus[] files) throws IOException {
    // Make sure every file name satisfies the READONLY_V2 (partitionId_replicaType_chunkId) format
    List<FileStatus> fileList = Lists.newArrayList();
    for (FileStatus file : files) {
        if (!ReadOnlyUtils.isFormatCorrect(file.getPath().getName(), ReadOnlyStorageFormat.READONLY_V2)) {
            throw new VoldemortException("Incorrect data file name format for " + file.getPath().getName() + ". Unsupported by " + ReadOnlyStorageFormat.READONLY_V2);
        }
        fileList.add(file);
    }
    // Return the files sorted by chunk id
    Collections.sort(fileList, new Comparator<FileStatus>() {

        @Override
        public int compare(FileStatus f1, FileStatus f2) {
            int chunkId1 = ReadOnlyUtils.getChunkId(f1.getPath().getName());
            int chunkId2 = ReadOnlyUtils.getChunkId(f2.getPath().getName());
            return chunkId1 - chunkId2;
        }
    });
    List<DataFileChunk> dataFiles = Lists.newArrayList();
    List<Integer> dataFileSizes = Lists.newArrayList();
    for (FileStatus file : fileList) {
        dataFiles.add(new HdfsDataFileChunk(fs, file));
        dataFileSizes.add((int) file.getLen());
    }
    return new DataFileChunkSet(dataFiles, dataFileSizes);
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) DataFileChunk(voldemort.store.readonly.chunk.DataFileChunk) DataFileChunkSet(voldemort.store.readonly.chunk.DataFileChunkSet) VoldemortException(voldemort.VoldemortException)
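
A minimal usage sketch for the helper above, assuming HadoopStoreBuilderUtils is on the classpath and the chunk files sit under an HDFS directory produced by the store builder; the namenode URL, directory, and the *.data glob are illustrative, not part of the Voldemort API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import voldemort.store.readonly.chunk.DataFileChunkSet;

public class DataFileChunkSetExample {

    public static void main(String[] args) throws Exception {
        // Hypothetical location of one node's READONLY_V2 output
        Path nodeDir = new Path("hdfs://namenode:8020/tmp/my-store/node-0");
        FileSystem fs = nodeDir.getFileSystem(new Configuration());
        // Collect only the .data chunk files; index files are read separately
        FileStatus[] dataFiles = fs.globStatus(new Path(nodeDir, "*.data"));
        if (dataFiles == null || dataFiles.length == 0) {
            throw new IllegalStateException("No .data chunk files under " + nodeDir);
        }
        // Throws VoldemortException if any name is not partitionId_replicaType_chunkId
        DataFileChunkSet chunkSet = HadoopStoreBuilderUtils.getDataFileChunkSet(fs, dataFiles);
        System.out.println("Assembled a chunk set from " + dataFiles.length + " data files.");
    }
}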

Example 42 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

The class VoldemortBuildAndPushJob, method run:

@Override
public void run() throws Exception {
    invokeHooks(BuildAndPushStatus.STARTING);
    if (hooks.size() > 0) {
        heartBeatHookFuture = executorService.submit(heartBeatHookRunnable);
    }
    try {
        // These two options control the build and push phases of the job respectively.
        boolean build = props.getBoolean(BUILD, true);
        boolean push = props.getBoolean(PUSH, true);
        checkForPreconditions(build, push);
        negotiateJobSettingsWithServers();
        try {
            // The cluster equality check is performed after negotiating job settings
            // with the servers because the constraints are relaxed if the servers
            // support/enable the 'build.primary.replicas.only' mode.
            allClustersEqual(clusterURLs);
        } catch (VoldemortException e) {
            log.error("Exception during cluster equality check", e);
            fail("Exception during cluster equality check: " + e.toString());
            throw e;
        }
        String buildOutputDir = null;
        // Track the push task future for each cluster URL
        Map<String, Future<Boolean>> tasks = Maps.newHashMap();
        for (int index = 0; index < clusterURLs.size(); index++) {
            String url = clusterURLs.get(index);
            if (isAvroJob) {
                // Verify the schema if the store exists or else add the new store
                verifyOrAddAvroStore(url, isAvroVersioned);
            } else {
                // Verify the schema if the store exists or else add the new store
                verifyOrAddJsonStore(url);
            }
            if (build) {
                // If we are also pushing, we only want to build once and reuse the output
                if (!push || buildOutputDir == null) {
                    try {
                        invokeHooks(BuildAndPushStatus.BUILDING);
                        buildOutputDir = runBuildStore(props, url);
                    } catch (Exception e) {
                        log.error("Exception during build for URL: " + url, e);
                        exceptions.put(url, e);
                    }
                }
            }
            if (push) {
                log.info("Pushing to cluster URL: " + clusterURLs.get(index));
                // If we didn't build in this run, take the directory from dataDirs; otherwise reuse the one we built earlier
                if (!build) {
                    buildOutputDir = dataDirs.get(index);
                }
                // The build phase may have failed for this URL; if so, skip the push entirely
                if (buildOutputDir == null) {
                    continue;
                }
                tasks.put(url, executorService.submit(new StorePushTask(props, url, buildOutputDir)));
            }
        }
        // We can safely shutdown storeVerificationExecutorService here since all the verifications are done.
        if (null != storeVerificationExecutorService) {
            storeVerificationExecutorService.shutdownNow();
            storeVerificationExecutorService = null;
        }
        for (Map.Entry<String, Future<Boolean>> task : tasks.entrySet()) {
            String url = task.getKey();
            Boolean success = false;
            try {
                success = task.getValue().get();
            } catch (Exception e) {
                exceptions.put(url, e);
            }
            if (success) {
                log.info("Successfully pushed to cluster URL: " + url);
            }
        }
        if (build && push && buildOutputDir != null && !props.getBoolean(BUILD_OUTPUT_KEEP, false)) {
            JobConf jobConf = new JobConf();
            if (props.containsKey(HADOOP_JOB_UGI)) {
                jobConf.set(HADOOP_JOB_UGI, props.getString(HADOOP_JOB_UGI));
            }
            log.info("Cleaning up: Deleting BnP output and temp files from HDFS: " + buildOutputDir);
            HadoopUtils.deletePathIfExists(jobConf, buildOutputDir);
            log.info("Deleted " + buildOutputDir);
        }
        if (exceptions.size() == 0) {
            invokeHooks(BuildAndPushStatus.FINISHED);
            cleanUp();
        } else {
            log.error("Got exceptions during Build and Push:");
            for (Map.Entry<String, Exception> entry : exceptions.entrySet()) {
                log.error("Exception for cluster: " + entry.getKey(), entry.getValue());
            }
            throw new VoldemortException("Got exceptions during Build and Push");
        }
    } catch (Exception e) {
        fail(e.toString());
        throw new VoldemortException("An exception occurred during Build and Push !!", e);
    } catch (Throwable t) {
        // This is for OOMs, StackOverflows and other uber nasties...
        // We'll try to invoke hooks but all bets are off at this point :/
        fail(t.toString());
        // N.B.: Azkaban's AbstractJob#run throws Exception, not Throwable, so we can't rethrow directly...
        throw new Exception("A non-Exception Throwable was caught! Bubbling it up as an Exception...", t);
    }
}
Also used : VoldemortException(voldemort.VoldemortException) RecoverableFailedFetchException(voldemort.store.readonly.swapper.RecoverableFailedFetchException) UnreachableStoreException(voldemort.store.UnreachableStoreException) UninitializedMessageException(com.google.protobuf.UninitializedMessageException) IOException(java.io.IOException) BootstrapFailureException(voldemort.client.BootstrapFailureException) Future(java.util.concurrent.Future) Map(java.util.Map) HashMap(java.util.HashMap) JobConf(org.apache.hadoop.mapred.JobConf)
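
The bookkeeping in the push phase above (one Future per cluster URL, exceptions collected per URL instead of aborting mid-loop) is worth seeing in isolation. A self-contained sketch of that fan-out/fan-in pattern; the names pushTo and SAMPLE_URLS are hypothetical stand-ins for the real StorePushTask and cluster list:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutPushSketch {

    static final List<String> SAMPLE_URLS =
            Arrays.asList("tcp://cluster-a:6666", "tcp://cluster-b:6666");

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(SAMPLE_URLS.size());
        Map<String, Future<Boolean>> tasks = new HashMap<>();
        Map<String, Exception> exceptions = new HashMap<>();
        for (String url : SAMPLE_URLS) {
            tasks.put(url, executor.submit(() -> pushTo(url)));
        }
        for (Map.Entry<String, Future<Boolean>> task : tasks.entrySet()) {
            try {
                task.getValue().get(); // block until this cluster's push finishes
            } catch (Exception e) {
                exceptions.put(task.getKey(), e); // remember it, keep draining the rest
            }
        }
        executor.shutdown();
        if (!exceptions.isEmpty()) {
            // Mirror the batch failure above: report every failing URL at once
            throw new RuntimeException("Push failed for " + exceptions.keySet());
        }
    }

    static boolean pushTo(String url) {
        // Placeholder for the real push logic
        return true;
    }
}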

Example 43 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

The class HadoopStoreWriter, method write:

@Override
public void write(BytesWritable key, Iterator<BytesWritable> iterator, Reporter reporter) throws IOException {
    // Read chunk id
    int chunkId = ReadOnlyUtils.chunk(key.getBytes(), getNumChunks());
    initFileStreams(chunkId);
    // Write key and position
    this.indexFileStream[chunkId].write(key.getBytes(), 0, key.getLength());
    this.indexFileSizeInBytes[chunkId] += key.getLength();
    this.indexFileStream[chunkId].writeInt(this.position[chunkId]);
    this.indexFileSizeInBytes[chunkId] += ByteUtils.SIZE_OF_INT;
    // Run key through checksum digest
    if (this.checkSumDigestIndex[chunkId] != null) {
        this.checkSumDigestIndex[chunkId].update(key.getBytes(), 0, key.getLength());
        this.checkSumDigestIndex[chunkId].update(this.position[chunkId]);
    }
    short numTuples = 0;
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream valueStream = new DataOutputStream(stream);
    while (iterator.hasNext()) {
        BytesWritable writable = iterator.next();
        byte[] valueBytes = writable.getBytes();
        int offsetTillNow = 0;
        /**
             * Below, we read the node id, partition id and replica type of each record
             * coming in, and validate that it is consistent with the other IDs seen so
             * far. This is to catch potential regressions to the shuffling logic in:
             *
             * {@link AbstractStoreBuilderConfigurable#getPartition(byte[], byte[], int)}
             */
        // Read node Id
        int currentNodeId = ByteUtils.readInt(valueBytes, offsetTillNow);
        if (this.nodeId == -1) {
            this.nodeId = currentNodeId;
        } else if (this.nodeId != currentNodeId) {
            throw new IllegalArgumentException("Should not get various nodeId shuffled to us! " + "First nodeId seen: " + this.nodeId + ", currentNodeId: " + currentNodeId);
        }
        offsetTillNow += ByteUtils.SIZE_OF_INT;
        // Read partition id
        int currentPartitionId = ByteUtils.readInt(valueBytes, offsetTillNow);
        if (this.partitionId == -1) {
            this.partitionId = currentPartitionId;
        } else if (this.partitionId != currentPartitionId) {
            throw new IllegalArgumentException("Should not get various partitionId shuffled to us! " + "First partitionId seen: " + this.partitionId + ", currentPartitionId: " + currentPartitionId);
        }
        offsetTillNow += ByteUtils.SIZE_OF_INT;
        // Read replica type
        if (getSaveKeys()) {
            int currentReplicaType = (int) ByteUtils.readBytes(valueBytes, offsetTillNow, ByteUtils.SIZE_OF_BYTE);
            if (this.replicaType == -1) {
                this.replicaType = currentReplicaType;
            } else if (this.replicaType != currentReplicaType) {
                throw new IllegalArgumentException("Should not get various replicaType shuffled to us! " + "First replicaType seen: " + this.replicaType + ", currentReplicaType: " + currentReplicaType);
            }
            if (getBuildPrimaryReplicasOnly() && this.replicaType > 0) {
                throw new IllegalArgumentException("Should not get any replicaType > 0 shuffled to us" + " when buildPrimaryReplicasOnly mode is enabled!");
            }
            offsetTillNow += ByteUtils.SIZE_OF_BYTE;
        }
        int valueLength = writable.getLength() - offsetTillNow;
        if (getSaveKeys()) {
            // Write ( key_length, value_length, key, value )
            valueStream.write(valueBytes, offsetTillNow, valueLength);
        } else {
            // Write (value_length + value)
            valueStream.writeInt(valueLength);
            valueStream.write(valueBytes, offsetTillNow, valueLength);
        }
        numTuples++;
        // Without saved keys, multiple tuples for one md5 sum mean a hash collision or something malicious (we obviously expect collisions when we save keys)
        if (!getSaveKeys() && numTuples > 1)
            throw new VoldemortException("Duplicate keys detected for md5 sum " + ByteUtils.toHexString(ByteUtils.copy(key.getBytes(), 0, key.getLength())));
    }
    if (numTuples < 0) {
        // Overflow
        throw new VoldemortException("Found too many collisions: chunk " + chunkId + " has exceeded " + MAX_HASH_COLLISIONS + " collisions.");
    } else if (numTuples > 1) {
        // Update number of collisions + max keys per collision
        reporter.incrCounter(CollisionCounter.NUM_COLLISIONS, 1);
        long numCollisions = reporter.getCounter(CollisionCounter.MAX_COLLISIONS).getCounter();
        if (numTuples > numCollisions) {
            reporter.incrCounter(CollisionCounter.MAX_COLLISIONS, numTuples - numCollisions);
        }
    }
    // Flush the value
    valueStream.flush();
    byte[] value = stream.toByteArray();
    // First, if the save-keys flag is set, write the number of tuples (collisions) for this key
    if (getSaveKeys()) {
        this.valueFileStream[chunkId].writeShort(numTuples);
        this.valueFileSizeInBytes[chunkId] += ByteUtils.SIZE_OF_SHORT;
        this.position[chunkId] += ByteUtils.SIZE_OF_SHORT;
        if (this.checkSumDigestValue[chunkId] != null) {
            this.checkSumDigestValue[chunkId].update(numTuples);
        }
    }
    this.valueFileStream[chunkId].write(value);
    this.valueFileSizeInBytes[chunkId] += value.length;
    this.position[chunkId] += value.length;
    if (this.checkSumDigestValue[chunkId] != null) {
        this.checkSumDigestValue[chunkId].update(value);
    }
    if (this.position[chunkId] < 0)
        throw new VoldemortException("Chunk overflow exception: chunk " + chunkId + " has exceeded " + MAX_CHUNK_SIZE + " bytes.");
}
Also used : FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DataOutputStream(java.io.DataOutputStream) BytesWritable(org.apache.hadoop.io.BytesWritable) ByteArrayOutputStream(java.io.ByteArrayOutputStream) VoldemortException(voldemort.VoldemortException)
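
For reference, the header this reducer validates packs a 4-byte node id, a 4-byte partition id and, when keys are saved, a 1-byte replica type in front of the payload. A standalone sketch of that layout using java.nio instead of Voldemort's ByteUtils; the sample values are made up:

import java.nio.ByteBuffer;

public class RecordHeaderSketch {

    public static void main(String[] args) {
        // Encode a sample record: nodeId=2, partitionId=7, replicaType=0, then payload
        byte[] payload = { 0x0A, 0x0B };
        ByteBuffer out = ByteBuffer.allocate(4 + 4 + 1 + payload.length);
        out.putInt(2).putInt(7).put((byte) 0).put(payload);

        // Decode with the same running-offset arithmetic as the reducer
        ByteBuffer in = ByteBuffer.wrap(out.array());
        int nodeId = in.getInt();      // bytes 0-3
        int partitionId = in.getInt(); // bytes 4-7
        int replicaType = in.get();    // byte 8, present only when saveKeys is on
        byte[] rest = new byte[in.remaining()];
        in.get(rest);
        System.out.println("node=" + nodeId + " partition=" + partitionId
                + " replicaType=" + replicaType + " payloadBytes=" + rest.length);
    }
}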

Example 44 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

The class StorageService, method forceCleanupOldDataThrottled:

@JmxOperation(description = "Force cleanup of old data based on retention policy.", impact = MBeanOperationInfo.ACTION)
public void forceCleanupOldDataThrottled(String storeName, int entryScanThrottleRate) {
    logger.info("forceCleanupOldData() called for store " + storeName + " with retention scan throttle rate:" + entryScanThrottleRate + " Entries/second.");
    try {
        StoreDefinition storeDef = getMetadataStore().getStoreDef(storeName);
        StorageEngine<ByteArray, byte[], byte[]> engine = storeRepository.getStorageEngine(storeName);
        if (null != engine) {
            if (storeDef.hasRetentionPeriod()) {
                ExecutorService executor = Executors.newFixedThreadPool(1);
                try {
                    if (scanPermitWrapper.availablePermits() >= 1) {
                        executor.execute(new DataCleanupJob<ByteArray, byte[], byte[]>(engine, scanPermitWrapper, storeName, SystemTime.INSTANCE, metadata));
                    } else {
                        logger.error("forceCleanupOldData() No permit available to run cleanJob already running multiple instance." + engine.getName());
                    }
                } finally {
                    executor.shutdown();
                }
            } else {
                logger.error("forceCleanupOldData() No retention policy found for " + storeName);
            }
        }
    } catch (Exception e) {
        logger.error("Error while running forceCleanupOldData()", e);
        throw new VoldemortException(e);
    }
}
Also used : StoreDefinition(voldemort.store.StoreDefinition) ExecutorService(java.util.concurrent.ExecutorService) ByteArray(voldemort.utils.ByteArray) VoldemortException(voldemort.VoldemortException) ConfigurationException(voldemort.utils.ConfigurationException) NoSuchCapabilityException(voldemort.store.NoSuchCapabilityException) DisabledStoreException(voldemort.store.DisabledStoreException) JmxOperation(voldemort.annotations.jmx.JmxOperation)
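
Because the method is annotated with @JmxOperation, it can be triggered from any JMX client. A minimal sketch; the service URL, port, and ObjectName are assumptions for illustration, so check how your server actually registers the StorageService MBean before relying on them:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ForceCleanupClient {

    public static void main(String[] args) throws Exception {
        // Hypothetical JMX endpoint of a Voldemort server
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://voldemort-host:9999/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection mbsc = connector.getMBeanServerConnection();
            // Assumed object name; Voldemort derives it from the class via JmxUtils
            ObjectName storageService = new ObjectName("voldemort.server:type=StorageService");
            // Clean "my-store", scanning at most 1000 entries per second
            mbsc.invoke(storageService,
                        "forceCleanupOldDataThrottled",
                        new Object[] { "my-store", 1000 },
                        new String[] { String.class.getName(), int.class.getName() });
        }
    }
}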

Example 45 with VoldemortException

Use of voldemort.VoldemortException in project voldemort by voldemort.

The class StorageService, method stopInner:

@Override
protected void stopInner() {
    /*
         * We may end up closing a given store more than once, but that is cool
         * because close() is idempotent
         */
    Exception lastException = null;
    logger.info("Closing all stores.");
    /* This will also close the node stores including local stores */
    for (Store<ByteArray, byte[], byte[]> store : this.storeRepository.getAllRoutedStores()) {
        logger.info("Closing routed store for " + store.getName());
        try {
            store.close();
        } catch (Exception e) {
            logger.error(e);
            lastException = e;
        }
    }
    /* This will also close the storage engines */
    for (Store<ByteArray, byte[], byte[]> store : this.storeRepository.getAllStorageEngines()) {
        logger.info("Closing storage engine for " + store.getName());
        try {
            store.close();
        } catch (Exception e) {
            logger.error(e);
            lastException = e;
        }
    }
    logger.info("All stores closed.");
    /* Close slop store if necessary */
    if (this.storeRepository.hasSlopStore()) {
        try {
            this.storeRepository.getSlopStore().close();
        } catch (Exception e) {
            logger.error(e);
            lastException = e;
        }
    }
    /* Close all storage configs */
    logger.info("Closing storage configurations.");
    for (StorageConfiguration config : storageConfigs.values()) {
        logger.info("Closing " + config.getType() + " storage config.");
        try {
            config.close();
        } catch (Exception e) {
            logger.error(e);
            lastException = e;
        }
    }
    this.clientThreadPool.shutdown();
    try {
        if (!this.clientThreadPool.awaitTermination(10, TimeUnit.SECONDS))
            this.clientThreadPool.shutdownNow();
    } catch (InterruptedException e) {
        // okay, fine, playing nice didn't work
        this.clientThreadPool.shutdownNow();
    }
    logger.info("Closed client threadpool.");
    storeFactory.close();
    if (this.failureDetector != null) {
        try {
            this.failureDetector.destroy();
        } catch (Exception e) {
            lastException = e;
        }
    }
    logger.info("Closed failure detector.");
    // shut down the proxy put thread pool
    this.proxyPutWorkerPool.shutdown();
    try {
        if (!this.proxyPutWorkerPool.awaitTermination(10, TimeUnit.SECONDS))
            this.proxyPutWorkerPool.shutdownNow();
    } catch (InterruptedException e) {
        this.proxyPutWorkerPool.shutdownNow();
    }
    logger.info("Closed proxy put thread pool.");
    /* If there is an exception, throw it */
    if (lastException instanceof VoldemortException)
        throw (VoldemortException) lastException;
    else if (lastException != null)
        throw new VoldemortException(lastException);
}
Also used : ByteArray(voldemort.utils.ByteArray) StorageConfiguration(voldemort.store.StorageConfiguration) FileBackedCachingStorageConfiguration(voldemort.store.configuration.FileBackedCachingStorageConfiguration) InMemoryStorageConfiguration(voldemort.store.memory.InMemoryStorageConfiguration) ViewStorageConfiguration(voldemort.store.views.ViewStorageConfiguration) ReadOnlyStorageConfiguration(voldemort.store.readonly.ReadOnlyStorageConfiguration) VoldemortException(voldemort.VoldemortException) ConfigurationException(voldemort.utils.ConfigurationException) NoSuchCapabilityException(voldemort.store.NoSuchCapabilityException) DisabledStoreException(voldemort.store.DisabledStoreException)
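
The shutdown idiom above generalizes well: close every resource, log each failure as it happens, remember the last exception, and rethrow only after all cleanup has been attempted. A minimal sketch with java.io.Closeable standing in for Voldemort's stores and storage configs:

import java.io.Closeable;
import java.util.List;

public class CloseAllSketch {

    /** Closes every resource, then rethrows the last failure, if any. */
    public static void closeAll(List<Closeable> resources) {
        Exception lastException = null;
        for (Closeable resource : resources) {
            try {
                resource.close();
            } catch (Exception e) {
                // Log and continue so one bad resource cannot block the rest
                System.err.println("Failed to close resource: " + e);
                lastException = e;
            }
        }
        if (lastException instanceof RuntimeException) {
            throw (RuntimeException) lastException;
        } else if (lastException != null) {
            throw new RuntimeException(lastException);
        }
    }
}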

Aggregations

VoldemortException (voldemort.VoldemortException): 247 usages
IOException (java.io.IOException): 63 usages
ByteArray (voldemort.utils.ByteArray): 52 usages
File (java.io.File): 46 usages
Node (voldemort.cluster.Node): 42 usages
StoreDefinition (voldemort.store.StoreDefinition): 39 usages
Versioned (voldemort.versioning.Versioned): 38 usages
ArrayList (java.util.ArrayList): 34 usages
Test (org.junit.Test): 30 usages
ObsoleteVersionException (voldemort.versioning.ObsoleteVersionException): 26 usages
List (java.util.List): 21 usages
HashMap (java.util.HashMap): 20 usages
Cluster (voldemort.cluster.Cluster): 20 usages
VectorClock (voldemort.versioning.VectorClock): 16 usages
NoSuchCapabilityException (voldemort.store.NoSuchCapabilityException): 15 usages
ReadOnlyStorageEngine (voldemort.store.readonly.ReadOnlyStorageEngine): 14 usages
ExecutionException (java.util.concurrent.ExecutionException): 13 usages
StoreDefinitionsMapper (voldemort.xml.StoreDefinitionsMapper): 13 usages
Map (java.util.Map): 12 usages
Path (org.apache.hadoop.fs.Path): 12 usages