
Example 1 with SupplierWithIOException

Use of org.apache.hadoop.hdds.function.SupplierWithIOException in project ozone by apache.

The class OzoneManagerDoubleBuffer, method flushTransactions:

/**
 * Runs in a background thread, batches the transactions in currentBuffer,
 * and commits them to the DB.
 */
private void flushTransactions() {
    while (isRunning.get()) {
        try {
            if (canFlush()) {
                Map<String, List<Long>> cleanupEpochs = new HashMap<>();
                setReadyBuffer();
                List<Long> flushedEpochs = null;
                try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) {
                    AtomicReference<String> lastTraceId = new AtomicReference<>();
                    readyBuffer.iterator().forEachRemaining((entry) -> {
                        try {
                            OMResponse omResponse = entry.getResponse().getOMResponse();
                            lastTraceId.set(omResponse.getTraceID());
                            addToBatchWithTrace(omResponse, (SupplierWithIOException<Void>) () -> {
                                entry.getResponse().checkAndUpdateDB(omMetadataManager, batchOperation);
                                return null;
                            });
                            addCleanupEntry(entry, cleanupEpochs);
                        } catch (IOException ex) {
                            // Adding the entry to the RocksDB batch failed with an
                            // exception; we should terminate the OM.
                            terminate(ex);
                        }
                    });
                    // Collect the flushed epochs and commit transaction info to DB.
                    flushedEpochs = readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex).sorted().collect(Collectors.toList());
                    long lastRatisTransactionIndex = flushedEpochs.get(flushedEpochs.size() - 1);
                    long term = isRatisEnabled ? indexToTerm.apply(lastRatisTransactionIndex) : -1;
                    addToBatchTransactionInfoWithTrace(lastTraceId.get(), lastRatisTransactionIndex, (SupplierWithIOException<Void>) () -> {
                        omMetadataManager.getTransactionInfoTable().putWithBatch(batchOperation, TRANSACTION_INFO_KEY, new TransactionInfo.Builder().setTransactionIndex(lastRatisTransactionIndex).setCurrentTerm(term).build());
                        return null;
                    });
                    long startTime = Time.monotonicNow();
                    flushBatchWithTrace(lastTraceId.get(), readyBuffer.size(), (SupplierWithIOException<Void>) () -> {
                        omMetadataManager.getStore().commitBatchOperation(batchOperation);
                        return null;
                    });
                    ozoneManagerDoubleBufferMetrics.updateFlushTime(Time.monotonicNow() - startTime);
                }
                // Once all entries are flushed, complete their futures so that
                // waiting handler threads are released.
                if (!isRatisEnabled) {
                    readyFutureQueue.iterator().forEachRemaining((entry) -> {
                        entry.complete(null);
                    });
                    readyFutureQueue.clear();
                }
                int flushedTransactionsSize = readyBuffer.size();
                flushedTransactionCount.addAndGet(flushedTransactionsSize);
                flushIterations.incrementAndGet();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Sync Iteration {} flushed transactions in this " + "iteration {}", flushIterations.get(), flushedTransactionsSize);
                }
                // When Ratis is not enabled, compute the flushed epochs from the
                // ready buffer for the applied-index update below.
                if (!isRatisEnabled) {
                    flushedEpochs = readyBuffer.stream().map(DoubleBufferEntry::getTrxLogIndex).sorted().collect(Collectors.toList());
                }
                // Clean up committed transactions.
                cleanupCache(cleanupEpochs);
                readyBuffer.clear();
                // update the last updated index in OzoneManagerStateMachine.
                ozoneManagerRatisSnapShot.updateLastAppliedIndex(flushedEpochs);
                // set metrics.
                updateMetrics(flushedTransactionsSize);
            }
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
            if (isRunning.get()) {
                final String message = "OMDoubleBuffer flush thread " + Thread.currentThread().getName() + " encountered Interrupted " + "exception while running";
                ExitUtils.terminate(1, message, ex, LOG);
            } else {
                LOG.info("OMDoubleBuffer flush thread {} is interrupted and will " + "exit. {}", Thread.currentThread().getName(), Thread.currentThread().getName());
            }
        } catch (IOException ex) {
            terminate(ex);
        } catch (Throwable t) {
            final String s = "OMDoubleBuffer flush thread" + Thread.currentThread().getName() + "encountered Throwable error";
            ExitUtils.terminate(2, s, t, LOG);
        }
    }
}
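
The casts above work because SupplierWithIOException is a functional interface whose single get() method is declared to throw IOException, which a plain java.util.function.Supplier does not permit. Below is a minimal sketch of that idea; the interface shape is inferred from the usage above, and the wrapWithTrace helper is purely hypothetical, standing in for wrappers such as addToBatchWithTrace and flushBatchWithTrace rather than reproducing their actual implementation.

import java.io.IOException;

// Sketch of the functional-interface shape implied by the casts above:
// a supplier whose get() may throw IOException.
@FunctionalInterface
interface SupplierWithIOException<T> {
    T get() throws IOException;
}

class SupplierWithIOExceptionSketch {

    // Hypothetical helper standing in for addToBatchWithTrace /
    // flushBatchWithTrace: a real implementation would open a tracing span
    // keyed by traceId around the supplied work before delegating to it.
    static <T> T wrapWithTrace(String traceId, SupplierWithIOException<T> work)
            throws IOException {
        return work.get();
    }

    public static void main(String[] args) throws IOException {
        // The lambda is free to throw IOException; the explicit cast mirrors
        // the (SupplierWithIOException<Void>) casts in the method above.
        wrapWithTrace("trace-123", (SupplierWithIOException<Void>) () -> {
            // e.g. add a response to a RocksDB batch or commit the batch here.
            return null;
        });
    }
}
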
Also used : HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) IOException(java.io.IOException) SupplierWithIOException(org.apache.hadoop.hdds.function.SupplierWithIOException) OMResponse(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) AtomicLong(java.util.concurrent.atomic.AtomicLong) TransactionInfo(org.apache.hadoop.hdds.utils.TransactionInfo) ArrayList(java.util.ArrayList) List(java.util.List)

Aggregations

IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 HashMap (java.util.HashMap)1 List (java.util.List)1 AtomicLong (java.util.concurrent.atomic.AtomicLong)1 AtomicReference (java.util.concurrent.atomic.AtomicReference)1 SupplierWithIOException (org.apache.hadoop.hdds.function.SupplierWithIOException)1 TransactionInfo (org.apache.hadoop.hdds.utils.TransactionInfo)1 BatchOperation (org.apache.hadoop.hdds.utils.db.BatchOperation)1 OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse)1