
Example 6 with DataStorageManagerException

use of herddb.storage.DataStorageManagerException in project herddb by diennea.

the class TableManager method accessTableData.

private void accessTableData(ScanStatement statement, StatementEvaluationContext context, ScanResultOperation consumer, Transaction transaction, boolean lockRequired, boolean forWrite) throws StatementExecutionException {
    statement.validateContext(context);
    Predicate predicate = statement.getPredicate();
    long _start = System.currentTimeMillis();
    boolean acquireLock = transaction != null || forWrite || lockRequired;
    LocalScanPageCache lastPageRead = acquireLock ? null : new LocalScanPageCache();
    AtomicInteger count = new AtomicInteger();
    try {
        IndexOperation indexOperation = predicate != null ? predicate.getIndexOperation() : null;
        boolean primaryIndexSeek = indexOperation instanceof PrimaryIndexSeek;
        AbstractIndexManager useIndex = getIndexForTbleAccess(indexOperation);
        class RecordProcessor implements BatchOrderedExecutor.Executor<Entry<Bytes, Long>>, Consumer<Map.Entry<Bytes, Long>> {

            @Override
            public void execute(List<Map.Entry<Bytes, Long>> batch) throws HerdDBInternalException {
                batch.forEach((entry) -> {
                    accept(entry);
                });
            }

            @Override
            public void accept(Entry<Bytes, Long> entry) throws DataStorageManagerException, StatementExecutionException, LogNotAvailableException {
                if (transaction != null && count.incrementAndGet() % 1000 == 0) {
                    transaction.touch();
                }
                Bytes key = entry.getKey();
                boolean already_locked = transaction != null && transaction.lookupLock(table.name, key) != null;
                boolean record_discarded = !already_locked;
                LockHandle lock = acquireLock ? (forWrite ? lockForWrite(key, transaction) : lockForRead(key, transaction)) : null;
                // LOGGER.log(Level.SEVERE, "CREATED LOCK " + lock + " for " + key);
                try {
                    if (transaction != null) {
                        if (transaction.recordDeleted(table.name, key)) {
                            // skip this record: it has been deleted inside the current transaction
                            return;
                        }
                        Record record = transaction.recordUpdated(table.name, key);
                        if (record != null) {
                            // use current transaction version of the record
                            if (predicate == null || predicate.evaluate(record, context)) {
                                // now the consumer is the owner of the lock on the record
                                record_discarded = false;
                                consumer.accept(record, null);
                            }
                            return;
                        }
                    }
                    Long pageId = entry.getValue();
                    if (pageId != null) {
                        boolean pkFilterCompleteMatch = false;
                        if (!primaryIndexSeek && predicate != null) {
                            Predicate.PrimaryKeyMatchOutcome outcome = predicate.matchesRawPrimaryKey(key, context);
                            if (outcome == Predicate.PrimaryKeyMatchOutcome.FAILED) {
                                return;
                            } else if (outcome == Predicate.PrimaryKeyMatchOutcome.FULL_CONDITION_VERIFIED) {
                                pkFilterCompleteMatch = true;
                            }
                        }
                        Record record = fetchRecord(key, pageId, lastPageRead);
                        if (record != null && (pkFilterCompleteMatch || predicate == null || predicate.evaluate(record, context))) {
                            // now the consumer is the owner of the lock on the record
                            record_discarded = false;
                            consumer.accept(record, transaction == null ? lock : null);
                        }
                    }
                } finally {
                    // release the lock on the key if it did not match scan criteria
                    if (record_discarded) {
                        if (transaction == null) {
                            locksManager.releaseLock(lock);
                        } else if (!already_locked) {
                            transaction.releaseLockOnKey(table.name, key, locksManager);
                        }
                    }
                }
            }
        }
        RecordProcessor scanExecutor = new RecordProcessor();
        boolean exit = false;
        try {
            if (primaryIndexSeek) {
                // we are expecting at most one record, no need for BatchOrderedExecutor
                // this is the most common case for UPDATE-BY-PK and SELECT-BY-PK
                // no need to create and use Streams
                PrimaryIndexSeek seek = (PrimaryIndexSeek) indexOperation;
                Bytes value = Bytes.from_array(seek.value.computeNewValue(null, context, tableContext));
                Long page = keyToPage.get(value);
                if (page != null) {
                    Map.Entry<Bytes, Long> singleEntry = new AbstractMap.SimpleImmutableEntry<>(value, page);
                    scanExecutor.accept(singleEntry);
                }
            } else {
                Stream<Map.Entry<Bytes, Long>> scanner = keyToPage.scanner(indexOperation, context, tableContext, useIndex);
                BatchOrderedExecutor<Map.Entry<Bytes, Long>> executor = new BatchOrderedExecutor<>(SORTED_PAGE_ACCESS_WINDOW_SIZE, scanExecutor, SORTED_PAGE_ACCESS_COMPARATOR);
                scanner.forEach(executor);
                executor.finish();
            }
        } catch (ExitLoop exitLoop) {
            exit = !exitLoop.continueWithTransactionData;
            if (LOGGER.isLoggable(Level.FINEST)) {
                LOGGER.log(Level.FINEST, "exit loop during scan {0}, started at {1}: {2}", new Object[] { statement, new java.sql.Timestamp(_start), exitLoop.toString() });
            }
        } catch (final HerdDBInternalException error) {
            LOGGER.log(Level.SEVERE, "error during scan", error);
            if (error.getCause() instanceof StatementExecutionException) {
                throw (StatementExecutionException) error.getCause();
            } else if (error.getCause() instanceof DataStorageManagerException) {
                throw (DataStorageManagerException) error.getCause();
            } else if (error instanceof StatementExecutionException) {
                throw error;
            } else if (error instanceof DataStorageManagerException) {
                throw error;
            } else {
                throw new StatementExecutionException(error);
            }
        }
        if (!exit && transaction != null) {
            consumer.beginNewRecordsInTransactionBlock();
            Collection<Record> newRecordsForTable = transaction.getNewRecordsForTable(table.name);
            if (newRecordsForTable != null) {
                newRecordsForTable.forEach(record -> {
                    if (!transaction.recordDeleted(table.name, record.key) && (predicate == null || predicate.evaluate(record, context))) {
                        consumer.accept(record, null);
                    }
                });
            }
        }
    } catch (ExitLoop exitLoop) {
        if (LOGGER.isLoggable(Level.FINEST)) {
            LOGGER.log(Level.FINEST, "exit loop during scan {0}, started at {1}: {2}", new Object[] { statement, new java.sql.Timestamp(_start), exitLoop.toString() });
        }
    } catch (StatementExecutionException err) {
        LOGGER.log(Level.SEVERE, "error during scan {0}, started at {1}: {2}", new Object[] { statement, new java.sql.Timestamp(_start), err.toString() });
        throw err;
    } catch (HerdDBInternalException err) {
        LOGGER.log(Level.SEVERE, "error during scan {0}, started at {1}: {2}", new Object[] { statement, new java.sql.Timestamp(_start), err.toString() });
        throw new StatementExecutionException(err);
    }
}
Also used : DataStorageManagerException(herddb.storage.DataStorageManagerException) StatementExecutionException(herddb.model.StatementExecutionException) Predicate(herddb.model.Predicate) PrimaryIndexSeek(herddb.index.PrimaryIndexSeek) Bytes(herddb.utils.Bytes) LogEntry(herddb.log.LogEntry) Entry(java.util.Map.Entry) BatchOrderedExecutor(herddb.utils.BatchOrderedExecutor) FullTableScanConsumer(herddb.storage.FullTableScanConsumer) Consumer(java.util.function.Consumer) ArrayList(java.util.ArrayList) List(java.util.List) Record(herddb.model.Record) LockHandle(herddb.utils.LockHandle) BatchOrderedExecutor(herddb.utils.BatchOrderedExecutor) IndexOperation(herddb.index.IndexOperation) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicLong(java.util.concurrent.atomic.AtomicLong) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) AbstractMap(java.util.AbstractMap)
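
For the non-PK-seek path above, the scan is driven through a BatchOrderedExecutor built with SORTED_PAGE_ACCESS_WINDOW_SIZE and SORTED_PAGE_ACCESS_COMPARATOR, so entries that live on the same data page are handed to the RecordProcessor together. The following is a minimal, self-contained sketch of that buffering-and-sorting idea only; it is not HerdDB's BatchOrderedExecutor, and the class name, batch size and comparator here are illustrative.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.function.Consumer;

// Sketch only: buffer scan entries and hand them to the real consumer in sorted batches,
// so entries living on the same page are processed together and page fetches are not repeated.
final class SortedBatchingConsumer<T> implements Consumer<T> {

    private final int batchSize;
    private final Comparator<T> order;            // e.g. "by page id", like SORTED_PAGE_ACCESS_COMPARATOR
    private final Consumer<List<T>> batchConsumer;
    private final List<T> buffer = new ArrayList<>();

    SortedBatchingConsumer(int batchSize, Comparator<T> order, Consumer<List<T>> batchConsumer) {
        this.batchSize = batchSize;
        this.order = order;
        this.batchConsumer = batchConsumer;
    }

    @Override
    public void accept(T entry) {
        buffer.add(entry);
        if (buffer.size() >= batchSize) {
            flush();
        }
    }

    // equivalent to executor.finish() in the snippet above: drain the last partial batch
    void finish() {
        if (!buffer.isEmpty()) {
            flush();
        }
    }

    private void flush() {
        buffer.sort(order);
        batchConsumer.accept(new ArrayList<>(buffer));
        buffer.clear();
    }

    public static void main(String[] args) {
        SortedBatchingConsumer<Integer> consumer = new SortedBatchingConsumer<>(
                3, Comparator.naturalOrder(), batch -> System.out.println("batch " + batch));
        for (int pageId : new int[] { 5, 1, 9, 2, 7 }) {
            consumer.accept(pageId);
        }
        consumer.finish(); // prints "batch [1, 5, 9]" then "batch [2, 7]"
    }
}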

Example 7 with DataStorageManagerException

use of herddb.storage.DataStorageManagerException in project herddb by diennea.

the class TableManager method executeUpdateAsync.

private CompletableFuture<StatementExecutionResult> executeUpdateAsync(UpdateStatement update, Transaction transaction, StatementEvaluationContext context) throws StatementExecutionException, DataStorageManagerException {
    // LOGGER.log(Level.SEVERE, "executeUpdateAsync, " + update + ", transaction " + transaction);
    AtomicInteger updateCount = new AtomicInteger();
    Holder<Bytes> lastKey = new Holder<>();
    Holder<byte[]> lastValue = new Holder<>();
    /*
         an update can succeed only if the row is valid and the key is contained in the "keys" structure;
         the update simply overrides the value of the row, assigning a null page to the row.
         The update can have a 'where' predicate which is evaluated against the decoded row: the update is executed only if the predicate returns boolean 'true' (CAS operation).
         Locks: the update uses a lock on the key
         */
    RecordFunction function = update.getFunction();
    long transactionId = transaction != null ? transaction.transactionId : 0;
    Predicate predicate = update.getPredicate();
    Map<String, AbstractIndexManager> indexes = tableSpaceManager.getIndexesOnTable(table.name);
    ScanStatement scan = new ScanStatement(table.tablespace, table, predicate);
    List<CompletableFuture<PendingLogEntryWork>> writes = new ArrayList<>();
    try {
        accessTableData(scan, context, new ScanResultOperation() {

            @Override
            public void accept(Record current, LockHandle lockHandle) throws StatementExecutionException, LogNotAvailableException, DataStorageManagerException {
                List<UniqueIndexLockReference> uniqueIndexes = null;
                byte[] newValue;
                try {
                    if (childrenTables != null) {
                        DataAccessor currentValues = current.getDataAccessor(table);
                        for (Table childTable : childrenTables) {
                            executeForeignKeyConstraintsAsParentTable(childTable, currentValues, context, transaction, false);
                        }
                    }
                    newValue = function.computeNewValue(current, context, tableContext);
                    if (indexes != null || table.foreignKeys != null) {
                        DataAccessor values = new Record(current.key, Bytes.from_array(newValue)).getDataAccessor(table);
                        if (table.foreignKeys != null) {
                            for (ForeignKeyDef fk : table.foreignKeys) {
                                checkForeignKeyConstraintsAsChildTable(fk, values, context, transaction);
                            }
                        }
                        if (indexes != null) {
                            for (AbstractIndexManager index : indexes.values()) {
                                if (index.isUnique()) {
                                    Bytes indexKey = RecordSerializer.serializeIndexKey(values, index.getIndex(), index.getColumnNames());
                                    if (uniqueIndexes == null) {
                                        uniqueIndexes = new ArrayList<>(1);
                                    }
                                    UniqueIndexLockReference uniqueIndexLock = new UniqueIndexLockReference(index, indexKey);
                                    uniqueIndexes.add(uniqueIndexLock);
                                    LockHandle lockForIndex = lockForWrite(uniqueIndexLock.key, transaction, index.getIndexName(), index.getLockManager());
                                    if (transaction == null) {
                                        uniqueIndexLock.lockHandle = lockForIndex;
                                    }
                                    if (index.valueAlreadyMapped(indexKey, current.key)) {
                                        throw new UniqueIndexContraintViolationException(index.getIndexName(), indexKey, "Value " + indexKey + " already present in index " + index.getIndexName());
                                    }
                                } else {
                                    RecordSerializer.validateIndexableValue(values, index.getIndex(), index.getColumnNames());
                                }
                            }
                        }
                    }
                } catch (IllegalArgumentException | herddb.utils.IllegalDataAccessException | StatementExecutionException err) {
                    locksManager.releaseLock(lockHandle);
                    StatementExecutionException finalError;
                    if (!(err instanceof StatementExecutionException)) {
                        finalError = new StatementExecutionException(err.getMessage(), err);
                    } else {
                        finalError = (StatementExecutionException) err;
                    }
                    CompletableFuture<PendingLogEntryWork> res = Futures.exception(finalError);
                    if (uniqueIndexes != null) {
                        for (UniqueIndexLockReference lock : uniqueIndexes) {
                            res = releaseWriteLock(res, lock.lockHandle, lock.indexManager.getLockManager());
                        }
                    }
                    writes.add(res);
                    return;
                }
                final long size = DataPage.estimateEntrySize(current.key, newValue);
                if (size > maxLogicalPageSize) {
                    locksManager.releaseLock(lockHandle);
                    writes.add(Futures.exception(new RecordTooBigException("New version of record " + current.key + " is too big to be updated: new size " + size + ", actual size " + DataPage.estimateEntrySize(current) + ", max size " + maxLogicalPageSize)));
                    return;
                }
                LogEntry entry = LogEntryFactory.update(table, current.key, Bytes.from_array(newValue), transaction);
                CommitLogResult pos = log.log(entry, entry.transactionId <= 0);
                final List<UniqueIndexLockReference> _uniqueIndexes = uniqueIndexes;
                writes.add(pos.logSequenceNumber.thenApply(lsn -> new PendingLogEntryWork(entry, pos, lockHandle, _uniqueIndexes)));
                lastKey.value = current.key;
                lastValue.value = newValue;
                updateCount.incrementAndGet();
            }
        }, transaction, true, true);
    } catch (HerdDBInternalException err) {
        LOGGER.log(Level.SEVERE, "bad error during an update", err);
        return Futures.exception(err);
    }
    if (writes.isEmpty()) {
        return CompletableFuture.completedFuture(new DMLStatementExecutionResult(transactionId, 0, null, null));
    }
    if (writes.size() == 1) {
        return writes.get(0).whenCompleteAsync((pending, error) -> {
            try {
                // apply any of the DML operations
                if (error == null) {
                    apply(pending.pos, pending.entry, false);
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pending) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
        });
    } else {
        return Futures.collect(writes).whenCompleteAsync((pendings, error) -> {
            try {
                // apply any of the DML operations
                if (error == null) {
                    for (PendingLogEntryWork pending : pendings) {
                        apply(pending.pos, pending.entry, false);
                    }
                }
            } finally {
                releaseMultiplePendingLogEntryWorks(writes);
            }
        }, tableSpaceManager.getCallbacksExecutor()).thenApply((pendings) -> {
            return new DMLStatementExecutionResult(transactionId, updateCount.get(), lastKey.value, update.isReturnValues() ? (lastValue.value != null ? Bytes.from_array(lastValue.value) : null) : null);
        });
    }
}
Also used : Arrays(java.util.Arrays) NullLockManager(herddb.utils.NullLockManager) Table(herddb.model.Table) TruncateTableStatement(herddb.model.commands.TruncateTableStatement) DuplicatePrimaryKeyException(herddb.model.DuplicatePrimaryKeyException) TableStatus(herddb.storage.TableStatus) Map(java.util.Map) DataAccessor(herddb.utils.DataAccessor) LogNotAvailableException(herddb.log.LogNotAvailableException) CommitLogResult(herddb.log.CommitLogResult) LogSequenceNumber(herddb.log.LogSequenceNumber) UniqueIndexContraintViolationException(herddb.model.UniqueIndexContraintViolationException) Set(java.util.Set) RecordSerializer(herddb.codec.RecordSerializer) JSQLParserPlanner.delimit(herddb.sql.JSQLParserPlanner.delimit) DataPageMetaData(herddb.core.PageSet.DataPageMetaData) ScanStatement(herddb.model.commands.ScanStatement) Stream(java.util.stream.Stream) StatsLogger(org.apache.bookkeeper.stats.StatsLogger) Bytes(herddb.utils.Bytes) Holder(herddb.utils.Holder) LockHandle(herddb.utils.LockHandle) ForeignKeyViolationException(herddb.model.ForeignKeyViolationException) LogEntry(herddb.log.LogEntry) ArrayList(java.util.ArrayList) TransactionContext(herddb.model.TransactionContext) Transaction(herddb.model.Transaction) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) Projection(herddb.model.Projection) ForeignKeyDef(herddb.model.ForeignKeyDef) EnsureLongIncrementAccumulator(herddb.utils.EnsureLongIncrementAccumulator) LogEntryType(herddb.log.LogEntryType) Record(herddb.model.Record) LogEntryFactory(herddb.log.LogEntryFactory) KeyToPageIndex(herddb.index.KeyToPageIndex) DataStorageManager(herddb.storage.DataStorageManager) ColumnTypes(herddb.model.ColumnTypes) ILocalLockManager(herddb.utils.ILocalLockManager) AtomicLong(java.util.concurrent.atomic.AtomicLong) Lock(java.util.concurrent.locks.Lock) IndexOperation(herddb.index.IndexOperation) Column(herddb.model.Column) StampedLock(java.util.concurrent.locks.StampedLock) UpdateStatement(herddb.model.commands.UpdateStatement) ScanLimitsImpl(herddb.model.ScanLimitsImpl) TupleComparator(herddb.model.TupleComparator) ServerConfiguration(herddb.server.ServerConfiguration) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) RecordTooBigException(herddb.model.RecordTooBigException) DMLStatementExecutionResult(herddb.model.DMLStatementExecutionResult) LocalLockManager(herddb.utils.LocalLockManager) Futures(herddb.utils.Futures) DataStorageManagerException(herddb.storage.DataStorageManagerException) Index(herddb.model.Index) InsertStatement(herddb.model.commands.InsertStatement) DataScanner(herddb.model.DataScanner) DDLException(herddb.model.DDLException) RecordFunction(herddb.model.RecordFunction) StatementExecutionException(herddb.model.StatementExecutionException) TableContext(herddb.model.TableContext) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Logger(java.util.logging.Logger) Collectors(java.util.stream.Collectors) List(java.util.List) FullTableScanConsumer(herddb.storage.FullTableScanConsumer) GetStatement(herddb.model.commands.GetStatement) Entry(java.util.Map.Entry) Statement(herddb.model.Statement) LongAdder(java.util.concurrent.atomic.LongAdder) DataScannerException(herddb.model.DataScannerException) GetResult(herddb.model.GetResult) PrimaryIndexSeek(herddb.index.PrimaryIndexSeek) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) Function(java.util.function.Function) BatchOrderedExecutor(herddb.utils.BatchOrderedExecutor) 
ConcurrentMap(java.util.concurrent.ConcurrentMap) Level(java.util.logging.Level) HashSet(java.util.HashSet) BooleanHolder(herddb.utils.BooleanHolder) ScanLimits(herddb.model.ScanLimits) DeleteStatement(herddb.model.commands.DeleteStatement) Iterator(java.util.Iterator) ReentrantLock(java.util.concurrent.locks.ReentrantLock) Semaphore(java.util.concurrent.Semaphore) DataPageDoesNotExistException(herddb.storage.DataPageDoesNotExistException) Counter(org.apache.bookkeeper.stats.Counter) StatementExecutionResult(herddb.model.StatementExecutionResult) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) CommitLog(herddb.log.CommitLog) AbstractMap(java.util.AbstractMap) TableConsistencyCheckStatement(herddb.model.commands.TableConsistencyCheckStatement) Predicate(herddb.model.Predicate) StatementEvaluationContext(herddb.model.StatementEvaluationContext) Comparator(java.util.Comparator) Collections(java.util.Collections) SECONDS(java.util.concurrent.TimeUnit.SECONDS) TableManagerStats(herddb.core.stats.TableManagerStats) SystemProperties(herddb.utils.SystemProperties) DataStorageManagerException(herddb.storage.DataStorageManagerException) DataAccessor(herddb.utils.DataAccessor) ArrayList(java.util.ArrayList) CommitLogResult(herddb.log.CommitLogResult) RecordTooBigException(herddb.model.RecordTooBigException) StatementExecutionException(herddb.model.StatementExecutionException) Predicate(herddb.model.Predicate) Bytes(herddb.utils.Bytes) CompletableFuture(java.util.concurrent.CompletableFuture) Record(herddb.model.Record) ArrayList(java.util.ArrayList) List(java.util.List) RecordFunction(herddb.model.RecordFunction) LogEntry(herddb.log.LogEntry) ScanStatement(herddb.model.commands.ScanStatement) LockHandle(herddb.utils.LockHandle) Table(herddb.model.Table) Holder(herddb.utils.Holder) BooleanHolder(herddb.utils.BooleanHolder) UniqueIndexContraintViolationException(herddb.model.UniqueIndexContraintViolationException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DMLStatementExecutionResult(herddb.model.DMLStatementExecutionResult) ForeignKeyDef(herddb.model.ForeignKeyDef) LogNotAvailableException(herddb.log.LogNotAvailableException)
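
executeUpdateAsync collects one future per logged entry and only applies the pending work once every write has completed, always releasing resources in a finally block. Below is a self-contained sketch of that collect-then-apply shape using only java.util.concurrent; it is not HerdDB code, and apply/releasePending are placeholders standing in for apply(pending.pos, pending.entry, false) and releaseMultiplePendingLogEntryWorks(writes).

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;

// Sketch only: wait for every pending log write, then apply them on a callbacks executor,
// releasing the pending work whether or not an error occurred.
public class CollectThenApplySketch {

    static CompletableFuture<Integer> applyAll(List<CompletableFuture<String>> writes, Executor callbacks) {
        // stands in for Futures.collect(writes) in the snippet above
        CompletableFuture<Void> all = CompletableFuture.allOf(writes.toArray(new CompletableFuture[0]));
        return all.whenCompleteAsync((ignored, error) -> {
            try {
                if (error == null) {
                    for (CompletableFuture<String> w : writes) {
                        apply(w.join()); // mirrors apply(pending.pos, pending.entry, false)
                    }
                }
            } finally {
                releasePending(writes); // always release, like releaseMultiplePendingLogEntryWorks(writes)
            }
        }, callbacks).thenApply(ignored -> writes.size());
    }

    static void apply(String pendingEntry) { /* placeholder for applying a log entry */ }

    static void releasePending(List<CompletableFuture<String>> writes) { /* placeholder for releasing locks */ }

    public static void main(String[] args) {
        List<CompletableFuture<String>> writes = new ArrayList<>();
        writes.add(CompletableFuture.completedFuture("entry-1"));
        writes.add(CompletableFuture.completedFuture("entry-2"));
        System.out.println(applyAll(writes, Runnable::run).join()); // prints 2
    }
}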

Example 8 with DataStorageManagerException

use of herddb.storage.DataStorageManagerException in project herddb by diennea.

the class TableManager method onTransactionCommit.

@Override
public void onTransactionCommit(Transaction transaction, boolean recovery) throws DataStorageManagerException {
    if (transaction == null) {
        throw new DataStorageManagerException("transaction cannot be null");
    }
    boolean forceFlushTableData = false;
    if (createdInTransaction > 0) {
        if (transaction.transactionId != createdInTransaction) {
            throw new DataStorageManagerException("table " + table.tablespace + "." + table.name + " is available only on transaction " + createdInTransaction);
        }
        createdInTransaction = 0;
        forceFlushTableData = true;
    }
    if (!transaction.lastSequenceNumber.after(bootSequenceNumber)) {
        if (recovery) {
            LOGGER.log(Level.FINER, "ignoring transaction {0} commit on recovery, {1}.{2} data is newer: transaction {3}, table {4}", new Object[] { transaction.transactionId, table.tablespace, table.name, transaction.lastSequenceNumber, bootSequenceNumber });
            return;
        } else {
            throw new DataStorageManagerException("corrupted commit log " + table.tablespace + "." + table.name + " data is newer than transaction " + transaction.transactionId + " transaction " + transaction.lastSequenceNumber + " table " + bootSequenceNumber);
        }
    }
    boolean lockAcquired;
    try {
        lockAcquired = checkpointLock.asReadLock().tryLock(CHECKPOINT_LOCK_READ_TIMEOUT, SECONDS);
    } catch (InterruptedException err) {
        throw new DataStorageManagerException("interrupted while acquiring checkpoint lock during a commit", err);
    }
    if (!lockAcquired) {
        throw new DataStorageManagerException("timed out while acquiring checkpoint lock during a commit");
    }
    try {
        Map<Bytes, Record> changedRecords = transaction.changedRecords.get(table.name);
        // transaction is still holding locks on each record, so we can change records
        Map<Bytes, Record> newRecords = transaction.newRecords.get(table.name);
        if (newRecords != null) {
            for (Record record : newRecords.values()) {
                applyInsert(record.key, record.value, true);
            }
        }
        if (changedRecords != null) {
            for (Record r : changedRecords.values()) {
                applyUpdate(r.key, r.value);
            }
        }
        Set<Bytes> deletedRecords = transaction.deletedRecords.get(table.name);
        if (deletedRecords != null) {
            for (Bytes key : deletedRecords) {
                applyDelete(key);
            }
        }
    } finally {
        checkpointLock.asReadLock().unlock();
    }
    transaction.releaseLocksOnTable(table.name, locksManager);
    if (forceFlushTableData) {
        LOGGER.log(Level.FINE, "forcing local checkpoint, table " + table.name + " will be visible to all transactions now");
        checkpoint(false);
    }
}
Also used : Bytes(herddb.utils.Bytes) DataStorageManagerException(herddb.storage.DataStorageManagerException) Record(herddb.model.Record)
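
onTransactionCommit applies the transaction's new, changed and deleted records while holding the checkpoint lock in read mode with a timeout, so many commits can run concurrently while a checkpoint (the writer) excludes them all. Here is a minimal, self-contained sketch of that locking shape, assuming a StampedLock-style checkpointLock as suggested by asReadLock(); it is not HerdDB code and the timeout value is illustrative.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.StampedLock;

// Sketch only: take a read lock with a timeout around the commit work, always unlocking in finally.
public class CheckpointReadLockSketch {

    private final StampedLock checkpointLock = new StampedLock();
    private static final long LOCK_TIMEOUT_SECONDS = 60; // illustrative value

    void commit(Runnable applyChanges) {
        boolean lockAcquired;
        try {
            lockAcquired = checkpointLock.asReadLock().tryLock(LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
        } catch (InterruptedException err) {
            Thread.currentThread().interrupt();
            throw new IllegalStateException("interrupted while acquiring checkpoint lock", err);
        }
        if (!lockAcquired) {
            throw new IllegalStateException("timed out while acquiring checkpoint lock");
        }
        try {
            applyChanges.run(); // apply inserts, updates and deletes recorded by the transaction
        } finally {
            checkpointLock.asReadLock().unlock();
        }
    }

    public static void main(String[] args) {
        new CheckpointReadLockSketch().commit(() -> System.out.println("changes applied"));
    }
}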

Example 9 with DataStorageManagerException

use of herddb.storage.DataStorageManagerException in project herddb by diennea.

the class DBManager method start.

/**
 * Initial boot of the system
 *
 * @throws herddb.storage.DataStorageManagerException
 * @throws herddb.log.LogNotAvailableException
 * @throws herddb.metadata.MetadataStorageManagerException
 */
public void start() throws DataStorageManagerException, LogNotAvailableException, MetadataStorageManagerException {
    LOGGER.log(Level.FINE, "Starting DBManager at {0}", nodeId);
    if (serverConfiguration.getBoolean(ServerConfiguration.PROPERTY_JMX_ENABLE, ServerConfiguration.PROPERTY_JMX_ENABLE_DEFAULT)) {
        JMXUtils.registerDBManagerStatsMXBean(stats);
    }
    final long maxHeap = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
    /* If max memory isn't configured or is too high default it to maximum heap */
    if (maxMemoryReference == 0 || maxMemoryReference > maxHeap) {
        maxMemoryReference = maxHeap;
    }
    LOGGER.log(Level.INFO, ServerConfiguration.PROPERTY_MEMORY_LIMIT_REFERENCE + "= {0} bytes", Long.toString(maxMemoryReference));
    /* If max data memory for pages isn't configured or is too high default it to a maxMemoryReference percentage */
    if (maxDataUsedMemory == 0 || maxDataUsedMemory > maxMemoryReference) {
        maxDataUsedMemory = (long) (maxDataUsedMemoryPercentage * maxMemoryReference);
    }
    /* If max index memory for pages isn't configured or is too high default it to a maxMemoryReference percentage */
    if (maxIndexUsedMemory == 0 || maxIndexUsedMemory > maxMemoryReference) {
        maxIndexUsedMemory = (long) (maxIndexUsedMemoryPercentage * maxMemoryReference);
    }
    /* If max index memory for pages isn't configured or is too high default it to a maxMemoryReference percentage */
    if (maxPKUsedMemory == 0 || maxPKUsedMemory > maxMemoryReference) {
        maxPKUsedMemory = (long) (maxPKUsedMemoryPercentage * maxMemoryReference);
    }
    /* If max used memory is too high lower index and data accordingly */
    if (maxDataUsedMemory + maxIndexUsedMemory + maxPKUsedMemory > maxMemoryReference) {
        long data = (int) ((double) maxDataUsedMemory / ((double) (maxDataUsedMemory + maxIndexUsedMemory + maxPKUsedMemory)) * maxMemoryReference);
        long index = (int) ((double) maxIndexUsedMemory / ((double) (maxDataUsedMemory + maxIndexUsedMemory + maxPKUsedMemory)) * maxMemoryReference);
        long pk = (int) ((double) maxPKUsedMemory / ((double) (maxDataUsedMemory + maxIndexUsedMemory + maxPKUsedMemory)) * maxMemoryReference);
        maxDataUsedMemory = data;
        maxIndexUsedMemory = index;
        maxPKUsedMemory = pk;
    }
    memoryManager = new MemoryManager(maxDataUsedMemory, maxIndexUsedMemory, maxPKUsedMemory, maxLogicalPageSize);
    metadataStorageManager.start();
    if (clearAtBoot) {
        metadataStorageManager.clear();
    }
    metadataStorageManager.setMetadataChangeListener(this);
    NodeMetadata nodeMetadata = NodeMetadata.builder().host(hostData.getHost()).port(hostData.getPort()).ssl(hostData.isSsl()).nodeId(nodeId).build();
    if (!mode.equals(ServerConfiguration.PROPERTY_MODE_LOCAL)) {
        LOGGER.log(Level.INFO, "Registering on metadata storage manager my data: {0}", nodeMetadata);
    }
    metadataStorageManager.registerNode(nodeMetadata);
    try {
        TableSpaceManager local_node_virtual_tables_manager = new TableSpaceManager(nodeId, virtualTableSpaceId, virtualTableSpaceId, 1 /* expectedreplicacount */,
                metadataStorageManager, dataStorageManager, null, this, true);
        tablesSpaces.put(virtualTableSpaceId, local_node_virtual_tables_manager);
        local_node_virtual_tables_manager.start();
    } catch (DDLException | DataStorageManagerException | LogNotAvailableException | MetadataStorageManagerException error) {
        throw new IllegalStateException("cannot boot local virtual tablespace manager", error);
    }
    String initialDefaultTableSpaceLeader = nodeId;
    String initialDefaultTableSpaceReplicaNode = initialDefaultTableSpaceLeader;
    // disabled
    long initialDefaultTableSpaceMaxLeaderInactivityTime = 0;
    int expectedreplicacount = 1;
    // so that if you lose the node running the server you will have at most 1 minute of unavailability (no leader)
    if (ServerConfiguration.PROPERTY_MODE_DISKLESSCLUSTER.equals(mode)) {
        initialDefaultTableSpaceReplicaNode = TableSpace.ANY_NODE;
        // 1 minute
        initialDefaultTableSpaceMaxLeaderInactivityTime = 60_000;
        expectedreplicacount = serverConfiguration.getInt(ServerConfiguration.PROPERTY_DEFAULT_REPLICA_COUNT, ServerConfiguration.PROPERTY_DEFAULT_REPLICA_COUNT_DEFAULT);
    } else if (ServerConfiguration.PROPERTY_MODE_CLUSTER.equals(mode)) {
        expectedreplicacount = serverConfiguration.getInt(ServerConfiguration.PROPERTY_DEFAULT_REPLICA_COUNT, ServerConfiguration.PROPERTY_DEFAULT_REPLICA_COUNT_DEFAULT);
    }
    boolean created = metadataStorageManager.ensureDefaultTableSpace(initialDefaultTableSpaceLeader, initialDefaultTableSpaceReplicaNode, initialDefaultTableSpaceMaxLeaderInactivityTime, expectedreplicacount);
    if (created && !ServerConfiguration.PROPERTY_MODE_LOCAL.equals(mode)) {
        LOGGER.log(Level.INFO, "Created default tablespace " + TableSpace.DEFAULT + " with expectedreplicacount={0}, leader={1}, replica={2}, maxleaderinactivitytime={3}", new Object[] { expectedreplicacount, initialDefaultTableSpaceLeader, initialDefaultTableSpaceReplicaNode, initialDefaultTableSpaceMaxLeaderInactivityTime });
    }
    commitLogManager.start();
    generalLock.writeLock().lock();
    try {
        dataStorageManager.start();
    } finally {
        generalLock.writeLock().unlock();
    }
    activator.start();
    triggerActivator(ActivatorRunRequest.FULL);
}
Also used : DataStorageManagerException(herddb.storage.DataStorageManagerException) NodeMetadata(herddb.model.NodeMetadata) MetadataStorageManagerException(herddb.metadata.MetadataStorageManagerException) DDLException(herddb.model.DDLException) LogNotAvailableException(herddb.log.LogNotAvailableException)
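
start() sizes the data, index and primary-key page budgets and, when their sum exceeds maxMemoryReference, rescales each one by its share of the requested total so the three budgets fit in the global reference. The arithmetic is small enough to show in isolation; the sketch below is not HerdDB code, and the method name and the figures in main are only an example.

// Sketch only: proportional rescaling of three memory budgets to fit a global limit.
public class MemoryBudgetSketch {

    static long[] fitBudgets(long data, long index, long pk, long maxMemoryReference) {
        long requested = data + index + pk;
        if (requested <= maxMemoryReference) {
            return new long[] { data, index, pk };
        }
        long scaledData = (long) ((double) data / requested * maxMemoryReference);
        long scaledIndex = (long) ((double) index / requested * maxMemoryReference);
        long scaledPk = (long) ((double) pk / requested * maxMemoryReference);
        return new long[] { scaledData, scaledIndex, scaledPk };
    }

    public static void main(String[] args) {
        // ask for 6 GB of data + 3 GB of index + 3 GB of PK with only 4 GB available
        long gb = 1024L * 1024 * 1024;
        long[] budgets = fitBudgets(6 * gb, 3 * gb, 3 * gb, 4 * gb);
        System.out.println("data=" + budgets[0] + " index=" + budgets[1] + " pk=" + budgets[2]);
    }
}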

Example 10 with DataStorageManagerException

use of herddb.storage.DataStorageManagerException in project herddb by diennea.

the class DBManager method handleTableSpace.

private void handleTableSpace(TableSpace tableSpace) throws DataStorageManagerException, LogNotAvailableException, MetadataStorageManagerException, DDLException {
    String tableSpaceName = tableSpace.name;
    TableSpaceManager actual_manager = tablesSpaces.get(tableSpaceName);
    if (actual_manager != null && actual_manager.isFailed()) {
        LOGGER.log(Level.INFO, "Tablespace {0} is in 'Failed' status", new Object[] { tableSpaceName, nodeId });
        return;
    }
    if (actual_manager != null && actual_manager.isLeader() && !tableSpace.leaderId.equals(nodeId)) {
        LOGGER.log(Level.INFO, "Tablespace {0} leader is no more {1}, it changed to {2}", new Object[] { tableSpaceName, nodeId, tableSpace.leaderId });
        stopTableSpace(tableSpaceName, tableSpace.uuid);
        return;
    }
    if (actual_manager != null && !actual_manager.isLeader() && tableSpace.leaderId.equals(nodeId)) {
        LOGGER.log(Level.INFO, "Tablespace {0} need to switch to leadership on node {1}", new Object[] { tableSpaceName, nodeId });
        stopTableSpace(tableSpaceName, tableSpace.uuid);
        return;
    }
    if (tableSpace.isNodeAssignedToTableSpace(nodeId) && !tablesSpaces.containsKey(tableSpaceName)) {
        LOGGER.log(Level.INFO, "Booting tablespace {0} on {1}, uuid {2}", new Object[] { tableSpaceName, nodeId, tableSpace.uuid });
        long _start = System.currentTimeMillis();
        CommitLog commitLog = commitLogManager.createCommitLog(tableSpace.uuid, tableSpace.name, nodeId);
        TableSpaceManager manager = new TableSpaceManager(nodeId, tableSpaceName, tableSpace.uuid, tableSpace.expectedReplicaCount, metadataStorageManager, dataStorageManager, commitLog, this, false);
        try {
            manager.start();
            LOGGER.log(Level.INFO, "Boot success tablespace {0} on {1}, uuid {2}, time {3} ms leader:{4}", new Object[] { tableSpaceName, nodeId, tableSpace.uuid, (System.currentTimeMillis() - _start) + "", manager.isLeader() });
            tablesSpaces.put(tableSpaceName, manager);
            if (serverConfiguration.getBoolean(ServerConfiguration.PROPERTY_JMX_ENABLE, ServerConfiguration.PROPERTY_JMX_ENABLE_DEFAULT)) {
                JMXUtils.registerTableSpaceManagerStatsMXBean(tableSpaceName, manager.getStats());
            }
        } catch (DataStorageManagerException | LogNotAvailableException | MetadataStorageManagerException | DDLException t) {
            LOGGER.log(Level.SEVERE, "Error Booting tablespace {0} on {1}", new Object[] { tableSpaceName, nodeId });
            LOGGER.log(Level.SEVERE, "Error", t);
            tablesSpaces.remove(tableSpaceName);
            try {
                manager.close();
            } catch (Throwable t2) {
                LOGGER.log(Level.SEVERE, "Other Error", t2);
                t.addSuppressed(t2);
            }
            throw t;
        }
        return;
    }
    if (tablesSpaces.containsKey(tableSpaceName) && !tableSpace.isNodeAssignedToTableSpace(nodeId)) {
        LOGGER.log(Level.INFO, "Tablespace {0} on {1} is not more in replica list {3}, uuid {2}", new Object[] { tableSpaceName, nodeId, tableSpace.uuid, tableSpace.replicas + "" });
        stopTableSpace(tableSpaceName, tableSpace.uuid);
        return;
    }
    if (!tableSpace.isNodeAssignedToTableSpace("*") && tableSpace.replicas.size() < tableSpace.expectedReplicaCount) {
        List<NodeMetadata> nodes = metadataStorageManager.listNodes();
        LOGGER.log(Level.WARNING, "Tablespace {0} is underreplicated expectedReplicaCount={1}, replicas={2}, nodes={3}", new Object[] { tableSpaceName, tableSpace.expectedReplicaCount, tableSpace.replicas, nodes });
        List<String> availableOtherNodes = nodes.stream().map(n -> {
            return n.nodeId;
        }).filter(n -> {
            return !tableSpace.replicas.contains(n);
        }).collect(Collectors.toList());
        Collections.shuffle(availableOtherNodes);
        int countMissing = tableSpace.expectedReplicaCount - tableSpace.replicas.size();
        LOGGER.log(Level.WARNING, "Tablespace {0} is underreplicated expectedReplicaCount={1}, replicas={2}, missing {3}, availableOtherNodes={4}", new Object[] { tableSpaceName, tableSpace.expectedReplicaCount, tableSpace.replicas, countMissing, availableOtherNodes });
        if (!availableOtherNodes.isEmpty()) {
            TableSpace.Builder newTableSpaceBuilder = TableSpace.builder().cloning(tableSpace);
            while (!availableOtherNodes.isEmpty() && countMissing-- > 0) {
                String node = availableOtherNodes.remove(0);
                LOGGER.log(Level.WARNING, "Tablespace {0} adding {1} node as replica", new Object[] { tableSpaceName, node });
                newTableSpaceBuilder.replica(node);
            }
            TableSpace newTableSpace = newTableSpaceBuilder.build();
            boolean ok = metadataStorageManager.updateTableSpace(newTableSpace, tableSpace);
            if (!ok) {
                LOGGER.log(Level.SEVERE, "updating tableSpace {0} metadata failed, someone else altered metadata", tableSpaceName);
            }
        }
    }
    if (actual_manager != null && !actual_manager.isFailed() && actual_manager.isLeader()) {
        actual_manager.metadataUpdated(tableSpace);
    }
}
Also used : ExecutionPlan(herddb.model.ExecutionPlan) FileMetadataStorageManager(herddb.file.FileMetadataStorageManager) ServerSidePreparedStatementCache(herddb.server.ServerSidePreparedStatementCache) Table(herddb.model.Table) JMXUtils(herddb.jmx.JMXUtils) ClientConfiguration(herddb.client.ClientConfiguration) MetadataStorageManager(herddb.metadata.MetadataStorageManager) Channel(herddb.network.Channel) ServerConfiguration(herddb.server.ServerConfiguration) FastThreadLocalThread(io.netty.util.concurrent.FastThreadLocalThread) Map(java.util.Map) DDLStatementExecutionResult(herddb.model.DDLStatementExecutionResult) ConnectionsInfoProvider(herddb.core.stats.ConnectionsInfoProvider) DMLStatementExecutionResult(herddb.model.DMLStatementExecutionResult) ThreadFactory(java.util.concurrent.ThreadFactory) Futures(herddb.utils.Futures) Path(java.nio.file.Path) DataStorageManagerException(herddb.storage.DataStorageManagerException) DropTableSpaceStatement(herddb.model.commands.DropTableSpaceStatement) ScanResult(herddb.model.ScanResult) LogNotAvailableException(herddb.log.LogNotAvailableException) PduCodec(herddb.proto.PduCodec) NullStatsLogger(org.apache.bookkeeper.stats.NullStatsLogger) DataScanner(herddb.model.DataScanner) DDLException(herddb.model.DDLException) StatementExecutionException(herddb.model.StatementExecutionException) AbstractSQLPlanner(herddb.sql.AbstractSQLPlanner) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) LogSequenceNumber(herddb.log.LogSequenceNumber) Set(java.util.Set) BlockingQueue(java.util.concurrent.BlockingQueue) Logger(java.util.logging.Logger) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) ScanStatement(herddb.model.commands.ScanStatement) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) List(java.util.List) NotLeaderException(herddb.model.NotLeaderException) GetStatement(herddb.model.commands.GetStatement) NodeMetadata(herddb.model.NodeMetadata) TranslatedQuery(herddb.sql.TranslatedQuery) StatsLogger(org.apache.bookkeeper.stats.StatsLogger) DefaultJVMHalt(herddb.utils.DefaultJVMHalt) DDLStatement(herddb.model.DDLStatement) DataConsistencyStatementResult(herddb.model.DataConsistencyStatementResult) TableSpace(herddb.model.TableSpace) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings) Statement(herddb.model.Statement) AlterTableSpaceStatement(herddb.model.commands.AlterTableSpaceStatement) CalcitePlanner(herddb.sql.CalcitePlanner) CreateTableSpaceStatement(herddb.model.commands.CreateTableSpaceStatement) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) MetadataStorageManagerException(herddb.metadata.MetadataStorageManagerException) DataScannerException(herddb.model.DataScannerException) MetadataChangeListener(herddb.metadata.MetadataChangeListener) GetResult(herddb.model.GetResult) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) TableSpaceReplicaState(herddb.model.TableSpaceReplicaState) Pdu(herddb.proto.Pdu) ArrayList(java.util.ArrayList) Level(java.util.logging.Level) HashSet(java.util.HashSet) TableSpaceDoesNotExistException(herddb.model.TableSpaceDoesNotExistException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) ByteBuf(io.netty.buffer.ByteBuf) TransactionContext(herddb.model.TransactionContext) CommitLogManager(herddb.log.CommitLogManager) 
ManagementFactory(java.lang.management.ManagementFactory) ExecutorService(java.util.concurrent.ExecutorService) ReentrantLock(java.util.concurrent.locks.ReentrantLock) StatementExecutionResult(herddb.model.StatementExecutionResult) IOException(java.io.IOException) DMLStatement(herddb.model.DMLStatement) DataStorageManager(herddb.storage.DataStorageManager) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) DBManagerStatsMXBean(herddb.jmx.DBManagerStatsMXBean) AtomicLong(java.util.concurrent.atomic.AtomicLong) CommitLog(herddb.log.CommitLog) Condition(java.util.concurrent.locks.Condition) Lock(java.util.concurrent.locks.Lock) TableConsistencyCheckStatement(herddb.model.commands.TableConsistencyCheckStatement) JSQLParserPlanner(herddb.sql.JSQLParserPlanner) StatementEvaluationContext(herddb.model.StatementEvaluationContext) NullSQLPlanner(herddb.sql.NullSQLPlanner) PlansCache(herddb.sql.PlansCache) Collections(java.util.Collections) SECONDS(java.util.concurrent.TimeUnit.SECONDS) MemoryMetadataStorageManager(herddb.mem.MemoryMetadataStorageManager) TableSpaceConsistencyCheckStatement(herddb.model.commands.TableSpaceConsistencyCheckStatement) ServerHostData(herddb.network.ServerHostData) DataStorageManagerException(herddb.storage.DataStorageManagerException) TableSpace(herddb.model.TableSpace) MetadataStorageManagerException(herddb.metadata.MetadataStorageManagerException) NodeMetadata(herddb.model.NodeMetadata) CommitLog(herddb.log.CommitLog) DDLException(herddb.model.DDLException) LogNotAvailableException(herddb.log.LogNotAvailableException)
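
The under-replication branch of handleTableSpace picks replacement replicas by filtering out nodes that already host the tablespace, shuffling the remainder and taking as many as are missing. A self-contained sketch of just that selection step follows; it is not HerdDB code and the node names are made up.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Sketch only: choose replacement replicas from nodes that are not already replicas.
public class ReplicaRepairSketch {

    static List<String> chooseNewReplicas(List<String> allNodes, List<String> currentReplicas, int expectedReplicaCount) {
        List<String> available = new ArrayList<>();
        for (String node : allNodes) {
            if (!currentReplicas.contains(node)) {
                available.add(node);
            }
        }
        Collections.shuffle(available); // spread new replicas randomly across available nodes
        int countMissing = expectedReplicaCount - currentReplicas.size();
        List<String> chosen = new ArrayList<>();
        while (!available.isEmpty() && countMissing-- > 0) {
            chosen.add(available.remove(0));
        }
        return chosen;
    }

    public static void main(String[] args) {
        List<String> nodes = List.of("node-a", "node-b", "node-c", "node-d");
        List<String> replicas = List.of("node-a");
        System.out.println(chooseNewReplicas(nodes, replicas, 3)); // two nodes picked from b, c, d
    }
}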

Aggregations

DataStorageManagerException (herddb.storage.DataStorageManagerException): 140
IOException (java.io.IOException): 92
ArrayList (java.util.ArrayList): 46
LogSequenceNumber (herddb.log.LogSequenceNumber): 42
Bytes (herddb.utils.Bytes): 30
Path (java.nio.file.Path): 30
ExtendedDataInputStream (herddb.utils.ExtendedDataInputStream): 25
SimpleByteArrayInputStream (herddb.utils.SimpleByteArrayInputStream): 24
InputStream (java.io.InputStream): 24
StatementExecutionException (herddb.model.StatementExecutionException): 23
Table (herddb.model.Table): 22
HashMap (java.util.HashMap): 22
LogEntry (herddb.log.LogEntry): 21
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 21
LogNotAvailableException (herddb.log.LogNotAvailableException): 20
Record (herddb.model.Record): 20
Index (herddb.model.Index): 19
CommitLogResult (herddb.log.CommitLogResult): 18
Transaction (herddb.model.Transaction): 18
PostCheckpointAction (herddb.core.PostCheckpointAction): 16