Search in sources :

Example 1 with Segment

use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.

the class Database method start.

/**
 * {@inheritDoc}
 * <p>
 * Startup sequence: load every persisted {@link Segment} from
 * {@link #storage} in parallel, sort them chronologically, discard any
 * Segments that overlap (duplicate data), rotate in a fresh mutable
 * Segment, and (if enabled) schedule background compaction.
 * </p>
 */
@Override
public void start() {
    if (!running) {
        Logger.info("Database configured to store data in {}", directory);
        running = true;
        this.writer = new AwaitableExecutorService(Executors.newCachedThreadPool(ThreadFactories.namingThreadFactory("DatabaseWriter")));
        this.segments.clear();
        ArrayBuilder<Runnable> tasks = ArrayBuilder.builder();
        // Wrap in a synchronized view because the loader tasks below add to
        // this list concurrently.
        List<Segment> segments = Collections.synchronizedList(this.segments);
        // Use try-with-resources so the directory stream's underlying file
        // handle is released even if task registration throws; the original
        // bare files.close() leaked the handle on exception.
        try (Stream<Path> files = storage.files()) {
            files.forEach(file -> tasks.add(() -> {
                try {
                    Segment segment = Segment.load(file);
                    segments.add(segment);
                } catch (SegmentLoadingException e) {
                    // Best-effort: log and skip a corrupt Segment file
                    // instead of aborting the entire startup.
                    Logger.error("Error when trying to load Segment {}", file);
                    Logger.error("", e);
                }
            }));
        }
        if (tasks.length() > 0) {
            AwaitableExecutorService loader = new AwaitableExecutorService(Executors.newCachedThreadPool(ThreadFactories.namingThreadFactory("DatabaseLoader")));
            try {
                loader.await((task, error) -> Logger.error("Unexpected error when trying to load Database Segments: {}", error), tasks.build());
            } catch (InterruptedException e) {
                Logger.error("The Database was interrupted while starting...", e);
                // Restore the interrupt status and abort startup.
                Thread.currentThread().interrupt();
                return;
            } finally {
                loader.shutdown();
            }
        }
        // Sort the segments in chronological order
        Collections.sort(this.segments, Segment.TEMPORAL_COMPARATOR);
        // Remove segments that overlap. Segments may overlap if they are
        // duplicates resulting from a botched upgrade or reindex or if they
        // were involved in an optimization pass, but garbage collection
        // didn't run before the server shutdown.
        ListIterator<Segment> lit = segments.listIterator();
        while (lit.hasNext()) {
            if (lit.hasPrevious()) {
                // Peek at the element behind the cursor, then restore the
                // cursor and advance: #previous and #current are adjacent
                // in chronological order.
                Segment previous = lit.previous();
                lit.next();
                Segment current = lit.next();
                if (current.intersects(previous)) {
                    // Step back twice so that remove() targets #previous
                    // (the last element returned by the iterator); the
                    // next loop pass re-compares #current against its new
                    // predecessor, handling chains of overlaps.
                    lit.previous();
                    lit.previous();
                    lit.remove();
                    Logger.warn("Segment {} was not loaded because it contains duplicate data. It has been scheduled for garbage collection.", previous);
                // TODO: mark #previous for garbage collection
                }
            } else {
                lit.next();
            }
        }
        rotate(false);
        memory = new CacheState();
        /*
             * If enabled, setup Compaction to run continuously in the
             * background; trying to perform both "full" and "incremental"
             * compaction. Incremental compaction is opportunistic; attempting
             * frequently, but only occurring if no other conflicting work is
             * happening and only trying to compact one "shift". On the other
             * hand, full compaction runs less frequently, but is very
             * aggressive: blocking until any other conflicting work is done
             * and trying every possible shift.
             */
        // @formatter:off
        compactor = ENABLE_COMPACTION ? new SimilarityCompactor(storage) : NoOpCompactor.instance();
        fullCompaction = ENABLE_COMPACTION ? Executors.newScheduledThreadPool(1, createCompactionThreadFactory(tag, "Full")) : NoOpScheduledExecutorService.instance();
        fullCompaction.scheduleWithFixedDelay(() -> compactor.executeFullCompaction(), FULL_COMPACTION_INITIAL_DELAY_IN_SECONDS, FULL_COMPACTION_RUN_FREQUENCY_IN_SECONDS, TimeUnit.SECONDS);
        incrementalCompaction = ENABLE_COMPACTION ? Executors.newScheduledThreadPool(1, createCompactionThreadFactory(tag, "Incremental")) : NoOpScheduledExecutorService.instance();
        incrementalCompaction.scheduleWithFixedDelay(() -> compactor.tryIncrementalCompaction(), INCREMENTAL_COMPACTION_INITIAL_DELAY_IN_SECONDS, INCREMENTAL_COMPACTION_RUN_FREQUENCY_IN_SECONDS, TimeUnit.SECONDS);
        // @formatter:on
        Logger.info("Database is running with compaction {}.", ENABLE_COMPACTION ? "ON" : "OFF");
    }
}
Also used : Path(java.nio.file.Path) SegmentLoadingException(com.cinchapi.concourse.server.storage.db.kernel.SegmentLoadingException) AwaitableExecutorService(com.cinchapi.concourse.server.concurrent.AwaitableExecutorService) SimilarityCompactor(com.cinchapi.concourse.server.storage.db.compaction.similarity.SimilarityCompactor) Segment(com.cinchapi.concourse.server.storage.db.kernel.Segment)

Example 2 with Segment

use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.

the class Database method getLookupRecord.

/**
 * Return a {@link Record} that is guaranteed to reflect the present state
 * of whether {@code value} is stored for {@code key} in {@code record};
 * interrogate it with
 * {@link Record#contains(com.cinchapi.concourse.server.io.Byteable, com.cinchapi.concourse.server.io.Byteable)}.
 * <p>
 * The same question could be answered by the Records returned from
 * {@link #getTableRecord(Identifier)} and
 * {@link #getTableRecord(Identifier, Text)}, but this method tries to
 * short circuit by skipping {@link Revisions} that are irrelevant to
 * {@code record}, {@code key} and {@code value}. Consequently, the
 * returned {@link Record} is never cached and is unreliable for any other
 * query.
 * </p>
 *
 * @param record
 * @param key
 * @param value
 * @return the {@link Record}
 */
private Record<Identifier, Text, Value> getLookupRecord(Identifier record, Text key, Value value) {
    masterLock.readLock().lock();
    try {
        // Fast path #1: a cached full Record for the entire #record can
        // answer the lookup directly.
        Record<Identifier, Text, Value> cached = tableCache.getIfPresent(Composite.create(record));
        if (cached != null) {
            return cached;
        }
        // Fast path #2: a cached partial Record covering #record/#key.
        Composite recordKey = Composite.create(record, key);
        cached = tablePartialCache.getIfPresent(recordKey);
        if (cached != null) {
            return cached;
        }
        // Slow path: assemble a throwaway LookupRecord (intentionally NOT
        // cached since it has no utility beyond this single query).
        Composite recordKeyValue = Composite.create(record, key, value);
        LookupRecord lookup = new LookupRecord(record, key, value);
        for (Segment segment : segments) {
            if (segment.table().mightContain(recordKeyValue)) {
                // The LKV possibly exists in this Segment, so gather all
                // LK Revisions so the present state of LKV is decidable.
                segment.table().seek(recordKey, lookup);
            }
        }
        return lookup;
    } finally {
        masterLock.readLock().unlock();
    }
}
Also used : Identifier(com.cinchapi.concourse.server.model.Identifier) Composite(com.cinchapi.concourse.server.io.Composite) Value(com.cinchapi.concourse.server.model.Value) Text(com.cinchapi.concourse.server.model.Text) Segment(com.cinchapi.concourse.server.storage.db.kernel.Segment)

Example 3 with Segment

use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.

the class Database method getTableRecord.

/**
 * Return the potentially partial TableRecord identified by {@code key} in
 * {@code identifier}.
 * <p>
 * Even when the returned {@link TableRecord} is not technically
 * {@link TableRecord#isPartial() partial}, callers should treat it as if
 * it were (i.e. never read any key other than {@code key} from it).
 * </p>
 *
 * @param identifier
 * @param key
 * @return the TableRecord
 */
private TableRecord getTableRecord(Identifier identifier, Text key) {
    masterLock.readLock().lock();
    try {
        // Prefer a full record already held in memory over loading a
        // partial one.
        TableRecord full = tableCache.getIfPresent(Composite.create(identifier));
        if (full != null) {
            return full;
        }
        Composite partialKey = Composite.create(identifier, key);
        return tablePartialCache.get(partialKey, () -> {
            TableRecord partial = TableRecord.createPartial(identifier, key);
            for (Segment segment : segments) {
                segment.table().seek(partialKey, partial);
            }
            return partial;
        });
    } catch (ExecutionException e) {
        throw CheckedExceptions.wrapAsRuntimeException(e);
    } finally {
        masterLock.readLock().unlock();
    }
}
Also used : Composite(com.cinchapi.concourse.server.io.Composite) ExecutionException(java.util.concurrent.ExecutionException) Segment(com.cinchapi.concourse.server.storage.db.kernel.Segment)

Example 4 with Segment

use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.

the class Database method dump.

/**
 * Return dumps for all the blocks identified by {@code id}. This method IS
 * NOT necessarily optimized for performance, so it should be used with
 * caution. It's only really necessary to use this method for debugging.
 *
 * @param id
 * @return the block dumps.
 * @throws IllegalArgumentException if no Segment is identified by {@code id}
 */
public String dump(String id) {
    Segment segment = findSegment(segments, id);
    Preconditions.checkArgument(segment != null, "No segment identified by %s", id);
    // Concatenate the table, index and corpus dumps in that order.
    return segment.table().dump() + segment.index().dump() + segment.corpus().dump();
}
Also used : Segment(com.cinchapi.concourse.server.storage.db.kernel.Segment)

Example 5 with Segment

use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.

the class CompactorLogicTest method createTestSegment.

/**
 * Create a {@link Segment} populated with a scale-count number of ADD
 * writes and transfer it to a temporary test file.
 *
 * @return the test {@link Segment}
 */
private final Segment createTestSegment() {
    Segment segment = Segment.create();
    int remaining = TestData.getScaleCount();
    while (remaining-- > 0) {
        segment.acquire(TestData.getWriteAdd());
    }
    segment.transfer(Paths.get(TestData.getTemporaryTestFile()));
    return segment;
}
Also used : Segment(com.cinchapi.concourse.server.storage.db.kernel.Segment)

Aggregations

Segment (com.cinchapi.concourse.server.storage.db.kernel.Segment)16 Path (java.nio.file.Path)5 Composite (com.cinchapi.concourse.server.io.Composite)3 Identifier (com.cinchapi.concourse.server.model.Identifier)3 SegmentLoadingException (com.cinchapi.concourse.server.storage.db.kernel.SegmentLoadingException)3 List (java.util.List)3 Test (org.junit.Test)3 AwaitableExecutorService (com.cinchapi.concourse.server.concurrent.AwaitableExecutorService)2 Text (com.cinchapi.concourse.server.model.Text)2 Value (com.cinchapi.concourse.server.model.Value)2 StoreTest (com.cinchapi.concourse.server.storage.StoreTest)2 WriteStreamProfiler (com.cinchapi.concourse.server.storage.WriteStreamProfiler)2 SegmentStorageSystem (com.cinchapi.concourse.server.storage.db.SegmentStorageSystem)2 SimilarityCompactor (com.cinchapi.concourse.server.storage.db.compaction.similarity.SimilarityCompactor)2 Write (com.cinchapi.concourse.server.storage.temp.Write)2 ImmutableList (com.google.common.collect.ImmutableList)2 IOException (java.io.IOException)2 Paths (java.nio.file.Paths)2 Collectors (java.util.stream.Collectors)2 AnyStrings (com.cinchapi.common.base.AnyStrings)1