Use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
The class Database, method start().
@Override
public void start() {
    if(!running) {
        Logger.info("Database configured to store data in {}", directory);
        running = true;
        this.writer = new AwaitableExecutorService(
                Executors.newCachedThreadPool(ThreadFactories
                        .namingThreadFactory("DatabaseWriter")));
        this.segments.clear();
        ArrayBuilder<Runnable> tasks = ArrayBuilder.builder();
        List<Segment> segments = Collections.synchronizedList(this.segments);
        Stream<Path> files = storage.files();
        files.forEach(file -> tasks.add(() -> {
            try {
                Segment segment = Segment.load(file);
                segments.add(segment);
            }
            catch (SegmentLoadingException e) {
                Logger.error("Error when trying to load Segment {}", file);
                Logger.error("", e);
            }
        }));
        files.close();
        if(tasks.length() > 0) {
            AwaitableExecutorService loader = new AwaitableExecutorService(
                    Executors.newCachedThreadPool(ThreadFactories
                            .namingThreadFactory("DatabaseLoader")));
            try {
                loader.await((task, error) -> Logger.error(
                        "Unexpected error when trying to load Database Segments: {}",
                        error), tasks.build());
            }
            catch (InterruptedException e) {
                Logger.error("The Database was interrupted while starting...", e);
                Thread.currentThread().interrupt();
                return;
            }
            finally {
                loader.shutdown();
            }
        }
        // Sort the segments in chronological order.
        Collections.sort(this.segments, Segment.TEMPORAL_COMPARATOR);
        // Remove segments that overlap. Segments may overlap if they are
        // duplicates resulting from a botched upgrade or reindex, or if they
        // were involved in an optimization pass but garbage collection didn't
        // run before the server shut down.
        ListIterator<Segment> lit = segments.listIterator();
        while (lit.hasNext()) {
            if(lit.hasPrevious()) {
                Segment previous = lit.previous();
                lit.next();
                Segment current = lit.next();
                if(current.intersects(previous)) {
                    lit.previous();
                    lit.previous();
                    lit.remove();
                    Logger.warn("Segment {} was not loaded because it contains "
                            + "duplicate data. It has been scheduled for "
                            + "garbage collection.", previous);
                    // TODO: mark #previous for garbage collection
                }
            }
            else {
                lit.next();
            }
        }
        rotate(false);
        memory = new CacheState();
        /*
         * If enabled, set up compaction to run continuously in the
         * background, performing both "full" and "incremental" compaction.
         * Incremental compaction is opportunistic: it is attempted
         * frequently, but only occurs if no other conflicting work is
         * happening, and it only tries to compact one "shift". On the other
         * hand, full compaction runs less frequently, but is very
         * aggressive: it blocks until any other conflicting work is done
         * and tries every possible shift.
         */
        // @formatter:off
        compactor = ENABLE_COMPACTION
                ? new SimilarityCompactor(storage)
                : NoOpCompactor.instance();
        fullCompaction = ENABLE_COMPACTION
                ? Executors.newScheduledThreadPool(1,
                        createCompactionThreadFactory(tag, "Full"))
                : NoOpScheduledExecutorService.instance();
        fullCompaction.scheduleWithFixedDelay(
                () -> compactor.executeFullCompaction(),
                FULL_COMPACTION_INITIAL_DELAY_IN_SECONDS,
                FULL_COMPACTION_RUN_FREQUENCY_IN_SECONDS, TimeUnit.SECONDS);
        incrementalCompaction = ENABLE_COMPACTION
                ? Executors.newScheduledThreadPool(1,
                        createCompactionThreadFactory(tag, "Incremental"))
                : NoOpScheduledExecutorService.instance();
        incrementalCompaction.scheduleWithFixedDelay(
                () -> compactor.tryIncrementalCompaction(),
                INCREMENTAL_COMPACTION_INITIAL_DELAY_IN_SECONDS,
                INCREMENTAL_COMPACTION_RUN_FREQUENCY_IN_SECONDS,
                TimeUnit.SECONDS);
        // @formatter:on
        Logger.info("Database is running with compaction {}.",
                ENABLE_COMPACTION ? "ON" : "OFF");
    }
}
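The loading phase above is easy to miss amid the compaction setup, so here is a minimal, self-contained sketch of the same pattern in isolation: load every segment file from a directory, skip files that fail to load, and sort chronologically so that adjacent segments can be compared for overlap. The helper name and directory handling are hypothetical; Segment.load, SegmentLoadingException and Segment.TEMPORAL_COMPARATOR are used exactly as in start().

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Stream;

// Hypothetical helper; not part of the Database class.
static List<Segment> loadSegments(Path directory) {
    List<Segment> loaded = new ArrayList<>();
    try (Stream<Path> files = Files.list(directory)) {
        files.forEach(file -> {
            try {
                loaded.add(Segment.load(file));
            }
            catch (SegmentLoadingException e) {
                // Mirror start(): a segment that cannot be loaded is
                // logged and skipped rather than failing startup.
                Logger.error("Error when trying to load Segment {}", file);
            }
        });
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
    // Chronological order is a precondition for the overlap check in
    // start(), which only compares adjacent segments.
    loaded.sort(Segment.TEMPORAL_COMPARATOR);
    return loaded;
}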
Use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
The class Database, method getLookupRecord().
/**
 * Return a {@link Record} that is guaranteed to have the present state for
 * whether {@code value} is contained for {@code key} in {@code record}. The
 * truth of this query can be obtained using the
 * {@link Record#contains(com.cinchapi.concourse.server.io.Byteable, com.cinchapi.concourse.server.io.Byteable)}
 * method on the returned {@link Record}.
 * <p>
 * The query answered by this {@link Record} can also be answered by those
 * returned from {@link #getTableRecord(Identifier)} and
 * {@link #getTableRecord(Identifier, Text)}, but this method attempts to
 * short-circuit by not loading {@link Revisions} that don't involve
 * {@code record}, {@code key} and {@code value}. As a result, the
 * returned {@link Record} is not cached and cannot be reliably used for
 * other queries.
 * </p>
 *
 * @param record
 * @param key
 * @param value
 * @return the {@link Record}
 */
private Record<Identifier, Text, Value> getLookupRecord(Identifier record,
        Text key, Value value) {
    masterLock.readLock().lock();
    try {
        // First, see if there is a cached full or partial Record that can
        // allow a lookup to be performed.
        Composite c1 = Composite.create(record);
        Composite c2 = null;
        Composite c3 = null;
        Record<Identifier, Text, Value> lookup = tableCache.getIfPresent(c1);
        if(lookup == null) {
            c2 = Composite.create(record, key);
            lookup = tablePartialCache.getIfPresent(c2);
        }
        if(lookup == null) {
            // Create a LookupRecord to handle this, but DO NOT cache it
            // since it has no other utility.
            c3 = Composite.create(record, key, value);
            lookup = new LookupRecord(record, key, value);
            for (Segment segment : segments) {
                if(segment.table().mightContain(c3)) {
                    // Whenever it is possible that the LKV exists, we must
                    // gather the Revisions for LK within a Record so the
                    // current state of LKV can be determined.
                    segment.table().seek(c2, lookup);
                }
            }
        }
        return lookup;
    }
    finally {
        masterLock.readLock().unlock();
    }
}
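As the Javadoc warns, the returned record is only good for one question. A hedged sketch of the intended call pattern follows; the verify wrapper is hypothetical (not necessarily the actual Database API), but Record#contains is the lookup method the Javadoc points to, and Text and Value are assumed to satisfy its Byteable parameters.

// Hypothetical wrapper around getLookupRecord; illustrates that the only
// safe use of the returned Record is the single contains(...) question it
// was populated to answer.
private boolean verify(Identifier record, Text key, Value value) {
    Record<Identifier, Text, Value> lookup = getLookupRecord(record, key, value);
    // Present-state containment for the exact key/value pair; asking the
    // record anything else is unreliable because it was only seeded with
    // revisions that involve this record and key.
    return lookup.contains(key, value);
}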
Use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
The class Database, method getTableRecord().
/**
 * Return the potentially partial TableRecord identified by {@code key} in
 * {@code identifier}.
 * <p>
 * While the returned {@link TableRecord} may not be
 * {@link TableRecord#isPartial() partial}, the caller should interact
 * with it as if it were (e.g., do not perform reads for any keys other
 * than {@code key}).
 * </p>
 *
 * @param identifier
 * @param key
 * @return the TableRecord
 */
private TableRecord getTableRecord(Identifier identifier, Text key) {
    masterLock.readLock().lock();
    try {
        // Before loading a partial record, see if the full record is
        // present in memory.
        TableRecord table = tableCache.getIfPresent(Composite.create(identifier));
        if(table == null) {
            Composite composite = Composite.create(identifier, key);
            table = tablePartialCache.get(composite, () -> {
                TableRecord $ = TableRecord.createPartial(identifier, key);
                for (Segment segment : segments) {
                    segment.table().seek(composite, $);
                }
                return $;
            });
        }
        return table;
    }
    catch (ExecutionException e) {
        throw CheckedExceptions.wrapAsRuntimeException(e);
    }
    finally {
        masterLock.readLock().unlock();
    }
}
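The cache-miss branch is the interesting part: it streams every Segment's table revisions for the (identifier, key) composite into a fresh partial record. Extracted as a standalone helper for clarity (the method name is hypothetical; the calls match the loader lambda above):

// Hypothetical extraction of the tablePartialCache loader used above.
private TableRecord loadPartialTableRecord(Identifier identifier, Text key) {
    Composite composite = Composite.create(identifier, key);
    TableRecord record = TableRecord.createPartial(identifier, key);
    for (Segment segment : segments) {
        // Each segment appends the revisions it stores for the composite;
        // because segments are kept in chronological order, the record
        // accumulates the key's history in the right sequence.
        segment.table().seek(composite, record);
    }
    return record;
}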
Use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
The class Database, method dump().
/**
 * Return dumps for all the blocks in the {@link Segment} identified by
 * {@code id}. This method IS NOT necessarily optimized for performance,
 * so it should be used with caution. It's really only necessary to use
 * this method for debugging.
 *
 * @param id
 * @return the block dumps
 */
public String dump(String id) {
    Segment segment = findSegment(segments, id);
    Preconditions.checkArgument(segment != null,
            "No segment identified by %s", id);
    StringBuilder sb = new StringBuilder();
    sb.append(segment.table().dump());
    sb.append(segment.index().dump());
    sb.append(segment.corpus().dump());
    return sb.toString();
}
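A hedged usage sketch: the dump concatenates the table (primary), index (secondary) and corpus (search) views of a single segment. The id string below is hypothetical and would normally come from an inventory of loaded segments; database is assumed to be an already started Database instance.

try {
    // "segment-id" stands in for a real Segment id.
    String dump = database.dump("segment-id");
    System.out.println(dump); // table, index and corpus dumps, in that order
}
catch (IllegalArgumentException e) {
    // Thrown by Preconditions.checkArgument when no segment matches the id.
    System.err.println(e.getMessage());
}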
Use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
The class CompactorLogicTest, method createTestSegment().
private final Segment createTestSegment() {
    Segment segment = Segment.create();
    // Fill the segment with a scale-dependent number of random ADD writes.
    for (int i = 0; i < TestData.getScaleCount(); ++i) {
        segment.acquire(TestData.getWriteAdd());
    }
    // Flush the segment to a temporary file so it behaves like one that
    // was loaded from disk.
    segment.transfer(Paths.get(TestData.getTemporaryTestFile()));
    return segment;
}
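The helper exercises the full Segment write lifecycle: a segment starts mutable and in-memory, buffers Writes via acquire(...), and is frozen to disk via transfer(Path), after which it can be read back with Segment.load. A hedged sketch of that lifecycle in isolation, reusing the TestData utilities from the test above (the round-trip helper itself is hypothetical):

// Illustrative lifecycle sketch; mirrors createTestSegment plus a reload.
static Segment roundTrip() throws SegmentLoadingException {
    Segment segment = Segment.create();       // new, mutable, in-memory
    segment.acquire(TestData.getWriteAdd());  // buffer one random ADD write
    Path file = Paths.get(TestData.getTemporaryTestFile());
    segment.transfer(file);                   // flush to disk; now immutable
    return Segment.load(file);                // read the same data back
}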