Use of com.cinchapi.concourse.server.concurrent.AwaitableExecutorService in the concourse project by cinchapi: the start method of the Buffer class.
@Override
public void start() {
    if (!running) {
        running = true;
        Logger.info("Buffer configured to store data in {}", directory);
        // Executor used to asynchronously sync page content to disk.
        syncer = new AwaitableExecutorService(
                Executors.newCachedThreadPool(ThreadFactories
                        .namingThreadFactory(threadNamePrefix + "-%d")));
        // Load the existing page files, sorted in natural order so that
        // Buffer content is replayed in the order it was written.
        SortedMap<File, Page> pageSorter = Maps
                .newTreeMap(NaturalSorter.INSTANCE);
        // FIX: File#listFiles() returns null (instead of an empty array)
        // when the directory does not exist or cannot be read; the
        // original code would NPE in that case.
        File[] files = new File(directory).listFiles();
        if (files != null) {
            for (File file : files) {
                if (!file.isDirectory()) {
                    Page page = new Page(file.getAbsolutePath());
                    pageSorter.put(file, page);
                    Logger.info("Loading Buffer content from {}...", page);
                }
            }
        }
        pages.clear();
        pages.addAll(pageSorter.values());
        if (pages.isEmpty()) {
            // The Buffer always needs at least one writable page.
            addPage(false);
        }
        else {
            // The most recently created page accepts new writes.
            currentPage = pages.get(pages.size() - 1);
        }
    }
}
Use of com.cinchapi.concourse.server.concurrent.AwaitableExecutorService in the concourse project by cinchapi: the start method of the Database class.
@Override
public void start() {
    if (!running) {
        Logger.info("Database configured to store data in {}", directory);
        running = true;
        // Executor used to concurrently write Segment data.
        this.writer = new AwaitableExecutorService(
                Executors.newCachedThreadPool(ThreadFactories
                        .namingThreadFactory("DatabaseWriter")));
        this.segments.clear();
        ArrayBuilder<Runnable> tasks = ArrayBuilder.builder();
        // Wrap in a synchronized view because the load tasks below run
        // concurrently and each appends to this list.
        List<Segment> segments = Collections.synchronizedList(this.segments);
        // FIX: use try-with-resources so the Stream (which may hold an
        // open directory handle) is closed even if task registration
        // throws; the original only called close() on the happy path.
        try (Stream<Path> files = storage.files()) {
            files.forEach(file -> tasks.add(() -> {
                try {
                    Segment segment = Segment.load(file);
                    segments.add(segment);
                }
                catch (SegmentLoadingException e) {
                    Logger.error("Error when trying to load Segment {}",
                            file);
                    Logger.error("", e);
                }
            }));
        }
        if (tasks.length() > 0) {
            // Load all Segments concurrently and wait for completion
            // before proceeding.
            AwaitableExecutorService loader = new AwaitableExecutorService(
                    Executors.newCachedThreadPool(ThreadFactories
                            .namingThreadFactory("DatabaseLoader")));
            try {
                loader.await((task, error) -> Logger.error(
                        "Unexpected error when trying to load Database Segments: {}",
                        error), tasks.build());
            }
            catch (InterruptedException e) {
                Logger.error("The Database was interrupted while starting...",
                        e);
                Thread.currentThread().interrupt();
                return;
            }
            finally {
                loader.shutdown();
            }
        }
        // Sort the segments in chronological order
        Collections.sort(this.segments, Segment.TEMPORAL_COMPARATOR);
        // Remove segments that overlap. Segments may overlap if they are
        // duplicates resulting from a botched upgrade or reindex or if they
        // were involved in an optimization pass, but garbage collection
        // didn't run before the server shutdown.
        // NOTE: the previous()/next() dance below is deliberate: it peeks
        // at the prior element, restores the cursor, reads the current
        // element, and (on overlap) backs up twice so that remove()
        // discards the PREVIOUS (duplicate) segment.
        ListIterator<Segment> lit = segments.listIterator();
        while (lit.hasNext()) {
            if (lit.hasPrevious()) {
                Segment previous = lit.previous();
                lit.next();
                Segment current = lit.next();
                if (current.intersects(previous)) {
                    lit.previous();
                    lit.previous();
                    lit.remove();
                    Logger.warn(
                            "Segment {} was not loaded because it contains duplicate data. It has been scheduled for garbage collection.",
                            previous);
                    // TODO: mark #previous for garbage collection
                }
            }
            else {
                lit.next();
            }
        }
        rotate(false);
        memory = new CacheState();
        /*
         * If enabled, setup Compaction to run continuously in the
         * background; trying to perform both "full" and "incremental"
         * compaction. Incremental compaction is opportunistic; attempting
         * frequently, but only occurring if no other conflicting work is
         * happening and only trying to compact one "shift". On the other
         * hand,full compaction runs less frequently, but is very
         * aggressive: blocking until any other conflicting work is done
         * and trying every possible shift.
         */
        // @formatter:off
        compactor = ENABLE_COMPACTION
                ? new SimilarityCompactor(storage)
                : NoOpCompactor.instance();
        fullCompaction = ENABLE_COMPACTION
                ? Executors.newScheduledThreadPool(1,
                        createCompactionThreadFactory(tag, "Full"))
                : NoOpScheduledExecutorService.instance();
        fullCompaction.scheduleWithFixedDelay(
                () -> compactor.executeFullCompaction(),
                FULL_COMPACTION_INITIAL_DELAY_IN_SECONDS,
                FULL_COMPACTION_RUN_FREQUENCY_IN_SECONDS, TimeUnit.SECONDS);
        incrementalCompaction = ENABLE_COMPACTION
                ? Executors.newScheduledThreadPool(1,
                        createCompactionThreadFactory(tag, "Incremental"))
                : NoOpScheduledExecutorService.instance();
        incrementalCompaction.scheduleWithFixedDelay(
                () -> compactor.tryIncrementalCompaction(),
                INCREMENTAL_COMPACTION_INITIAL_DELAY_IN_SECONDS,
                INCREMENTAL_COMPACTION_RUN_FREQUENCY_IN_SECONDS,
                TimeUnit.SECONDS);
        // @formatter:on
        Logger.info("Database is running with compaction {}.",
                ENABLE_COMPACTION ? "ON" : "OFF");
    }
}
Use of com.cinchapi.concourse.server.concurrent.AwaitableExecutorService in the concourse project by cinchapi: the testConcurrency method of the SegmentTest class.
@Test
public void testConcurrency() throws InterruptedException {
    /*
     * For each of 1000 writes, race a reader thread against a writer
     * thread. The reader continuously checks that the table, index and
     * corpus views of the Segment become visible atomically (a "later"
     * view must never be populated while an "earlier" one is still
     * empty), and after the write completes, all three views must
     * contain the data.
     */
    AtomicBoolean succeeded = new AtomicBoolean(true);
    AwaitableExecutorService executor = new AwaitableExecutorService(
            Executors.newCachedThreadPool());
    try {
        for (int i = 0; i < 1000; ++i) {
            AtomicBoolean done = new AtomicBoolean(false);
            long record = i;
            String key = Long.toString(record);
            TObject value = Convert.javaToThrift(Long.toString(record));
            Write write = Write.add(key, value, record);
            Identifier pk = Identifier.of(record);
            Text text = Text.wrap(key);
            Thread reader = new Thread(() -> {
                while (!done.get()) {
                    TableRecord tr = TableRecord.create(pk);
                    IndexRecord ir = IndexRecord.create(text);
                    CorpusRecord cr = CorpusRecord.create(text);
                    segment.table().seek(Composite.create(pk), tr);
                    segment.index().seek(Composite.create(text), ir);
                    segment.corpus().seek(Composite.create(text), cr);
                    if (!done.get() && tr.isEmpty() != ir.isEmpty()) {
                        if (!tr.isEmpty() && ir.isEmpty()) {
                            // Later read is empty
                            succeeded.set(false);
                            System.out.println(AnyStrings.format(
                                    "table empty = {} and index empty = {} for {}",
                                    tr.isEmpty(), ir.isEmpty(), record));
                        }
                    }
                    if (!done.get() && ir.isEmpty() != cr.isEmpty()) {
                        if (!ir.isEmpty() && cr.isEmpty()) {
                            // Later read is empty
                            succeeded.set(false);
                            // FIX: the original passed tr.isEmpty() as the
                            // first argument, but this message reports the
                            // index vs corpus comparison, so it must log
                            // ir.isEmpty().
                            System.out.println(AnyStrings.format(
                                    "index empty = {} and corpus empty = {} for {}",
                                    ir.isEmpty(), cr.isEmpty(), record));
                        }
                    }
                }
                // The write has finished; every view must now be populated.
                TableRecord tr = TableRecord.create(pk);
                IndexRecord ir = IndexRecord.create(text);
                CorpusRecord cr = CorpusRecord.create(text);
                segment.table().seek(Composite.create(pk), tr);
                segment.index().seek(Composite.create(text), ir);
                segment.corpus().seek(Composite.create(text), cr);
                if (tr.isEmpty()) {
                    succeeded.set(false);
                    System.out.println(
                            "After write finished, table still empty for "
                                    + record);
                }
                if (ir.isEmpty()) {
                    succeeded.set(false);
                    System.out.println(
                            "After write finished, index still empty for "
                                    + record);
                }
                if (cr.isEmpty()) {
                    succeeded.set(false);
                    System.out.println(
                            "After write finished, corpus still empty for "
                                    + record);
                }
            });
            Thread writer = new Thread(() -> {
                try {
                    segment.acquire(write, executor);
                    done.set(true);
                }
                catch (InterruptedException e) {
                    e.printStackTrace();
                }
            });
            reader.start();
            writer.start();
            writer.join();
            reader.join();
        }
        Assert.assertTrue(succeeded.get());
    }
    finally {
        executor.shutdown();
    }
}
Aggregations