use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
the class CompactorTests method getStorageSystem.
/**
* Return a {@link SegmentStorageSystem} to use in a test.
*
* @return the {@link SegmentStorageSystem}
*/
public static SegmentStorageSystem getStorageSystem() {
    return new SegmentStorageSystem() {

        // A temp file whose partition is used to report disk space stats
        File fs = new File(FileSystem.tempFile());

        List<Segment> segments = new ArrayList<>();

        Lock lock = new ReentrantLock();

        @Override
        public long availableDiskSpace() {
            return fs.getUsableSpace();
        }

        @Override
        public Lock lock() {
            return lock;
        }

        @Override
        public Path save(Segment segment) {
            // Transfer the Segment's data to a new temp file and return its
            // location
            Path file = Paths.get(FileSystem.tempFile());
            segment.transfer(file);
            return file;
        }

        @Override
        public List<Segment> segments() {
            return segments;
        }

        @Override
        public long totalDiskSpace() {
            return fs.getTotalSpace();
        }

    };
}
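A minimal usage sketch, not taken from the Concourse sources: it exercises the in-memory SegmentStorageSystem above using only calls that appear elsewhere on this page (Segment.create(), Segment#acquire(Write), Write.add(...), and the interface methods defined here); how a Compactor would consume the storage system is deliberately left out.
SegmentStorageSystem storage = CompactorTests.getStorageSystem();
Segment segment = Segment.create();
// Assumed: a freshly created Segment can acquire a Write before being saved
segment.acquire(Write.add(TestData.getString(), TestData.getTObject(), Time.now()));
// save() transfers the Segment to a temp file; track it in the system's list
Path file = storage.save(segment);
storage.segments().add(segment);
Assert.assertTrue(storage.availableDiskSpace() <= storage.totalDiskSpace());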
use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
the class DatabaseTest method testDatabaseRemovesDuplicateSegmentsOnStartupSanityCheck.
@Test
public void testDatabaseRemovesDuplicateSegmentsOnStartupSanityCheck() {
    Database db = (Database) store;
    int count = 4;
    for (int i = 0; i < count; ++i) {
        db.accept(Write.add(TestData.getString(), TestData.getTObject(),
                Time.now()));
        db.sync();
    }
    List<Segment> segments = Reflection.get("segments", db);
    Segment seg1 = segments.get(1);
    Segment seg2 = segments.get(2);
    Segment merged = Segment.create();
    seg1.writes().forEach(write -> merged.acquire(write));
    seg2.writes().forEach(write -> merged.acquire(write));
    merged.transfer(Paths.get(current).resolve("segments")
            .resolve(UUID.randomUUID() + ".seg"));
    db.stop();
    db.start();
    int expected = 3;
    segments = Reflection.get("segments", db);
    Assert.assertEquals(expected + 1, segments.size()); // size includes seg0
}
use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
the class Database method stop.
@Override
public void stop() {
    if (running) {
        running = false;
        writer.shutdown();
        memory = null;
        Streams.concat(
                ImmutableList.of(tableCache, tablePartialCache, indexCache)
                        .stream(),
                corpusCaches.values().stream())
                .forEach(cache -> {
                    cache.invalidateAll();
                });
        for (Segment segment : segments) {
            try {
                segment.close();
            }
            catch (IOException e) {
                throw CheckedExceptions.wrapAsRuntimeException(e);
            }
        }
        fullCompaction.shutdownNow();
        incrementalCompaction.shutdownNow();
    }
}
use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
the class Database method reconcile.
@Override
public void reconcile(Set<HashCode> hashes) {
    Logger.info("Reconciling the states of the Database and Buffer...");
    // If the data in the most recently transported Segment is still entirely
    // present in the Buffer, discard that Segment so its Writes are not
    // duplicated when the Buffer transports them again.
    if (segments.size() > 1) {
        int index = segments.size() - 2;
        Segment seg1 = segments.get(index);
        if (hashes.containsAll(seg1.hashes())) {
            Logger.warn("The data in {} is still completely in the BUFFER so it is being discarded", seg1);
            segments.remove(index);
        }
    }
}
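A hypothetical test sketch (not the project's actual test code) for reconcile(), mirroring the DatabaseTest setup above where store is the Database under test; it assumes Reflection.get returns the live segments list and that Guava's Sets utility is available, and otherwise uses only calls shown in these snippets.
Database db = (Database) store;
db.accept(Write.add(TestData.getString(), TestData.getTObject(), Time.now()));
db.sync();
List<Segment> segments = Reflection.get("segments", db);
int before = segments.size();
// Pretend the Buffer still reports every Write hash from the most recently
// transported Segment; reconcile() should then discard that Segment
Set<HashCode> stillBuffered = Sets.newHashSet(segments.get(segments.size() - 2).hashes());
db.reconcile(stillBuffered);
Assert.assertEquals(before - 1, segments.size());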
use of com.cinchapi.concourse.server.storage.db.kernel.Segment in project concourse by cinchapi.
the class Database method repair.
@Override
public void repair() {
    masterLock.writeLock().lock();
    try {
        WriteStreamProfiler<Segment> profiler = new WriteStreamProfiler<>(
                segments);
        Map<Segment, Segment> deduped = profiler
                .deduplicate(() -> Segment.create());
        if (!deduped.isEmpty()) {
            // Swap each Segment that contains duplicate data with its clean
            // replacement and delete the original
            for (int i = 0; i < segments.size(); ++i) {
                Segment segment = segments.get(i);
                Segment clean = deduped.get(segment);
                if (clean != null) {
                    clean.transfer(storage.directory()
                            .resolve(UUID.randomUUID() + ".seg"));
                    segments.set(i, clean);
                    segment.delete();
                }
            }
            int total = profiler.duplicates().size();
            Logger.warn("Replaced {} Segments that contained duplicate data. In total, across all Segments, there were {} Write{} duplicated.",
                    deduped.size(), total, total != 1 ? "s" : "");
        }
    }
    finally {
        masterLock.writeLock().unlock();
    }
}
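A minimal, hypothetical sketch of the deduplication pattern that repair() relies on, limited to the WriteStreamProfiler, Segment, and Write calls visible in the snippets above; whether two fresh Segments acquiring the same Write is reported as duplication in exactly this way is an assumption, not something these excerpts confirm.
Segment a = Segment.create();
Segment b = Segment.create();
Write write = Write.add(TestData.getString(), TestData.getTObject(), Time.now());
a.acquire(write);
b.acquire(write); // assumed to create the kind of duplication the profiler reports
WriteStreamProfiler<Segment> profiler = new WriteStreamProfiler<>(ImmutableList.of(a, b));
Map<Segment, Segment> deduped = profiler.deduplicate(() -> Segment.create());
// deduped maps each Segment that contained duplicate data to a clean replacement
System.out.println(deduped.size() + " Segment(s) with duplicate data; " + profiler.duplicates().size() + " duplicated Write(s) in total");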