Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class TestByzantineValueReadWrite, method testWriteDataPoint.
@Test
public void testWriteDataPoint() throws IOException {
    ByteBuffer buf = ByteBuffer.allocateDirect(1024);
    ByzantineValueWriter bwriter = new ByzantineValueWriter();
    Writer writer = bwriter;
    writer.configure(buf, true, startOffset);
    for (long i = 0; i < 10; i++) {
        writer.add(i);
    }
    assertEquals(10, bwriter.getCount());
    buf = bwriter.getBuf();
    buf.flip();
    // skip the leading header byte, then verify the persisted point count
    buf.get();
    assertEquals(10, buf.getInt());
}
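For completeness, a minimal read-back sketch; it assumes ByzantineValueWriter exposes the same getReader()/getCount()/read() API that the compaction example further down uses, so treat it as illustrative rather than part of the test.
// Hypothetical read-back, mirroring the Reader usage in the compaction
// example below; not taken from the test itself.
Reader reader = bwriter.getReader();
for (long i = 0; i < reader.getCount(); i++) {
    assertEquals(i, reader.read());
}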
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class TestSeries, method testIterator.
@SuppressWarnings("unchecked")
@Test
public void testIterator() throws IOException {
    measurement.setTimebucket(1024);
    Series series = new Series(new ByteString("idasdasda"), 0);
    long ts = 1497720652566L;
    for (int i = 0; i < 10000; i++) {
        Point dp = Point.newBuilder().setTimestamp(ts + i * 200)
                .addValueFieldName("f1").addFp(false).addValue(i)
                .addValueFieldName("f2").addFp(true).addValue(Double.doubleToLongBits(i * 1.1))
                .build();
        series.addPoint(dp, measurement);
    }
    // check time buckets: 10,000 points spaced 200 ms apart straddle three 1,024 s buckets
    assertEquals(3, series.getBucketMap().size());
    // query iterators: two requested fields plus the implicit timestamp iterator (hence 3)
    FieldReaderIterator[] queryIterators = series.queryIterators(measurement,
            new ArrayList<>(Arrays.asList("f1", "f2")), Long.MAX_VALUE, Long.MIN_VALUE);
    assertEquals(3, queryIterators.length);
    // must respond even when there is nothing selectable in the time range
    queryIterators = series.queryIterators(measurement, Arrays.asList("f1", "f2"), Long.MAX_VALUE, Long.MAX_VALUE);
    assertEquals(3, queryIterators.length);
    // must respond even when there is nothing selectable in the time range
    queryIterators = series.queryIterators(measurement, Arrays.asList("f1", "TS"), Long.MAX_VALUE, Long.MAX_VALUE);
    assertEquals(2, queryIterators.length);
    // no fields should result in no iterators
    queryIterators = series.queryIterators(measurement, Arrays.asList(), Long.MAX_VALUE, Long.MAX_VALUE);
    assertEquals(0, queryIterators.length);
    final List<Writer> compactedWriters = series.compact(measurement);
    assertTrue(compactedWriters.size() > 0);
}
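A quick sanity check of the three-bucket assertion, assuming bucket keys are floor(timestampSeconds / timeBucketSeconds):
// Not part of the test: arithmetic behind assertEquals(3, ...getBucketMap().size()).
long startSec = 1497720652566L / 1000;                  // first point, in seconds
long endSec = (1497720652566L + 9999 * 200L) / 1000;    // last point, in seconds
long buckets = endSec / 1024 - startSec / 1024 + 1;     // == 3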
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class Measurement, method collectGarbage.
public default Set<String> collectGarbage(Archiver archiver) throws IOException {
    return runCleanupOperation("garbage collection", series -> {
        try {
            Map<Integer, List<Writer>> collectedGarbage = series.collectGarbage(this);
            List<Writer> output = new ArrayList<>();
            // null-check before size() to avoid an NPE when nothing was collected
            if (collectedGarbage != null && collectedGarbage.size() > 0) {
                getLogger().fine("Collected garbage:" + collectedGarbage.size() + " series:" + series.getSeriesId());
            }
            if (collectedGarbage != null) {
                for (Entry<Integer, List<Writer>> entry : collectedGarbage.entrySet()) {
                    for (Writer writer : entry.getValue()) {
                        if (archiver != null) {
                            byte[] buf = Archiver.writerToByteArray(writer);
                            TimeSeriesArchivalObject archivalObject = new TimeSeriesArchivalObject(getDbName(),
                                    getMeasurementName(), series.getSeriesId(), entry.getKey(), buf);
                            try {
                                archiver.archive(archivalObject);
                            } catch (ArchiveException e) {
                                getLogger().log(Level.SEVERE, "Series failed to archive, series:"
                                        + series.getSeriesId() + " db:" + getDbName() + " m:" + getMeasurementName(), e);
                            }
                        }
                        output.add(writer);
                    }
                }
            }
            return output;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    });
}
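A minimal call-site sketch; passing a null archiver is an assumption that leans on the archiver != null guard above, so expired writers are simply dropped instead of archived.
// Hypothetical usage ('measurement' assumed in scope): run GC without an
// archiver; the guard above makes the null path safe and skips archival.
Set<String> cleaned = measurement.collectGarbage(null);
Passing a real Archiver instead routes each evicted Writer through Archiver.writerToByteArray before it is dropped.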
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class Series, method collectGarbage.
public Map<Integer, List<Writer>> collectGarbage(Measurement measurement) throws IOException {
    Map<Integer, List<Writer>> collectedGarbageMap = new HashMap<>();
    logger.finer("Retention buckets:" + measurement.getRetentionBuckets().get());
    while (getBucketMap().size() > measurement.getRetentionBuckets().get()) {
        writeLock.lock();
        try {
            int oldSize = getBucketMap().size();
            // evict the oldest time bucket first
            Integer key = getBucketMap().firstKey();
            Map<String, Field> fieldMap = getBucketMap().remove(key);
            List<Writer> gcedBuckets = new ArrayList<>();
            collectedGarbageMap.put(key, gcedBuckets);
            for (Field field : fieldMap.values()) {
                gcedBuckets.addAll(field.getWriters());
                logger.log(Level.FINEST,
                        "GC," + measurement.getMeasurementName() + ":" + seriesId + " removing bucket:" + key
                                + ": as it passed retention period of:" + measurement.getRetentionBuckets().get()
                                + ":old size:" + oldSize + ":newsize:" + getBucketMap().size() + ":");
            }
        } finally {
            writeLock.unlock();
        }
    }
    if (collectedGarbageMap.size() > 0) {
        logger.fine(() -> "GC," + measurement.getMeasurementName() + " buckets:" + collectedGarbageMap.size()
                + " retention size:" + measurement.getRetentionBuckets().get());
    }
    return collectedGarbageMap;
}
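The loop evicts the oldest bucket (firstKey) until the count is within the retention limit; a small illustrative post-condition check, with series and measurement assumed in scope:
// Illustrative, not from the source: after GC, the bucket count is within
// retention and every evicted key predates the surviving buckets.
Map<Integer, List<Writer>> gced = series.collectGarbage(measurement);
assert series.getBucketMap().size() <= measurement.getRetentionBuckets().get();
assert series.getBucketMap().isEmpty()
        || gced.keySet().stream().allMatch(k -> k < series.getBucketMap().firstKey());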
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class ValueField, method compact.
/**
 * Compacts old Writers into one for every single time bucket. This ensures the
 * buffers are compacted and provides an opportunity to use a higher
 * compression rate algorithm for the bucket. All Writers but the last are
 * read-only, so operating on them does not impact ongoing writes.
 *
 * @param measurement the measurement that owns this field
 * @param writeLock   lock held while the writer list is swapped
 * @param functions   optional callbacks invoked on the writer list before the swap
 * @return null if there is nothing to compact, or an empty list if all
 *         compaction attempts fail
 * @throws IOException
 */
@SafeVarargs
public final List<Writer> compact(Measurement measurement, Lock writeLock,
        Consumer<List<? extends Writer>>... functions) throws IOException {
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx = timerCompaction.time();
    }
    // size check is to avoid unnecessary calls and exit fast
    if (writerList.size() <= 1) {
        return null;
    }
    List<Writer> compactedWriter = new ArrayList<>();
    int id = CompressionFactory.getIdByValueClass(compactionClass);
    int listSize = writerList.size() - 1;
    int pointCount = writerList.subList(0, listSize).stream().mapToInt(s -> s.getCount()).sum();
    int total = writerList.subList(0, listSize).stream().mapToInt(s -> s.getPosition()).sum();
    if (total == 0) {
        logger.warning("Ignoring bucket for compaction, not enough bytes. THIS BUG SHOULD BE INVESTIGATED");
        return null;
    }
    ValueWriter writer = getWriterInstance(compactionClass);
    int compactedPoints = 0;
    double bufSize = total * compactionRatio;
    logger.finer("Allocating buffer:" + total + " Vs. " + pointCount * 16 + " max compacted buffer:" + bufSize);
    logger.finer("Getting sublist from:" + 0 + " to:" + (writerList.size() - 1));
    ByteBuffer buf = ByteBuffer.allocateDirect((int) bufSize);
    // header byte 0: codec id of the compaction algorithm
    buf.put((byte) id);
    // header byte 1: buffer index; 0 since this buffer will be the first one
    buf.put(1, (byte) 0);
    writer.configure(buf, true, START_OFFSET);
    ValueWriter input = writerList.get(0);
    // read all but the last writer and insert into new temp writer
    try {
        for (int i = 0; i < writerList.size() - 1; i++) {
            input = writerList.get(i);
            Reader reader = input.getReader();
            for (int k = 0; k < reader.getCount(); k++) {
                long pair = reader.read();
                writer.add(pair);
                compactedPoints++;
            }
        }
        writer.makeReadOnly(false);
    } catch (RollOverException e) {
        logger.warning("Buffer filled up; bad compression ratio; not compacting");
        return null;
    } catch (Exception e) {
        logger.log(Level.SEVERE, "Compaction failed due to unknown exception", e);
        return null;
    }
    // get the raw compressed bytes
    ByteBuffer rawBytes = writer.getRawBytes();
    // limit how much data needs to be read from the buffer
    rawBytes.limit(rawBytes.position());
    // round the requested buffer length up to an even number of bytes
    int size = rawBytes.limit() + 1;
    if (size % 2 != 0) {
        size++;
    }
    rawBytes.rewind();
    // create buffer in measurement
    BufferObject newBuf = measurement.getMalloc().createNewBuffer(fieldId, tsBucket, size);
    logger.fine("Compacted buffer size:" + size + " vs " + total + " countp:" + listSize + " field:" + fieldId);
    LinkedByteString bufferId = newBuf.getBufferId();
    buf = newBuf.getBuf();
    writer = getWriterInstance(compactionClass);
    buf.put(rawBytes);
    writer.setBufferId(bufferId);
    writer.configure(buf, false, START_OFFSET);
    writer.makeReadOnly(false);
    writeLock.lock();
    try {
        if (functions != null) {
            for (Consumer<List<? extends Writer>> function : functions) {
                function.accept(writerList);
            }
        }
        size = listSize - 1;
        logger.finest("Compaction debug size differences size:" + size + " listSize:" + listSize + " curr:"
                + writerList.size());
        // remove the compacted writers, iterating backwards so indices stay valid
        for (int i = size; i >= 0; i--) {
            compactedWriter.add(writerList.remove(i));
        }
        // install the compacted writer at the head and renumber buffer indices
        writerList.add(0, writer);
        for (int i = 0; i < writerList.size(); i++) {
            writerList.get(i).getRawBytes().put(1, (byte) i);
        }
        logger.fine("Total points:" + compactedPoints + ", original pair count:" + writer.getReader().getCount()
                + " compression ratio:" + rawBytes.position() + " original:" + total + " newlistlength:"
                + writerList.size());
    } finally {
        writeLock.unlock();
    }
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx.stop();
    }
    return compactedWriter;
}
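The compactor writes a two-byte header, codec id at offset 0 and buffer index at offset 1 (see the put calls above and the renumbering loop at the end); a minimal decode sketch:
// Decode the header written during compaction: byte 0 is the codec id
// (CompressionFactory.getIdByValueClass), byte 1 is the buffer's index
// within the writer list, renumbered after every compaction.
ByteBuffer raw = writerList.get(0).getRawBytes();
int codecId = raw.get(0);
int bufferIndex = raw.get(1); // 0 for the freshly compacted head buffer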