use of com.srotya.sidewinder.core.storage.compression.ValueWriter in project sidewinder by srotya.
the class ValueField method loadBucketMap.
/**
 * Function to check and recover an existing bucket map, if one exists.
 *
 * @param measurement
 * @param bufferEntries
 * @throws IOException
 */
public void loadBucketMap(Measurement measurement, List<BufferObject> bufferEntries) throws IOException {
    logger.fine(() -> "Scanning buffer for:" + fieldId);
    for (BufferObject entry : bufferEntries) {
        ByteBuffer duplicate = entry.getBuf();
        duplicate.rewind();
        ByteBuffer slice = duplicate.slice();
        // first byte of every buffer stores the compression codec id
        int codecId = (int) slice.get();
        // int listIndex = (int) slice.get();
        Class<ValueWriter> classById = CompressionFactory.getValueClassById(codecId);
        ValueWriter writer = getWriterInstance(classById);
        if (entry.getBufferId() == null) {
            throw new IOException("Buffer id can't be read:" + " series:" + getFieldId());
        }
        LinkedByteString repairedBufferId = measurement.getMalloc().repairBufferId(fieldId, entry.getBufferId());
        logger.fine(() -> "Loading bucketmap:" + fieldId + "\t" + tsBucket + " bufferid:" + entry.getBufferId());
        writer.setBufferId(repairedBufferId);
        // false => load the data already present in the buffer instead of starting a new one
        writer.configure(slice, false, START_OFFSET);
        writerList.add(writer);
        logger.fine(() -> "Loaded bucketmap:" + fieldId + "\t" + " bufferid:" + entry.getBufferId());
    }
    sortBucketMap();
}
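The getWriterInstance helper called above is not shown on this page. A minimal sketch of what such a helper could look like, assuming every ValueWriter implementation exposes a public no-argument constructor; the exception handling is an illustration, not the project's actual code:

// Hypothetical helper (not shown on this page): reflectively instantiate the
// writer class that CompressionFactory resolved from the codec id byte.
private ValueWriter getWriterInstance(Class<? extends ValueWriter> clazz) throws IOException {
    try {
        // assumes the implementation has a public no-arg constructor
        return clazz.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
        throw new IOException("Unable to instantiate value writer:" + clazz.getName(), e);
    }
}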
use of com.srotya.sidewinder.core.storage.compression.ValueWriter in project sidewinder by srotya.
the class TestByzantineValueReadWrite method testWriteRead.
@Test
public void testWriteRead() throws IOException {
    ByteBuffer buf = ByteBuffer.allocateDirect(1024 * 100);
    ValueWriter writer = new ByzantineValueWriter();
    writer.configure(buf, true, startOffset);
    int LIMIT = 10000;
    for (long i = 0; i < LIMIT; i++) {
        writer.add(i);
    }
    System.out.println("Compression Ratio:" + writer.getCompressionRatio());
    Reader reader = writer.getReader();
    for (int i = 0; i < LIMIT; i++) {
        assertEquals(i, reader.read());
    }
    buf.rewind();
    writer = new ByzantineValueWriter();
    writer.configure(buf, true, startOffset);
    for (int i = 0; i < LIMIT; i++) {
        writer.add(i * 1.1);
    }
    reader = writer.getReader();
    for (int i = 0; i < LIMIT; i++) {
        assertEquals(i * 1.1, reader.readDouble(), startOffset);
    }
    buf.rewind();
    writer = new ByzantineValueWriter();
    writer.configure(buf, true, startOffset);
    for (int i = 0; i < LIMIT; i++) {
        writer.write(i);
    }
    reader = writer.getReader();
    assertEquals(LIMIT, reader.getCount());
    for (int i = 0; i < LIMIT; i++) {
        assertEquals(i, reader.read());
    }
}
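To connect this test with the recovery path in loadBucketMap above, here is a minimal sketch of a persist-and-reload round trip. It assumes the reload behaves the same way as in loadBucketMap and compact, where configure(buf, false, offset) reads back data already present in the buffer; the test name and value count are illustrative:

@Test
public void testPersistAndReload() throws IOException {
    ByteBuffer buf = ByteBuffer.allocateDirect(1024 * 100);
    ValueWriter out = new ByzantineValueWriter();
    out.configure(buf, true, startOffset); // true => start a new, empty buffer
    for (long i = 0; i < 1000; i++) {
        out.add(i);
    }
    buf.rewind();
    // hand the same buffer to a fresh writer in "load existing" mode, the way
    // ValueField.loadBucketMap recovers persisted buckets
    ValueWriter in = new ByzantineValueWriter();
    in.configure(buf, false, startOffset);
    Reader reader = in.getReader();
    assertEquals(1000, reader.getCount());
    for (int i = 0; i < 1000; i++) {
        assertEquals(i, reader.read());
    }
}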
use of com.srotya.sidewinder.core.storage.compression.ValueWriter in project sidewinder by srotya.
the class ValueField method createNewWriter.
private ValueWriter createNewWriter(Measurement measurement, int tsBucket, List<ValueWriter> list) throws IOException {
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx = timerCreateWriter.time();
    }
    BufferObject bufPair = measurement.getMalloc().createNewBuffer(fieldId, tsBucket);
    // first byte stores the compression codec id, second byte stores this writer's index in the list
    bufPair.getBuf().put((byte) CompressionFactory.getIdByValueClass(compressionClass));
    bufPair.getBuf().put((byte) list.size());
    ValueWriter writer = getWriterInstance(compressionClass);
    writer.setBufferId(bufPair.getBufferId());
    // true => initialize a fresh buffer, starting at START_OFFSET past the header bytes
    writer.configure(bufPair.getBuf(), true, START_OFFSET);
    list.add(writer);
    logger.fine(() -> "Created new writer for:" + tsBucket + " bucketInfo:" + bufPair.getBufferId());
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx.stop();
    }
    return writer;
}
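The two bytes written before configure form a small per-buffer header that loadBucketMap reads back on recovery. A short sketch of that layout, assuming START_OFFSET sits just past these two bytes (the constant's actual value is not shown on this page), with an illustrative codec id:

// Per-buffer header as written by createNewWriter (sketch):
ByteBuffer buf = ByteBuffer.allocateDirect(4096);   // stand-in for the buffer returned by malloc
buf.put((byte) 1);                                   // byte 0: compression codec id (illustrative value)
buf.put((byte) 0);                                   // byte 1: this writer's index in the field's writer list
// recovery (loadBucketMap) reads byte 0 back and resolves the writer class:
Class<ValueWriter> codec = CompressionFactory.getValueClassById(buf.get(0));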
use of com.srotya.sidewinder.core.storage.compression.ValueWriter in project sidewinder by srotya.
the class ValueField method compact.
/**
 * Compacts old writers into a single writer for each time bucket. This ensures the
 * buffers are compacted and provides an opportunity to use a higher-compression
 * algorithm for the bucket. All writers except the last one are read-only,
 * therefore performing operations on them does not impact ongoing writes.
 *
 * @param measurement
 * @param writeLock
 * @param functions
 * @return null if there is nothing to compact, or an empty list if all compaction
 *         attempts fail
 * @throws IOException
 */
@SafeVarargs
public final List<Writer> compact(Measurement measurement, Lock writeLock, Consumer<List<? extends Writer>>... functions) throws IOException {
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx = timerCompaction.time();
    }
    // size check is to avoid unnecessary calls and exit fast
    if (writerList.size() <= 1) {
        return null;
    }
    List<Writer> compactedWriter = new ArrayList<>();
    int id = CompressionFactory.getIdByValueClass(compactionClass);
    int listSize = writerList.size() - 1;
    int pointCount = writerList.subList(0, listSize).stream().mapToInt(s -> s.getCount()).sum();
    int total = writerList.subList(0, listSize).stream().mapToInt(s -> s.getPosition()).sum();
    if (total == 0) {
        logger.warning("Ignoring bucket for compaction, not enough bytes. THIS BUG SHOULD BE INVESTIGATED");
        return null;
    }
    ValueWriter writer = getWriterInstance(compactionClass);
    int compactedPoints = 0;
    double bufSize = total * compactionRatio;
    logger.finer("Allocating buffer:" + total + " Vs. " + pointCount * 16 + " max compacted buffer:" + bufSize);
    logger.finer("Getting sublist from:" + 0 + " to:" + (writerList.size() - 1));
    ByteBuffer buf = ByteBuffer.allocateDirect((int) bufSize);
    // first header byte stores the compaction codec id
    buf.put((byte) id);
    // second header byte stores the list index; since this buffer will be the first one
    buf.put(1, (byte) 0);
    writer.configure(buf, true, START_OFFSET);
    ValueWriter input = writerList.get(0);
    // read all but the last writer and insert into the new temp writer
    try {
        for (int i = 0; i < writerList.size() - 1; i++) {
            input = writerList.get(i);
            Reader reader = input.getReader();
            for (int k = 0; k < reader.getCount(); k++) {
                long pair = reader.read();
                writer.add(pair);
                compactedPoints++;
            }
        }
        writer.makeReadOnly(false);
    } catch (RollOverException e) {
        logger.warning("Buffer filled up; bad compression ratio; not compacting");
        return null;
    } catch (Exception e) {
        logger.log(Level.SEVERE, "Compaction failed due to unknown exception", e);
        return null;
    }
    // get the raw compressed bytes
    ByteBuffer rawBytes = writer.getRawBytes();
    // limit how much data needs to be read from the buffer
    rawBytes.limit(rawBytes.position());
    // round the requested buffer length up to an even number of bytes
    int size = rawBytes.limit() + 1;
    if (size % 2 != 0) {
        size++;
    }
    rawBytes.rewind();
    // create buffer in measurement
    BufferObject newBuf = measurement.getMalloc().createNewBuffer(fieldId, tsBucket, size);
    logger.fine("Compacted buffer size:" + size + " vs " + total + " countp:" + listSize + " field:" + fieldId);
    LinkedByteString bufferId = newBuf.getBufferId();
    buf = newBuf.getBuf();
    writer = getWriterInstance(compactionClass);
    buf.put(rawBytes);
    writer.setBufferId(bufferId);
    writer.configure(buf, false, START_OFFSET);
    writer.makeReadOnly(false);
    writeLock.lock();
    if (functions != null) {
        for (Consumer<List<? extends Writer>> function : functions) {
            function.accept(writerList);
        }
    }
    size = listSize - 1;
    logger.finest("Compaction debug size differences size:" + size + " listSize:" + listSize + " curr:" + writerList.size());
    // remove the writers that were just compacted and return them to the caller
    for (int i = size; i >= 0; i--) {
        compactedWriter.add(writerList.remove(i));
    }
    writerList.add(0, writer);
    // rewrite the list-index header byte of every remaining writer
    for (int i = 0; i < writerList.size(); i++) {
        writerList.get(i).getRawBytes().put(1, (byte) i);
    }
    logger.fine("Total points:" + compactedPoints + ", original pair count:" + writer.getReader().getCount() + " compression ratio:" + rawBytes.position() + " original:" + total + " newlistlength:" + writerList.size());
    writeLock.unlock();
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx.stop();
    }
    return compactedWriter;
}
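A hedged usage sketch of how a caller might invoke compact, using the varargs consumers to capture the pre-swap writer list under the write lock; the valueField instance, lock, and cleanup step are illustrative assumptions, not code from the project:

// Hypothetical caller (illustrative only): run compaction and capture the
// writers that are about to be replaced so their buffers can be reclaimed.
List<Writer> oldWriters = new ArrayList<>();
List<Writer> compacted = valueField.compact(measurement, writeLock, snapshot -> oldWriters.addAll(snapshot));
if (compacted == null) {
    // nothing to compact (single writer), or compaction was skipped/failed
} else {
    // compacted holds the writers removed from the list; their underlying
    // buffers can now be released by the storage layer
}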
use of com.srotya.sidewinder.core.storage.compression.ValueWriter in project sidewinder by srotya.
the class ValueField method getOrCreateValueWriter.
private ValueWriter getOrCreateValueWriter(Measurement measurement) throws IOException {
    ValueWriter ans = null;
    if (writerList.isEmpty()) {
        ans = createNewWriter(measurement, tsBucket, writerList);
    } else {
        ans = writerList.get(writerList.size() - 1);
    }
    if (ans.isFull()) {
        final ValueWriter ansTmp = ans;
        logger.fine(() -> "Requesting new writer for:" + fieldId + " bucketcount:" + writerList.size() + " pos:" + ansTmp.getPosition());
        ans = createNewWriter(measurement, tsBucket, writerList);
    }
    if (StorageEngine.ENABLE_METHOD_METRICS) {
    }
    return ans;
}
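The public write path of ValueField is not included on this page. A hypothetical sketch of how it might use getOrCreateValueWriter; the method name addValue is an illustration, not the project's actual entry point:

// Hypothetical caller (illustrative only): append one value to the field.
public void addValue(Measurement measurement, long value) throws IOException {
    // returns the last writer for this field, creating a new buffer-backed
    // writer when the list is empty or the current writer is full
    ValueWriter writer = getOrCreateValueWriter(measurement);
    writer.add(value);
}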