Use of com.srotya.sidewinder.core.storage.compression.RollOverException in project sidewinder by srotya.
From the class TestByzantineReadWrite, method testBufferFull.
@Test
public void testBufferFull() throws IOException {
    ByteBuffer buf = ByteBuffer.allocateDirect(1024 * 1024);
    ByzantineWriter writer = new ByzantineWriter();
    writer.setTsBucket("asdasdasd");
    assertEquals("asdasdasd", writer.getTsBucket());
    writer.configure(new HashMap<>(), buf, true, startOffset, true);
    long ots = System.currentTimeMillis();
    writer.setHeaderTimestamp(ots);
    assertEquals(ots, writer.getHeaderTimestamp());
    int limit = 1_000_000;
    try {
        for (int i = 0; i < limit; i++) {
            writer.addValue(ots + i * 1000, i);
        }
        fail("Must fill up buffer");
    } catch (RollOverException e) {
        // expected: the 1 MB buffer fills up before all points are written
    }
    assertTrue(writer.isFull());
}
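A minimal sketch (not from the project) of how calling code might react to a RollOverException: freeze the full writer and continue in a fresh one. Only the ByzantineWriter calls that appear in the test above are taken from sidewinder; the buffer size, start offset, import paths, and the helper names RollOverSketch, writePoints, and newWriter are illustrative assumptions.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;

import com.srotya.sidewinder.core.storage.compression.RollOverException;
import com.srotya.sidewinder.core.storage.compression.byzantine.ByzantineWriter;

public class RollOverSketch {

    // Writes points and, when the active buffer fills up, rolls over to a new writer.
    public static ByzantineWriter writePoints(long[] timestamps, long[] values) throws IOException {
        ByzantineWriter writer = newWriter(timestamps[0]);
        for (int i = 0; i < timestamps.length; i++) {
            try {
                writer.addValue(timestamps[i], values[i]);
            } catch (RollOverException e) {
                // the current buffer is full; make it read-only and start a new one
                writer.makeReadOnly();
                writer = newWriter(timestamps[i]);
                writer.addValue(timestamps[i], values[i]);
            }
        }
        return writer;
    }

    private static ByzantineWriter newWriter(long headerTs) throws IOException {
        ByzantineWriter writer = new ByzantineWriter();
        // 4 KB heap buffer and start offset 1 are assumptions made for this sketch
        writer.configure(new HashMap<>(), ByteBuffer.allocate(4096), true, 1, true);
        writer.setHeaderTimestamp(headerTs);
        return writer;
    }
}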
Use of com.srotya.sidewinder.core.storage.compression.RollOverException in project sidewinder by srotya.
From the class TimeSeries, method compact.
/**
 * Compacts old Writers into one for every time bucket. This ensures the
 * buffers are compacted and provides an opportunity to use a higher
 * compression rate algorithm for the bucket. All Writers but the last are
 * read-only, therefore performing operations on them does not impact
 * ongoing writes to the series.
 *
 * @param functions callbacks invoked with the bucket's writer list before it is replaced
 * @return null if there is nothing to compact, or an empty list if all
 *         compaction attempts fail
 * @throws IOException
 */
@SafeVarargs
public final List<Writer> compact(Consumer<List<Writer>>... functions) throws IOException {
    // size check is to avoid unnecessary calls and exit fast
    if (compactionCandidateSet.isEmpty()) {
        return null;
    }
    List<Writer> compactedWriter = new ArrayList<>();
    Iterator<Entry<String, List<Writer>>> iterator = compactionCandidateSet.entrySet().iterator();
    int id = CompressionFactory.getIdByClass(compactionClass);
    while (iterator.hasNext()) {
        // entry.getKey() gives the tsBucket string
        Entry<String, List<Writer>> entry = iterator.next();
        // remove this entry from the compaction set
        iterator.remove();
        List<Writer> list = entry.getValue();
        int listSize = list.size() - 1;
        int pointCount = list.subList(0, listSize).stream().mapToInt(s -> s.getCount()).sum();
        int total = list.subList(0, listSize).stream().mapToInt(s -> s.getPosition()).sum();
        if (total == 0) {
            logger.warning("Ignoring bucket for compaction, not enough bytes. THIS BUG SHOULD BE INVESTIGATED");
            continue;
        }
        Writer writer = getWriterInstance(compactionClass);
        int compactedPoints = 0;
        double bufSize = total * compactionRatio;
        logger.finer("Allocating buffer:" + total + " Vs. " + pointCount * 16 + " max compacted buffer:" + bufSize);
        logger.finer("Getting sublist from:" + 0 + " to:" + (list.size() - 1));
        ByteBuffer buf = ByteBuffer.allocate((int) bufSize);
        // first byte identifies the compression codec
        buf.put((byte) id);
        // since this buffer will be the first one in the bucket
        buf.put(1, (byte) 0);
        writer.configure(conf, buf, true, START_OFFSET, false);
        Writer input = list.get(0);
        // read the header timestamp
        long timestamp = input.getHeaderTimestamp();
        writer.setHeaderTimestamp(timestamp);
        // read all but the last writer and insert into the new temporary writer
        try {
            for (int i = 0; i < list.size() - 1; i++) {
                input = list.get(i);
                Reader reader = input.getReader();
                for (int k = 0; k < reader.getPairCount(); k++) {
                    long[] pair = reader.read();
                    writer.addValue(pair[0], pair[1]);
                    compactedPoints++;
                }
            }
            writer.makeReadOnly();
        } catch (RollOverException e) {
            logger.warning("Buffer filled up; bad compression ratio; not compacting");
            continue;
        } catch (Exception e) {
            logger.log(Level.SEVERE, "Compaction failed due to unknown exception", e);
        }
        // get the raw compressed bytes
        ByteBuffer rawBytes = writer.getRawBytes();
        // limit how much data needs to be read from the buffer
        rawBytes.limit(rawBytes.position());
        // request at least one extra byte and round the size up to an even number
        int size = rawBytes.limit() + 1;
        if (size % 2 != 0) {
            size++;
        }
        rawBytes.rewind();
        // create a buffer in the measurement
        BufferObject newBuf = measurement.getMalloc().createNewBuffer(seriesId, entry.getKey(), size);
        logger.fine("Compacted buffer size:" + size + " vs " + total);
        String bufferId = newBuf.getBufferId();
        buf = newBuf.getBuf();
        writer = getWriterInstance(compactionClass);
        buf.put(rawBytes);
        writer.setBufferId(bufferId);
        writer.configure(conf, buf, false, START_OFFSET, false);
        writer.makeReadOnly();
        synchronized (list) {
            if (functions != null) {
                for (Consumer<List<Writer>> function : functions) {
                    function.accept(list);
                }
            }
            size = listSize - 1;
            logger.finest("Compaction debug size differences size:" + size + " listSize:" + listSize + " curr:"
                    + list.size());
            // remove the old writers that were just compacted
            for (int i = size; i >= 0; i--) {
                compactedWriter.add(list.remove(i));
            }
            list.add(0, writer);
            // renumber the buffer index byte of every writer in the bucket
            for (int i = 0; i < list.size(); i++) {
                list.get(i).getRawBytes().put(1, (byte) i);
            }
            // fix bucket count
            bucketCount -= size;
            logger.fine("Total points:" + compactedPoints + ", original pair count:" + writer.getReader().getPairCount()
                    + " compression ratio:" + rawBytes.position() + " original:" + total);
        }
    }
    return compactedWriter;
}
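A short usage sketch, assuming a TimeSeries instance (series) and a java.util.logging.Logger (logger) are available in the calling scope; the callback passed to compact() is purely illustrative. The consumer runs while the bucket's writer list is locked, so it can observe the pre-compaction writers before they are swapped out.

List<Writer> removed = series.compact(list ->
        logger.fine("About to compact " + list.size() + " writers in this bucket"));
if (removed == null) {
    // nothing was eligible for compaction
    logger.fine("No compaction candidates");
} else {
    logger.info("Compaction released " + removed.size() + " old writers");
}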