use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
the class TimeSeries method createNewWriter.
private Writer createNewWriter(long timestamp, String tsBucket, List<Writer> list) throws IOException {
    BufferObject bufPair = measurement.getMalloc().createNewBuffer(seriesId, tsBucket);
    bufPair.getBuf().put((byte) CompressionFactory.getIdByClass(compressionClass));
    bufPair.getBuf().put((byte) list.size());
    Writer writer = getWriterInstance(compressionClass);
    writer.setBufferId(bufPair.getBufferId());
    // first byte is used to store the compression codec type (second byte stores the writer index), hence START_OFFSET
    writer.configure(conf, bufPair.getBuf(), true, START_OFFSET, true);
    writer.setHeaderTimestamp(timestamp);
    list.add(writer);
    bucketCount++;
    logger.fine(() -> "Created new writer for:" + tsBucket + " timestamp:" + timestamp + " bucketInfo:" + bufPair.getBufferId());
    return writer;
}
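The two put() calls above give every new buffer a small two-byte header: byte 0 holds the compression codec id and byte 1 holds the writer's position within the bucket, which is why the writer itself is configured to start at START_OFFSET rather than 0. A minimal, self-contained sketch of reading that header back (the CODEC_NAMES table below is purely illustrative; the real id mapping lives in CompressionFactory):

import java.nio.ByteBuffer;

public class BufferHeaderSketch {

    // Illustrative codec names only; sidewinder resolves ids through CompressionFactory.
    private static final String[] CODEC_NAMES = { "byzantine", "gorilla" };

    /** Decodes the two header bytes that createNewWriter writes at the start of a buffer. */
    static String describeHeader(ByteBuffer buf) {
        int codecId = buf.get(0);     // written via CompressionFactory.getIdByClass(compressionClass)
        int writerIndex = buf.get(1); // written via (byte) list.size()
        return "codec=" + CODEC_NAMES[codecId] + " writerIndex=" + writerIndex;
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put((byte) 0); // codec id
        buf.put((byte) 2); // this would be the third writer in the bucket
        System.out.println(describeHeader(buf));
    }
}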
use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
the class TimeField method compact.
/**
 * Compacts old Writers into a single Writer for each time bucket. This ensures the
 * underlying buffers are compacted and provides an opportunity to use a
 * higher-compression algorithm for the bucket. All Writers but the last are
 * read-only, therefore performing operations on them does not impact ongoing writes.
 *
 * @param functions
 *            optional functions to execute BEFORE cleaning up the list
 * @return null if there is nothing to compact, or an empty list if all compaction
 *         attempts fail
 * @throws IOException
 */
@SafeVarargs
public final List<Writer> compact(Measurement measurement, Lock writeLock, Consumer<List<? extends Writer>>... functions) throws IOException {
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx = timerCompaction.time();
    }
    // size check is to avoid unnecessary calls and exit fast
    if (writerList.size() <= 1) {
        return null;
    }
    List<Writer> compactedWriter = new ArrayList<>();
    int id = CompressionFactory.getIdByTimeClass(compactionClass);
    List<TimeWriter> list = writerList;
    int listSize = list.size() - 1;
    int pointCount = list.subList(0, listSize).stream().mapToInt(s -> s.getCount()).sum();
    int total = list.subList(0, listSize).stream().mapToInt(s -> s.getPosition()).sum();
    if (total == 0) {
        logger.warning("Ignoring bucket for compaction, not enough bytes. THIS BUG SHOULD BE INVESTIGATED");
        return null;
    }
    TimeWriter writer = getWriterInstance(compactionClass);
    int compactedPoints = 0;
    double bufSize = total * compactionRatio;
    logger.finer("Allocating buffer:" + total + " Vs. " + pointCount * 16 + " max compacted buffer:" + bufSize);
    logger.finer("Getting sublist from:" + 0 + " to:" + (list.size() - 1));
    ByteBuffer buf = ByteBuffer.allocateDirect((int) bufSize);
    buf.put((byte) id);
    // since this buffer will be the first one
    buf.put(1, (byte) 0);
    writer.configure(buf, true, START_OFFSET);
    TimeWriter input = list.get(0);
    // read the header timestamp
    writer.setHeaderTimestamp(input.getHeaderTimestamp());
    // read all but the last writer and insert into new temp writer
    try {
        for (int i = 0; i < list.size() - 1; i++) {
            input = list.get(i);
            Reader reader = input.getReader();
            for (int k = 0; k < reader.getCount(); k++) {
                long pair = reader.read();
                writer.add(pair);
                compactedPoints++;
            }
        }
        writer.makeReadOnly(false);
    } catch (RollOverException e) {
        logger.warning("Buffer filled up; bad compression ratio; not compacting");
        return null;
    } catch (Exception e) {
        logger.log(Level.SEVERE, "Compaction failed due to unknown exception", e);
        return null;
    }
    // get the raw compressed bytes
    ByteBuffer rawBytes = writer.getRawBytes();
    // limit how much data needs to be read from the buffer
    rawBytes.limit(rawBytes.position());
    // round the requested buffer length up to an even number of bytes
    int size = rawBytes.limit() + 1;
    if (size % 2 != 0) {
        size++;
    }
    rawBytes.rewind();
    // create buffer in measurement
    BufferObject newBuf = measurement.getMalloc().createNewBuffer(fieldId, tsBucket, size);
    logger.fine("Compacted buffer size:" + size + " vs " + total);
    LinkedByteString bufferId = newBuf.getBufferId();
    buf = newBuf.getBuf();
    writer = getWriterInstance(compactionClass);
    buf.put(rawBytes);
    writer.setBufferId(bufferId);
    writer.configure(buf, false, START_OFFSET);
    writer.makeReadOnly(false);
    writeLock.lock();
    if (functions != null) {
        for (Consumer<List<? extends Writer>> function : functions) {
            function.accept(list);
        }
    }
    size = listSize - 1;
    logger.finest("Compaction debug size differences size:" + size + " listSize:" + listSize + " curr:" + list.size());
    for (int i = size; i >= 0; i--) {
        compactedWriter.add(list.remove(i));
    }
    list.add(0, writer);
    for (int i = 0; i < list.size(); i++) {
        list.get(i).getRawBytes().put(1, (byte) i);
    }
    logger.fine("Total points:" + compactedPoints + ", original pair count:" + writer.getReader().getCount() + " compression ratio:" + rawBytes.position() + " original:" + total);
    writeLock.unlock();
    if (StorageEngine.ENABLE_METHOD_METRICS) {
        // ctx.stop();
    }
    return compactedWriter;
}
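A caller typically uses the returned list to reclaim the buffers of the writers that were merged away, and may pass a consumer that runs under the write lock just before the writer list is rewritten. A minimal usage sketch (field, writeLock, measurement and logger are illustrative local names, not sidewinder APIs):

List<String> staleBufferIds = new ArrayList<>();
List<Writer> removed = field.compact(measurement, writeLock,
        writers -> logger.fine("Compacting bucket with " + writers.size() + " writers"));
if (removed != null) {
    // the removed writers were merged into the new compacted writer; their old buffers can go
    for (Writer w : removed) {
        staleBufferIds.add(w.getBufferId().toString());
    }
}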
use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
the class Measurement method runCleanupOperation.
public default Set<String> runCleanupOperation(String operation, java.util.function.Function<Series, List<Writer>> op) throws IOException {
    Set<String> cleanupList = new HashSet<>();
    getLock().lock();
    try {
        List<Series> seriesList = getSeriesList();
        Set<String> temp = new HashSet<>();
        for (int i = 0; i < seriesList.size(); i++) {
            Series entry = seriesList.get(i);
            try {
                List<Writer> list = op.apply(entry);
                if (list == null) {
                    continue;
                }
                for (Writer timeSeriesBucket : list) {
                    if (getMetricsCleanupBufferCounter() != null) {
                        getMetricsCleanupBufferCounter().inc();
                    }
                    String buf = timeSeriesBucket.getBufferId().toString();
                    temp.add(buf);
                    cleanupList.add(buf);
                    getLogger().fine("Adding buffer to cleanup " + operation + " for bucket:" + entry.getSeriesId() + " Offset:" + timeSeriesBucket.currentOffset());
                }
                getLogger().fine("Buffers " + operation + " for time series:" + entry.getSeriesId());
                if (i % 100 == 0) {
                    if (temp.size() > 0) {
                        getMalloc().cleanupBufferIds(temp);
                        temp = new HashSet<>();
                    }
                }
            } catch (Exception e) {
                getLogger().log(Level.SEVERE, "Error collecting " + operation, e);
            }
        }
        // cleanup these buffer ids
        if (cleanupList.size() > 0) {
            getLogger().info("For measurement:" + getMeasurementName() + " cleaned=" + cleanupList.size() + " buffers");
        }
        getMalloc().cleanupBufferIds(cleanupList);
    } finally {
        getLock().unlock();
    }
    return cleanupList;
}
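runCleanupOperation is the generic driver: the supplied function does the per-series work and returns the writers whose buffers can be reclaimed (or null when there is nothing to do), while the method batches the resulting buffer ids into the allocator's cleanupBufferIds call. A hedged sketch of wiring a compaction pass through it from inside a Measurement implementation, with compactSeries standing in as a hypothetical per-series helper:

// Sketch only: compactSeries is a hypothetical helper, not a sidewinder API.
Set<String> reclaimed = runCleanupOperation("compaction", series -> {
    // return the writers whose buffers are no longer needed, or null to skip this series
    return compactSeries(series);
});
getLogger().info("Cleanup pass reclaimed " + reclaimed.size() + " buffers");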
use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
the class TestByzantineValueReadWrite method testReadWriteDataPoints.
@Test
public void testReadWriteDataPoints() throws IOException {
    ByteBuffer buf = ByteBuffer.allocateDirect(1024);
    Writer writer = new ByzantineValueWriter();
    writer.configure(buf, true, startOffset);
    for (long i = 0; i < 100; i++) {
        writer.add(i);
    }
    Reader reader = writer.getReader();
    for (int i = 0; i < 100; i++) {
        assertEquals(i, reader.read());
    }
    for (long i = 0; i < 100; i++) {
        writer.write(i);
    }
    reader = writer.getReader();
    for (int i = 0; i < 200; i++) {
        assertEquals(i % 100, reader.read());
    }
    System.out.println("Compression Ratio:" + writer.getCompressionRatio());
}
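The ratio printed at the end comes from the writer itself; the same figure can be sanity-checked by comparing the bytes the buffer has actually consumed against the 8 bytes a raw long would need per point. A small illustrative check (assuming getRawBytes() exposes the backing buffer for value writers the way it does for time writers above) could be appended to the test:

ByteBuffer raw = writer.getRawBytes();
int usedBytes = raw.position();               // bytes consumed so far, including the small header
double naiveRatio = (200 * Long.BYTES) / (double) usedBytes; // 200 points at 8 raw bytes each
System.out.println("Approximate compression ratio: " + naiveRatio);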
use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
the class WriterServiceImpl method writeSeriesPoint.
@Override
public void writeSeriesPoint(RawTimeSeriesBucket request, StreamObserver<Ack> responseObserver) {
    Ack ack;
    try {
        TimeSeries series = engine.getOrCreateTimeSeries(request.getDbName(), request.getMeasurementName(), request.getValueFieldName(), new ArrayList<>(request.getTagsList()), request.getBucketSize(), request.getFp());
        for (Bucket bucket : request.getBucketsList()) {
            Writer writer = series.getOrCreateSeriesBucket(TimeUnit.MILLISECONDS, bucket.getHeaderTimestamp());
            writer.configure(conf, null, false, 1, true);
            writer.setCounter(bucket.getCount());
            writer.bootstrap(bucket.getData().asReadOnlyByteBuffer());
        }
        ack = Ack.newBuilder().setMessageId(request.getMessageId()).setResponseCode(200).build();
    } catch (Exception e) {
        ack = Ack.newBuilder().setMessageId(request.getMessageId()).setResponseCode(500).build();
    }
    responseObserver.onNext(ack);
    responseObserver.onCompleted();
}