Use of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TestGorillaCompression, method testRecovery.
@Test
public void testRecovery() throws IOException {
    ByteBuffer buf = ByteBuffer.allocate(1024);
    GorillaWriter writer = new GorillaWriter();
    writer.configure(new HashMap<>(), buf, true, 0, false);
    long ts = System.currentTimeMillis();
    writer.setHeaderTimestamp(ts);
    for (int i = 0; i < 100; i++) {
        writer.addValue(ts + i * 100, i * 1.1);
    }
    writer.makeReadOnly();
    ByteBuffer rawBytes = writer.getRawBytes();
    writer = new GorillaWriter();
    writer.configure(new HashMap<>(), rawBytes, false, 0, false);
    Reader reader = writer.getReader();
    assertEquals(100, reader.getPairCount());
    for (int i = 0; i < 100; i++) {
        DataPoint pair = reader.readPair();
        assertEquals(ts + i * 100, pair.getTimestamp());
        assertEquals(i * 1.1, Double.longBitsToDouble(pair.getLongValue()), 0.01);
    }
}
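Distilling the recovery step from this test: a new GorillaWriter configured over existing raw bytes with the third configure() argument set to false appears to load the stored data instead of initializing a fresh buffer, after which getReader() exposes the pairs. The sketch below is built only from the calls shown in the test; it reuses the rawBytes variable from above and decodes values with Double.longBitsToDouble, as the test does.

    // Sketch only: the recovery path in isolation, reusing rawBytes from the test above.
    GorillaWriter recovered = new GorillaWriter();
    recovered.configure(new HashMap<>(), rawBytes, false, 0, false);
    Reader recoveredReader = recovered.getReader();
    for (int i = 0; i < recoveredReader.getPairCount(); i++) {
        DataPoint pair = recoveredReader.readPair();
        // values come back as raw long bits, so doubles must be decoded explicitly
        System.out.println(pair.getTimestamp() + " -> " + Double.longBitsToDouble(pair.getLongValue()));
    }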
Use of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TestPersistentMeasurement, method testMeasurementRecovery.
@Test
public void testMeasurementRecovery() throws IOException {
    MiscUtils.delete(new File("target/db141/"));
    PersistentMeasurement m = new PersistentMeasurement();
    m.configure(conf, engine, DBNAME, "m1", "target/db141/index", "target/db141/data", metadata, bgTaskPool);
    TimeSeries ts = m.getOrCreateTimeSeries("vf1", Arrays.asList("t=1", "t=2"), 4096, false, conf);
    long t = System.currentTimeMillis();
    for (int i = 0; i < 100; i++) {
        ts.addDataPoint(TimeUnit.MILLISECONDS, t + i * 1000, i);
    }
    List<DataPoint> dps = ts.queryDataPoints("vf1", t, t + 1000 * 100, null);
    assertEquals(100, dps.size());
    for (int i = 0; i < 100; i++) {
        DataPoint dp = dps.get(i);
        assertEquals(t + i * 1000, dp.getTimestamp());
        assertEquals(i, dp.getLongValue());
    }
    List<Series> resultMap = new ArrayList<>();
    m.queryDataPoints("vf1", t, t + 1000 * 100, null, null, resultMap);
    assertEquals(1, resultMap.size());
    Series next = resultMap.iterator().next();
    for (int i = 0; i < next.getDataPoints().size(); i++) {
        DataPoint dp = next.getDataPoints().get(i);
        assertEquals(t + i * 1000, dp.getTimestamp());
        assertEquals(i, dp.getLongValue());
    }
    LinkedHashMap<Reader, Boolean> readers = new LinkedHashMap<>();
    m.queryReaders("vf1", t, t + 1000 * 100, readers);
    for (Reader reader : readers.keySet()) {
        assertEquals(100, reader.getPairCount());
    }
    m.close();
}
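The assertions above only check reader.getPairCount(). As a sketch, here is a hypothetical helper (not part of the project) that drains the readers returned by queryReaders into DataPoints, reusing the getPairCount()/readPair() pattern from the Gorilla test earlier on this page:

    // Hypothetical helper: flatten all Readers produced by queryReaders into DataPoints.
    private static List<DataPoint> drainReaders(LinkedHashMap<Reader, Boolean> readers) throws IOException {
        List<DataPoint> points = new ArrayList<>();
        for (Reader reader : readers.keySet()) {
            for (int i = 0; i < reader.getPairCount(); i++) {
                points.add(reader.readPair());
            }
        }
        return points;
    }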
Use of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TestTimeSeries, method testCompaction.
@Test
public void testCompaction() throws IOException {
    DBMetadata metadata = new DBMetadata(28);
    MockMeasurement measurement = new MockMeasurement(1024);
    HashMap<String, String> conf = new HashMap<>();
    conf.put("default.bucket.size", "409600");
    conf.put("compaction.enabled", "true");
    conf.put("use.query.pool", "false");
    conf.put("compaction.ratio", "1.1");
    final TimeSeries series = new TimeSeries(measurement, "byzantine", "byzantine", "asdasasd", 409600, metadata, true, conf);
    final long curr = 1497720652566L;
    String valueFieldName = "value";
    for (int i = 1; i <= 10000; i++) {
        series.addDataPoint(TimeUnit.MILLISECONDS, curr + i * 1000, i * 1.1);
    }
    long ts = System.nanoTime();
    List<DataPoint> dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
    ts = System.nanoTime() - ts;
    System.out.println("Before compaction:" + ts / 1000 + "us");
    assertEquals(10000, dataPoints.size());
    for (int i = 1; i <= 10000; i++) {
        DataPoint dp = dataPoints.get(i - 1);
        assertEquals("Bad ts:" + i, curr + i * 1000, dp.getTimestamp());
        assertEquals(dp.getValue(), i * 1.1, 0.001);
    }
    SortedMap<String, List<Writer>> bucketRawMap = series.getBucketRawMap();
    assertEquals(1, bucketRawMap.size());
    int size = bucketRawMap.values().iterator().next().size();
    assertTrue(series.getCompactionSet().size() < size);
    assertTrue(size > 2);
    series.compact();
    ts = System.nanoTime();
    dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
    ts = System.nanoTime() - ts;
    System.out.println("After compaction:" + ts / 1000 + "us");
    bucketRawMap = series.getBucketRawMap();
    assertEquals(2, bucketRawMap.values().iterator().next().size());
    int count = 0;
    for (List<Writer> list : bucketRawMap.values()) {
        for (Writer writer : list) {
            Reader reader = writer.getReader();
            count += reader.getPairCount();
        }
    }
    assertEquals(10000, count);
    assertEquals(10000, dataPoints.size());
    for (int i = 1; i <= 10000; i++) {
        DataPoint dp = dataPoints.get(i - 1);
        assertEquals("Bad ts:" + i, curr + i * 1000, dp.getTimestamp());
        assertEquals(dp.getValue(), i * 1.1, 0.001);
    }
}
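For reference, these are the configuration keys the test flips to exercise compaction. The comments are interpretations: the note on compaction.ratio is grounded in the compact() implementation below (it scales the compacted buffer size), while the other notes are assumptions drawn from how the test uses the keys.

    HashMap<String, String> conf = new HashMap<>();
    conf.put("default.bucket.size", "409600"); // bucket span, matching the series bucket size used above
    conf.put("compaction.enabled", "true");    // assumed: required for compaction candidates to accumulate
    conf.put("compaction.ratio", "1.1");       // compacted buffer is sized as total bytes * ratio (see compact() below)
    conf.put("use.query.pool", "false");       // assumed: disables the query thread pool for the test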
Use of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TimeSeries, method compact.
/**
 * Compacts old Writers into a single Writer for each time bucket. This ensures the
 * underlying buffers are compacted and also provides an opportunity to use a
 * higher-compression algorithm for the bucket. All Writers except the last one are
 * read-only, so operating on them does not affect ongoing writes to the series.
 *
 * @param functions optional callbacks invoked with the bucket's writer list (under
 *                  its lock) before the old writers are replaced; see the usage
 *                  sketch after this method
 * @return null if there is nothing to compact, or an empty list if all compaction
 *         attempts fail
 * @throws IOException
 */
@SafeVarargs
public final List<Writer> compact(Consumer<List<Writer>>... functions) throws IOException {
    // size check is to avoid unnecessary calls and exit fast
    if (compactionCandidateSet.isEmpty()) {
        return null;
    }
    List<Writer> compactedWriter = new ArrayList<>();
    Iterator<Entry<String, List<Writer>>> iterator = compactionCandidateSet.entrySet().iterator();
    int id = CompressionFactory.getIdByClass(compactionClass);
    while (iterator.hasNext()) {
        // entry.getKey() gives tsBucket string
        Entry<String, List<Writer>> entry = iterator.next();
        // remove this entry from compaction set
        iterator.remove();
        List<Writer> list = entry.getValue();
        int listSize = list.size() - 1;
        int pointCount = list.subList(0, listSize).stream().mapToInt(s -> s.getCount()).sum();
        int total = list.subList(0, listSize).stream().mapToInt(s -> s.getPosition()).sum();
        if (total == 0) {
            logger.warning("Ignoring bucket for compaction, not enough bytes. THIS BUG SHOULD BE INVESTIGATED");
            continue;
        }
        Writer writer = getWriterInstance(compactionClass);
        int compactedPoints = 0;
        double bufSize = total * compactionRatio;
        logger.finer("Allocating buffer:" + total + " Vs. " + pointCount * 16 + " max compacted buffer:" + bufSize);
        logger.finer("Getting sublist from:" + 0 + " to:" + (list.size() - 1));
        ByteBuffer buf = ByteBuffer.allocate((int) bufSize);
        buf.put((byte) id);
        // since this buffer will be the first one
        buf.put(1, (byte) 0);
        writer.configure(conf, buf, true, START_OFFSET, false);
        Writer input = list.get(0);
        // read the header timestamp
        long timestamp = input.getHeaderTimestamp();
        writer.setHeaderTimestamp(timestamp);
        // read all but the last writer and insert into new temp writer
        try {
            for (int i = 0; i < list.size() - 1; i++) {
                input = list.get(i);
                Reader reader = input.getReader();
                for (int k = 0; k < reader.getPairCount(); k++) {
                    long[] pair = reader.read();
                    writer.addValue(pair[0], pair[1]);
                    compactedPoints++;
                }
            }
            writer.makeReadOnly();
        } catch (RollOverException e) {
            logger.warning("Buffer filled up; bad compression ratio; not compacting");
            continue;
        } catch (Exception e) {
            logger.log(Level.SEVERE, "Compaction failed due to unknown exception", e);
        }
        // get the raw compressed bytes
        ByteBuffer rawBytes = writer.getRawBytes();
        // limit how much data needs to be read from the buffer
        rawBytes.limit(rawBytes.position());
        // round the requested buffer length up to a multiple of 2
        int size = rawBytes.limit() + 1;
        if (size % 2 != 0) {
            size++;
        }
        rawBytes.rewind();
        // create buffer in measurement
        BufferObject newBuf = measurement.getMalloc().createNewBuffer(seriesId, entry.getKey(), size);
        logger.fine("Compacted buffer size:" + size + " vs " + total);
        String bufferId = newBuf.getBufferId();
        buf = newBuf.getBuf();
        writer = getWriterInstance(compactionClass);
        buf.put(rawBytes);
        writer.setBufferId(bufferId);
        writer.configure(conf, buf, false, START_OFFSET, false);
        writer.makeReadOnly();
        synchronized (list) {
            if (functions != null) {
                for (Consumer<List<Writer>> function : functions) {
                    function.accept(list);
                }
            }
            size = listSize - 1;
            logger.finest("Compaction debug size differences size:" + size + " listSize:" + listSize + " curr:" + list.size());
            for (int i = size; i >= 0; i--) {
                compactedWriter.add(list.remove(i));
            }
            list.add(0, writer);
            for (int i = 0; i < list.size(); i++) {
                list.get(i).getRawBytes().put(1, (byte) i);
            }
            // fix bucket count
            bucketCount -= size;
            logger.fine("Total points:" + compactedPoints + ", original pair count:" + writer.getReader().getPairCount() + " compression ratio:" + rawBytes.position() + " original:" + total);
        }
    }
    return compactedWriter;
}
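A usage sketch for the optional callbacks, assuming a TimeSeries set up as in testCompaction above. Based on the code, each callback receives the bucket's current writer list, under its lock, just before all but the last writer are swapped out; the logging here is illustrative, not taken from the project.

    List<Writer> replaced = series.compact(list -> {
        for (Writer w : list) {
            // inspect the bucket's writers before the old ones are replaced
            System.out.println("writer holds " + w.getCount() + " points before compaction");
        }
    });
    if (replaced != null) {
        System.out.println("compaction replaced " + replaced.size() + " writers");
    }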
Use of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TimeSeries, method queryPoints.
public List<long[]> queryPoints(String appendFieldValueName, List<String> appendTags, long startTime, long endTime, Predicate valuePredicate) throws IOException {
    if (startTime > endTime) {
        // swap start and end times if they are off
        startTime = startTime ^ endTime;
        endTime = endTime ^ startTime;
        startTime = startTime ^ endTime;
    }
    BetweenPredicate timeRangePredicate = new BetweenPredicate(startTime, endTime);
    SortedMap<String, List<Writer>> series = correctTimeRangeScan(startTime, endTime);
    List<Reader> readers = new ArrayList<>();
    for (List<Writer> writers : series.values()) {
        for (Writer writer : writers) {
            readers.add(getReader(writer, timeRangePredicate, valuePredicate));
        }
    }
    List<long[]> points = new ArrayList<>();
    for (Reader reader : readers) {
        readerToPoints(points, reader);
    }
    return points;
}
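A query sketch for this method, assuming a populated TimeSeries named series. Each returned long[] appears to be a {timestamp, value-as-long-bits} pair (see Reader.read() in compact() above), so double-valued fields need Double.longBitsToDouble; the null arguments skip tag appending and value filtering, mirroring the null predicates used in the tests on this page. The time range is illustrative.

    long endMillis = System.currentTimeMillis();
    long startMillis = endMillis - 3600 * 1000; // last hour
    List<long[]> points = series.queryPoints("value", null, startMillis, endMillis, null);
    for (long[] point : points) {
        System.out.println(point[0] + " -> " + Double.longBitsToDouble(point[1]));
    }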