Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class TestTimeSeries, method testReplaceSeries.
// @Test
// public void testCompactionGzip() throws IOException {
//     DBMetadata metadata = new DBMetadata(28);
//     MockMeasurement measurement = new MockMeasurement(1024);
//     HashMap<String, String> conf = new HashMap<>();
//     conf.put("default.bucket.size", "409600");
//     conf.put("compaction.enabled", "true");
//     conf.put("use.query.pool", "false");
//     conf.put("compaction.ratio", "1.1");
//     conf.put("zip.block.size", "8");
//
//     final TimeSeries series = new TimeSeries(measurement, "byzantine", "bzip", "asdasasd", 409600, metadata, true, conf);
//     final long curr = 1497720652566L;
//
//     String valueFieldName = "value";
//
//     for (int i = 1; i <= 10000; i++) {
//         series.addDataPoint(TimeUnit.MILLISECONDS, curr + i * 1000, i * 1.1);
//     }
//
//     SortedMap<String, List<Writer>> bucketRawMap = series.getBucketRawMap();
//     assertEquals(1, bucketRawMap.size());
//     int size = bucketRawMap.values().iterator().next().size();
//     assertTrue(series.getCompactionSet().size() < size);
//     assertTrue(size > 2);
//     series.compact();
//     List<DataPoint> dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
//     bucketRawMap = series.getBucketRawMap();
//     assertEquals(2, bucketRawMap.values().iterator().next().size());
//     int count = 0;
//     for (List<Writer> list : bucketRawMap.values()) {
//         for (Writer writer : list) {
//             Reader reader = writer.getReader();
//             count += reader.getPairCount();
//         }
//     }
//     assertEquals(10000, count);
//     assertEquals(10000, dataPoints.size());
//     for (int i = 1; i <= 10000; i++) {
//         DataPoint dp = dataPoints.get(i - 1);
//         assertEquals("Bad ts:" + i, curr + i * 1000, dp.getTimestamp());
//         assertEquals(dp.getValue(), i * 1.1, 0.001);
//     }
// }
@Test
public void testReplaceSeries() throws IOException {
    DBMetadata metadata = new DBMetadata(28);
    MockMeasurement measurement = new MockMeasurement(1024);
    HashMap<String, String> conf = new HashMap<>();
    conf.put("default.bucket.size", "409600");
    conf.put("compaction.enabled", "true");
    conf.put("use.query.pool", "false");
    final TimeSeries series = new TimeSeries(measurement, "byzantine", "gorilla", "asdasasd", 409600, metadata, true, conf);
    final long curr = 1497720652566L;
    String valueFieldName = "value";
    // write 10000 points, one per second
    for (int i = 1; i <= 10000; i++) {
        series.addDataPoint(TimeUnit.MILLISECONDS, curr + i * 1000, i * 1.1);
    }
    SortedMap<String, List<Writer>> bucketRawMap = series.getBucketRawMap();
    int size = bucketRawMap.values().iterator().next().size();
    assertTrue(series.getCompactionSet().size() < size);
    assertTrue(size > 2);
    // compaction should collapse the bucket down to two writers
    List<Writer> compact = series.compact();
    assertTrue(compact.size() > 0);
    assertTrue(bucketRawMap.values().iterator().next().size() == 2);
    List<Writer> next = bucketRawMap.values().iterator().next();
    Writer writer = next.get(0);
    // copy the first writer's raw buffer into a standalone byte array
    ByteBuffer buf = writer.getRawBytes();
    buf.rewind();
    int limit = buf.limit();
    ByteBuffer allocate = ByteBuffer.allocate(limit);
    allocate.put(buf);
    allocate.rewind();
    byte[] array = allocate.array();
    assertEquals(buf.limit(), array.length);
    MemMalloc allocator = measurement.getAllocator();
    // replace the first bucket's writers with the copied buffer
    List<Entry<Long, byte[]>> list = new ArrayList<>();
    list.add(new AbstractMap.SimpleEntry<Long, byte[]>(writer.getHeaderTimestamp(), array));
    try {
        series.replaceFirstBuckets(bucketRawMap.firstKey(), list);
    } catch (InstantiationException | IllegalAccessException e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
    // a cleanup callback should have been registered for the displaced buffers
    assertEquals(1, allocator.getCleanupCallback().size());
    // all 10000 points must still be readable after the replacement
    List<DataPoint> dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
    for (int i = 1; i <= 10000; i++) {
        DataPoint dp = dataPoints.get(i - 1);
        assertEquals("Bad ts:" + i, curr + i * 1000, dp.getTimestamp());
        assertEquals(dp.getValue(), i * 1.1, 0.001);
    }
}
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class TimeSeries, method loadBucketMap.
/**
 * Checks for and recovers an existing bucket map for this series, if one exists.
 *
 * @param bufferEntries
 *            recovered buffer entries (bucket key to buffer) to rebuild writers from
 * @throws IOException
 */
public void loadBucketMap(List<Entry<String, BufferObject>> bufferEntries) throws IOException {
    Map<String, String> cacheConf = new HashMap<>(conf);
    logger.fine(() -> "Scanning buffer for:" + seriesId);
    for (Entry<String, BufferObject> entry : bufferEntries) {
        ByteBuffer duplicate = entry.getValue().getBuf();
        duplicate.rewind();
        // String series = getStringFromBuffer(duplicate);
        // if (!series.equalsIgnoreCase(seriesId)) {
        // continue;
        // }
        String tsBucket = entry.getKey();
        List<Writer> list = bucketMap.get(tsBucket);
        if (list == null) {
            list = Collections.synchronizedList(new ArrayList<>());
            bucketMap.put(tsBucket, list);
        }
        ByteBuffer slice = duplicate.slice();
        // the first byte of the slice identifies the compression codec
        int codecId = (int) slice.get();
        // int listIndex = (int) slice.get();
        Class<Writer> classById = CompressionFactory.getClassById(codecId);
        Writer writer = getWriterInstance(classById);
        if (entry.getValue().getBufferId() == null) {
            throw new IOException("Buffer id can't be read:" + measurement.getDbName() + ":" + measurement.getMeasurementName() + " series:" + getSeriesId());
        }
        logger.fine(() -> "Loading bucketmap:" + seriesId + "\t" + tsBucket + " bufferid:" + entry.getValue().getBufferId());
        writer.setBufferId(entry.getValue().getBufferId());
        writer.configure(cacheConf, slice, false, START_OFFSET, true);
        list.add(writer);
        bucketCount++;
        logger.fine(() -> "Loaded bucketmap:" + seriesId + "\t" + tsBucket + " bufferid:" + entry.getValue().getBufferId());
    }
    sortBucketMap();
}
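The recovery loop above reads a single codec-id byte from the front of each buffer slice and uses it to pick a Writer implementation via CompressionFactory. The following self-contained sketch mirrors that "codec id byte + payload" layout with placeholder types; the SimpleWriter interface and the codec registry are illustrative assumptions, not Sidewinder APIs.

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

// Illustrative sketch only: mirrors the "codec id byte + payload" recovery layout
// using placeholder types in place of Sidewinder's CompressionFactory and Writer.
public class CodecIdRecoverySketch {

    interface SimpleWriter {
        void configure(ByteBuffer payload);
    }

    static final Map<Integer, Supplier<SimpleWriter>> CODECS = new HashMap<>();

    public static SimpleWriter recover(ByteBuffer buf) {
        buf.rewind();
        ByteBuffer slice = buf.slice();
        int codecId = slice.get(); // first byte selects the codec, as in loadBucketMap
        Supplier<SimpleWriter> factory = CODECS.get(codecId);
        if (factory == null) {
            throw new IllegalStateException("Unknown codec id: " + codecId);
        }
        SimpleWriter writer = factory.get();
        writer.configure(slice); // remaining bytes are the writer's payload
        return writer;
    }

    public static void main(String[] args) {
        // register a codec under id 1 and recover a writer from a tiny buffer
        CODECS.put(1, () -> payload -> System.out.println("payload bytes remaining: " + payload.remaining()));
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.put((byte) 1); // codec id
        buf.putInt(42);    // fake payload
        recover(buf);      // prints: payload bytes remaining: 7
    }
}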
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class TimeSeries, method queryDataPoints.
/**
 * Extracts {@link DataPoint}s for the supplied time range and value predicate.
 *
 * Each returned {@link DataPoint} has the append field value and tags set on it.
 *
 * @param appendFieldValueName
 *            field name to append to each datapoint
 * @param startTime
 *            start of the time range
 * @param endTime
 *            end of the time range
 * @param valuePredicate
 *            predicate pushed down to filter values
 * @return list of datapoints
 * @throws IOException
 */
public List<DataPoint> queryDataPoints(String appendFieldValueName, long startTime, long endTime, Predicate valuePredicate) throws IOException {
    if (startTime > endTime) {
        // swap start and end times in place (XOR swap) if they were supplied in reverse order
        startTime = startTime ^ endTime;
        endTime = endTime ^ startTime;
        startTime = startTime ^ endTime;
    }
    BetweenPredicate timeRangePredicate = new BetweenPredicate(startTime, endTime);
    logger.fine(getSeriesId() + " " + bucketMap.size() + " " + bucketCount + " " + startTime + " " + endTime + " " + valuePredicate + " " + timeRangePredicate + " diff:" + (endTime - startTime));
    SortedMap<String, List<Writer>> series = correctTimeRangeScan(startTime, endTime);
    // fan out to one reader per writer in the matching buckets
    List<Reader> readers = new ArrayList<>();
    for (List<Writer> writers : series.values()) {
        for (Writer writer : writers) {
            readers.add(getReader(writer, timeRangePredicate, valuePredicate));
        }
    }
    // drain every reader into a single result list
    List<DataPoint> points = new ArrayList<>();
    for (Reader reader : readers) {
        readerToDataPoints(points, reader);
    }
    return points;
}
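The branch at the top of queryDataPoints normalizes a reversed time range with an in-place XOR swap. Below is a minimal standalone illustration of that normalization next to the more conventional Math.min/Math.max form; it is a sketch for comparison, not Sidewinder code.

// Illustration only: two equivalent ways to normalize a possibly reversed time range.
public class TimeRangeNormalization {

    static long[] xorSwapIfReversed(long startTime, long endTime) {
        if (startTime > endTime) {
            // classic XOR swap: exchanges the two values without a temporary variable
            startTime = startTime ^ endTime;
            endTime = endTime ^ startTime;
            startTime = startTime ^ endTime;
        }
        return new long[] { startTime, endTime };
    }

    static long[] minMaxNormalize(long startTime, long endTime) {
        return new long[] { Math.min(startTime, endTime), Math.max(startTime, endTime) };
    }

    public static void main(String[] args) {
        long[] a = xorSwapIfReversed(2000L, 1000L);
        long[] b = minMaxNormalize(2000L, 1000L);
        System.out.println(a[0] + ".." + a[1] + " == " + b[0] + ".." + b[1]); // 1000..2000 == 1000..2000
    }
}

Both forms behave identically for all long inputs; the XOR variant simply avoids a temporary variable at some cost in readability.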
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class TimeSeries, method getBucketMap.
/**
 * @return a flattened copy of the bucketMap, keyed by bucket key concatenated with the writer's list index
 */
public SortedMap<String, Writer> getBucketMap() {
    SortedMap<String, Writer> map = new TreeMap<>();
    for (Entry<String, List<Writer>> entry : bucketMap.entrySet()) {
        List<Writer> value = entry.getValue();
        for (int i = 0; i < value.size(); i++) {
            Writer bucketEntry = value.get(i);
            map.put(entry.getKey() + i, bucketEntry);
        }
    }
    return map;
}
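The flattened view keys each writer by concatenating the bucket key with its list index, so a bucket "bkt" holding three writers yields keys "bkt0", "bkt1", "bkt2". The small standalone sketch below shows that keying scheme using strings in place of Writer instances; the names are illustrative only.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;

// Sketch only: shows how a getBucketMap()-style flattening forms its keys
// ("<bucketKey><listIndex>"), using strings in place of Writer instances.
public class BucketFlatteningSketch {

    public static void main(String[] args) {
        SortedMap<String, List<String>> bucketMap = new TreeMap<>();
        bucketMap.put("bkt", new ArrayList<>(Arrays.asList("writerA", "writerB", "writerC")));

        SortedMap<String, String> flattened = new TreeMap<>();
        for (Map.Entry<String, List<String>> entry : bucketMap.entrySet()) {
            List<String> writers = entry.getValue();
            for (int i = 0; i < writers.size(); i++) {
                flattened.put(entry.getKey() + i, writers.get(i));
            }
        }
        System.out.println(flattened); // {bkt0=writerA, bkt1=writerB, bkt2=writerC}
    }
}

Because the index is appended as text, a bucket with ten or more writers sorts lexicographically ("bkt10" before "bkt2"), which is worth keeping in mind if iteration order matters.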
Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.
The class Measurement, method runCleanupOperation.
public default Set<String> runCleanupOperation(String operation, java.util.function.Function<TimeSeries, List<Writer>> op) throws IOException {
    Set<String> cleanupList = new HashSet<>();
    getLock().lock();
    try {
        for (TimeSeries entry : getTimeSeries()) {
            try {
                List<Writer> list = op.apply(entry);
                if (list == null) {
                    continue;
                }
                for (Writer timeSeriesBucket : list) {
                    cleanupList.add(timeSeriesBucket.getBufferId());
                    getLogger().fine("Adding buffer to cleanup " + operation + " for bucket:" + entry.getSeriesId() + " Offset:" + timeSeriesBucket.currentOffset());
                }
                getLogger().fine("Buffers " + operation + " for time series:" + entry.getSeriesId());
            } catch (Exception e) {
                getLogger().log(Level.SEVERE, "Error collecting " + operation, e);
            }
        }
        // cleanup these buffer ids
        if (cleanupList.size() > 0) {
            getLogger().info("For measurement:" + getMeasurementName() + " cleaned=" + cleanupList.size() + " buffers");
        }
        getMalloc().cleanupBufferIds(cleanupList);
    } finally {
        getLock().unlock();
    }
    return cleanupList;
}
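A hedged sketch of how a caller might drive this hook, assuming TimeSeries.compact() returns the reclaimable writers and throws IOException as the test above suggests; the class name, method name, and compaction wiring shown here are illustrative, not the project's actual code, and Sidewinder package imports for Measurement and TimeSeries are omitted.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Set;

// Illustrative usage sketch (not from the Sidewinder sources): run a compaction pass
// over every series in a measurement and return the buffer ids that were released.
public class CompactionSweepSketch {

    public static Set<String> compactAll(Measurement measurement) throws IOException {
        return measurement.runCleanupOperation("compaction", series -> {
            try {
                return series.compact(); // writers whose buffers can now be reclaimed
            } catch (IOException e) {
                throw new UncheckedIOException(e); // Function cannot throw checked exceptions
            }
        });
    }
}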