Use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.
From class TestDiskStorageEngine, method testSeriesBucketLookups:
@Test
public void testSeriesBucketLookups() throws IOException, ItemNotFoundException {
    MiscUtils.delete(new File("target/db1/"));
    DiskStorageEngine engine = new DiskStorageEngine();
    HashMap<String, String> map = new HashMap<>();
    map.put("metadata.dir", "target/db1/mdq");
    map.put("index.dir", "target/db1/index");
    map.put("data.dir", "target/db1/data");
    map.put(StorageEngine.PERSISTENCE_DISK, "true");
    engine.configure(map, bgTasks);
    engine.connect();
    String dbName = "test1";
    String measurementName = "cpu";
    List<String> tags = Arrays.asList("test=1");
    long ts = 1483923600000L;
    System.out.println("Base timestamp=" + new Date(ts));
    for (int i = 0; i < 100; i++) {
        engine.writeDataPoint(MiscUtils.buildDataPoint(dbName, measurementName, "value", tags, ts + (i * 60000), 2.2));
    }
    long endTs = ts + 99 * 60000;
    // validate all points are returned with a full range query
    List<Series> points = engine.queryDataPoints(dbName, measurementName, "value", ts, endTs,
            new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    List<DataPoint> dps = points.iterator().next().getDataPoints();
    assertEquals(ts, dps.get(0).getTimestamp());
    assertEquals(endTs, dps.get(dps.size() - 1).getTimestamp());
    // validate ts-1 yields the same result
    points = engine.queryDataPoints(dbName, measurementName, "value", ts - 1, endTs,
            new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    dps = points.iterator().next().getDataPoints();
    assertEquals(ts, dps.get(0).getTimestamp());
    System.out.println("Value count:" + dps.size());
    assertEquals(endTs, dps.get(dps.size() - 1).getTimestamp());
    // validate ts+1 excludes the first point
    points = engine.queryDataPoints(dbName, measurementName, "value", ts + 1, endTs,
            new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    dps = points.iterator().next().getDataPoints();
    assertEquals(ts + 60000, dps.get(0).getTimestamp());
    assertEquals(endTs, dps.get(dps.size() - 1).getTimestamp());
    // validate that points have been written to 2 different buckets
    assertTrue(TimeUtils.getTimeBucket(TimeUnit.MILLISECONDS, ts, 4096) != TimeUtils.getTimeBucket(TimeUnit.MILLISECONDS, endTs, 4096));
    // calculate the base timestamp of the second bucket
    long baseTs2 = ((long) TimeUtils.getTimeBucket(TimeUnit.MILLISECONDS, endTs, 4096)) * 1000;
    System.out.println("Bucket2 base timestamp=" + new Date(baseTs2));
    // validate random seek with a deliberate time offset
    points = engine.queryDataPoints(dbName, measurementName, "value", ts, baseTs2,
            new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    dps = points.iterator().next().getDataPoints();
    assertEquals("Invalid first entry:" + new Date(dps.get(0).getTimestamp()), ts, dps.get(0).getTimestamp());
    assertEquals("Invalid last entry:" + (baseTs2 - ts), (baseTs2 / 60000) * 60000, dps.get(dps.size() - 1).getTimestamp());
    points = engine.queryDataPoints(dbName, measurementName, "value", baseTs2, endTs,
            new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    dps = points.iterator().next().getDataPoints();
    assertEquals("Invalid first entry:" + new Date(dps.get(0).getTimestamp()), (baseTs2 / 60000) * 60000 + 60000, dps.get(0).getTimestamp());
    assertEquals("Invalid last entry:" + endTs, endTs, dps.get(dps.size() - 1).getTimestamp());
    // validate correct results when the time range is swapped, i.e.
    // end time is smaller than start time
    points = engine.queryDataPoints(dbName, measurementName, "value", endTs - 1, baseTs2,
            new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    dps = points.iterator().next().getDataPoints();
    assertEquals("Invalid first entry:" + new Date(dps.get(0).getTimestamp()), (baseTs2 / 60000) * 60000 + 60000, dps.get(0).getTimestamp());
    assertEquals("Invalid last entry:" + endTs, endTs - 60000, dps.get(dps.size() - 1).getTimestamp());
    engine.disconnect();
}
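The bucket boundary math in this test is easy to verify by hand. Below is a minimal standalone sketch (not part of the test) of the arithmetic, assuming TimeUtils.getTimeBucket truncates the epoch-seconds timestamp down to a multiple of the bucket width; the trailing * 1000 on baseTs2 converts that seconds-based boundary back to milliseconds.

import java.util.concurrent.TimeUnit;

public class BucketMathSketch {
    public static void main(String[] args) {
        long ts = 1483923600000L;       // base timestamp used by the test, in ms
        long endTs = ts + 99 * 60000;   // timestamp of the last written point
        int bucketSizeSeconds = 4096;   // bucket width used by the test
        // 99 minutes of data (5940 s) cannot fit in one 4096 s bucket,
        // so the first and last timestamps must land in different buckets
        long bucket1 = truncate(ts, bucketSizeSeconds);
        long bucket2 = truncate(endTs, bucketSizeSeconds);
        System.out.println(bucket1 != bucket2); // true
        // boundary of the second bucket converted back to ms, like baseTs2 above
        System.out.println("Bucket2 base=" + (bucket2 * 1000));
    }

    static long truncate(long tsMillis, int bucketSizeSeconds) {
        long seconds = TimeUnit.MILLISECONDS.toSeconds(tsMillis);
        return seconds - (seconds % bucketSizeSeconds);
    }
}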
Use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.
From class TestPersistentMeasurement, method testDataPointsRecoveryPTR:
@Test
public void testDataPointsRecoveryPTR() throws Exception {
    long ts = System.currentTimeMillis();
    MiscUtils.delete(new File("target/db290/"));
    List<String> tags = Arrays.asList("test=1", "test=2");
    PersistentMeasurement m = new PersistentMeasurement();
    Map<String, String> map = new HashMap<>();
    map.put("disk.compression.class", ByzantineWriter.class.getName());
    map.put("malloc.file.max", String.valueOf(2 * 1024 * 1024));
    map.put("malloc.ptrfile.increment", String.valueOf(2 * 1024));
    m.configure(map, null, DBNAME, "m1", "target/db290/index", "target/db290/data", metadata, bgTaskPool);
    int LIMIT = 100;
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value" + i, tags, 4096, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, ts, 1L);
    }
    m.close();
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db290/index", "target/db290/data", metadata, bgTaskPool);
    List<Series> resultMap = new ArrayList<>();
    m.queryDataPoints("value.*", ts, ts + 1000, null, null, resultMap);
    assertEquals(LIMIT, resultMap.size());
    m.close();
}
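Why resultMap ends up with exactly LIMIT entries: the loop writes 100 distinct series, "value0" through "value99", and the query key "value.*" is treated as a pattern over series names, so one Series object comes back per recovered series. A small sketch of that counting argument, assuming (as the query string suggests) Java regex semantics for the series-name match:

import java.util.regex.Pattern;

public class SeriesPatternSketch {
    public static void main(String[] args) {
        Pattern seriesPattern = Pattern.compile("value.*");
        int matches = 0;
        // the same names the test creates via getOrCreateTimeSeries("value" + i, ...)
        for (int i = 0; i < 100; i++) {
            if (seriesPattern.matcher("value" + i).matches()) {
                matches++;
            }
        }
        System.out.println(matches); // 100 == LIMIT, one Series per value field
    }
}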
Use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.
From class TestPersistentMeasurement, method testDataPointsRecovery:
@Test
public void testDataPointsRecovery() throws Exception {
    long ts = System.currentTimeMillis();
    MiscUtils.delete(new File("target/db132/"));
    List<String> tags = Arrays.asList("test=1", "test=2");
    PersistentMeasurement m = new PersistentMeasurement();
    Map<String, String> map = new HashMap<>();
    map.put("disk.compression.class", ByzantineWriter.class.getName());
    map.put("malloc.file.max", String.valueOf(1024 * 1024));
    try {
        m.configure(map, null, DBNAME, "m1", "target/db132/index", "target/db132/data", metadata, bgTaskPool);
        fail("Must throw invalid file max size exception");
    } catch (Exception e) {
        // expected: the configured max file size is rejected as too small
    }
    map.put("malloc.file.max", String.valueOf(2 * 1024 * 1024));
    m.configure(map, null, DBNAME, "m1", "target/db132/index", "target/db132/data", metadata, bgTaskPool);
    int LIMIT = 100000;
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value", tags, 4096, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, ts + i * 1000, 1L);
    }
    m.close();
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db132/index", "target/db132/data", metadata, bgTaskPool);
    List<Series> resultMap = new ArrayList<>();
    m.queryDataPoints("value", ts, ts + 1000 * LIMIT, null, null, resultMap);
    Iterator<Series> iterator = resultMap.iterator();
    assertEquals(LIMIT, iterator.next().getDataPoints().size());
    m.close();
}
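Beyond the recovered point count, the recovered timestamps could also be spot-checked. A hedged extension (not in the original test) that could be placed before the final m.close(), assuming getDataPoints() returns points in write order as the compaction test below relies on:

// first and last recovered timestamps should match what the write loop produced
List<DataPoint> recovered = resultMap.iterator().next().getDataPoints();
assertEquals(ts, recovered.get(0).getTimestamp());
assertEquals(ts + (LIMIT - 1) * 1000L, recovered.get(recovered.size() - 1).getTimestamp());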
Use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.
From class TestPersistentMeasurement, method testCompaction:
@Test
public void testCompaction() throws IOException {
    final long ts = 1484788896586L;
    MiscUtils.delete(new File("target/db45/"));
    List<String> tags = Arrays.asList("test=1", "test=2");
    PersistentMeasurement m = new PersistentMeasurement();
    Map<String, String> map = new HashMap<>();
    map.put("disk.compression.class", ByzantineWriter.class.getName());
    map.put("malloc.file.max", String.valueOf(2 * 1024 * 1024));
    map.put("malloc.ptrfile.increment", String.valueOf(1024));
    map.put("compaction.ratio", "1.2");
    map.put("compaction.enabled", "true");
    m.configure(map, null, DBNAME, "m1", "target/db45/index", "target/db45/data", metadata, bgTaskPool);
    int LIMIT = 7000;
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value1", tags, 1024, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, ts + i, i * 1.2);
    }
    assertEquals(1, m.getTimeSeries().size());
    TimeSeries series = m.getTimeSeries().iterator().next();
    assertEquals(1, series.getBucketRawMap().size());
    assertEquals(3, series.getBucketCount());
    assertEquals(3, series.getBucketRawMap().entrySet().iterator().next().getValue().size());
    assertEquals(1, series.getCompactionSet().size());
    int maxDp = series.getBucketRawMap().values().stream().flatMap(v -> v.stream()).mapToInt(l -> l.getCount()).max().getAsInt();
    // check and read the datapoint count before compaction
    List<DataPoint> queryDataPoints = series.queryDataPoints("", ts, ts + LIMIT + 1, null);
    assertEquals(LIMIT, queryDataPoints.size());
    for (int i = 0; i < LIMIT; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals(ts + i, dp.getTimestamp());
        assertEquals(i * 1.2, dp.getValue(), 0.01);
    }
    m.compact();
    assertEquals(2, series.getBucketCount());
    assertEquals(2, series.getBucketRawMap().entrySet().iterator().next().getValue().size());
    assertEquals(0, series.getCompactionSet().size());
    assertTrue(maxDp <= series.getBucketRawMap().values().stream().flatMap(v -> v.stream()).mapToInt(l -> l.getCount()).max().getAsInt());
    // validate query after compaction
    queryDataPoints = series.queryDataPoints("", ts, ts + LIMIT + 1, null);
    assertEquals(LIMIT, queryDataPoints.size());
    for (int i = 0; i < LIMIT; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals(ts + i, dp.getTimestamp());
        assertEquals(i * 1.2, dp.getValue(), 0.01);
    }
    // test buffer recovery after compaction, validate count
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db45/index", "target/db45/data", metadata, bgTaskPool);
    series = m.getTimeSeries().iterator().next();
    queryDataPoints = series.queryDataPoints("", ts, ts + LIMIT + 1, null);
    assertEquals(LIMIT, queryDataPoints.size());
    for (int i = 0; i < LIMIT; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals(ts + i, dp.getTimestamp());
        assertEquals(i * 1.2, dp.getValue(), 0.01);
    }
    // write a second batch of points after recovery
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value1", tags, 1024, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, LIMIT + ts + i, i * 1.2);
    }
    series.getBucketRawMap().entrySet().iterator().next().getValue().stream()
            .map(v -> "" + v.getCount() + ":" + v.isReadOnly() + ":" + (int) v.getRawBytes().get(1))
            .forEach(System.out::println);
    // test recovery again
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db45/index", "target/db45/data", metadata, bgTaskPool);
    series = m.getTimeSeries().iterator().next();
    queryDataPoints = series.queryDataPoints("", ts - 1, ts + 2 + (LIMIT * 2), null);
    assertEquals(LIMIT * 2, queryDataPoints.size());
    for (int i = 0; i < LIMIT * 2; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals("Error:" + i + " " + (dp.getTimestamp() - ts - i), ts + i, dp.getTimestamp());
    }
}
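The bucket and buffer counts asserted above follow from simple arithmetic, as this sketch shows. It assumes the bucket width passed to getOrCreateTimeSeries is in seconds; how the single bucket splits into three writer buffers (compacted down to two) depends on the ByzantineWriter buffer capacity, an implementation detail assumed rather than derived here.

public class CompactionMathSketch {
    public static void main(String[] args) {
        int limit = 7000;             // points written, 1 ms apart
        int bucketSizeSeconds = 1024; // bucket width passed to getOrCreateTimeSeries
        // 7000 points at 1 ms spacing span only 7 seconds, far below the
        // 1024 s bucket width, so all points land in a single time bucket
        long spanSeconds = limit / 1000;
        System.out.println(spanSeconds < bucketSizeSeconds); // true
        // that one bucket still holds 3 buffers before compact() and 2 after,
        // consistent with compaction merging the sealed (read-only) buffers
        // and leaving the active one writable, as the isReadOnly() dump prints
    }
}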
Use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.
From class TestMemStorageEngine, method testConfigureTimeBuckets:
@Test
public void testConfigureTimeBuckets() throws ItemNotFoundException, IOException {
    StorageEngine engine = new MemStorageEngine();
    HashMap<String, String> conf = new HashMap<>();
    long ts = System.currentTimeMillis();
    conf.put(StorageEngine.DEFAULT_BUCKET_SIZE, String.valueOf(4096 * 10));
    try {
        engine.configure(conf, bgTasks);
    } catch (IOException e) {
        fail("No IOException should be thrown");
    }
    try {
        for (int i = 0; i < 10; i++) {
            engine.writeDataPoint(MiscUtils.buildDataPoint("test", "ss", "value", Arrays.asList("te=2"), ts + (i * 4096 * 1000), 2.2));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail("Engine is initialized, no IOException should be thrown:" + e.getMessage());
    }
    List<Series> queryDataPoints = engine.queryDataPoints("test", "ss", "value", ts, ts + (4096 * 100 * 1000) + 1, null);
    assertTrue(queryDataPoints.size() >= 1);
}
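A quick sanity check of this configuration, assuming DEFAULT_BUCKET_SIZE is interpreted in seconds like the bucket sizes in the other tests: the ten writes are spaced 4096 s apart, so they span 9 * 4096 = 36864 s and fit comfortably inside one 40960 s bucket, and the final assertion only requires the single written series to come back.

public class BucketConfigSketch {
    public static void main(String[] args) {
        int bucketSizeSeconds = 4096 * 10;     // from DEFAULT_BUCKET_SIZE above
        long spacingSeconds = 4096;            // gap between consecutive writes
        long spanSeconds = 9 * spacingSeconds; // first write to last write
        System.out.println(spanSeconds);                      // 36864
        System.out.println(spanSeconds < bucketSizeSeconds);  // true: one bucket
    }
}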