
Example 36 with Series

use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.

the class TestDiskStorageEngine method testSeriesBucketLookups.

@Test
public void testSeriesBucketLookups() throws IOException, ItemNotFoundException {
    MiscUtils.delete(new File("targer/db1/"));
    DiskStorageEngine engine = new DiskStorageEngine();
    HashMap<String, String> map = new HashMap<>();
    map.put("metadata.dir", "target/db1/mdq");
    map.put("index.dir", "target/db1/index");
    map.put("data.dir", "target/db1/data");
    map.put(StorageEngine.PERSISTENCE_DISK, "true");
    engine.configure(map, bgTasks);
    engine.connect();
    String dbName = "test1";
    String measurementName = "cpu";
    List<String> tags = Arrays.asList("test=1");
    long ts = 1483923600000L;
    System.out.println("Base timestamp=" + new Date(ts));
    for (int i = 0; i < 100; i++) {
        engine.writeDataPoint(MiscUtils.buildDataPoint(dbName, measurementName, "value", tags, ts + (i * 60000), 2.2));
    }
    long endTs = ts + 99 * 60000;
    // validate all points are returned with a full range query
    List<Series> points = engine.queryDataPoints(dbName, measurementName, "value", ts, endTs, new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    List<DataPoint> dps = points.iterator().next().getDataPoints();
    assertEquals(ts, dps.get(0).getTimestamp());
    assertEquals(endTs, dps.get(dps.size() - 1).getTimestamp());
    // validate ts-1 yields the same result
    points = engine.queryDataPoints(dbName, measurementName, "value", ts - 1, endTs, new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    dps = points.iterator().next().getDataPoints();
    assertEquals(ts, dps.get(0).getTimestamp());
    System.out.println("Value count:" + dps.size());
    assertEquals(endTs, dps.get(dps.size() - 1).getTimestamp());
    // validate ts+1 skips the first point
    points = engine.queryDataPoints(dbName, measurementName, "value", ts + 1, endTs, new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    dps = points.iterator().next().getDataPoints();
    assertEquals(ts + 60000, dps.get(0).getTimestamp());
    assertEquals(endTs, dps.get(dps.size() - 1).getTimestamp());
    // validate that points have been written to 2 different buckets
    // validate that points have been written to 2 different buckets
    assertTrue(TimeUtils.getTimeBucket(TimeUnit.MILLISECONDS, ts, 4096) != TimeUtils.getTimeBucket(TimeUnit.MILLISECONDS, endTs, 4096));
    // calculate base timestamp for the second bucket
    long baseTs2 = ((long) TimeUtils.getTimeBucket(TimeUnit.MILLISECONDS, endTs, 4096)) * 1000;
    System.out.println("Bucket2 base timestamp=" + new Date(baseTs2));
    // validate random seek with deliberate time offset
    points = engine.queryDataPoints(dbName, measurementName, "value", ts, baseTs2, new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    assertEquals("Invalid first entry:" + new Date(points.iterator().next().getDataPoints().get(0).getTimestamp()), ts, points.iterator().next().getDataPoints().get(0).getTimestamp());
    assertEquals("Invalid first entry:" + (baseTs2 - ts), (baseTs2 / 60000) * 60000, points.iterator().next().getDataPoints().get(points.iterator().next().getDataPoints().size() - 1).getTimestamp());
    points = engine.queryDataPoints(dbName, measurementName, "value", baseTs2, endTs, new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    assertEquals("Invalid first entry:" + new Date(points.iterator().next().getDataPoints().get(0).getTimestamp()), (baseTs2 - ts), (baseTs2 / 60000) * 60000, points.iterator().next().getDataPoints().get(0).getTimestamp());
    assertEquals("Invalid first entry:" + endTs, endTs, points.iterator().next().getDataPoints().get(points.iterator().next().getDataPoints().size() - 1).getTimestamp());
    // validate correct results when time range is incorrectly swapped i.e.
    // end time is smaller than start time
    points = engine.queryDataPoints(dbName, measurementName, "value", endTs - 1, baseTs2, new SimpleTagFilter(FilterType.EQUALS, "test", "1"));
    assertEquals("Invalid first entry:" + new Date(points.iterator().next().getDataPoints().get(0).getTimestamp()), (baseTs2 - ts), (baseTs2 / 60000) * 60000, points.iterator().next().getDataPoints().get(0).getTimestamp());
    assertEquals("Invalid first entry:" + endTs, endTs - 60000, points.iterator().next().getDataPoints().get(points.iterator().next().getDataPoints().size() - 1).getTimestamp());
    engine.disconnect();
}
Also used: TimeSeries(com.srotya.sidewinder.core.storage.TimeSeries) Series(com.srotya.sidewinder.core.storage.Series) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) SimpleTagFilter(com.srotya.sidewinder.core.filters.SimpleTagFilter) File(java.io.File) Date(java.util.Date) DataPoint(com.srotya.sidewinder.core.storage.DataPoint) Point(com.srotya.sidewinder.core.rpc.Point) Test(org.junit.Test)
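
Note: the bucket assertions above depend on how a millisecond timestamp maps to a 4096-second bucket. The sketch below mirrors that arithmetic under the assumption that TimeUtils.getTimeBucket floors the epoch-seconds value to a bucket-size boundary (consistent with the test converting the bucket id back to a millisecond base via * 1000); the helper here is a hypothetical stand-in, not the Sidewinder API.

import java.util.Date;

public class BucketMathSketch {
    // Hypothetical stand-in for TimeUtils.getTimeBucket: floor epoch
    // seconds to the nearest bucketSizeSeconds boundary.
    static int timeBucket(long tsMillis, int bucketSizeSeconds) {
        long seconds = tsMillis / 1000;
        return (int) (seconds - (seconds % bucketSizeSeconds));
    }

    public static void main(String[] args) {
        long ts = 1483923600000L;       // base timestamp from the test
        long endTs = ts + 99 * 60000;   // last of the 100 one-minute points
        int b1 = timeBucket(ts, 4096);
        int b2 = timeBucket(endTs, 4096);
        // 99 minutes = 5940 s > 4096 s, so the writes must straddle two buckets
        System.out.println(b1 != b2); // true
        // bucket id * 1000 recovers the bucket's base timestamp in ms
        System.out.println("Bucket2 base=" + new Date(((long) b2) * 1000));
    }
}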

Example 37 with Series

use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.

the class TestPersistentMeasurement method testDataPointsRecoveryPTR.

@Test
public void testDataPointsRecoveryPTR() throws Exception {
    long ts = System.currentTimeMillis();
    MiscUtils.delete(new File("target/db290/"));
    List<String> tags = Arrays.asList("test=1", "test=2");
    PersistentMeasurement m = new PersistentMeasurement();
    Map<String, String> map = new HashMap<>();
    map.put("disk.compression.class", ByzantineWriter.class.getName());
    map.put("malloc.file.max", String.valueOf(2 * 1024 * 1024));
    map.put("malloc.ptrfile.increment", String.valueOf(2 * 1024));
    m.configure(map, null, DBNAME, "m1", "target/db290/index", "target/db290/data", metadata, bgTaskPool);
    int LIMIT = 100;
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value" + i, tags, 4096, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, ts, 1L);
    }
    m.close();
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db290/index", "target/db290/data", metadata, bgTaskPool);
    List<Series> resultMap = new ArrayList<>();
    m.queryDataPoints("value.*", ts, ts + 1000, null, null, resultMap);
    assertEquals(LIMIT, resultMap.size());
    m.close();
}
Also used: TimeSeries(com.srotya.sidewinder.core.storage.TimeSeries) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ByzantineWriter(com.srotya.sidewinder.core.storage.compression.byzantine.ByzantineWriter) ArrayList(java.util.ArrayList) DataPoint(com.srotya.sidewinder.core.storage.DataPoint) Series(com.srotya.sidewinder.core.storage.Series) File(java.io.File) Test(org.junit.Test)
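
The recovery assertion above (resultMap.size() == LIMIT) relies on the value-field pattern "value.*" matching every one of the 100 fields written before the close. A minimal sketch of that matching, assuming the pattern is interpreted as a standard java.util.regex expression (the test itself does not pin down the dialect):

import java.util.regex.Pattern;

public class ValuePatternSketch {
    public static void main(String[] args) {
        Pattern p = Pattern.compile("value.*");
        int matches = 0;
        for (int i = 0; i < 100; i++) {
            // one recovered Series is expected per matching value field
            if (p.matcher("value" + i).matches()) {
                matches++;
            }
        }
        System.out.println(matches); // 100
    }
}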

Example 38 with Series

use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.

the class TestPersistentMeasurement method testDataPointsRecovery.

@Test
public void testDataPointsRecovery() throws Exception {
    long ts = System.currentTimeMillis();
    MiscUtils.delete(new File("target/db132/"));
    List<String> tags = Arrays.asList("test=1", "test=2");
    PersistentMeasurement m = new PersistentMeasurement();
    Map<String, String> map = new HashMap<>();
    map.put("disk.compression.class", ByzantineWriter.class.getName());
    map.put("malloc.file.max", String.valueOf(1024 * 1024));
    try {
        m.configure(map, null, DBNAME, "m1", "target/db132/index", "target/db132/data", metadata, bgTaskPool);
        fail("Must throw invalid file max size exception");
    } catch (Exception e) {
        // expected: malloc.file.max below the minimum supported size
    }
    map.put("malloc.file.max", String.valueOf(2 * 1024 * 1024));
    m.configure(map, null, DBNAME, "m1", "target/db132/index", "target/db132/data", metadata, bgTaskPool);
    int LIMIT = 100000;
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value", tags, 4096, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, ts + i * 1000, 1L);
    }
    m.close();
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db132/index", "target/db132/data", metadata, bgTaskPool);
    List<Series> resultMap = new ArrayList<>();
    m.queryDataPoints("value", ts, ts + 1000 * LIMIT, null, null, resultMap);
    Iterator<Series> iterator = resultMap.iterator();
    assertEquals(LIMIT, iterator.next().getDataPoints().size());
    m.close();
}
Also used: TimeSeries(com.srotya.sidewinder.core.storage.TimeSeries) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ByzantineWriter(com.srotya.sidewinder.core.storage.compression.byzantine.ByzantineWriter) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DataPoint(com.srotya.sidewinder.core.storage.DataPoint) Series(com.srotya.sidewinder.core.storage.Series) File(java.io.File) Test(org.junit.Test)
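
The try/fail/catch guard around the undersized malloc.file.max can be expressed more compactly with JUnit 4.13+'s assertThrows. A self-contained sketch, using a hypothetical validator in place of PersistentMeasurement.configure's size check; the actual minimum is not documented here, the test only shows that 1 MiB is rejected and 2 MiB is accepted:

import static org.junit.Assert.assertThrows;

import org.junit.Test;

public class FileMaxGuardSketch {
    // Hypothetical check: reject anything below 2 MiB, consistent with
    // the behavior observed in the test above.
    static void checkFileMax(long bytes) {
        if (bytes < 2L * 1024 * 1024) {
            throw new IllegalArgumentException("malloc.file.max too small: " + bytes);
        }
    }

    @Test
    public void testGuard() {
        assertThrows(IllegalArgumentException.class, () -> checkFileMax(1024 * 1024));
        checkFileMax(2L * 1024 * 1024); // accepted, does not throw
    }
}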

Example 39 with Series

use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.

the class TestPersistentMeasurement method testCompaction.

@Test
public void testCompaction() throws IOException {
    final long ts = 1484788896586L;
    MiscUtils.delete(new File("target/db45/"));
    List<String> tags = Arrays.asList("test=1", "test=2");
    PersistentMeasurement m = new PersistentMeasurement();
    Map<String, String> map = new HashMap<>();
    map.put("disk.compression.class", ByzantineWriter.class.getName());
    map.put("malloc.file.max", String.valueOf(2 * 1024 * 1024));
    map.put("malloc.ptrfile.increment", String.valueOf(1024));
    map.put("compaction.ratio", "1.2");
    map.put("compaction.enabled", "true");
    m.configure(map, null, DBNAME, "m1", "target/db45/index", "target/db45/data", metadata, bgTaskPool);
    int LIMIT = 7000;
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value1", tags, 1024, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, ts + i, i * 1.2);
    }
    assertEquals(1, m.getTimeSeries().size());
    TimeSeries series = m.getTimeSeries().iterator().next();
    assertEquals(1, series.getBucketRawMap().size());
    assertEquals(3, series.getBucketCount());
    assertEquals(3, series.getBucketRawMap().entrySet().iterator().next().getValue().size());
    assertEquals(1, series.getCompactionSet().size());
    int maxDp = series.getBucketRawMap().values().stream().flatMap(v -> v.stream()).mapToInt(l -> l.getCount()).max().getAsInt();
    // check and read data point count before compaction
    List<DataPoint> queryDataPoints = series.queryDataPoints("", ts, ts + LIMIT + 1, null);
    assertEquals(LIMIT, queryDataPoints.size());
    for (int i = 0; i < LIMIT; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals(ts + i, dp.getTimestamp());
        assertEquals(i * 1.2, dp.getValue(), 0.01);
    }
    m.compact();
    assertEquals(2, series.getBucketCount());
    assertEquals(2, series.getBucketRawMap().entrySet().iterator().next().getValue().size());
    assertEquals(0, series.getCompactionSet().size());
    assertTrue(maxDp <= series.getBucketRawMap().values().stream().flatMap(v -> v.stream()).mapToInt(l -> l.getCount()).max().getAsInt());
    // validate query after compaction
    queryDataPoints = series.queryDataPoints("", ts, ts + LIMIT + 1, null);
    assertEquals(LIMIT, queryDataPoints.size());
    for (int i = 0; i < LIMIT; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals(ts + i, dp.getTimestamp());
        assertEquals(i * 1.2, dp.getValue(), 0.01);
    }
    // test buffer recovery after compaction, validate count
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db45/index", "target/db45/data", metadata, bgTaskPool);
    series = m.getTimeSeries().iterator().next();
    queryDataPoints = series.queryDataPoints("", ts, ts + LIMIT + 1, null);
    assertEquals(LIMIT, queryDataPoints.size());
    for (int i = 0; i < LIMIT; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals(ts + i, dp.getTimestamp());
        assertEquals(i * 1.2, dp.getValue(), 0.01);
    }
    for (int i = 0; i < LIMIT; i++) {
        TimeSeries t = m.getOrCreateTimeSeries("value1", tags, 1024, false, map);
        t.addDataPoint(TimeUnit.MILLISECONDS, LIMIT + ts + i, i * 1.2);
    }
    series.getBucketRawMap().entrySet().iterator().next().getValue().stream().map(v -> "" + v.getCount() + ":" + v.isReadOnly() + ":" + (int) v.getRawBytes().get(1)).forEach(System.out::println);
    // test recovery again
    m = new PersistentMeasurement();
    m.configure(map, null, DBNAME, "m1", "target/db45/index", "target/db45/data", metadata, bgTaskPool);
    series = m.getTimeSeries().iterator().next();
    queryDataPoints = series.queryDataPoints("", ts - 1, ts + 2 + (LIMIT * 2), null);
    assertEquals(LIMIT * 2, queryDataPoints.size());
    for (int i = 0; i < LIMIT * 2; i++) {
        DataPoint dp = queryDataPoints.get(i);
        assertEquals("Error:" + i + " " + (dp.getTimestamp() - ts - i), ts + i, dp.getTimestamp());
    }
}
Also used: Measurement(com.srotya.sidewinder.core.storage.Measurement) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) DBMetadata(com.srotya.sidewinder.core.storage.DBMetadata) MiscUtils(com.srotya.sidewinder.core.utils.MiscUtils) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) DataPoint(com.srotya.sidewinder.core.storage.DataPoint) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) LinkedHashMap(java.util.LinkedHashMap) MetricsRegistryService(com.srotya.sidewinder.core.monitoring.MetricsRegistryService) SetIndex(com.srotya.sidewinder.core.storage.mem.SetIndex) Map(java.util.Map) TagIndex(com.srotya.sidewinder.core.storage.TagIndex) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Assert.fail(org.junit.Assert.fail) MemStorageEngine(com.srotya.sidewinder.core.storage.mem.MemStorageEngine) ExecutorService(java.util.concurrent.ExecutorService) StorageEngine(com.srotya.sidewinder.core.storage.StorageEngine) Reader(com.srotya.sidewinder.core.storage.compression.Reader) Iterator(java.util.Iterator) TimeSeries(com.srotya.sidewinder.core.storage.TimeSeries) Assert.assertTrue(org.junit.Assert.assertTrue) Set(java.util.Set) IOException(java.io.IOException) Test(org.junit.Test) ByzantineWriter(com.srotya.sidewinder.core.storage.compression.byzantine.ByzantineWriter) File(java.io.File) Executors(java.util.concurrent.Executors) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Tag(com.srotya.sidewinder.core.filters.Tag) BackgrounThreadFactory(com.srotya.sidewinder.core.utils.BackgrounThreadFactory) Series(com.srotya.sidewinder.core.storage.Series) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals)
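
The compaction assertions above pin down an observable contract rather than a merge algorithm: the physical buffer count for a bucket drops (3 to 2, with the active buffer untouched) while a query still returns every logical data point. A toy model of that contract; all names are hypothetical and nothing here claims to be Sidewinder's actual strategy:

import java.util.ArrayList;
import java.util.List;

public class CompactionContractSketch {
    // Each long[] stands in for one physical buffer of a time bucket.
    static List<long[]> compact(List<long[]> buffers) {
        int mergedLen = 0;
        for (int i = 0; i < buffers.size() - 1; i++) {
            mergedLen += buffers.get(i).length;
        }
        // merge every read-only buffer into one; keep the active (last)
        // buffer as-is
        long[] merged = new long[mergedLen];
        int pos = 0;
        for (int i = 0; i < buffers.size() - 1; i++) {
            long[] b = buffers.get(i);
            System.arraycopy(b, 0, merged, pos, b.length);
            pos += b.length;
        }
        List<long[]> out = new ArrayList<>();
        out.add(merged);
        out.add(buffers.get(buffers.size() - 1));
        return out;
    }

    public static void main(String[] args) {
        List<long[]> buffers = new ArrayList<>();
        buffers.add(new long[3000]);
        buffers.add(new long[3000]);
        buffers.add(new long[1000]); // active buffer
        List<long[]> compacted = compact(buffers);
        System.out.println(compacted.size()); // 3 buffers -> 2, like the test
        System.out.println(compacted.stream().mapToInt(b -> b.length).sum()); // 7000: nothing lost
    }
}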

Example 40 with Series

use of com.srotya.sidewinder.core.storage.Series in project sidewinder by srotya.

the class TestMemStorageEngine method testConfigureTimeBuckets.

@Test
public void testConfigureTimeBuckets() throws ItemNotFoundException, IOException {
    StorageEngine engine = new MemStorageEngine();
    HashMap<String, String> conf = new HashMap<>();
    long ts = System.currentTimeMillis();
    conf.put(StorageEngine.DEFAULT_BUCKET_SIZE, String.valueOf(4096 * 10));
    try {
        engine.configure(conf, bgTasks);
    } catch (IOException e) {
        fail("No IOException should be thrown");
    }
    try {
        for (int i = 0; i < 10; i++) {
            engine.writeDataPoint(MiscUtils.buildDataPoint("test", "ss", "value", Arrays.asList("te=2"), ts + (i * 4096 * 1000), 2.2));
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail("Engine is initialized, no IO Exception should be thrown:" + e.getMessage());
    }
    List<Series> queryDataPoints = engine.queryDataPoints("test", "ss", "value", ts, ts + (4096 * 100 * 1000) + 1, null);
    assertTrue(queryDataPoints.size() >= 1);
}
Also used: TimeSeries(com.srotya.sidewinder.core.storage.TimeSeries) Series(com.srotya.sidewinder.core.storage.Series) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) IOException(java.io.IOException) StorageEngine(com.srotya.sidewinder.core.storage.StorageEngine) DataPoint(com.srotya.sidewinder.core.storage.DataPoint) Point(com.srotya.sidewinder.core.rpc.Point) ItemNotFoundException(com.srotya.sidewinder.core.storage.ItemNotFoundException) RejectException(com.srotya.sidewinder.core.storage.RejectException) Test(org.junit.Test)
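
A short sketch of why the ten writes above land in so few buckets: with DEFAULT_BUCKET_SIZE set to 4096 * 10 = 40960 seconds, points spaced 4096 seconds apart span 36864 seconds in total, so they cross at most one bucket boundary. Bucket ids are computed here by flooring epoch seconds, the same assumption as in the sketch after Example 36:

import java.util.HashSet;
import java.util.Set;

public class ConfiguredBucketSketch {
    public static void main(String[] args) {
        long ts = System.currentTimeMillis();
        int bucketSizeSeconds = 4096 * 10;
        Set<Long> buckets = new HashSet<>();
        for (int i = 0; i < 10; i++) {
            long seconds = (ts + (i * 4096L * 1000)) / 1000;
            buckets.add(seconds - (seconds % bucketSizeSeconds));
        }
        // 9 * 4096 s = 36864 s < 40960 s, so one or two distinct buckets
        System.out.println(buckets.size());
    }
}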

Aggregations

Series (com.srotya.sidewinder.core.storage.Series) 56
DataPoint (com.srotya.sidewinder.core.storage.DataPoint) 49
Test (org.junit.Test) 47
ArrayList (java.util.ArrayList) 37
TimeSeries (com.srotya.sidewinder.core.storage.TimeSeries) 21
IOException (java.io.IOException) 18
LinkedHashMap (java.util.LinkedHashMap) 16
HashMap (java.util.HashMap) 15
Point (com.srotya.sidewinder.core.rpc.Point) 13
ItemNotFoundException (com.srotya.sidewinder.core.storage.ItemNotFoundException) 13
File (java.io.File) 13
ReducingWindowedAggregator (com.srotya.sidewinder.core.functions.windowed.ReducingWindowedAggregator) 10
RejectException (com.srotya.sidewinder.core.storage.RejectException) 10
StorageEngine (com.srotya.sidewinder.core.storage.StorageEngine) 7
List (java.util.List) 7
ByzantineWriter (com.srotya.sidewinder.core.storage.compression.byzantine.ByzantineWriter) 6
Tag (com.srotya.sidewinder.core.filters.Tag) 5
SimpleTagFilter (com.srotya.sidewinder.core.filters.SimpleTagFilter) 4
Measurement (com.srotya.sidewinder.core.storage.Measurement) 4
WriterServiceBlockingStub (com.srotya.sidewinder.core.rpc.WriterServiceGrpc.WriterServiceBlockingStub) 3