
Example 6 with Writer

Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.

From the class TestTimeSeries, method testCompactionThreadSafety.

@Test
public void testCompactionThreadSafety() throws IOException, InterruptedException {
    DBMetadata metadata = new DBMetadata(28);
    MockMeasurement measurement = new MockMeasurement(1024);
    HashMap<String, String> conf = new HashMap<>();
    conf.put("default.bucket.size", "409600");
    conf.put("compaction.enabled", "true");
    conf.put("use.query.pool", "false");
    final TimeSeries series = new TimeSeries(measurement, "byzantine", "byzantine", "asdasasd", 409600, metadata, true, conf);
    final long curr = 1497720652566L;
    String valueFieldName = "value";
    for (int i = 1; i <= 10000; i++) {
        series.addDataPoint(TimeUnit.MILLISECONDS, curr + i * 1000, i * 1.1);
    }
    long ts = System.nanoTime();
    List<DataPoint> dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
    ts = System.nanoTime() - ts;
    System.out.println("Before compaction:" + ts / 1000 + "us");
    assertEquals(10000, dataPoints.size());
    for (int i = 1; i <= 10000; i++) {
        DataPoint dp = dataPoints.get(i - 1);
        assertEquals("Bad ts:" + i, curr + i * 1000, dp.getTimestamp());
        assertEquals(dp.getValue(), i * 1.1, 0.001);
    }
    SortedMap<String, List<Writer>> bucketRawMap = series.getBucketRawMap();
    assertEquals(1, bucketRawMap.size());
    int size = bucketRawMap.values().iterator().next().size();
    assertTrue(series.getCompactionSet().size() < size);
    assertTrue(size > 2);
    List<Writer> compact = series.compact();
    System.out.println("Compacted series:" + compact.size() + "\toriginalsize:" + size + " newsize:" + bucketRawMap.values().iterator().next().size());
    ts = System.nanoTime();
    dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
    ts = System.nanoTime() - ts;
    System.out.println("After compaction:" + ts / 1000 + "us");
    final AtomicBoolean bool = new AtomicBoolean(false);
    Executors.newCachedThreadPool().execute(() -> {
        while (!bool.get()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                break;
            }
        }
        try {
            series.addDataPoint(TimeUnit.MILLISECONDS, curr + 1000 * 10001, 1.11);
            bool.set(false);
        } catch (IOException e) {
            e.printStackTrace();
            return;
        }
    });
    series.compact(l -> {
        bool.set(true);
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        if (!bool.get()) {
            throw new RuntimeException("Synchronized block failed");
        }
    });
    Thread.sleep(100);
    assertTrue(!bool.get());
}
Also used: HashMap(java.util.HashMap) IOException(java.io.IOException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ArrayList(java.util.ArrayList) List(java.util.List) Writer(com.srotya.sidewinder.core.storage.compression.Writer) Test(org.junit.Test)

Example 7 with Writer

Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.

From the class TestTimeSeries, method testCompaction.

@Test
public void testCompaction() throws IOException {
    DBMetadata metadata = new DBMetadata(28);
    MockMeasurement measurement = new MockMeasurement(1024);
    HashMap<String, String> conf = new HashMap<>();
    conf.put("default.bucket.size", "409600");
    conf.put("compaction.enabled", "true");
    conf.put("use.query.pool", "false");
    conf.put("compaction.ratio", "1.1");
    final TimeSeries series = new TimeSeries(measurement, "byzantine", "byzantine", "asdasasd", 409600, metadata, true, conf);
    final long curr = 1497720652566L;
    String valueFieldName = "value";
    for (int i = 1; i <= 10000; i++) {
        series.addDataPoint(TimeUnit.MILLISECONDS, curr + i * 1000, i * 1.1);
    }
    long ts = System.nanoTime();
    List<DataPoint> dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
    ts = System.nanoTime() - ts;
    System.out.println("Before compaction:" + ts / 1000 + "us");
    assertEquals(10000, dataPoints.size());
    for (int i = 1; i <= 10000; i++) {
        DataPoint dp = dataPoints.get(i - 1);
        assertEquals("Bad ts:" + i, curr + i * 1000, dp.getTimestamp());
        assertEquals(dp.getValue(), i * 1.1, 0.001);
    }
    SortedMap<String, List<Writer>> bucketRawMap = series.getBucketRawMap();
    assertEquals(1, bucketRawMap.size());
    int size = bucketRawMap.values().iterator().next().size();
    assertTrue(series.getCompactionSet().size() < size);
    assertTrue(size > 2);
    series.compact();
    ts = System.nanoTime();
    dataPoints = series.queryDataPoints(valueFieldName, curr - 1000, curr + 10000 * 1000 + 1, null);
    ts = System.nanoTime() - ts;
    System.out.println("After compaction:" + ts / 1000 + "us");
    bucketRawMap = series.getBucketRawMap();
    assertEquals(2, bucketRawMap.values().iterator().next().size());
    int count = 0;
    for (List<Writer> list : bucketRawMap.values()) {
        for (Writer writer : list) {
            Reader reader = writer.getReader();
            count += reader.getPairCount();
        }
    }
    assertEquals(10000, count);
    assertEquals(10000, dataPoints.size());
    for (int i = 1; i <= 10000; i++) {
        DataPoint dp = dataPoints.get(i - 1);
        assertEquals("Bad ts:" + i, curr + i * 1000, dp.getTimestamp());
        assertEquals(dp.getValue(), i * 1.1, 0.001);
    }
}
Also used: HashMap(java.util.HashMap) Reader(com.srotya.sidewinder.core.storage.compression.Reader) ArrayList(java.util.ArrayList) List(java.util.List) Writer(com.srotya.sidewinder.core.storage.compression.Writer) Test(org.junit.Test)

Example 8 with Writer

Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.

From the class TimeSeries, method compact.

/**
 * Compacts old Writers into a single Writer for every time bucket. This ensures the
 * buffers are compacted and also provides an opportunity to use a higher
 * compression rate algorithm for the bucket. All Writers but the last are
 * read-only, therefore performing operations on them does not impact ongoing writes.
 *
 * @param functions callbacks invoked on the bucket's writer list inside the
 *                  synchronized block, before the old writers are replaced
 * @return null if there is nothing to compact, or an empty list if all compaction
 *         attempts fail
 * @throws IOException
 */
@SafeVarargs
public final List<Writer> compact(Consumer<List<Writer>>... functions) throws IOException {
    // size check is to avoid unnecessary calls and exit fast
    if (compactionCandidateSet.isEmpty()) {
        return null;
    }
    List<Writer> compactedWriter = new ArrayList<>();
    Iterator<Entry<String, List<Writer>>> iterator = compactionCandidateSet.entrySet().iterator();
    int id = CompressionFactory.getIdByClass(compactionClass);
    while (iterator.hasNext()) {
        // entry.getKey() gives tsBucket string
        Entry<String, List<Writer>> entry = iterator.next();
        // remove this entry from compaction set
        iterator.remove();
        List<Writer> list = entry.getValue();
        int listSize = list.size() - 1;
        int pointCount = list.subList(0, listSize).stream().mapToInt(s -> s.getCount()).sum();
        int total = list.subList(0, listSize).stream().mapToInt(s -> s.getPosition()).sum();
        if (total == 0) {
            logger.warning("Ignoring bucket for compaction, not enough bytes. THIS BUG SHOULD BE INVESTIGATED");
            continue;
        }
        Writer writer = getWriterInstance(compactionClass);
        int compactedPoints = 0;
        double bufSize = total * compactionRatio;
        logger.finer("Allocating buffer:" + total + " Vs. " + pointCount * 16 + " max compacted buffer:" + bufSize);
        logger.finer("Getting sublist from:" + 0 + " to:" + (list.size() - 1));
        ByteBuffer buf = ByteBuffer.allocate((int) bufSize);
        buf.put((byte) id);
        // write ordinal 0 at offset 1, since this buffer will be the first one in the bucket
        buf.put(1, (byte) 0);
        writer.configure(conf, buf, true, START_OFFSET, false);
        Writer input = list.get(0);
        // read the header timestamp
        long timestamp = input.getHeaderTimestamp();
        writer.setHeaderTimestamp(timestamp);
        // read all but the last writer and insert into new temp writer
        try {
            for (int i = 0; i < list.size() - 1; i++) {
                input = list.get(i);
                Reader reader = input.getReader();
                for (int k = 0; k < reader.getPairCount(); k++) {
                    long[] pair = reader.read();
                    writer.addValue(pair[0], pair[1]);
                    compactedPoints++;
                }
            }
            writer.makeReadOnly();
        } catch (RollOverException e) {
            logger.warning("Buffer filled up; bad compression ratio; not compacting");
            continue;
        } catch (Exception e) {
            logger.log(Level.SEVERE, "Compaction failed due to unknown exception", e);
        }
        // get the raw compressed bytes
        ByteBuffer rawBytes = writer.getRawBytes();
        // limit how much data needs to be read from the buffer
        rawBytes.limit(rawBytes.position());
        // round the requested buffer length up to an even number of bytes
        int size = rawBytes.limit() + 1;
        if (size % 2 != 0) {
            size++;
        }
        rawBytes.rewind();
        // create buffer in measurement
        BufferObject newBuf = measurement.getMalloc().createNewBuffer(seriesId, entry.getKey(), size);
        logger.fine("Compacted buffer size:" + size + " vs " + total);
        String bufferId = newBuf.getBufferId();
        buf = newBuf.getBuf();
        writer = getWriterInstance(compactionClass);
        buf.put(rawBytes);
        writer.setBufferId(bufferId);
        writer.configure(conf, buf, false, START_OFFSET, false);
        writer.makeReadOnly();
        synchronized (list) {
            if (functions != null) {
                for (Consumer<List<Writer>> function : functions) {
                    function.accept(list);
                }
            }
            size = listSize - 1;
            logger.finest("Compaction debug size differences size:" + size + " listSize:" + listSize + " curr:" + list.size());
            for (int i = size; i >= 0; i--) {
                compactedWriter.add(list.remove(i));
            }
            list.add(0, writer);
            for (int i = 0; i < list.size(); i++) {
                list.get(i).getRawBytes().put(1, (byte) i);
            }
            // fix bucket count
            bucketCount -= size;
            logger.fine("Total points:" + compactedPoints + ", original pair count:" + writer.getReader().getPairCount() + " compression ratio:" + rawBytes.position() + " original:" + total);
        }
    }
    return compactedWriter;
}
Also used: HashMap(java.util.HashMap) ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) Level(java.util.logging.Level) HashSet(java.util.HashSet) Writer(com.srotya.sidewinder.core.storage.compression.Writer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) Predicate(com.srotya.sidewinder.core.predicates.Predicate) CompressionFactory(com.srotya.sidewinder.core.storage.compression.CompressionFactory) RollOverException(com.srotya.sidewinder.core.storage.compression.RollOverException) BetweenPredicate(com.srotya.sidewinder.core.predicates.BetweenPredicate) Reader(com.srotya.sidewinder.core.storage.compression.Reader) TimeUtils(com.srotya.sidewinder.core.utils.TimeUtils) Iterator(java.util.Iterator) Collection(java.util.Collection) IOException(java.io.IOException) Logger(java.util.logging.Logger) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) List(java.util.List) Tag(com.srotya.sidewinder.core.filters.Tag) TreeMap(java.util.TreeMap) Entry(java.util.Map.Entry) Comparator(java.util.Comparator) Collections(java.util.Collections) SortedMap(java.util.SortedMap)
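The compacted buffer built by this method starts with a small two-byte header: byte 0 holds the compression codec id resolved via CompressionFactory.getIdByClass, and byte 1 holds the writer's ordinal within its time bucket (rewritten for every surviving writer at the end of the synchronized block). The requested buffer size is also rounded up to an even number of bytes. Below is a minimal standalone sketch of that layout and rounding; the codec id value and the helper name roundUpToEven are illustrative and not part of the Sidewinder API.

import java.nio.ByteBuffer;

public class CompactedBufferLayoutSketch {

    // Mirrors the sizing logic in compact(): add one byte, then round up to an even size.
    static int roundUpToEven(int limit) {
        int size = limit + 1;
        if (size % 2 != 0) {
            size++;
        }
        return size;
    }

    public static void main(String[] args) {
        // Hypothetical values: a real codec id would come from CompressionFactory.getIdByClass(...)
        byte codecId = 2;
        // the compacted writer is always placed first in the bucket, hence ordinal 0
        byte ordinal = 0;
        ByteBuffer buf = ByteBuffer.allocate(roundUpToEven(61));
        buf.put(0, codecId);
        buf.put(1, ordinal);
        System.out.println("allocated=" + buf.capacity() + " codec=" + buf.get(0) + " ordinal=" + buf.get(1));
    }
}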

Example 9 with Writer

Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.

From the class TimeSeries, method insertOrOverwriteWriters.

private List<String> insertOrOverwriteWriters(List<Entry<Long, byte[]>> bufList, boolean wasEmpty, List<Writer> list, String tsBucket) throws IOException, InstantiationException, IllegalAccessException {
    List<String> garbageCollectWriters = new ArrayList<>();
    if (!wasEmpty) {
        if (bufList.size() >= list.size()) {
            throw new IllegalArgumentException("Buffer can't be replaced since local buffers are smaller than the replacing buffers");
        }
    }
    for (int i = 0; i < bufList.size(); i++) {
        if (!wasEmpty) {
            Writer removedWriter = list.remove(i);
            garbageCollectWriters.add(removedWriter.getBufferId());
        }
        Entry<Long, byte[]> bs = bufList.get(i);
        BufferObject bufPair = measurement.getMalloc().createNewBuffer(seriesId, tsBucket, bs.getValue().length);
        ByteBuffer buf = bufPair.getBuf();
        buf.put(bs.getValue());
        buf.rewind();
        Writer writer = CompressionFactory.getClassById(buf.get(0)).newInstance();
        writer.setBufferId(bufPair.getBufferId());
        writer.configure(conf, bufPair.getBuf(), false, START_OFFSET, true);
        writer.setHeaderTimestamp(bs.getKey());
        list.add(i, writer);
    }
    return garbageCollectWriters;
}
Also used: ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) Writer(com.srotya.sidewinder.core.storage.compression.Writer)
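This method expects each entry in bufList to pair a writer's header timestamp with that writer's raw compressed bytes, which is how a bucket received from elsewhere might be rebuilt locally. The following sketch shows one way such a list might be assembled from existing writers; the helper name toBufList is hypothetical, and the limit/rewind handling simply mirrors what compact() does before copying raw bytes.

import com.srotya.sidewinder.core.storage.compression.Writer;
import java.nio.ByteBuffer;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;

class BufListSketch {

    // Hypothetical helper: builds the (header timestamp, raw bytes) entries
    // consumed by insertOrOverwriteWriters.
    static List<Entry<Long, byte[]>> toBufList(List<Writer> writers) {
        List<Entry<Long, byte[]>> bufList = new ArrayList<>();
        for (Writer w : writers) {
            ByteBuffer raw = w.getRawBytes();
            // only copy the bytes written so far, as compact() does
            raw.limit(raw.position());
            raw.rewind();
            byte[] bytes = new byte[raw.limit()];
            raw.get(bytes);
            bufList.add(new AbstractMap.SimpleEntry<>(w.getHeaderTimestamp(), bytes));
        }
        return bufList;
    }
}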

Example 10 with Writer

Use of com.srotya.sidewinder.core.storage.compression.Writer in project sidewinder by srotya.

From the class TimeSeries, method sortBucketMap.

private void sortBucketMap() throws IOException {
    for (Entry<String, List<Writer>> entry : bucketMap.entrySet()) {
        Collections.sort(entry.getValue(), new Comparator<Writer>() {

            @Override
            public int compare(Writer o1, Writer o2) {
                return Integer.compare((int) o1.getRawBytes().get(1), (int) o2.getRawBytes().get(1));
            }
        });
        for (int i = 0; i < entry.getValue().size() - 1; i++) {
            Writer writer = entry.getValue().get(i);
            writer.makeReadOnly();
        }
        // #COMPACTHRESHOLD
        if (entry.getValue().size() > COMPACTION_THRESHOLD) {
            compactionCandidateSet.put(entry.getKey(), entry.getValue());
        }
    }
}
Also used: ArrayList(java.util.ArrayList) List(java.util.List) Writer(com.srotya.sidewinder.core.storage.compression.Writer)
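The anonymous Comparator orders each bucket's writers by the ordinal stored at byte offset 1 of their buffers, the same byte that compact() rewrites after rearranging the list, so writers recovered from storage come back in their original order. A more compact but behaviorally equivalent form using Comparator.comparingInt is sketched below, purely as an illustration.

import com.srotya.sidewinder.core.storage.compression.Writer;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

class OrdinalSortSketch {

    // Equivalent to the anonymous Comparator in sortBucketMap(): sort by the
    // ordinal byte stored at offset 1 of each writer's raw buffer.
    static void sortByOrdinal(List<Writer> writers) {
        Collections.sort(writers, Comparator.comparingInt((Writer w) -> w.getRawBytes().get(1)));
    }
}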

Aggregations

Writer (com.srotya.sidewinder.core.storage.compression.Writer): 35 usages
ArrayList (java.util.ArrayList): 23 usages
List (java.util.List): 16 usages
Test (org.junit.Test): 16 usages
ByteBuffer (java.nio.ByteBuffer): 15 usages
Reader (com.srotya.sidewinder.core.storage.compression.Reader): 14 usages
IOException (java.io.IOException): 12 usages
HashMap (java.util.HashMap): 12 usages
DataPoint (com.srotya.sidewinder.core.storage.DataPoint): 7 usages
BetweenPredicate (com.srotya.sidewinder.core.predicates.BetweenPredicate): 6 usages
Predicate (com.srotya.sidewinder.core.predicates.Predicate): 5 usages
HashSet (java.util.HashSet): 5 usages
Point (com.srotya.sidewinder.core.rpc.Point): 3 usages
CompressionFactory (com.srotya.sidewinder.core.storage.compression.CompressionFactory): 3 usages
RollOverException (com.srotya.sidewinder.core.storage.compression.RollOverException): 3 usages
ValueWriter (com.srotya.sidewinder.core.storage.compression.ValueWriter): 3 usages
Collections (java.util.Collections): 3 usages
Comparator (java.util.Comparator): 3 usages
Map (java.util.Map): 3 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 3 usages