Usage of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TimeSeries, method queryReader.
/**
* Extract list of readers for the supplied time range and value predicate.
*
* Each {@link DataPoint} has the appendFieldValue and appendTags set in it.
*
* @param appendFieldValueName
* fieldname to append to each datapoint
* @param appendTags
* tags to append to each datapoint
* @param startTime
* time range beginning
* @param endTime
* time range end
* @param valuePredicate
* pushed down filter for values
* @return list of readers
* @throws IOException
*/
public List<Reader> queryReader(String appendFieldValueName, List<Tag> appendTags, long startTime, long endTime, Predicate valuePredicate) throws IOException {
	if (startTime > endTime) {
		// swap start and end times if they are off; a plain temp-variable
		// swap is clearer than the XOR trick and equally cheap
		long tmp = startTime;
		startTime = endTime;
		endTime = tmp;
	}
	List<Reader> readers = new ArrayList<>();
	// push the (normalized) time range down into each reader as a predicate
	BetweenPredicate timeRangePredicate = new BetweenPredicate(startTime, endTime);
	// fetch only the buckets that intersect the requested range
	SortedMap<String, List<Writer>> series = correctTimeRangeScan(startTime, endTime);
	for (List<Writer> writers : series.values()) {
		for (Writer writer : writers) {
			readers.add(getReader(writer, timeRangePredicate, valuePredicate));
		}
	}
	return readers;
}
Usage of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TestByzantineReadWrite, method testWriteRead.
/**
 * End-to-end write/read test for {@link ByzantineWriter} covering all four
 * ingestion paths: addValue(long), addValue(double), write(DataPoint) and
 * write(List). After each phase the reader must yield exactly the
 * timestamps that were written.
 */
@Test
public void testWriteRead() throws IOException {
	ByteBuffer buf = ByteBuffer.allocateDirect(1024 * 100);
	int LIMIT = 10000;

	// Phase 1: long values appended one at a time
	Writer writer = new ByzantineWriter();
	writer.configure(new HashMap<>(), buf, true, startOffset, false);
	long ts = System.currentTimeMillis();
	writer.setHeaderTimestamp(ts);
	for (int i = 0; i < LIMIT; i++) {
		writer.addValue(ts + i * 1000, i);
	}
	System.out.println("Compression Ratio:" + writer.getCompressionRatio());
	assertTimestampsMatch(writer.getReader(), ts, LIMIT);

	// Phase 2: double values; also verify the decoded values round-trip
	buf.rewind();
	writer = new ByzantineWriter();
	writer.configure(new HashMap<>(), buf, true, startOffset, true);
	ts = System.currentTimeMillis();
	writer.setHeaderTimestamp(ts);
	for (int i = 0; i < LIMIT; i++) {
		writer.addValue(ts + i * 1000, i * 1.1);
	}
	Reader reader = writer.getReader();
	for (int i = 0; i < LIMIT; i++) {
		DataPoint dp = reader.readPair();
		assertEquals(ts + i * 1000, dp.getTimestamp());
		// NOTE(review): startOffset doubles as the floating-point comparison
		// delta here; presumably it is 0 or very small -- confirm intent
		assertEquals(i * 1.1, dp.getValue(), startOffset);
	}

	// Phase 3: one DataPoint object per write
	buf.rewind();
	writer = new ByzantineWriter();
	writer.configure(new HashMap<>(), buf, true, startOffset, false);
	ts = System.currentTimeMillis();
	writer.setHeaderTimestamp(ts);
	for (int i = 0; i < LIMIT; i++) {
		DataPoint dp = MiscUtils.buildDataPoint(ts + i * 1000, i);
		writer.write(dp);
	}
	reader = writer.getReader();
	assertEquals(LIMIT, reader.getPairCount());
	assertTimestampsMatch(reader, ts, LIMIT);

	// Phase 4: bulk write of a full list of DataPoints
	buf.rewind();
	writer = new ByzantineWriter();
	writer.configure(new HashMap<>(), buf, true, startOffset, true);
	ts = System.currentTimeMillis();
	writer.setHeaderTimestamp(ts);
	List<DataPoint> dps = new ArrayList<>();
	for (int i = 0; i < LIMIT; i++) {
		dps.add(MiscUtils.buildDataPoint(ts + i * 1000, i));
	}
	writer.write(dps);
	reader = writer.getReader();
	assertEquals(LIMIT, reader.getPairCount());
	assertTimestampsMatch(reader, ts, LIMIT);
}

/**
 * Reads {@code count} pairs from {@code reader} and asserts the timestamps
 * advance by exactly one second starting from {@code ts}.
 */
private void assertTimestampsMatch(Reader reader, long ts, int count) throws IOException {
	for (int i = 0; i < count; i++) {
		DataPoint dp = reader.readPair();
		assertEquals(ts + i * 1000, dp.getTimestamp());
	}
}
Usage of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TestByzantineReadWrite, method testWriteReadNoLock.
/**
 * Writes 100k long values with locking disabled and verifies that every
 * timestamp round-trips through the reader in order.
 */
@Test
public void testWriteReadNoLock() throws IOException {
	ByteBuffer buffer = ByteBuffer.allocateDirect(1024 * 1000);
	Writer writer = new ByzantineWriter();
	Map<String, String> conf = new HashMap<>();
	writer.configure(conf, buffer, true, startOffset, false);
	long baseTs = System.currentTimeMillis();
	writer.setHeaderTimestamp(baseTs);
	final int pointCount = 100000;
	// one point per second starting at the header timestamp
	for (int idx = 0; idx < pointCount; idx++) {
		writer.addValue(baseTs + idx * 1000, idx);
	}
	System.out.println("Compression Ratio:" + writer.getCompressionRatio());
	Reader reader = writer.getReader();
	for (int idx = 0; idx < pointCount; idx++) {
		DataPoint point = reader.readPair();
		assertEquals("Iteration:" + idx, baseTs + idx * 1000, point.getTimestamp());
	}
}
Usage of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TestByzantineReadWrite, method testDiskRecovery.
@Test
public void testDiskRecovery() throws IOException, InterruptedException {
// Write 1M points, read them back, then re-open the same buffer in
// recovery mode (configure with forWrite=false) and verify the data
// survives and the writer can continue appending.
ByteBuffer buf = ByteBuffer.allocateDirect(1024 * 1024 * 10);
ByzantineWriter writer = new ByzantineWriter();
writer.configure(new HashMap<>(), buf, true, startOffset, true);
long ots = System.currentTimeMillis();
writer.setHeaderTimestamp(ots);
int limit = 1_000_000;
for (int i = 0; i < limit; i++) {
writer.addValue(ots + i * 1000, i);
}
// ts here is the ELAPSED write time in milliseconds, not an epoch timestamp
long ts = (System.currentTimeMillis() - ots);
System.out.println("==>Byzantine Write time:" + ts + " data size:" + writer.getBuf().position());
Reader reader = writer.getReader();
assertEquals(limit, reader.getPairCount());
try {
// Phase 1: verify every pair written before recovery
for (int i = 0; i < limit; i++) {
DataPoint pair = reader.readPair();
assertEquals(ots + i * 1000, pair.getTimestamp());
assertEquals(i, pair.getLongValue());
}
} catch (Exception e) {
e.printStackTrace();
throw e;
}
System.out.println("Completed phase 1 reads");
// Re-open the buffer with a fresh writer in recovery mode: the third
// configure argument (false) tells the writer to rebuild its state from
// the existing buffer contents instead of starting empty.
writer = new ByzantineWriter();
// writer.setSeriesId("test_byzantine_disk_writes" + 0);
buf.rewind();
writer.configure(new HashMap<>(), buf, false, startOffset, true);
assertEquals(1000000, writer.getCount());
// NOTE(review): ts is the elapsed duration from above, so this appends a
// point with a near-epoch-zero timestamp after 1M epoch-based points --
// looks like it was meant to be ots-based; confirm against writer semantics.
writer.addValue(ts + 10000, 1);
try {
// Phase 2: the original 1M pairs must still read back intact
reader = writer.getReader();
for (int i = 0; i < limit; i++) {
DataPoint pair = reader.readPair();
assertEquals(ots + i * 1000, pair.getTimestamp());
assertEquals(i, pair.getLongValue());
}
} catch (Exception e) {
e.printStackTrace();
throw e;
}
}
Usage of com.srotya.sidewinder.core.storage.compression.Reader in project sidewinder by srotya: class TestByzantineReadWrite, method testWriteReadConcurrent.
/**
 * Writes from four threads concurrently (each with a disjoint time offset)
 * and verifies the total pair count. Locking is enabled in configure().
 */
@Test
public void testWriteReadConcurrent() throws IOException, InterruptedException {
	ByteBuffer buf = ByteBuffer.allocateDirect(1024 * 1024);
	Writer writer = new ByzantineWriter();
	Map<String, String> conf = new HashMap<>();
	writer.configure(conf, buf, true, startOffset, true);
	final long ts = System.currentTimeMillis();
	writer.setHeaderTimestamp(ts);
	int LIMIT = 10000;
	final AtomicInteger wait = new AtomicInteger(0);
	int THREAD_COUNT = 4;
	ExecutorService es = Executors.newFixedThreadPool(THREAD_COUNT);
	for (int j = 0; j < THREAD_COUNT; j++) {
		// each thread writes into its own LIMIT-wide slice of the timeline
		final int o = j * LIMIT;
		es.submit(() -> {
			long t = ts + o;
			for (int i = 0; i < LIMIT; i++) {
				try {
					writer.addValue(t + i * 100, i);
				} catch (IOException e) {
					throw new RuntimeException(e);
				}
			}
			wait.incrementAndGet();
		});
	}
	es.shutdown();
	es.awaitTermination(100, TimeUnit.MILLISECONDS);
	// Block until every task has bumped the counter; only then is the
	// writer's state safe to read. (The original printed the compression
	// ratio before this barrier, racing the writer threads.)
	while (wait.get() != THREAD_COUNT) {
		Thread.sleep(10);
	}
	System.out.println("Compression Ratio:" + writer.getCompressionRatio());
	Reader reader = writer.getReader();
	assertEquals(LIMIT * THREAD_COUNT, reader.getPairCount());
}
Aggregations