Example usage of cz.o2.proxima.storage.Partition in the proxima-platform project by O2-Czech-Republic:
class HBaseLogReaderTest, method testObserveLast.
@Test(timeout = 30000)
public void testObserveLast() throws InterruptedException, IOException {
  long now = 1500000000000L;
  // Note "secon" is a strict prefix of "second" — the reader must not confuse
  // prefix keys when assigning them to partitions.
  write("secon", "dummy", "secon", now);
  write("second", "dummy", "second", now);
  write("third", "dummy", "third", now);
  List<Partition> partitions = reader.getPartitions();
  List<String> keys = new ArrayList<>();
  CountDownLatch latch = new CountDownLatch(1);
  // Observe only the last partition; it is expected to contain "second" and "third".
  reader.observe(
      partitions.subList(2, 3),
      Lists.newArrayList(attr),
      new BatchLogObserver() {
        @Override
        public boolean onNext(StreamElement element) {
          assertEquals(now, element.getStamp());
          keys.add(element.getKey());
          return true;
        }

        @Override
        public void onCompleted() {
          latch.countDown();
        }

        @Override
        public boolean onError(Throwable error) {
          // Rethrow so a failure (including a failed assertion in onNext) fails
          // the test immediately instead of hanging until the 30s timeout.
          // Consistent with testObserveMultiple.
          throw new RuntimeException(error);
        }
      });
  latch.await();
  assertEquals(Lists.newArrayList("second", "third"), keys);
}
Example usage of cz.o2.proxima.storage.Partition in the proxima-platform project by O2-Czech-Republic:
class HBaseLogReaderTest, method testObserve.
@Test(timeout = 30000)
public void testObserve() throws InterruptedException, IOException {
  long now = 1500000000000L;
  write("a", "dummy", "a", now);
  // "firs" is written under the wildcard attribute, so it must NOT appear
  // in the result when only `attr` is observed.
  write("firs", "wildcard.1", "firs", now);
  write("fir", "dummy", "fir", now);
  write("first", "dummy", "first", now);
  List<Partition> partitions = reader.getPartitions();
  List<String> keys = new ArrayList<>();
  CountDownLatch latch = new CountDownLatch(1);
  // Observe only the first partition; it is expected to contain "a" and "fir".
  reader.observe(
      partitions.subList(0, 1),
      Lists.newArrayList(attr),
      new BatchLogObserver() {
        @Override
        public boolean onNext(StreamElement element) {
          assertEquals(now, element.getStamp());
          keys.add(element.getKey());
          return true;
        }

        @Override
        public void onCompleted() {
          latch.countDown();
        }

        @Override
        public boolean onError(Throwable error) {
          // Rethrow so a failure (including a failed assertion in onNext) fails
          // the test immediately instead of hanging until the 30s timeout.
          // Consistent with testObserveMultiple.
          throw new RuntimeException(error);
        }
      });
  latch.await();
  assertEquals(Arrays.asList("a", "fir"), keys);
}
Example usage of cz.o2.proxima.storage.Partition in the proxima-platform project by O2-Czech-Republic:
class HBaseLogReaderTest, method testObserveMultiple.
@Test(timeout = 30000)
public void testObserveMultiple() throws IOException, InterruptedException {
  // All elements share a single fixed timestamp so onNext can verify stamps.
  long now = 1500000000000L;
  write("a", "dummy", "a", now);
  write("firs", "wildcard.1", "firs", now);
  write("fir", "dummy", "fir", now);
  write("first", "dummy", "first", now);
  List<Partition> partitions = reader.getPartitions();
  assertEquals(3, partitions.size());
  List<String> observedKeys = new ArrayList<>();
  CountDownLatch completed = new CountDownLatch(1);
  // Observing both `attr` and `wildcard` over the first partition: "firs"
  // (wildcard) is included this time, "first" lives in a later partition.
  List<Partition> firstPartition = partitions.subList(0, 1);
  reader.observe(
      firstPartition,
      Lists.newArrayList(attr, wildcard),
      new BatchLogObserver() {
        @Override
        public boolean onNext(StreamElement element) {
          assertEquals(now, element.getStamp());
          observedKeys.add(element.getKey());
          return true;
        }

        @Override
        public boolean onError(Throwable error) {
          // Fail fast rather than waiting for the test timeout.
          throw new RuntimeException(error);
        }

        @Override
        public void onCompleted() {
          completed.countDown();
        }
      });
  completed.await();
  assertEquals(Lists.newArrayList("a", "fir", "firs"), observedKeys);
}
Example usage of cz.o2.proxima.storage.Partition in the proxima-platform project by O2-Czech-Republic:
class HadoopStorageTest, method testObserveCancel.
// Verifies that closing the ObserveHandle from within onNext cancels the
// observation: onCancelled must fire and onCompleted must not.
@Test(timeout = 5000L)
public void testObserveCancel() throws InterruptedException {
// Roll interval -1: flush is driven purely by the watermark update below.
Map<String, Object> cfg = cfg(HadoopDataAccessor.HADOOP_ROLL_INTERVAL, -1);
HadoopDataAccessor accessor = new HadoopDataAccessor(TestUtils.createTestFamily(entity, uri, cfg));
CountDownLatch latch = new CountDownLatch(1);
// Write one element and force a flush by advancing the watermark to MAX_VALUE.
writeOneElement(accessor, (success, error) -> {
assertTrue(success);
assertNull(error);
latch.countDown();
}).updateWatermark(Long.MAX_VALUE);
latch.await();
BatchLogReader reader = accessor.getBatchLogReader(direct.getContext()).orElse(null);
assertNotNull(reader);
List<Partition> partitions = reader.getPartitions();
assertEquals(1, partitions.size());
CountDownLatch cancelledLatch = new CountDownLatch(1);
// AtomicReference lets the observer close its own handle from inside onNext;
// the handle is not yet assigned when the observer object is constructed.
AtomicReference<ObserveHandle> handle = new AtomicReference<>();
handle.set(reader.observe(partitions, Collections.singletonList(attribute), new BatchLogObserver() {
@Override
public boolean onNext(StreamElement element) {
// Cancel mid-stream; returning true signals willingness to continue,
// so any subsequent termination must come from the close() call.
handle.get().close();
return true;
}
@Override
public void onCompleted() {
fail("onCompleted should not have been called");
}
@Override
public void onCancelled() {
cancelledLatch.countDown();
}
@Override
public boolean onError(Throwable error) {
// NOTE(review): treats any error as a cancellation — presumably because
// some readers surface close() as an error rather than onCancelled.
// This also makes the test pass on an unrelated error; confirm intent.
onCancelled();
return true;
}
}));
cancelledLatch.await();
}
Example usage of cz.o2.proxima.storage.Partition in the proxima-platform project by O2-Czech-Republic:
class HadoopStorageTest, method testWriteElementNotYetFlushed.
// Verifies that an element written but not yet flushed (still in a "_tmp"
// file) is invisible to the batch reader, and becomes readable only after the
// watermark advances and the bulk writer flushes.
@Test(timeout = 5_000L)
public void testWriteElementNotYetFlushed() throws InterruptedException {
  // Roll interval 1000 ms: the write stays buffered until the watermark moves.
  Map<String, Object> cfg = cfg(HadoopDataAccessor.HADOOP_ROLL_INTERVAL, 1000);
  HadoopDataAccessor accessor = new HadoopDataAccessor(TestUtils.createTestFamily(entity, uri, cfg));
  CountDownLatch latch = new CountDownLatch(1);
  BulkAttributeWriter writer = writeOneElement(accessor, ((success, error) -> {
    if (error != null) {
      log.error("Failed to flush write", error);
    }
    assertTrue("Error in flush " + error, success);
    assertNull(error);
    latch.countDown();
  }));
  // Before the flush the element lives in a single temporary ("_tmp") file...
  assertTrue(root.exists());
  List<File> files = listRecursively(root);
  assertEquals("Expected single file in " + files, 1, files.size());
  assertTrue(Iterables.getOnlyElement(files).getAbsolutePath().contains("_tmp"));
  BatchLogReader reader = accessor.getBatchLogReader(direct.getContext()).orElse(null);
  assertNotNull(reader);
  // ...and the reader must not expose it as a partition yet.
  List<Partition> partitions = reader.getPartitions();
  assertTrue("Expected empty partitions, got " + partitions, partitions.isEmpty());
  // advance watermark to flush
  writer.updateWatermark(Long.MAX_VALUE);
  latch.await();
  partitions = reader.getPartitions();
  assertEquals(1, partitions.size());
  BlockingQueue<StreamElement> queue = new SynchronousQueue<>();
  reader.observe(partitions, Collections.singletonList(attribute), new BatchLogObserver() {
    @Override
    public boolean onNext(StreamElement element) {
      ExceptionUtils.unchecked(() -> queue.put(element));
      return true;
    }

    @Override
    public boolean onError(Throwable error) {
      // Rethrow so a read failure surfaces immediately; without this the test
      // would block at queue.take() until the 5s timeout with no diagnostic.
      throw new RuntimeException(error);
    }
  });
  StreamElement element = queue.take();
  assertNotNull(element);
}
Aggregations