Usage example of org.apache.bookkeeper.api.kv.PTable in the Apache BookKeeper project:
the testBasicOperations method of the TestPByteBufTableImpl class.
@SuppressWarnings("unchecked")
@Test
public void testBasicOperations() throws Exception {
    // Serve a fixed set of active data ranges from the mocked meta range client.
    when(mockMetaRangeClient.getActiveDataRanges()).thenReturn(FutureUtils.value(streamRanges1));

    // One mocked PTable per active range, keyed by range id, so each forwarded
    // call can be verified against the range that owns the key.
    ConcurrentMap<Long, PTable<ByteBuf, ByteBuf>> tableRanges = Maps.newConcurrentMap();
    for (RangeProperties rangeProps : streamRanges1.getRanges().values()) {
        tableRanges.put(rangeProps.getRangeId(), mock(PTable.class));
    }

    // The router resolves a key to the range id encoded in the key's first 8 bytes,
    // matching how the per-range keys are built below.
    RangeRouter<ByteBuf> mockRouter = mock(RangeRouter.class);
    when(mockRouter.getRange(any(ByteBuf.class))).thenAnswer(invocationOnMock -> {
        ByteBuf key = invocationOnMock.getArgument(0);
        byte[] keyData = ByteBufUtil.getBytes(key);
        return Bytes.toLong(keyData, 0);
    });

    // Factory hands back the pre-built mock for the requested range.
    TableRangeFactory<ByteBuf, ByteBuf> trFactory =
        (streamProps1, rangeProps, executor, opFactory, resultFactory, kvFactory) ->
            tableRanges.get(rangeProps.getRangeId());
    PByteBufTableImpl table = new PByteBufTableImpl(
        runtime.getMethodName(),
        streamProps,
        mockClientManager,
        scheduler.chooseThread(),
        trFactory,
        Optional.of(mockRouter));

    // Before initialization: no ranges opened, router untouched.
    assertEquals(0, table.getTableRanges().size());
    verify(mockRouter, times(0)).setRanges(any(HashStreamRanges.class));

    // initialize the table
    assertTrue(table == FutureUtils.result(table.initialize()));
    verify(mockRouter, times(1)).setRanges(eq(streamRanges1));
    assertEquals(4, table.getTableRanges().size());

    // test get: each call must be forwarded to the range that owns the key
    for (RangeProperties rangeProps : streamRanges1.getRanges().values()) {
        ByteBuf pkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        ByteBuf lkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        try (RangeOption<ByteBuf> option = optionFactory.newRangeOption().build()) {
            table.get(pkey, lkey, option);
            verify(tableRanges.get(rangeProps.getRangeId()), times(1))
                .get(eq(pkey), eq(lkey), eq(option));
        }
    }

    // test put
    for (RangeProperties rangeProps : streamRanges1.getRanges().values()) {
        ByteBuf pkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        ByteBuf lkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        ByteBuf value = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        try (PutOption<ByteBuf> option = optionFactory.newPutOption().build()) {
            table.put(pkey, lkey, value, option);
            verify(tableRanges.get(rangeProps.getRangeId()), times(1))
                .put(eq(pkey), eq(lkey), eq(value), eq(option));
        }
    }

    // test increment
    for (RangeProperties rangeProps : streamRanges1.getRanges().values()) {
        ByteBuf pkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        ByteBuf lkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        long amount = 100L;
        try (IncrementOption<ByteBuf> option = optionFactory.newIncrementOption().build()) {
            table.increment(pkey, lkey, amount, option);
            verify(tableRanges.get(rangeProps.getRangeId()), times(1))
                .increment(eq(pkey), eq(lkey), eq(amount), same(option));
        }
    }

    // test delete
    for (RangeProperties rangeProps : streamRanges1.getRanges().values()) {
        ByteBuf pkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        ByteBuf lkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        try (DeleteOption<ByteBuf> option = optionFactory.newDeleteOption().build()) {
            table.delete(pkey, lkey, option);
            verify(tableRanges.get(rangeProps.getRangeId()), times(1))
                .delete(eq(pkey), eq(lkey), eq(option));
        }
    }

    // test txn: only the forwarding is verified, so the returned Txn is not kept
    // (the original assigned it to an unused local).
    for (RangeProperties rangeProps : streamRanges1.getRanges().values()) {
        ByteBuf pkey = Unpooled.wrappedBuffer(Bytes.toBytes(rangeProps.getRangeId()));
        table.txn(pkey);
        verify(tableRanges.get(rangeProps.getRangeId()), times(1)).txn(eq(pkey));
    }
}
Usage example of org.apache.bookkeeper.api.kv.PTable in the Apache BookKeeper project:
the refreshRangeSpaces method of the PByteBufTableImpl class.
/**
 * Reconciles the locally opened table range clients with {@code newRanges}.
 *
 * <p>If {@code newRanges} is not newer than the currently routed ranges
 * (compared by max range id), this is a no-op. Otherwise the router is updated,
 * clients are opened for ranges not yet present in {@code tableRanges}, and
 * clients for ranges no longer active are removed and closed.
 *
 * @param newRanges the latest active hash stream ranges
 * @return a future completed with this table once the range set is refreshed
 */
CompletableFuture<PTable<ByteBuf, ByteBuf>> refreshRangeSpaces(HashStreamRanges newRanges) {
    // compare the ranges to see if it requires an update
    HashStreamRanges oldRanges = rangeRouter.getRanges();
    if (null != oldRanges && oldRanges.getMaxRangeId() >= newRanges.getMaxRangeId()) {
        log.info("No new stream ranges found for stream {}.", streamName);
        return FutureUtils.value(this);
    }
    if (log.isInfoEnabled()) {
        log.info("Updated the active ranges to {}", newRanges);
    }
    rangeRouter.setRanges(newRanges);

    // add new ranges: open a table range client for every active range id
    // that does not already have one.
    Set<Long> activeRanges = Sets.newHashSetWithExpectedSize(newRanges.getRanges().size());
    newRanges.getRanges().forEach((rk, range) -> {
        activeRanges.add(range.getRangeId());
        if (tableRanges.containsKey(range.getRangeId())) {
            return;
        }
        PTable<ByteBuf, ByteBuf> tableRange =
            trFactory.openTableRange(props, range, executor, opFactory, resultFactory, kvFactory);
        if (log.isInfoEnabled()) {
            log.info("Create table range client for range {}", range.getRangeId());
        }
        this.tableRanges.put(range.getRangeId(), tableRange);
    });

    // remove old ranges: drop and close clients whose range id is no longer active.
    // Iterator.remove is used so the map is safely mutated during iteration.
    Iterator<Entry<Long, PTable<ByteBuf, ByteBuf>>> rsIter = tableRanges.entrySet().iterator();
    while (rsIter.hasNext()) {
        Map.Entry<Long, PTable<ByteBuf, ByteBuf>> entry = rsIter.next();
        Long rid = entry.getKey();
        if (activeRanges.contains(rid)) {
            continue;
        }
        rsIter.remove();
        // parameterized type instead of the original raw PTable
        PTable<ByteBuf, ByteBuf> oldRangeSpace = entry.getValue();
        oldRangeSpace.close();
    }
    return FutureUtils.value(this);
}
Aggregations