use of io.pravega.client.tables.impl.HashTableIteratorItem in project pravega by pravega.
The class ControllerMetadataListEntriesCommand, method execute.
@Override
public void execute() {
    ensureArgCount(3);
    final String tableName = getArg(0);
    final int entryCount = getIntArg(1);
    final String segmentStoreHost = getArg(2);
    Preconditions.checkArgument(!isStreamMetadataTableName(tableName), "The given table %s is a stream metadata table. " +
            "Stream metadata tables are unsupported by this command.", tableName);
    @Cleanup
    CuratorFramework zkClient = createZKClient();
    @Cleanup
    AdminSegmentHelper adminSegmentHelper = instantiateAdminSegmentHelper(zkClient);
    HashTableIteratorItem<TableSegmentEntry> entries = completeSafely(
            adminSegmentHelper.readTableEntries(tableName,
                    new PravegaNodeUri(segmentStoreHost, getServiceConfig().getAdminGatewayPort()),
                    entryCount, HashTableIteratorItem.State.EMPTY,
                    super.authHelper.retrieveMasterToken(), 0L),
            tableName, null);
    if (entries == null) {
        return;
    }
    Map<String, List<String>> stringEntriesMap = entries.getItems().stream().collect(Collectors.toMap(
            entry -> KEY_SERIALIZER.deserialize(getByteBuffer(entry.getKey().getKey())),
            entry -> {
                ControllerMetadataSerializer serializer = new ControllerMetadataSerializer(tableName,
                        KEY_SERIALIZER.deserialize(getByteBuffer(entry.getKey().getKey())));
                return List.of(serializer.deserialize(getByteBuffer(entry.getValue())).toString(), serializer.getMetadataType());
            }));
    output("List of at most %s entries in %s: ", entryCount, tableName);
    stringEntriesMap.forEach((key, value) -> {
        output("- %s", key);
        userFriendlyOutput(value.get(0), value.get(1));
        output("");
    });
}
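The command above fetches a single page of at most entryCount entries, starting from HashTableIteratorItem.State.EMPTY. A minimal sketch of fetching the next page from the returned iterator state, assuming HashTableIteratorItem exposes that state via getState() (variable names reuse the command's locals for illustration):

// Hedged sketch: continue the iteration from the state returned with the first page.
// Assumes entries.getState() returns the continuation state carried by HashTableIteratorItem.
HashTableIteratorItem.State nextState = entries.getState();
HashTableIteratorItem<TableSegmentEntry> nextPage = completeSafely(
        adminSegmentHelper.readTableEntries(tableName,
                new PravegaNodeUri(segmentStoreHost, getServiceConfig().getAdminGatewayPort()),
                entryCount, nextState,
                super.authHelper.retrieveMasterToken(), 0L),
        tableName, null);
if (nextPage != null) {
    output("Next page contains %s entries.", nextPage.getItems().size());
}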
use of io.pravega.client.tables.impl.HashTableIteratorItem in project pravega by pravega.
The class ControllerMetadataListKeysCommand, method execute.
@Override
public void execute() {
    ensureArgCount(3);
    final String tableName = getArg(0);
    final int keyCount = getIntArg(1);
    final String segmentStoreHost = getArg(2);
    @Cleanup
    CuratorFramework zkClient = createZKClient();
    @Cleanup
    AdminSegmentHelper adminSegmentHelper = instantiateAdminSegmentHelper(zkClient);
    HashTableIteratorItem<TableSegmentKey> keys = completeSafely(
            adminSegmentHelper.readTableKeys(tableName,
                    new PravegaNodeUri(segmentStoreHost, getServiceConfig().getAdminGatewayPort()),
                    keyCount, HashTableIteratorItem.State.EMPTY,
                    super.authHelper.retrieveMasterToken(), 0L),
            tableName, null);
    if (keys == null) {
        return;
    }
    List<String> stringKeys = keys.getItems().stream()
            .map(tableSegmentKey -> KEY_SERIALIZER.deserialize(getByteBuffer(tableSegmentKey.getKey())))
            .collect(Collectors.toList());
    output("List of at most %s keys in %s: ", keyCount, tableName);
    stringKeys.forEach(k -> output("- %s", k));
}
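KEY_SERIALIZER is referenced but not defined in this snippet. Purely as an illustration, a serializer that treats keys as UTF-8 strings could look like the sketch below; the controller's actual KEY_SERIALIZER implementation may differ:

// Hypothetical sketch of a String key serializer; not the actual KEY_SERIALIZER.
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class StringKeySerializer {
    public ByteBuffer serialize(String key) {
        return ByteBuffer.wrap(key.getBytes(StandardCharsets.UTF_8));
    }

    public String deserialize(ByteBuffer buffer) {
        // Decode the remaining bytes without disturbing the caller's buffer position.
        return StandardCharsets.UTF_8.decode(buffer.duplicate()).toString();
    }
}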
use of io.pravega.client.tables.impl.HashTableIteratorItem in project pravega by pravega.
The class ListTableSegmentKeysCommand, method execute.
@Override
public void execute() {
    ensureArgCount(3);
    ensureSerializersExist();
    final String fullyQualifiedTableSegmentName = getArg(0);
    final int keyCount = getIntArg(1);
    final String segmentStoreHost = getArg(2);
    @Cleanup
    CuratorFramework zkClient = createZKClient();
    @Cleanup
    AdminSegmentHelper adminSegmentHelper = instantiateAdminSegmentHelper(zkClient);
    CompletableFuture<HashTableIteratorItem<TableSegmentKey>> reply = adminSegmentHelper.readTableKeys(
            fullyQualifiedTableSegmentName,
            new PravegaNodeUri(segmentStoreHost, getServiceConfig().getAdminGatewayPort()),
            keyCount, HashTableIteratorItem.State.EMPTY,
            super.authHelper.retrieveMasterToken(), 0L);
    List<String> keys = reply.join().getItems().stream()
            .map(tableSegmentKey -> getCommandArgs().getState().getKeySerializer().deserialize(getByteBuffer(tableSegmentKey.getKey())))
            .collect(Collectors.toList());
    output("List of at most %s keys in %s: ", keyCount, fullyQualifiedTableSegmentName);
    keys.forEach(k -> output("- %s", k));
}
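Here reply.join() blocks indefinitely and wraps failures in a CompletionException. A hedged sketch of a more defensive variant, using only standard java.util.concurrent APIs (TimeUnit, TimeoutException, ExecutionException); the 10-second timeout is illustrative:

// Sketch only: bound the wait and surface the underlying failure instead of a bare join().
try {
    HashTableIteratorItem<TableSegmentKey> page = reply.get(10, TimeUnit.SECONDS);
    page.getItems().forEach(k ->
            output("- %s", getCommandArgs().getState().getKeySerializer().deserialize(getByteBuffer(k.getKey()))));
} catch (TimeoutException e) {
    output("Timed out reading keys from %s", fullyQualifiedTableSegmentName);
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} catch (ExecutionException e) {
    output("Failed to read keys: %s", e.getCause());
}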
use of io.pravega.client.tables.impl.HashTableIteratorItem in project pravega by pravega.
The class SegmentHelperMock, method getSegmentHelperMockForTables.
public static SegmentHelper getSegmentHelperMockForTables(ScheduledExecutorService executor) {
    SegmentHelper helper = getSegmentHelperMock();
    final Object lock = new Object();
    final Map<String, Map<ByteBuffer, TableSegmentEntry>> mapOfTables = new HashMap<>();
    final Map<String, Map<ByteBuffer, Long>> mapOfTablesPosition = new HashMap<>();
    // region create table
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        return CompletableFuture.runAsync(() -> {
            synchronized (lock) {
                mapOfTables.putIfAbsent(tableName, new HashMap<>());
                mapOfTablesPosition.putIfAbsent(tableName, new HashMap<>());
            }
        }, executor);
    }).when(helper).createTableSegment(anyString(), anyString(), anyLong(), anyBoolean(), anyInt(), anyLong());
    // endregion
    // region delete table
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        Boolean mustBeEmpty = x.getArgument(1);
        final WireCommandType type = WireCommandType.DELETE_TABLE_SEGMENT;
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                if (!mapOfTables.containsKey(tableName)) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                }
                boolean empty = Optional.ofNullable(mapOfTables.get(tableName)).orElse(Collections.emptyMap()).isEmpty();
                if (!mustBeEmpty || empty) {
                    mapOfTables.remove(tableName);
                    mapOfTablesPosition.remove(tableName);
                    return null;
                } else {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableSegmentNotEmpty);
                }
            }
        }, executor);
    }).when(helper).deleteTableSegment(anyString(), anyBoolean(), anyString(), anyLong());
    // endregion
    // region update keys
    doAnswer(x -> {
        final WireCommandType type = WireCommandType.UPDATE_TABLE_ENTRIES;
        String tableName = x.getArgument(0);
        List<TableSegmentEntry> entries = x.getArgument(1);
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    List<TableSegmentKeyVersion> resultList = new LinkedList<>();
                    entries.forEach(entry -> {
                        ByteBuffer key = entry.getKey().getKey().copy().nioBuffer();
                        byte[] value = entry.getValue().copy().array();
                        TableSegmentEntry existingEntry = table.get(key);
                        if (existingEntry == null) {
                            if (entry.getKey().getVersion().equals(TableSegmentKeyVersion.NOT_EXISTS)) {
                                TableSegmentEntry newEntry = TableSegmentEntry.versioned(key.array(), value, 0);
                                table.put(key, newEntry);
                                tablePos.put(key, System.nanoTime());
                                resultList.add(newEntry.getKey().getVersion());
                            } else {
                                throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyDoesNotExist);
                            }
                        } else if (existingEntry.getKey().getVersion().equals(entry.getKey().getVersion())) {
                            TableSegmentKeyVersion newVersion = TableSegmentKeyVersion.from(
                                    existingEntry.getKey().getVersion().getSegmentVersion() + 1);
                            TableSegmentEntry newEntry = TableSegmentEntry.versioned(key.array(), value, newVersion.getSegmentVersion());
                            table.put(key, newEntry);
                            tablePos.put(key, System.nanoTime());
                            resultList.add(newVersion);
                        } else {
                            throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
                        }
                    });
                    return resultList;
                }
            }
        }, executor);
    }).when(helper).updateTableEntries(anyString(), any(), anyString(), anyLong());
    // endregion
    // region remove keys
    doAnswer(x -> {
        final WireCommandType type = WireCommandType.REMOVE_TABLE_KEYS;
        String tableName = x.getArgument(0);
        List<TableSegmentKey> keys = x.getArgument(1);
        return CompletableFuture.runAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    keys.forEach(rawKey -> {
                        ByteBuffer key = rawKey.getKey().copy().nioBuffer();
                        TableSegmentEntry existingEntry = table.get(key);
                        if (existingEntry != null) {
                            if (existingEntry.getKey().getVersion().equals(rawKey.getVersion())
                                    || rawKey.getVersion() == null
                                    || rawKey.getVersion().equals(TableSegmentKeyVersion.NO_VERSION)) {
                                table.remove(key);
                                tablePos.remove(key);
                            } else {
                                throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
                            }
                        }
                    });
                }
            }
        }, executor);
    }).when(helper).removeTableKeys(anyString(), any(), anyString(), anyLong());
    // endregion
    // region read keys
    doAnswer(x -> {
        final WireCommandType type = WireCommandType.READ_TABLE;
        String tableName = x.getArgument(0);
        List<TableSegmentKey> requestKeys = x.getArgument(1);
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    List<TableSegmentEntry> resultList = new LinkedList<>();
                    requestKeys.forEach(requestKey -> {
                        ByteBuffer key = requestKey.getKey().copy().nioBuffer();
                        TableSegmentEntry existingEntry = table.get(key);
                        if (existingEntry == null) {
                            resultList.add(TableSegmentEntry.notExists(key.array(), new byte[0]));
                        } else if (existingEntry.getKey().getVersion().equals(requestKey.getVersion())
                                || requestKey.getVersion() == null
                                || requestKey.getVersion().equals(TableSegmentKeyVersion.NO_VERSION)) {
                            resultList.add(duplicate(existingEntry));
                        } else {
                            throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
                        }
                    });
                    return resultList;
                }
            }
        }, executor);
    }).when(helper).readTable(anyString(), any(), anyString(), anyLong());
    // endregion
    // region readTableKeys
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        int limit = x.getArgument(1);
        HashTableIteratorItem.State state = x.getArgument(2);
        final WireCommandType type = WireCommandType.READ_TABLE;
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    long floor;
                    if (state.equals(HashTableIteratorItem.State.EMPTY)) {
                        floor = 0L;
                    } else {
                        floor = new ByteArraySegment(state.toBytes()).getLong(0);
                    }
                    AtomicLong token = new AtomicLong(floor);
                    List<TableSegmentKey> list = tablePos.entrySet().stream()
                            .sorted(Comparator.comparingLong(Map.Entry::getValue))
                            .filter(c -> c.getValue() > floor)
                            .map(r -> {
                                token.set(r.getValue());
                                return duplicate(table.get(r.getKey()).getKey());
                            })
                            .limit(limit)
                            .collect(Collectors.toList());
                    byte[] continuationToken = new byte[Long.BYTES];
                    BitConverter.writeLong(continuationToken, 0, token.get());
                    HashTableIteratorItem.State newState = HashTableIteratorItem.State.fromBytes(Unpooled.wrappedBuffer(continuationToken));
                    return new HashTableIteratorItem<>(newState, list);
                }
            }
        }, executor);
    }).when(helper).readTableKeys(anyString(), anyInt(), any(), anyString(), anyLong());
    // endregion
    // region readTableEntries
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        int limit = x.getArgument(1);
        HashTableIteratorItem.State state = x.getArgument(2);
        final WireCommandType type = WireCommandType.READ_TABLE;
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    long floor;
                    if (state.equals(HashTableIteratorItem.State.EMPTY)) {
                        floor = 0L;
                    } else {
                        floor = new ByteArraySegment(state.toBytes()).getLong(0);
                    }
                    AtomicLong token = new AtomicLong(floor);
                    List<TableSegmentEntry> list = tablePos.entrySet().stream()
                            .sorted(Comparator.comparingLong(Map.Entry::getValue))
                            .filter(c -> c.getValue() > floor)
                            .map(r -> {
                                token.set(r.getValue());
                                return duplicate(table.get(r.getKey()));
                            })
                            .limit(limit)
                            .collect(Collectors.toList());
                    byte[] continuationToken = new byte[Long.BYTES];
                    BitConverter.writeLong(continuationToken, 0, token.get());
                    HashTableIteratorItem.State newState = HashTableIteratorItem.State.fromBytes(Unpooled.wrappedBuffer(continuationToken));
                    return new HashTableIteratorItem<>(newState, list);
                }
            }
        }, executor);
    }).when(helper).readTableEntries(anyString(), anyInt(), any(), anyString(), anyLong());
    // endregion
    return helper;
}
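A short sketch of how this mock might be exercised in a test. Only the table name (and, where relevant, the entry/key lists and iterator state) is inspected by the stubs above, so the trailing arguments to createTableSegment below are placeholders; the usual Pravega and JDK imports are assumed:

// Sketch only: exercise the table mock end to end.
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
SegmentHelper helper = SegmentHelperMock.getSegmentHelperMockForTables(executor);

// Create a table segment; the mock only records the name, the remaining
// arguments (token, request id, flags, key length, rollover size) are placeholders.
helper.createTableSegment("scope/_tables/test", "", 0L, false, 0, 0L).join();

// Insert a new entry with an unversioned (NOT_EXISTS) key.
TableSegmentEntry entry = TableSegmentEntry.notExists("key-1".getBytes(StandardCharsets.UTF_8),
        "value-1".getBytes(StandardCharsets.UTF_8));
helper.updateTableEntries("scope/_tables/test", Collections.singletonList(entry), "", 0L).join();

// Page through the keys starting from the empty iterator state.
HashTableIteratorItem<TableSegmentKey> keys =
        helper.readTableKeys("scope/_tables/test", 10, HashTableIteratorItem.State.EMPTY, "", 0L).join();
// keys.getItems() now contains the single inserted key.

executor.shutdownNow();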
use of io.pravega.client.tables.impl.HashTableIteratorItem in project pravega by pravega.
The class SegmentHelper, method readTableEntries.
public CompletableFuture<HashTableIteratorItem<TableSegmentEntry>> readTableEntries(final String tableName,
                                                                                    final PravegaNodeUri uri,
                                                                                    final int suggestedEntryCount,
                                                                                    final HashTableIteratorItem.State state,
                                                                                    final String delegationToken,
                                                                                    final long clientRequestId) {
    final WireCommandType type = WireCommandType.READ_TABLE_ENTRIES;
    RawClient connection = new RawClient(uri, connectionPool);
    final long requestId = connection.getFlow().asLong();
    final HashTableIteratorItem.State token = (state == null) ? HashTableIteratorItem.State.EMPTY : state;
    WireCommands.TableIteratorArgs args = new WireCommands.TableIteratorArgs(
            token.getToken(), Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER);
    WireCommands.ReadTableEntries request = new WireCommands.ReadTableEntries(
            requestId, tableName, delegationToken, suggestedEntryCount, args);
    return sendRequest(connection, clientRequestId, request).thenApply(rpl -> {
        handleReply(clientRequestId, rpl, connection, tableName, WireCommands.ReadTableEntries.class, type);
        WireCommands.TableEntriesRead tableEntriesRead = (WireCommands.TableEntriesRead) rpl;
        final HashTableIteratorItem.State newState = HashTableIteratorItem.State.fromBytes(tableEntriesRead.getContinuationToken());
        final List<TableSegmentEntry> entries = tableEntriesRead.getEntries().getEntries().stream()
                .map(e -> {
                    WireCommands.TableKey k = e.getKey();
                    return TableSegmentEntry.versioned(k.getData(), e.getValue().getData(), k.getKeyVersion());
                })
                .collect(Collectors.toList());
        return new HashTableIteratorItem<>(newState, entries);
    });
}
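Because readTableEntries returns at most suggestedEntryCount entries per call together with a new iterator state, reading an entire table segment requires looping on the returned state. A hedged sketch, assuming HashTableIteratorItem exposes that state via getState(), that an empty page marks the end of the iteration, and that tableName, uri, delegationToken, and requestId are in scope:

// Sketch only: drain all entries of a table segment page by page.
HashTableIteratorItem.State state = HashTableIteratorItem.State.EMPTY;
List<TableSegmentEntry> allEntries = new ArrayList<>();
while (true) {
    HashTableIteratorItem<TableSegmentEntry> page =
            segmentHelper.readTableEntries(tableName, uri, 100, state, delegationToken, requestId).join();
    if (page.getItems().isEmpty()) {
        break; // assumption: an empty page means the iteration is exhausted
    }
    allEntries.addAll(page.getItems());
    state = page.getState(); // assumption: getState() returns the continuation state
}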