use of io.pravega.client.tables.impl.TableSegmentKey in project pravega by pravega.
the class ControllerMetadataListKeysCommand method execute.
@Override
public void execute() {
ensureArgCount(3);
final String tableName = getArg(0);
final int keyCount = getIntArg(1);
final String segmentStoreHost = getArg(2);
@Cleanup CuratorFramework zkClient = createZKClient();
@Cleanup AdminSegmentHelper adminSegmentHelper = instantiateAdminSegmentHelper(zkClient);
HashTableIteratorItem<TableSegmentKey> keys = completeSafely(
        adminSegmentHelper.readTableKeys(tableName,
                new PravegaNodeUri(segmentStoreHost, getServiceConfig().getAdminGatewayPort()),
                keyCount, HashTableIteratorItem.State.EMPTY,
                super.authHelper.retrieveMasterToken(), 0L),
        tableName, null);
if (keys == null) {
return;
}
List<String> stringKeys = keys.getItems().stream()
        .map(tableSegmentKey -> KEY_SERIALIZER.deserialize(getByteBuffer(tableSegmentKey.getKey())))
        .collect(Collectors.toList());
output("List of at most %s keys in %s: ", keyCount, tableName);
stringKeys.forEach(k -> output("- %s", k));
}
use of io.pravega.client.tables.impl.TableSegmentKey in project pravega by pravega.
the class ListTableSegmentKeysCommand method execute.
@Override
public void execute() {
ensureArgCount(3);
ensureSerializersExist();
final String fullyQualifiedTableSegmentName = getArg(0);
final int keyCount = getIntArg(1);
final String segmentStoreHost = getArg(2);
@Cleanup CuratorFramework zkClient = createZKClient();
@Cleanup AdminSegmentHelper adminSegmentHelper = instantiateAdminSegmentHelper(zkClient);
CompletableFuture<HashTableIteratorItem<TableSegmentKey>> reply = adminSegmentHelper.readTableKeys(
        fullyQualifiedTableSegmentName,
        new PravegaNodeUri(segmentStoreHost, getServiceConfig().getAdminGatewayPort()),
        keyCount, HashTableIteratorItem.State.EMPTY,
        super.authHelper.retrieveMasterToken(), 0L);
List<String> keys = reply.join().getItems().stream()
        .map(tableSegmentKey -> getCommandArgs().getState().getKeySerializer()
                .deserialize(getByteBuffer(tableSegmentKey.getKey())))
        .collect(Collectors.toList());
output("List of at most %s keys in %s: ", keyCount, fullyQualifiedTableSegmentName);
keys.forEach(k -> output("- %s", k));
}
use of io.pravega.client.tables.impl.TableSegmentKey in project pravega by pravega.
the class SegmentHelper method removeTableKeys.
/**
* This method sends a WireCommand to remove table keys.
*
* @param tableName Qualified table name.
* @param keys List of {@link TableSegmentKey}s to be removed. If every element in the list has the version
* {@link TableSegmentKeyVersion#NO_VERSION}, an unconditional removal is performed;
* otherwise an atomic conditional removal is performed.
* @param delegationToken The token to be presented to the Segment Store.
* @param clientRequestId Request id.
* @return A CompletableFuture that completes normally once the provided keys have been deleted.
* If the operation fails, the future completes exceptionally with the causing exception; if the failure
* is retryable, that exception is a {@link WireCommandFailedException}.
*/
public CompletableFuture<Void> removeTableKeys(final String tableName, final List<TableSegmentKey> keys, String delegationToken, final long clientRequestId) {
final Controller.NodeUri uri = getTableUri(tableName);
final WireCommandType type = WireCommandType.REMOVE_TABLE_KEYS;
List<WireCommands.TableKey> keyList = keys.stream()
        .map(x -> convertToWireCommand(x))
        .collect(Collectors.toList());
RawClient connection = new RawClient(ModelHelper.encode(uri), connectionPool);
final long requestId = connection.getFlow().asLong();
WireCommands.RemoveTableKeys request = new WireCommands.RemoveTableKeys(
        requestId, tableName, delegationToken, keyList, WireCommands.NULL_TABLE_SEGMENT_OFFSET);
return sendRequest(connection, clientRequestId, request)
        .thenAccept(rpl -> handleReply(clientRequestId, rpl, connection, tableName, WireCommands.RemoveTableKeys.class, type));
}
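As a rough illustration of the two removal modes described in the javadoc above, here is a minimal usage sketch. The SegmentHelper instance, delegation token, request id, expected version, and the table and key names are all assumed placeholders rather than names from the project, and imports are omitted as in the surrounding snippets.
private static void removeTableKeysSketch(SegmentHelper segmentHelper, String tableSegmentName,
                                          String delegationToken, long requestId, long expectedVersion) {
    // Unconditional removal: every key carries TableSegmentKeyVersion.NO_VERSION.
    List<TableSegmentKey> unconditional = Arrays.asList(
            TableSegmentKey.unversioned("key-1".getBytes(StandardCharsets.UTF_8)),
            TableSegmentKey.unversioned("key-2".getBytes(StandardCharsets.UTF_8)));
    segmentHelper.removeTableKeys(tableSegmentName, unconditional, delegationToken, requestId).join();

    // Conditional removal: deletes "key-3" only if its current version still equals expectedVersion;
    // on a version mismatch the returned future fails with a WireCommandFailedException.
    List<TableSegmentKey> conditional = Collections.singletonList(
            TableSegmentKey.versioned("key-3".getBytes(StandardCharsets.UTF_8), expectedVersion));
    segmentHelper.removeTableKeys(tableSegmentName, conditional, delegationToken, requestId).join();
}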
use of io.pravega.client.tables.impl.TableSegmentKey in project pravega by pravega.
the class PravegaTablesStoreHelper method removeEntry.
/**
* Method to remove an entry from the store.
* @param tableName name of the table
* @param key key to remove
* @param ver expected version for a conditional removal; if null, the removal is unconditional
* @param requestId request id
* @return CompletableFuture which, when complete, indicates that the entry has been deleted from the table.
* DataNotFound exceptions are ignored.
*/
public CompletableFuture<Void> removeEntry(String tableName, String key, Version ver, long requestId) {
log.trace(requestId, "remove entry called for : {} key : {}", tableName, key);
TableSegmentKey tableKey = ver == null
        ? TableSegmentKey.unversioned(key.getBytes(Charsets.UTF_8))
        : TableSegmentKey.versioned(key.getBytes(Charsets.UTF_8), ver.asLongVersion().getLongValue());
return expectingDataNotFound(
        withRetries(() -> segmentHelper.removeTableKeys(tableName, Collections.singletonList(tableKey), authToken.get(), requestId),
                () -> String.format("remove entry: key: %s table: %s", key, tableName), requestId),
        null)
        .thenAcceptAsync(v -> {
            invalidateCache(tableName, key);
            log.trace(requestId, "entry for key {} removed from table {}", key, tableName);
        }, executor)
        .whenComplete((r, ex) -> releaseKeys(Collections.singleton(tableKey)));
}
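A corresponding sketch for the store-helper layer, under the assumption that a PravegaTablesStoreHelper instance, a known Version, and a request id are available; the table and key names are illustrative only.
private static void removeEntrySketch(PravegaTablesStoreHelper storeHelper, Version knownVersion, long requestId) {
    // Unconditional removal: a null version maps to TableSegmentKey.unversioned(...) above, and
    // DataNotFound is ignored, so this completes even if the key is already gone.
    storeHelper.removeEntry("exampleTable", "exampleKey", null, requestId).join();

    // Conditional removal: the entry is deleted only if its current version matches knownVersion.
    storeHelper.removeEntry("exampleTable", "exampleKey", knownVersion, requestId).join();
}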
use of io.pravega.client.tables.impl.TableSegmentKey in project pravega by pravega.
the class SegmentHelperMock method getSegmentHelperMockForTables.
public static SegmentHelper getSegmentHelperMockForTables(ScheduledExecutorService executor) {
SegmentHelper helper = getSegmentHelperMock();
final Object lock = new Object();
final Map<String, Map<ByteBuffer, TableSegmentEntry>> mapOfTables = new HashMap<>();
final Map<String, Map<ByteBuffer, Long>> mapOfTablesPosition = new HashMap<>();
// region create table
doAnswer(x -> {
String tableName = x.getArgument(0);
return CompletableFuture.runAsync(() -> {
synchronized (lock) {
mapOfTables.putIfAbsent(tableName, new HashMap<>());
mapOfTablesPosition.putIfAbsent(tableName, new HashMap<>());
}
}, executor);
}).when(helper).createTableSegment(anyString(), anyString(), anyLong(), anyBoolean(), anyInt(), anyLong());
// endregion
// region delete table
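// The delete stub removes the table only if mustBeEmpty is false or the table has no entries;
// otherwise it fails with WireCommandFailedException (TableSegmentNotEmpty).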
doAnswer(x -> {
String tableName = x.getArgument(0);
Boolean mustBeEmpty = x.getArgument(1);
final WireCommandType type = WireCommandType.DELETE_TABLE_SEGMENT;
return CompletableFuture.supplyAsync(() -> {
synchronized (lock) {
if (!mapOfTables.containsKey(tableName)) {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
}
boolean empty = Optional.ofNullable(mapOfTables.get(tableName)).orElse(Collections.emptyMap()).isEmpty();
if (!mustBeEmpty || empty) {
mapOfTables.remove(tableName);
mapOfTablesPosition.remove(tableName);
return null;
} else {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableSegmentNotEmpty);
}
}
}, executor);
}).when(helper).deleteTableSegment(anyString(), anyBoolean(), anyString(), anyLong());
// endregion
// region update keys
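// The update stub mirrors conditional-update semantics: a NOT_EXISTS version inserts only if the key
// is absent, a matching version replaces the entry and bumps the segment version by one, and any
// other version mismatch fails with WireCommandFailedException (TableKeyBadVersion).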
doAnswer(x -> {
final WireCommandType type = WireCommandType.UPDATE_TABLE_ENTRIES;
String tableName = x.getArgument(0);
List<TableSegmentEntry> entries = x.getArgument(1);
return CompletableFuture.supplyAsync(() -> {
synchronized (lock) {
Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
if (table == null) {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
} else {
List<TableSegmentKeyVersion> resultList = new LinkedList<>();
entries.forEach(entry -> {
ByteBuffer key = entry.getKey().getKey().copy().nioBuffer();
byte[] value = entry.getValue().copy().array();
TableSegmentEntry existingEntry = table.get(key);
if (existingEntry == null) {
if (entry.getKey().getVersion().equals(TableSegmentKeyVersion.NOT_EXISTS)) {
TableSegmentEntry newEntry = TableSegmentEntry.versioned(key.array(), value, 0);
table.put(key, newEntry);
tablePos.put(key, System.nanoTime());
resultList.add(newEntry.getKey().getVersion());
} else {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyDoesNotExist);
}
} else if (existingEntry.getKey().getVersion().equals(entry.getKey().getVersion())) {
TableSegmentKeyVersion newVersion = TableSegmentKeyVersion.from(existingEntry.getKey().getVersion().getSegmentVersion() + 1);
TableSegmentEntry newEntry = TableSegmentEntry.versioned(key.array(), value, newVersion.getSegmentVersion());
table.put(key, newEntry);
tablePos.put(key, System.nanoTime());
resultList.add(newVersion);
} else {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
}
});
return resultList;
}
}
}, executor);
}).when(helper).updateTableEntries(anyString(), any(), anyString(), anyLong());
// endregion
// region remove keys
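// Removal follows the same version rules: NO_VERSION (or a null version) removes unconditionally,
// a matching version removes conditionally, and a mismatch fails with TableKeyBadVersion.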
doAnswer(x -> {
final WireCommandType type = WireCommandType.REMOVE_TABLE_KEYS;
String tableName = x.getArgument(0);
List<TableSegmentKey> keys = x.getArgument(1);
return CompletableFuture.runAsync(() -> {
synchronized (lock) {
Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
if (table == null) {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
} else {
keys.forEach(rawKey -> {
ByteBuffer key = rawKey.getKey().copy().nioBuffer();
TableSegmentEntry existingEntry = table.get(key);
if (existingEntry != null) {
if (existingEntry.getKey().getVersion().equals(rawKey.getVersion())
        || rawKey.getVersion() == null
        || rawKey.getVersion().equals(TableSegmentKeyVersion.NO_VERSION)) {
table.remove(key);
tablePos.remove(key);
} else {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
}
}
});
}
}
}, executor);
}).when(helper).removeTableKeys(anyString(), any(), anyString(), anyLong());
// endregion
// region read keys
doAnswer(x -> {
final WireCommandType type = WireCommandType.READ_TABLE;
String tableName = x.getArgument(0);
List<TableSegmentKey> requestKeys = x.getArgument(1);
return CompletableFuture.supplyAsync(() -> {
synchronized (lock) {
Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
if (table == null) {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
} else {
List<TableSegmentEntry> resultList = new LinkedList<>();
requestKeys.forEach(requestKey -> {
ByteBuffer key = requestKey.getKey().copy().nioBuffer();
TableSegmentEntry existingEntry = table.get(key);
if (existingEntry == null) {
resultList.add(TableSegmentEntry.notExists(key.array(), new byte[0]));
} else if (existingEntry.getKey().getVersion().equals(requestKey.getVersion())
        || requestKey.getVersion() == null
        || requestKey.getVersion().equals(TableSegmentKeyVersion.NO_VERSION)) {
resultList.add(duplicate(existingEntry));
} else {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
}
});
return resultList;
}
}
}, executor);
}).when(helper).readTable(anyString(), any(), anyString(), anyLong());
// endregion
// region readTableKeys
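// The key iterator stub uses the per-key insertion timestamps as positions: the continuation state
// encodes the position of the last key returned, and each call returns at most 'limit' keys whose
// position is strictly greater than that floor.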
doAnswer(x -> {
String tableName = x.getArgument(0);
int limit = x.getArgument(1);
HashTableIteratorItem.State state = x.getArgument(2);
final WireCommandType type = WireCommandType.READ_TABLE;
return CompletableFuture.supplyAsync(() -> {
synchronized (lock) {
Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
if (table == null) {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
} else {
long floor;
if (state.equals(HashTableIteratorItem.State.EMPTY)) {
floor = 0L;
} else {
floor = new ByteArraySegment(state.toBytes()).getLong(0);
}
AtomicLong token = new AtomicLong(floor);
List<TableSegmentKey> list = tablePos.entrySet().stream()
        .sorted(Comparator.comparingLong(Map.Entry::getValue))
        .filter(c -> c.getValue() > floor)
        .map(r -> {
            token.set(r.getValue());
            return duplicate(table.get(r.getKey()).getKey());
        })
        .limit(limit)
        .collect(Collectors.toList());
byte[] continuationToken = new byte[Long.BYTES];
BitConverter.writeLong(continuationToken, 0, token.get());
HashTableIteratorItem.State newState = HashTableIteratorItem.State.fromBytes(Unpooled.wrappedBuffer(continuationToken));
return new HashTableIteratorItem<>(newState, list);
}
}
}, executor);
}).when(helper).readTableKeys(anyString(), anyInt(), any(), anyString(), anyLong());
// endregion
// region readTableEntries
doAnswer(x -> {
String tableName = x.getArgument(0);
int limit = x.getArgument(1);
HashTableIteratorItem.State state = x.getArgument(2);
final WireCommandType type = WireCommandType.READ_TABLE;
return CompletableFuture.supplyAsync(() -> {
synchronized (lock) {
Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
if (table == null) {
throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
} else {
long floor;
if (state.equals(HashTableIteratorItem.State.EMPTY)) {
floor = 0L;
} else {
floor = new ByteArraySegment(state.toBytes()).getLong(0);
}
AtomicLong token = new AtomicLong(floor);
List<TableSegmentEntry> list = tablePos.entrySet().stream()
        .sorted(Comparator.comparingLong(Map.Entry::getValue))
        .filter(c -> c.getValue() > floor)
        .map(r -> {
            token.set(r.getValue());
            return duplicate(table.get(r.getKey()));
        })
        .limit(limit)
        .collect(Collectors.toList());
byte[] continuationToken = new byte[Long.BYTES];
BitConverter.writeLong(continuationToken, 0, token.get());
HashTableIteratorItem.State newState = HashTableIteratorItem.State.fromBytes(Unpooled.wrappedBuffer(continuationToken));
return new HashTableIteratorItem<>(newState, list);
}
}
}, executor);
}).when(helper).readTableEntries(anyString(), anyInt(), any(), anyString(), anyLong());
// endregion
return helper;
}
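To show how the stubs above behave, a small test sketch follows; the executor is assumed to be a ScheduledExecutorService supplied by the test, the table and key names are invented for illustration, and imports are omitted as in the surrounding snippets.
private static void mockRemovalSketch(ScheduledExecutorService executor) {
    SegmentHelper helper = SegmentHelperMock.getSegmentHelperMockForTables(executor);
    TableSegmentKey key = TableSegmentKey.unversioned("missing-key".getBytes(StandardCharsets.UTF_8));
    CompletableFuture<Void> removal = helper.removeTableKeys(
            "missingTable", Collections.singletonList(key), "", 0L);
    try {
        removal.join();
    } catch (CompletionException e) {
        // The remove-keys stub throws WireCommandFailedException (SegmentDoesNotExist) for tables
        // that were never created through the create-table stub.
        assert e.getCause() instanceof WireCommandFailedException;
    }
}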