Use of io.pravega.controller.stream.api.grpc.v1.Controller.ContinuationToken in project pravega by pravega: class ControllerImpl, method listStreams.
@Override
public AsyncIterator<Stream> listStreams(String scopeName) {
    Exceptions.checkNotClosed(closed.get(), this);
    long traceId = LoggerHelpers.traceEnter(log, "listStreams", scopeName);
    long requestId = requestIdGenerator.get();
    try {
        final Function<ContinuationToken, CompletableFuture<Map.Entry<ContinuationToken, Collection<Stream>>>> function = token -> this.retryConfig.runAsync(() -> {
            RPCAsyncCallback<StreamsInScopeResponse> callback = new RPCAsyncCallback<>(requestId, "listStreams", scopeName);
            ScopeInfo scopeInfo = ScopeInfo.newBuilder().setScope(scopeName).build();
            new ControllerClientTagger(client, timeoutMillis).withTag(requestId, LIST_STREAMS_IN_SCOPE, scopeName).listStreamsInScope(StreamsInScopeRequest.newBuilder().setScope(scopeInfo).setContinuationToken(token).build(), callback);
            return callback.getFuture().thenApplyAsync(x -> {
                switch (x.getStatus()) {
                    case SCOPE_NOT_FOUND:
                        log.warn(requestId, "Scope not found: {}", scopeName);
                        throw new NoSuchScopeException();
                    case FAILURE:
                        log.warn(requestId, "Internal Server Error while trying to list streams in scope: {}", scopeName);
                        throw new RuntimeException("Failure while trying to list streams");
                    case SUCCESS:
                        // fall through: SUCCESS and unrecognized statuses are treated alike for compatibility reasons
                    default:
                        List<Stream> result = x.getStreamsList().stream().map(y -> new StreamImpl(y.getScope(), y.getStream())).collect(Collectors.toList());
                        return new AbstractMap.SimpleEntry<>(x.getContinuationToken(), result);
                }
            }, this.executor);
        }, this.executor);
        return new ContinuationTokenAsyncIterator<>(function, ContinuationToken.newBuilder().build());
    } finally {
        LoggerHelpers.traceLeave(log, "listStreams", traceId);
    }
}
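For orientation, a minimal consumption sketch follows. The controller instance and the scope name ("myScope") are hypothetical, not part of the original snippet, and it assumes Pravega's AsyncIterator contract that getNext() completes with null once the iteration is exhausted.

// Hypothetical usage: drain the iterator synchronously for illustration.
AsyncIterator<Stream> streams = controller.listStreams("myScope");
// getNext() completes with null once all pages have been consumed.
for (Stream s = streams.getNext().join(); s != null; s = streams.getNext().join()) {
    System.out.println(s.getScopedName());
}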
Use of io.pravega.controller.stream.api.grpc.v1.Controller.ContinuationToken in project pravega by pravega: class ControllerImpl, method listStreamsForTag.
@Override
public AsyncIterator<Stream> listStreamsForTag(String scopeName, String tag) {
    Exceptions.checkNotClosed(closed.get(), this);
    long traceId = LoggerHelpers.traceEnter(log, LIST_STREAMS_IN_SCOPE_FOR_TAG, scopeName);
    long requestId = requestIdGenerator.get();
    try {
        final Function<ContinuationToken, CompletableFuture<Map.Entry<ContinuationToken, Collection<Stream>>>> function = token -> this.retryConfig.runAsync(() -> {
            RPCAsyncCallback<StreamsInScopeResponse> callback = new RPCAsyncCallback<>(requestId, LIST_STREAMS_IN_SCOPE_FOR_TAG, scopeName);
            ScopeInfo scopeInfo = ScopeInfo.newBuilder().setScope(scopeName).build();
            StreamsInScopeWithTagRequest request = StreamsInScopeWithTagRequest.newBuilder().setScope(scopeInfo).setContinuationToken(token).setTag(tag).build();
            new ControllerClientTagger(client, timeoutMillis).withTag(requestId, LIST_STREAMS_IN_SCOPE_FOR_TAG, scopeName).listStreamsInScopeForTag(request, callback);
            return callback.getFuture().thenApplyAsync(x -> {
                switch (x.getStatus()) {
                    case SCOPE_NOT_FOUND:
                        log.warn(requestId, "Scope not found: {}", scopeName);
                        throw new NoSuchScopeException();
                    case FAILURE:
                        log.warn(requestId, "Internal Server Error while trying to list streams in scope: {} with tag: {}", scopeName, tag);
                        throw new RuntimeException("Failure while trying to list streams with tag");
                    case SUCCESS:
                        // fall through: SUCCESS and unrecognized statuses are treated alike for compatibility reasons
                    default:
                        List<Stream> result = x.getStreamsList().stream().map(y -> new StreamImpl(y.getScope(), y.getStream())).collect(Collectors.toList());
                        return new AbstractMap.SimpleEntry<>(x.getContinuationToken(), result);
                }
            }, this.executor);
        }, this.executor);
        return new ContinuationTokenAsyncIterator<>(function, ContinuationToken.newBuilder().build());
    } finally {
        LoggerHelpers.traceLeave(log, LIST_STREAMS_IN_SCOPE_FOR_TAG, traceId);
    }
}
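A similarly hedged sketch for the tag-filtered variant, collecting every matching stream into a list; the controller, scope, and tag values are again hypothetical.

// Hypothetical usage: gather all streams carrying a given tag.
List<Stream> tagged = new ArrayList<>();
AsyncIterator<Stream> it = controller.listStreamsForTag("myScope", "audit");
for (Stream s = it.getNext().join(); s != null; s = it.getNext().join()) {
    tagged.add(s);
}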
Use of io.pravega.controller.stream.api.grpc.v1.Controller.ContinuationToken in project pravega by pravega: class SegmentHelperMock, method getSegmentHelperMockForTables.
public static SegmentHelper getSegmentHelperMockForTables(ScheduledExecutorService executor) {
    SegmentHelper helper = getSegmentHelperMock();
    final Object lock = new Object();
    final Map<String, Map<ByteBuffer, TableSegmentEntry>> mapOfTables = new HashMap<>();
    final Map<String, Map<ByteBuffer, Long>> mapOfTablesPosition = new HashMap<>();
    // region create table
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        return CompletableFuture.runAsync(() -> {
            synchronized (lock) {
                mapOfTables.putIfAbsent(tableName, new HashMap<>());
                mapOfTablesPosition.putIfAbsent(tableName, new HashMap<>());
            }
        }, executor);
    }).when(helper).createTableSegment(anyString(), anyString(), anyLong(), anyBoolean(), anyInt(), anyLong());
    // endregion
    // region delete table
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        Boolean mustBeEmpty = x.getArgument(1);
        final WireCommandType type = WireCommandType.DELETE_TABLE_SEGMENT;
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                if (!mapOfTables.containsKey(tableName)) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                }
                boolean empty = Optional.ofNullable(mapOfTables.get(tableName)).orElse(Collections.emptyMap()).isEmpty();
                if (!mustBeEmpty || empty) {
                    mapOfTables.remove(tableName);
                    mapOfTablesPosition.remove(tableName);
                    return null;
                } else {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableSegmentNotEmpty);
                }
            }
        }, executor);
    }).when(helper).deleteTableSegment(anyString(), anyBoolean(), anyString(), anyLong());
    // endregion
    // region update keys
    doAnswer(x -> {
        final WireCommandType type = WireCommandType.UPDATE_TABLE_ENTRIES;
        String tableName = x.getArgument(0);
        List<TableSegmentEntry> entries = x.getArgument(1);
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    List<TableSegmentKeyVersion> resultList = new LinkedList<>();
                    entries.forEach(entry -> {
                        ByteBuffer key = entry.getKey().getKey().copy().nioBuffer();
                        byte[] value = entry.getValue().copy().array();
                        TableSegmentEntry existingEntry = table.get(key);
                        if (existingEntry == null) {
                            // Inserts must be marked NOT_EXISTS; anything else is a conditional
                            // update against an absent key and fails accordingly.
                            if (entry.getKey().getVersion().equals(TableSegmentKeyVersion.NOT_EXISTS)) {
                                TableSegmentEntry newEntry = TableSegmentEntry.versioned(key.array(), value, 0);
                                table.put(key, newEntry);
                                tablePos.put(key, System.nanoTime());
                                resultList.add(newEntry.getKey().getVersion());
                            } else {
                                throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyDoesNotExist);
                            }
                        } else if (existingEntry.getKey().getVersion().equals(entry.getKey().getVersion())) {
                            // Compare-and-swap: a matching conditional update bumps the version.
                            TableSegmentKeyVersion newVersion = TableSegmentKeyVersion.from(existingEntry.getKey().getVersion().getSegmentVersion() + 1);
                            TableSegmentEntry newEntry = TableSegmentEntry.versioned(key.array(), value, newVersion.getSegmentVersion());
                            table.put(key, newEntry);
                            tablePos.put(key, System.nanoTime());
                            resultList.add(newVersion);
                        } else {
                            throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
                        }
                    });
                    return resultList;
                }
            }
        }, executor);
    }).when(helper).updateTableEntries(anyString(), any(), anyString(), anyLong());
    // endregion
    // region remove keys
    doAnswer(x -> {
        final WireCommandType type = WireCommandType.REMOVE_TABLE_KEYS;
        String tableName = x.getArgument(0);
        List<TableSegmentKey> keys = x.getArgument(1);
        return CompletableFuture.runAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    keys.forEach(rawKey -> {
                        ByteBuffer key = rawKey.getKey().copy().nioBuffer();
                        TableSegmentEntry existingEntry = table.get(key);
                        if (existingEntry != null) {
                            if (existingEntry.getKey().getVersion().equals(rawKey.getVersion()) || rawKey.getVersion() == null || rawKey.getVersion().equals(TableSegmentKeyVersion.NO_VERSION)) {
                                table.remove(key);
                                tablePos.remove(key);
                            } else {
                                throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
                            }
                        }
                    });
                }
            }
        }, executor);
    }).when(helper).removeTableKeys(anyString(), any(), anyString(), anyLong());
    // endregion
    // region read keys
    doAnswer(x -> {
        final WireCommandType type = WireCommandType.READ_TABLE;
        String tableName = x.getArgument(0);
        List<TableSegmentKey> requestKeys = x.getArgument(1);
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    List<TableSegmentEntry> resultList = new LinkedList<>();
                    requestKeys.forEach(requestKey -> {
                        ByteBuffer key = requestKey.getKey().copy().nioBuffer();
                        TableSegmentEntry existingEntry = table.get(key);
                        if (existingEntry == null) {
                            resultList.add(TableSegmentEntry.notExists(key.array(), new byte[0]));
                        } else if (existingEntry.getKey().getVersion().equals(requestKey.getVersion()) || requestKey.getVersion() == null || requestKey.getVersion().equals(TableSegmentKeyVersion.NO_VERSION)) {
                            resultList.add(duplicate(existingEntry));
                        } else {
                            throw new WireCommandFailedException(type, WireCommandFailedException.Reason.TableKeyBadVersion);
                        }
                    });
                    return resultList;
                }
            }
        }, executor);
    }).when(helper).readTable(anyString(), any(), anyString(), anyLong());
    // endregion
    // region readTableKeys
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        int limit = x.getArgument(1);
        HashTableIteratorItem.State state = x.getArgument(2);
        final WireCommandType type = WireCommandType.READ_TABLE;
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    // The continuation token encodes the nanoTime "position" of the last key
                    // returned; an empty state means iteration starts from the beginning.
                    long floor;
                    if (state.equals(HashTableIteratorItem.State.EMPTY)) {
                        floor = 0L;
                    } else {
                        floor = new ByteArraySegment(state.toBytes()).getLong(0);
                    }
                    AtomicLong token = new AtomicLong(floor);
                    List<TableSegmentKey> list = tablePos.entrySet().stream()
                            .sorted(Comparator.comparingLong(Map.Entry::getValue))
                            .filter(c -> c.getValue() > floor)
                            .map(r -> {
                                token.set(r.getValue());
                                return duplicate(table.get(r.getKey()).getKey());
                            })
                            .limit(limit)
                            .collect(Collectors.toList());
                    byte[] continuationToken = new byte[Long.BYTES];
                    BitConverter.writeLong(continuationToken, 0, token.get());
                    HashTableIteratorItem.State newState = HashTableIteratorItem.State.fromBytes(Unpooled.wrappedBuffer(continuationToken));
                    return new HashTableIteratorItem<>(newState, list);
                }
            }
        }, executor);
    }).when(helper).readTableKeys(anyString(), anyInt(), any(), anyString(), anyLong());
    // endregion
    // region readTableEntries
    doAnswer(x -> {
        String tableName = x.getArgument(0);
        int limit = x.getArgument(1);
        HashTableIteratorItem.State state = x.getArgument(2);
        final WireCommandType type = WireCommandType.READ_TABLE;
        return CompletableFuture.supplyAsync(() -> {
            synchronized (lock) {
                Map<ByteBuffer, TableSegmentEntry> table = mapOfTables.get(tableName);
                Map<ByteBuffer, Long> tablePos = mapOfTablesPosition.get(tableName);
                if (table == null) {
                    throw new WireCommandFailedException(type, WireCommandFailedException.Reason.SegmentDoesNotExist);
                } else {
                    long floor;
                    if (state.equals(HashTableIteratorItem.State.EMPTY)) {
                        floor = 0L;
                    } else {
                        floor = new ByteArraySegment(state.toBytes()).getLong(0);
                    }
                    AtomicLong token = new AtomicLong(floor);
                    List<TableSegmentEntry> list = tablePos.entrySet().stream()
                            .sorted(Comparator.comparingLong(Map.Entry::getValue))
                            .filter(c -> c.getValue() > floor)
                            .map(r -> {
                                token.set(r.getValue());
                                return duplicate(table.get(r.getKey()));
                            })
                            .limit(limit)
                            .collect(Collectors.toList());
                    byte[] continuationToken = new byte[Long.BYTES];
                    BitConverter.writeLong(continuationToken, 0, token.get());
                    HashTableIteratorItem.State newState = HashTableIteratorItem.State.fromBytes(Unpooled.wrappedBuffer(continuationToken));
                    return new HashTableIteratorItem<>(newState, list);
                }
            }
        }, executor);
    }).when(helper).readTableEntries(anyString(), anyInt(), any(), anyString(), anyLong());
    // endregion
    return helper;
}
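A hedged sketch of exercising this mock from a test follows. Only the table-name argument (and, for deletes, the must-be-empty flag) is read by the stubs above; the meaning of the remaining parameters (delegation token, request id, and so on) is inferred from the anyString()/anyLong() matchers and should be treated as an assumption.

// Hypothetical test usage; trailing argument values are placeholders.
ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
SegmentHelper helper = SegmentHelperMock.getSegmentHelperMockForTables(executor);
helper.createTableSegment("scope/table", "", 0L, false, 0, 0L).join();
// NOT_EXISTS versioning tells the mock this is an insert of a new key.
TableSegmentEntry entry = TableSegmentEntry.notExists(
        "key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8));
List<TableSegmentKeyVersion> versions =
        helper.updateTableEntries("scope/table", Collections.singletonList(entry), "", 0L).join();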
Use of io.pravega.controller.stream.api.grpc.v1.Controller.ContinuationToken in project pravega by pravega: class ControllerImpl, method listKeyValueTables.
@Override
public AsyncIterator<KeyValueTableInfo> listKeyValueTables(String scopeName) {
    Exceptions.checkNotClosed(closed.get(), this);
    long traceId = LoggerHelpers.traceEnter(log, "listKeyValueTables", scopeName);
    long requestId = requestIdGenerator.get();
    try {
        final Function<ContinuationToken, CompletableFuture<Map.Entry<ContinuationToken, Collection<KeyValueTableInfo>>>> function = token -> this.retryConfig.runAsync(() -> {
            RPCAsyncCallback<KVTablesInScopeResponse> callback = new RPCAsyncCallback<>(requestId, "listKeyValueTables", scopeName);
            ScopeInfo scopeInfo = ScopeInfo.newBuilder().setScope(scopeName).build();
            new ControllerClientTagger(client, timeoutMillis).withTag(requestId, LIST_KEY_VALUE_TABLES, scopeName).listKeyValueTables(KVTablesInScopeRequest.newBuilder().setScope(scopeInfo).setContinuationToken(token).build(), callback);
            return callback.getFuture().thenApplyAsync(x -> {
                switch (x.getStatus()) {
                    case SCOPE_NOT_FOUND:
                        log.warn(requestId, "Scope not found: {}", scopeName);
                        throw new NoSuchScopeException();
                    case FAILURE:
                        log.warn(requestId, "Internal Server Error while trying to list key-value tables in scope: {}", scopeName);
                        throw new RuntimeException("Failure while trying to list key-value tables");
                    case SUCCESS:
                        // fall through: SUCCESS and unrecognized statuses are treated alike for compatibility reasons
                    default:
                        List<KeyValueTableInfo> kvtList = x.getKvtablesList().stream().map(y -> new KeyValueTableInfo(y.getScope(), y.getKvtName())).collect(Collectors.toList());
                        return new AbstractMap.SimpleEntry<>(x.getContinuationToken(), kvtList);
                }
            }, this.executor);
        }, this.executor);
        return new ContinuationTokenAsyncIterator<>(function, ContinuationToken.newBuilder().build());
    } finally {
        LoggerHelpers.traceLeave(log, "listKeyValueTables", traceId);
    }
}
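As with listStreams, a minimal hypothetical consumption sketch; the controller instance and scope name are illustrative only.

// Hypothetical usage: print every key-value table in the scope.
AsyncIterator<KeyValueTableInfo> kvts = controller.listKeyValueTables("myScope");
for (KeyValueTableInfo info = kvts.getNext().join(); info != null; info = kvts.getNext().join()) {
    System.out.println(info.getScopedName());
}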