Use of io.prestosql.spi.connector.InMemoryRecordSet in project hetu-core by openlookeng.
From class TestThriftIndexPageSource, method testGetNextPageTwoConcurrentRequests.
@Test
public void testGetNextPageTwoConcurrentRequests() throws Exception {
    final int splits = 3;
    final int lookupRequestsConcurrency = 2;
    final int rowsPerSplit = 1;
    List<SettableFuture<PrestoThriftPageResult>> futures = IntStream.range(0, splits)
            .mapToObj(i -> SettableFuture.<PrestoThriftPageResult>create())
            .collect(toImmutableList());
    List<CountDownLatch> signals = IntStream.range(0, splits)
            .mapToObj(i -> new CountDownLatch(1))
            .collect(toImmutableList());
    TestingThriftService client = new TestingThriftService(rowsPerSplit, false, false) {
        @Override
        public ListenableFuture<PrestoThriftPageResult> getRows(PrestoThriftId splitId, List<String> columns, long maxBytes, PrestoThriftNullableToken nextToken) {
            int key = Ints.fromByteArray(splitId.getId());
            signals.get(key).countDown();
            return futures.get(key);
        }
    };
    ThriftConnectorStats stats = new ThriftConnectorStats();
    long pageSizeReceived = 0;
    ThriftIndexPageSource pageSource = new ThriftIndexPageSource(
            (context, headers) -> client,
            ImmutableMap.of(),
            stats,
            new ThriftIndexHandle(new SchemaTableName("default", "table1"), TupleDomain.all()),
            ImmutableList.of(column("a", INTEGER)),
            ImmutableList.of(column("b", INTEGER)),
            new InMemoryRecordSet(ImmutableList.of(INTEGER), generateKeys(0, splits)),
            MAX_BYTES_PER_RESPONSE,
            lookupRequestsConcurrency);
    assertNull(pageSource.getNextPage());
    assertEquals((long) stats.getIndexPageSize().getAllTime().getTotal(), 0);
    signals.get(0).await(1, SECONDS);
    signals.get(1).await(1, SECONDS);
    signals.get(2).await(1, SECONDS);
    assertEquals(signals.get(0).getCount(), 0, "first request wasn't sent");
    assertEquals(signals.get(1).getCount(), 0, "second request wasn't sent");
    assertEquals(signals.get(2).getCount(), 1, "third request shouldn't be sent");
    // at this point the first two requests have been sent
    assertFalse(pageSource.isFinished());
    assertNull(pageSource.getNextPage());
    assertEquals((long) stats.getIndexPageSize().getAllTime().getTotal(), 0);
    // completing the second request
    futures.get(1).set(pageResult(20, null));
    Page page = pageSource.getNextPage();
    assertNotNull(page);
    pageSizeReceived += page.getSizeInBytes();
    assertEquals((long) stats.getIndexPageSize().getAllTime().getTotal(), pageSizeReceived);
    assertEquals(page.getPositionCount(), 1);
    assertEquals(page.getBlock(0).getInt(0, 0), 20);
    // not complete yet
    assertFalse(pageSource.isFinished());
    // once one of the requests completes, the next one should be sent
    signals.get(2).await(1, SECONDS);
    assertEquals(signals.get(2).getCount(), 0, "third request wasn't sent");
    // completing the first request
    futures.get(0).set(pageResult(10, null));
    page = pageSource.getNextPage();
    assertNotNull(page);
    pageSizeReceived += page.getSizeInBytes();
    assertEquals((long) stats.getIndexPageSize().getAllTime().getTotal(), pageSizeReceived);
    assertEquals(page.getPositionCount(), 1);
    assertEquals(page.getBlock(0).getInt(0, 0), 10);
    // still not complete
    assertFalse(pageSource.isFinished());
    // completing the third request
    futures.get(2).set(pageResult(30, null));
    page = pageSource.getNextPage();
    assertNotNull(page);
    pageSizeReceived += page.getSizeInBytes();
    assertEquals((long) stats.getIndexPageSize().getAllTime().getTotal(), pageSizeReceived);
    assertEquals(page.getPositionCount(), 1);
    assertEquals(page.getBlock(0).getInt(0, 0), 30);
    // finished now
    assertTrue(pageSource.isFinished());
    // after completion
    assertNull(pageSource.getNextPage());
    pageSource.close();
}
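The test above drives the lookup keys through an InMemoryRecordSet built from generateKeys, a helper that is not part of this snippet. A minimal sketch of such a helper, matching the call sites above and relying on the same IntStream/ImmutableList imports already visible, is shown below; the real implementation in TestThriftIndexPageSource may differ, for example in how the INTEGER keys are boxed.

// Hypothetical reconstruction of the generateKeys helper used above: one
// single-column key row per split for the INTEGER key column. Values are boxed
// as long because long is the stack representation of INTEGER in the Presto SPI.
private static List<List<Object>> generateKeys(int begin, int end) {
    return IntStream.range(begin, end)
            .<List<Object>>mapToObj(i -> ImmutableList.of((long) i))
            .collect(toImmutableList());
}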
Use of io.prestosql.spi.connector.InMemoryRecordSet in project hetu-core by openlookeng.
From class TestThriftIndexPageSource, method runGeneralTest.
private static void runGeneralTest(int splits, int lookupRequestsConcurrency, int rowsPerSplit, boolean twoSplitBatches) throws Exception {
    TestingThriftService client = new TestingThriftService(rowsPerSplit, true, twoSplitBatches);
    ThriftIndexPageSource pageSource = new ThriftIndexPageSource(
            (context, headers) -> client,
            ImmutableMap.of(),
            new ThriftConnectorStats(),
            new ThriftIndexHandle(new SchemaTableName("default", "table1"), TupleDomain.all()),
            ImmutableList.of(column("a", INTEGER)),
            ImmutableList.of(column("b", INTEGER)),
            new InMemoryRecordSet(ImmutableList.of(INTEGER), generateKeys(1, splits + 1)),
            MAX_BYTES_PER_RESPONSE,
            lookupRequestsConcurrency);
    List<Integer> actual = new ArrayList<>();
    while (!pageSource.isFinished()) {
        CompletableFuture<?> blocked = pageSource.isBlocked();
        blocked.get(1, SECONDS);
        Page page = pageSource.getNextPage();
        if (page != null) {
            Block block = page.getBlock(0);
            for (int position = 0; position < block.getPositionCount(); position++) {
                actual.add(block.getInt(position, 0));
            }
        }
    }
    Collections.sort(actual);
    List<Integer> expected = new ArrayList<>(splits * rowsPerSplit);
    for (int split = 1; split <= splits; split++) {
        for (int row = 0; row < rowsPerSplit; row++) {
            expected.add(split * 10 + row);
        }
    }
    assertEquals(actual, expected);
    // must be null after finish
    assertNull(pageSource.getNextPage());
    pageSource.close();
}
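runGeneralTest is a parameterized driver rather than a test method itself. Hypothetical callers in the style of the rest of the class might look like the following; the @Test method names and argument values here are assumptions, not taken from the source.

// Illustrative callers only; names and parameter values are assumptions,
// not the actual test methods from TestThriftIndexPageSource.
@Test
public void testGetNextPageSingleSplit() throws Exception {
    runGeneralTest(1, 1, 1, false);
}

@Test
public void testGetNextPageMultipleSplitsHigherConcurrency() throws Exception {
    runGeneralTest(5, 2, 4, true);
}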
Use of io.prestosql.spi.connector.InMemoryRecordSet in project boostkit-bigdata by kunpengcompute.
From class HiveMetadata, method getPartitionsSystemTable.
private Optional<SystemTable> getPartitionsSystemTable(ConnectorSession session, SchemaTableName tableName, SchemaTableName sourceTableName) {
    HiveTableHandle sourceTableHandle = getTableHandle(session, sourceTableName);
    if (sourceTableHandle == null) {
        return Optional.empty();
    }
    SchemaTableName schemaTableName = sourceTableHandle.getSchemaTableName();
    Table table = metastore.getTable(new HiveIdentity(session), schemaTableName.getSchemaName(), schemaTableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    List<HiveColumnHandle> partitionColumns = sourceTableHandle.getPartitionColumns();
    if (partitionColumns.isEmpty()) {
        return Optional.empty();
    }
    List<Type> partitionColumnTypes = partitionColumns.stream()
            .map(HiveColumnHandle::getTypeSignature)
            .map(typeManager::getType)
            .collect(toImmutableList());
    List<ColumnMetadata> partitionSystemTableColumns = partitionColumns.stream()
            .map(column -> new ColumnMetadata(column.getName(), typeManager.getType(column.getTypeSignature()), column.getComment().orElse(null), column.isHidden()))
            .collect(toImmutableList());
    Map<Integer, HiveColumnHandle> fieldIdToColumnHandle = IntStream.range(0, partitionColumns.size())
            .boxed()
            .collect(toImmutableMap(identity(), partitionColumns::get));
    return Optional.of(createSystemTable(new ConnectorTableMetadata(tableName, partitionSystemTableColumns), constraint -> {
        TupleDomain<ColumnHandle> targetTupleDomain = constraint.transform(fieldIdToColumnHandle::get);
        Predicate<Map<ColumnHandle, NullableValue>> targetPredicate = convertToPredicate(targetTupleDomain);
        Constraint targetConstraint = new Constraint(targetTupleDomain, targetPredicate);
        Iterable<List<Object>> records = () -> stream(partitionManager.getPartitions(metastore, new HiveIdentity(session), sourceTableHandle, targetConstraint, table).getPartitions())
                .map(hivePartition -> IntStream.range(0, partitionColumns.size())
                        .mapToObj(fieldIdToColumnHandle::get)
                        .map(columnHandle -> hivePartition.getKeys().get(columnHandle).getValue())
                        .collect(toList()))
                .iterator();
        return new InMemoryRecordSet(partitionColumnTypes, records).cursor();
    }));
}
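The InMemoryRecordSet usage above boils down to: the partition column types plus an Iterable of rows whose values are already in stack representation (Slice for VARCHAR, Long for INTEGER, and so on), exposed through cursor(). A standalone sketch of that pattern follows, with made-up values in place of real HivePartition keys; the class name and data are illustrative only.

import com.google.common.collect.ImmutableList;
import io.airlift.slice.Slices;
import io.prestosql.spi.connector.InMemoryRecordSet;
import io.prestosql.spi.connector.RecordCursor;
import io.prestosql.spi.type.Type;

import java.util.List;

import static io.prestosql.spi.type.IntegerType.INTEGER;
import static io.prestosql.spi.type.VarcharType.VARCHAR;

public final class PartitionRecordSetSketch {
    private PartitionRecordSetSketch() {}

    public static void main(String[] args) {
        // Two pretend partition columns: a VARCHAR date and an INTEGER bucket,
        // with values already in their stack representation.
        List<Type> types = ImmutableList.of(VARCHAR, INTEGER);
        Iterable<List<Object>> rows = ImmutableList.of(
                ImmutableList.of(Slices.utf8Slice("2021-01-01"), 1L),
                ImmutableList.of(Slices.utf8Slice("2021-01-02"), 2L));

        // Same construction as getPartitionsSystemTable: column types plus a row iterable.
        RecordCursor cursor = new InMemoryRecordSet(types, rows).cursor();
        while (cursor.advanceNextPosition()) {
            System.out.printf("%s -> %d%n", cursor.getSlice(0).toStringUtf8(), cursor.getLong(1));
        }
        cursor.close();
    }
}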
Use of io.prestosql.spi.connector.InMemoryRecordSet in project hetu-core by openlookeng.
From class HiveMetadata, method getPartitionsSystemTable.
private Optional<SystemTable> getPartitionsSystemTable(ConnectorSession session, SchemaTableName tableName, SchemaTableName sourceTableName) {
    HiveTableHandle sourceTableHandle = getTableHandle(session, sourceTableName);
    if (sourceTableHandle == null) {
        return Optional.empty();
    }
    SchemaTableName schemaTableName = sourceTableHandle.getSchemaTableName();
    Table table = metastore.getTable(new HiveIdentity(session), schemaTableName.getSchemaName(), schemaTableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    List<HiveColumnHandle> partitionColumns = sourceTableHandle.getPartitionColumns();
    if (partitionColumns.isEmpty()) {
        return Optional.empty();
    }
    List<Type> partitionColumnTypes = partitionColumns.stream()
            .map(HiveColumnHandle::getTypeSignature)
            .map(typeManager::getType)
            .collect(toImmutableList());
    List<ColumnMetadata> partitionSystemTableColumns = partitionColumns.stream()
            .map(column -> new ColumnMetadata(column.getName(), typeManager.getType(column.getTypeSignature()), column.getComment().orElse(null), column.isHidden()))
            .collect(toImmutableList());
    Map<Integer, HiveColumnHandle> fieldIdToColumnHandle = IntStream.range(0, partitionColumns.size())
            .boxed()
            .collect(toImmutableMap(identity(), partitionColumns::get));
    return Optional.of(createSystemTable(new ConnectorTableMetadata(tableName, partitionSystemTableColumns), constraint -> {
        TupleDomain<ColumnHandle> targetTupleDomain = constraint.transform(fieldIdToColumnHandle::get);
        Predicate<Map<ColumnHandle, NullableValue>> targetPredicate = convertToPredicate(targetTupleDomain);
        Constraint targetConstraint = new Constraint(targetTupleDomain, targetPredicate);
        Iterable<List<Object>> records = () -> stream(partitionManager.getPartitions(metastore, new HiveIdentity(session), sourceTableHandle, targetConstraint, table).getPartitions())
                .map(hivePartition -> IntStream.range(0, partitionColumns.size())
                        .mapToObj(fieldIdToColumnHandle::get)
                        .map(columnHandle -> hivePartition.getKeys().get(columnHandle).getValue())
                        .collect(toList()))
                .iterator();
        return new InMemoryRecordSet(partitionColumnTypes, records).cursor();
    }));
}
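This hetu-core copy is identical to the boostkit-bigdata version above. The detail worth highlighting is that records is an Iterable lambda, so the partition lookup runs only when the returned record set is actually read, not when the system table is registered. A small self-contained illustration of that deferral follows, assuming (as the connector code relies on) that InMemoryRecordSet consumes the Iterable only once its cursor is read; names and values are illustrative.

import com.google.common.collect.ImmutableList;
import io.prestosql.spi.connector.InMemoryRecordSet;
import io.prestosql.spi.connector.RecordCursor;
import io.prestosql.spi.type.Type;

import java.util.List;

import static io.prestosql.spi.type.IntegerType.INTEGER;

public final class LazyRecordsSketch {
    private LazyRecordsSketch() {}

    public static void main(String[] args) {
        List<Type> types = ImmutableList.of(INTEGER);

        // The Iterable is a lambda: its iterator() runs only when the cursor starts
        // reading, mirroring how getPartitionsSystemTable defers the metastore
        // partition lookup until the partitions system table is actually scanned.
        Iterable<List<Object>> records = () -> {
            System.out.println("rows are being materialized now");
            return ImmutableList.<List<Object>>of(ImmutableList.of(42L)).iterator();
        };

        InMemoryRecordSet recordSet = new InMemoryRecordSet(types, records);
        System.out.println("record set constructed");

        RecordCursor cursor = recordSet.cursor();
        while (cursor.advanceNextPosition()) {
            System.out.println("value: " + cursor.getLong(0));
        }
        cursor.close();
    }
}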
Use of io.prestosql.spi.connector.InMemoryRecordSet in project hetu-core by openlookeng.
From class HiveMetadata, method getPropertiesSystemTable.
private Optional<SystemTable> getPropertiesSystemTable(ConnectorSession session, SchemaTableName tableName, SchemaTableName sourceTableName) {
    Optional<Table> table = metastore.getTable(new HiveIdentity(session), sourceTableName.getSchemaName(), sourceTableName.getTableName());
    if (!table.isPresent() || table.get().getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
        throw new TableNotFoundException(tableName);
    }
    Map<String, String> sortedTableParameters = ImmutableSortedMap.copyOf(table.get().getParameters());
    List<ColumnMetadata> columns = sortedTableParameters.keySet().stream()
            .map(key -> new ColumnMetadata(key, VarcharType.VARCHAR))
            .collect(toImmutableList());
    List<Type> types = columns.stream()
            .map(ColumnMetadata::getType)
            .collect(toImmutableList());
    Iterable<List<Object>> propertyValues = ImmutableList.of(ImmutableList.copyOf(sortedTableParameters.values()));
    return Optional.of(createSystemTable(new ConnectorTableMetadata(sourceTableName, columns), constraint -> new InMemoryRecordSet(types, propertyValues).cursor()));
}
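This is the simplest InMemoryRecordSet usage on this page: one VARCHAR column per table parameter and a single row holding the parameter values, passed as plain Strings exactly as the code above does. A self-contained sketch of that shape with made-up parameters follows; the class name and values are illustrative.

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSortedMap;
import io.prestosql.spi.connector.InMemoryRecordSet;
import io.prestosql.spi.connector.RecordCursor;
import io.prestosql.spi.type.Type;

import java.util.List;
import java.util.Map;

import static io.prestosql.spi.type.VarcharType.VARCHAR;

public final class PropertiesRecordSetSketch {
    private PropertiesRecordSetSketch() {}

    public static void main(String[] args) {
        // Stand-in for table.getParameters(); real values come from the Hive metastore.
        Map<String, String> parameters = ImmutableSortedMap.of(
                "numFiles", "12",
                "transactional", "false");

        // One VARCHAR column per property key and a single row with the values,
        // the same shape getPropertiesSystemTable builds.
        List<Type> types = ImmutableList.of(VARCHAR, VARCHAR);
        Iterable<List<Object>> propertyValues = ImmutableList.of(ImmutableList.copyOf(parameters.values()));

        RecordCursor cursor = new InMemoryRecordSet(types, propertyValues).cursor();
        while (cursor.advanceNextPosition()) {
            System.out.println(cursor.getSlice(0).toStringUtf8() + ", " + cursor.getSlice(1).toStringUtf8());
        }
        cursor.close();
    }
}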