Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb: class AbstractTestHiveClientS3, method doCreateTable.
private void doCreateTable(SchemaTableName tableName, HiveStorageFormat storageFormat)
        throws Exception
{
    List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder().add(new ColumnMetadata("id", BIGINT)).build();
    MaterializedResult data = MaterializedResult.resultBuilder(newSession(), BIGINT).row(1L).row(3L).row(2L).build();

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(storageFormat));
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty());

        // write the records
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());

        // commit the table
        metadata.finishCreateTable(session, outputHandle, fragments);
        transaction.commit();

        // Hack to work around the metastore not being configured for S3.
        // The metastore tries to validate the location when creating the
        // table, which fails without explicit configuration for S3.
        // We work around that by using a dummy location when creating the
        // table and update it here to the correct S3 location.
        metastoreClient.updateTableLocation(
                database,
                tableName.getTableName(),
                locationService.writePathRoot(((HiveOutputTableHandle) outputHandle).getLocationHandle()).get().toString());
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), columns);

        // verify the data
        List<ConnectorTableLayoutResult> tableLayoutResults = metadata.getTableLayouts(session, tableHandle, new Constraint<>(TupleDomain.all(), bindings -> true), Optional.empty());
        HiveTableLayoutHandle layoutHandle = (HiveTableLayoutHandle) getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
        assertEquals(layoutHandle.getPartitions().get().size(), 1);
        ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, layoutHandle);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));

        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, columnHandles)) {
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            assertEqualsIgnoreOrder(result.getMaterializedRows(), data.getMaterializedRows());
        }
    }
}
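The getAllSplits(splitSource) helper called above is not part of this snippet. A minimal sketch of what it plausibly does, assuming the same ConnectorSplitSource batch API used by getSplitCount further down, drains the source until it reports finished:

// Hypothetical helper (not shown in the snippet): drain a
// ConnectorSplitSource into an immutable list, batch by batch.
protected static List<ConnectorSplit> getAllSplits(ConnectorSplitSource splitSource)
{
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    while (!splitSource.isFinished()) {
        splits.addAll(getFutureValue(splitSource.getNextBatch(1000)));
    }
    return splits.build();
}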
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb: class AbstractTestHiveClient, method assertTableIsBucketed.
private void assertTableIsBucketed(ConnectorTableHandle tableHandle)
        throws Exception
{
    // the bucketed test tables should have exactly 32 splits
    List<ConnectorSplit> splits = getAllSplits(tableHandle, TupleDomain.all());
    assertEquals(splits.size(), 32);

    // verify all paths are unique
    Set<String> paths = new HashSet<>();
    for (ConnectorSplit split : splits) {
        assertTrue(paths.add(((HiveSplit) split).getPath()));
    }
}
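The two-argument getAllSplits(tableHandle, tupleDomain) overload used here is also outside this snippet. A plausible sketch, assuming the transaction, metadata, and splitManager plumbing shown in the first example, resolves the table layout for the given constraint and then drains the resulting split source:

// Hypothetical overload: resolve the single layout for the handle,
// then delegate to the ConnectorSplitSource-draining helper above.
private List<ConnectorSplit> getAllSplits(ConnectorTableHandle tableHandle, TupleDomain<ColumnHandle> tupleDomain)
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        List<ConnectorTableLayoutResult> tableLayoutResults = transaction.getMetadata()
                .getTableLayouts(session, tableHandle, new Constraint<>(tupleDomain, bindings -> true), Optional.empty());
        ConnectorTableLayoutHandle layoutHandle = getOnlyElement(tableLayoutResults).getTableLayout().getHandle();
        return getAllSplits(splitManager.getSplits(transaction.getTransactionHandle(), session, layoutHandle));
    }
}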
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb: class AbstractTestHiveClient, method getSplitCount.
protected static int getSplitCount(ConnectorSplitSource splitSource)
        throws InterruptedException
{
    int splitCount = 0;
    while (!splitSource.isFinished()) {
        List<ConnectorSplit> batch = getFutureValue(splitSource.getNextBatch(1000));
        splitCount += batch.size();
    }
    return splitCount;
}
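Note that getNextBatch returns a CompletableFuture, which getFutureValue blocks on and unwraps. A hypothetical call site, reusing the splitManager, session, and layoutHandle names from the first snippet, might look like:

// Hypothetical usage: count the splits produced for a resolved layout.
ConnectorSplitSource splitSource = splitManager.getSplits(transaction.getTransactionHandle(), session, layoutHandle);
assertEquals(getSplitCount(splitSource), 32);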
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb: class TpchSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableLayoutHandle layout)
{
    TpchTableHandle tableHandle = ((TpchTableLayoutHandle) layout).getTable();

    Set<Node> nodes = nodeManager.getRequiredWorkerNodes();
    int totalParts = nodes.size() * splitsPerNode;
    int partNumber = 0;

    // Split the data into totalParts pieces, assigning splitsPerNode splits to each available node.
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    for (Node node : nodes) {
        for (int i = 0; i < splitsPerNode; i++) {
            splits.add(new TpchSplit(tableHandle, partNumber, totalParts, ImmutableList.of(node.getHostAndPort())));
            partNumber++;
        }
    }
    return new FixedSplitSource(splits.build());
}
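FixedSplitSource wraps a precomputed list of splits. A minimal sketch of that behavior (an illustration consistent with the getNextBatch/isFinished API used in these snippets, not the exact Presto implementation) serves the list in batches and reports finished once it is exhausted:

// Sketch of a fixed split source: hands out precomputed splits in
// batches of at most maxSize and finishes when the list is exhausted.
public class FixedSplitSource implements ConnectorSplitSource
{
    private final List<ConnectorSplit> splits;
    private int offset;

    public FixedSplitSource(Iterable<? extends ConnectorSplit> splits)
    {
        this.splits = ImmutableList.copyOf(splits);
    }

    @Override
    public CompletableFuture<List<ConnectorSplit>> getNextBatch(int maxSize)
    {
        List<ConnectorSplit> batch = splits.subList(offset, Math.min(offset + maxSize, splits.size()));
        offset += batch.size();
        return CompletableFuture.completedFuture(batch);
    }

    @Override
    public boolean isFinished()
    {
        return offset >= splits.size();
    }

    @Override
    public void close()
    {
    }
}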
Use of com.facebook.presto.spi.ConnectorSplit in project presto by prestodb: class TestRaptorSplitManager, method testAssignRandomNodeWhenBackupAvailable.
@Test
public void testAssignRandomNodeWhenBackupAvailable()
        throws InterruptedException, URISyntaxException
{
    TestingNodeManager nodeManager = new TestingNodeManager();
    RaptorConnectorId connectorId = new RaptorConnectorId("raptor");
    NodeSupplier nodeSupplier = nodeManager::getWorkerNodes;
    PrestoNode node = new PrestoNode(UUID.randomUUID().toString(), new URI("http://127.0.0.1/"), NodeVersion.UNKNOWN, false);
    nodeManager.addNode(node);
    RaptorSplitManager raptorSplitManagerWithBackup = new RaptorSplitManager(connectorId, nodeSupplier, shardManager, true);

    deleteShardNodes();

    ConnectorTableLayoutResult layout = getOnlyElement(metadata.getTableLayouts(SESSION, tableHandle, Constraint.alwaysTrue(), Optional.empty()));
    ConnectorSplitSource partitionSplit = getSplits(raptorSplitManagerWithBackup, layout);
    List<ConnectorSplit> batch = getFutureValue(partitionSplit.getNextBatch(1), PrestoException.class);
    assertEquals(getOnlyElement(getOnlyElement(batch).getAddresses()), node.getHostAndPort());
}
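The getSplits helper used above simply forwards to the split manager. A plausible sketch, assuming the SESSION constant from the test and a Raptor transaction handle (both assumptions, not shown in the snippet):

// Hypothetical helper: open a split source for the given layout.
// The transaction handle construction here is an assumption.
private static ConnectorSplitSource getSplits(RaptorSplitManager splitManager, ConnectorTableLayoutResult layout)
{
    ConnectorTransactionHandle transaction = new RaptorTransactionHandle();
    return splitManager.getSplits(transaction, SESSION, layout.getTableLayout().getHandle());
}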