Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
The class TestRaptorConnector, method testMaintenanceBlocked.
@Test
public void testMaintenanceBlocked()
{
    long tableId1 = createTable("test1");
    long tableId2 = createTable("test2");

    assertFalse(metadataDao.isMaintenanceBlockedLocked(tableId1));
    assertFalse(metadataDao.isMaintenanceBlockedLocked(tableId2));

    // begin delete for table1
    ConnectorTransactionHandle txn1 = beginTransaction();
    ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(SESSION, txn1), "test1");
    connector.getMetadata(SESSION, txn1).beginDelete(SESSION, handle1);

    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId1));
    assertFalse(metadataDao.isMaintenanceBlockedLocked(tableId2));

    // begin delete for table2
    ConnectorTransactionHandle txn2 = beginTransaction();
    ConnectorTableHandle handle2 = getTableHandle(connector.getMetadata(SESSION, txn2), "test2");
    connector.getMetadata(SESSION, txn2).beginDelete(SESSION, handle2);

    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId1));
    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId2));

    // begin another delete for table1
    ConnectorTransactionHandle txn3 = beginTransaction();
    ConnectorTableHandle handle3 = getTableHandle(connector.getMetadata(SESSION, txn3), "test1");
    connector.getMetadata(SESSION, txn3).beginDelete(SESSION, handle3);

    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId1));
    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId2));

    // commit first delete for table1
    connector.commit(txn1);
    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId1));
    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId2));

    // rollback second delete for table1
    connector.rollback(txn3);
    assertFalse(metadataDao.isMaintenanceBlockedLocked(tableId1));
    assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId2));

    // commit delete for table2
    connector.commit(txn2);
    assertFalse(metadataDao.isMaintenanceBlockedLocked(tableId1));
    assertFalse(metadataDao.isMaintenanceBlockedLocked(tableId2));
}
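The test relies on a small beginTransaction() helper. A minimal sketch of what such a helper could look like, assuming the connector accepts READ_COMMITTED with auto-commit (the actual helper in TestRaptorConnector may differ):

// Hypothetical helper, not copied from TestRaptorConnector: open a connector-level
// transaction via the Connector SPI method shown further down this page.
private ConnectorTransactionHandle beginTransaction()
{
    return connector.beginTransaction(IsolationLevel.READ_COMMITTED, false, true);
}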
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
The class TestS3TableConfigClient, method testTableReading.
@Parameters({"kinesis.test-table-description-location", "kinesis.awsAccessKey", "kinesis.awsSecretKey"})
@Test
public void testTableReading(String tableDescriptionS3, String accessKey, String secretKey)
{
    // To run this test: set up an S3 bucket with a folder for unit testing, and put
    // MinimalTable.json in that folder.

    // Create dependent objects, including the minimal config needed for this test
    Map<String, String> properties = ImmutableMap.<String, String>builder()
            .put("kinesis.table-description-location", tableDescriptionS3)
            .put("kinesis.default-schema", "kinesis")
            .put("kinesis.hide-internal-columns", "false")
            .put("kinesis.access-key", TestUtils.noneToBlank(accessKey))
            .put("kinesis.secret-key", TestUtils.noneToBlank(secretKey))
            .buildOrThrow();

    KinesisPlugin kinesisPlugin = new KinesisPlugin();
    KinesisConnector kinesisConnector = TestUtils.createConnector(kinesisPlugin, properties, false);

    // Sleep for 10 seconds to ensure that we've loaded the tables:
    try {
        Thread.sleep(10000);
        log.info("done sleeping, will now try to read the tables.");
    }
    catch (InterruptedException e) {
        log.error("interrupted ...");
    }

    KinesisMetadata metadata = (KinesisMetadata) kinesisConnector.getMetadata(SESSION, new ConnectorTransactionHandle() {});
    assertNotNull(metadata);

    SchemaTableName tblName = new SchemaTableName("default", "test123");
    KinesisTableHandle tableHandle = metadata.getTableHandle(SESSION, tblName);
    SchemaTableName tableSchemaName = tableHandle.toSchemaTableName();
    assertEquals(tableSchemaName.getSchemaName(), "default");
    assertEquals(tableSchemaName.getTableName(), "test123");
    assertEquals(tableHandle.getStreamName(), "test123");
    assertEquals(tableHandle.getMessageDataFormat(), "json");

    Map<String, ColumnHandle> columnHandles = metadata.getColumnHandles(SESSION, tableHandle);
    assertEquals(columnHandles.size(), 12);
}
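ConnectorTransactionHandle is a marker interface, which is why the test can pass an empty anonymous implementation to getMetadata when the connector does not track per-transaction state. A connector that wants a reusable stateless handle typically declares a singleton enum instead; a hypothetical sketch (not the Kinesis connector's actual class):

// Hypothetical stateless transaction handle: a singleton that only satisfies
// the SPI contract and carries no per-transaction state.
public enum ExampleTransactionHandle
        implements ConnectorTransactionHandle
{
    INSTANCE
}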
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
The class TpchIndexProvider, method getIndex.
@Override
public ConnectorIndex getIndex(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorIndexHandle indexHandle, List<ColumnHandle> lookupSchema, List<ColumnHandle> outputSchema)
{
    TpchIndexHandle tpchIndexHandle = (TpchIndexHandle) indexHandle;
    Map<ColumnHandle, NullableValue> fixedValues = TupleDomain.extractFixedValues(tpchIndexHandle.getFixedValues()).get();
    checkArgument(lookupSchema.stream().noneMatch(handle -> fixedValues.keySet().contains(handle)), "Lookup columnHandles are not expected to overlap with the fixed value predicates");

    // Establish an order for the fixedValues
    List<ColumnHandle> fixedValueColumns = ImmutableList.copyOf(fixedValues.keySet());

    // Extract the fixedValues as their raw values and types
    List<Object> rawFixedValues = new ArrayList<>(fixedValueColumns.size());
    List<Type> rawFixedTypes = new ArrayList<>(fixedValueColumns.size());
    for (ColumnHandle fixedValueColumn : fixedValueColumns) {
        rawFixedValues.add(fixedValues.get(fixedValueColumn).getValue());
        rawFixedTypes.add(((TpchColumnHandle) fixedValueColumn).getType());
    }

    // Establish the schema after we append the fixed values to the lookup keys
    List<ColumnHandle> finalLookupSchema = ImmutableList.<ColumnHandle>builder()
            .addAll(lookupSchema)
            .addAll(fixedValueColumns)
            .build();

    Optional<TpchIndexedData.IndexedTable> indexedTable = indexedData.getIndexedTable(tpchIndexHandle.getTableName(), tpchIndexHandle.getScaleFactor(), tpchIndexHandle.getIndexColumnNames());
    checkState(indexedTable.isPresent());
    TpchIndexedData.IndexedTable table = indexedTable.get();

    // Compute how to map from the final lookup schema to the table index key order
    List<Integer> keyRemap = computeRemap(handleToNames(finalLookupSchema), table.getKeyColumns());
    Function<RecordSet, RecordSet> keyFormatter = key -> new MappedRecordSet(new AppendingRecordSet(key, rawFixedValues, rawFixedTypes), keyRemap);

    // Compute how to map from the output of the indexed data to the expected output schema
    List<Integer> outputRemap = computeRemap(table.getOutputColumns(), handleToNames(outputSchema));
    Function<RecordSet, RecordSet> outputFormatter = output -> new MappedRecordSet(output, outputRemap);

    return new TpchConnectorIndex(keyFormatter, outputFormatter, table);
}
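The remapping above relies on two small helpers, handleToNames and computeRemap. A plausible sketch of both, assuming TpchColumnHandle exposes its name via getColumnName() (the project's actual implementations may differ):

// Hedged sketch: map TPC-H column handles to their column names.
private static List<String> handleToNames(List<ColumnHandle> columnHandles)
{
    return columnHandles.stream()
            .map(TpchColumnHandle.class::cast)
            .map(TpchColumnHandle::getColumnName)
            .collect(toImmutableList());
}

// Hedged sketch: for each column of the desired schema, find its position in
// the source schema, failing fast on unknown column names.
private static List<Integer> computeRemap(List<String> sourceSchema, List<String> desiredSchema)
{
    ImmutableList.Builder<Integer> remap = ImmutableList.builder();
    for (String columnName : desiredSchema) {
        int index = sourceSchema.indexOf(columnName);
        checkArgument(index >= 0, "Column not found in source schema: %s", columnName);
        remap.add(index);
    }
    return remap.build();
}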
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
The class IndexedTpchConnectorFactory, method create.
@Override
public Connector create(String catalogName, Map<String, String> properties, ConnectorContext context)
{
    int splitsPerNode = getSplitsPerNode(properties);
    TpchIndexedData indexedData = new TpchIndexedData(indexSpec);
    NodeManager nodeManager = context.getNodeManager();

    return new Connector()
    {
        @Override
        public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit)
        {
            return TpchTransactionHandle.INSTANCE;
        }

        @Override
        public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle)
        {
            return new TpchIndexMetadata(indexedData);
        }

        @Override
        public ConnectorSplitManager getSplitManager()
        {
            return new TpchSplitManager(nodeManager, splitsPerNode);
        }

        @Override
        public ConnectorRecordSetProvider getRecordSetProvider()
        {
            return new TpchRecordSetProvider(DecimalTypeMapping.DOUBLE);
        }

        @Override
        public ConnectorIndexProvider getIndexProvider()
        {
            return new TpchIndexProvider(indexedData);
        }

        @Override
        public Set<SystemTable> getSystemTables()
        {
            return ImmutableSet.of(new ExampleSystemTable());
        }

        @Override
        public ConnectorNodePartitioningProvider getNodePartitioningProvider()
        {
            return new TpchNodePartitioningProvider(nodeManager, splitsPerNode);
        }
    };
}
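The factory derives its split count from the catalog properties through a getSplitsPerNode helper. A minimal sketch of such a helper, assuming a tpch.splits-per-node property that defaults to the number of available processors (the actual implementation may differ):

// Hedged sketch, not the factory's verbatim code: parse "tpch.splits-per-node",
// falling back to the local processor count when the property is absent.
private int getSplitsPerNode(Map<String, String> properties)
{
    String value = properties.getOrDefault("tpch.splits-per-node", String.valueOf(Runtime.getRuntime().availableProcessors()));
    try {
        return Integer.parseInt(value);
    }
    catch (NumberFormatException e) {
        throw new IllegalArgumentException("Invalid property tpch.splits-per-node: " + value, e);
    }
}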
Use of io.trino.spi.connector.ConnectorTransactionHandle in project trino by trinodb.
The class PhoenixSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle table, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter)
{
    JdbcTableHandle tableHandle = (JdbcTableHandle) table;
    try (Connection connection = phoenixClient.getConnection(session)) {
        List<JdbcColumnHandle> columns = tableHandle.getColumns()
                .map(columnSet -> columnSet.stream()
                        .map(JdbcColumnHandle.class::cast)
                        .collect(toList()))
                .orElseGet(() -> phoenixClient.getColumns(session, tableHandle));
        PhoenixPreparedStatement inputQuery = (PhoenixPreparedStatement) phoenixClient.prepareStatement(session, connection, tableHandle, columns, Optional.empty());
        int maxScansPerSplit = session.getProperty(PhoenixSessionProperties.MAX_SCANS_PER_SPLIT, Integer.class);
        List<ConnectorSplit> splits = getSplits(inputQuery, maxScansPerSplit).stream()
                .map(PhoenixInputSplit.class::cast)
                .map(split -> new PhoenixSplit(getSplitAddresses(split), SerializedPhoenixInputSplit.serialize(split)))
                .collect(toImmutableList());
        return new FixedSplitSource(splits);
    }
    catch (IOException | SQLException e) {
        throw new TrinoException(PHOENIX_SPLIT_ERROR, "Couldn't get Phoenix splits", e);
    }
}
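Each Phoenix input split is wrapped in a PhoenixSplit together with the hosts it should be scheduled on. A hedged sketch of the getSplitAddresses helper used above, assuming the addresses come from the Hadoop split's reported locations (the real PhoenixSplitManager helper may differ):

// Hedged sketch: turn the Hadoop input split's reported locations into Trino
// host addresses; InputSplit.getLocations() declares the checked exceptions caught here.
private static List<HostAddress> getSplitAddresses(InputSplit split)
{
    try {
        return Arrays.stream(split.getLocations())
                .map(HostAddress::fromString)
                .collect(toImmutableList());
    }
    catch (IOException | InterruptedException e) {
        throw new TrinoException(PHOENIX_SPLIT_ERROR, "Failed to get Phoenix split addresses", e);
    }
}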