Use of io.trino.spi.type.BooleanType.BOOLEAN in project trino by trinodb.
The class BaseRaptorConnectorTest, method testTablesSystemTable.
@Test
public void testTablesSystemTable() {
    // Create six tables covering the combinations of temporal, ordering,
    // bucketing, distribution, and organization properties
    assertUpdate("CREATE TABLE system_tables_test0 (c00 timestamp, c01 varchar, c02 double, c03 bigint, c04 bigint)");
    assertUpdate("CREATE TABLE system_tables_test1 (c10 timestamp, c11 varchar, c12 double, c13 bigint, c14 bigint) " +
            "WITH (temporal_column = 'c10')");
    assertUpdate("CREATE TABLE system_tables_test2 (c20 timestamp, c21 varchar, c22 double, c23 bigint, c24 bigint) " +
            "WITH (temporal_column = 'c20', ordering = ARRAY['c22', 'c21'])");
    assertUpdate("CREATE TABLE system_tables_test3 (c30 timestamp, c31 varchar, c32 double, c33 bigint, c34 bigint) " +
            "WITH (temporal_column = 'c30', bucket_count = 40, bucketed_on = ARRAY ['c34', 'c33'])");
    assertUpdate("CREATE TABLE system_tables_test4 (c40 timestamp, c41 varchar, c42 double, c43 bigint, c44 bigint) " +
            "WITH (temporal_column = 'c40', ordering = ARRAY['c41', 'c42'], distribution_name = 'test_distribution', " +
            "bucket_count = 50, bucketed_on = ARRAY ['c43', 'c44'])");
    assertUpdate("CREATE TABLE system_tables_test5 (c50 timestamp, c51 varchar, c52 double, c53 bigint, c54 bigint) " +
            "WITH (ordering = ARRAY['c51', 'c52'], distribution_name = 'test_distribution', bucket_count = 50, " +
            "bucketed_on = ARRAY ['c53', 'c54'], organized = true)");

    MaterializedResult actualResults = computeActual("SELECT * FROM system.tables");
    assertEquals(actualResults.getTypes(), ImmutableList.builder()
            .add(VARCHAR) // table_schema
            .add(VARCHAR) // table_name
            .add(VARCHAR) // temporal_column
            .add(new ArrayType(VARCHAR)) // ordering_columns
            .add(VARCHAR) // distribution_name
            .add(BIGINT) // bucket_count
            .add(new ArrayType(VARCHAR)) // bucket_columns
            .add(BOOLEAN) // organized
            .build());

    // Index the rows for our test tables by table_name
    Map<String, MaterializedRow> map = actualResults.getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
            .collect(toImmutableMap(row -> ((String) row.getField(1)), identity()));
    assertEquals(map.size(), 6);
    assertEquals(map.get("system_tables_test0").getFields(),
            asList("tpch", "system_tables_test0", null, null, null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test1").getFields(),
            asList("tpch", "system_tables_test1", "c10", null, null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test2").getFields(),
            asList("tpch", "system_tables_test2", "c20", ImmutableList.of("c22", "c21"), null, null, null, Boolean.FALSE));
    assertEquals(map.get("system_tables_test3").getFields(),
            asList("tpch", "system_tables_test3", "c30", null, null, 40L, ImmutableList.of("c34", "c33"), Boolean.FALSE));
    assertEquals(map.get("system_tables_test4").getFields(),
            asList("tpch", "system_tables_test4", "c40", ImmutableList.of("c41", "c42"), "test_distribution", 50L, ImmutableList.of("c43", "c44"), Boolean.FALSE));
    assertEquals(map.get("system_tables_test5").getFields(),
            asList("tpch", "system_tables_test5", null, ImmutableList.of("c51", "c52"), "test_distribution", 50L, ImmutableList.of("c53", "c54"), Boolean.TRUE));

    // Filtering by table_schema and table_name should narrow the system table output
    actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch'");
    long actualRowCount = actualResults.getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(1)).startsWith("system_tables_test"))
            .count();
    assertEquals(actualRowCount, 6);

    actualResults = computeActual("SELECT * FROM system.tables WHERE table_name = 'system_tables_test3'");
    assertEquals(actualResults.getMaterializedRows().size(), 1);

    actualResults = computeActual("SELECT * FROM system.tables WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
    assertEquals(actualResults.getMaterializedRows().size(), 1);

    actualResults = computeActual("SELECT distribution_name, bucket_count, bucketing_columns, ordering_columns, temporal_column, organized " +
            "FROM system.tables " +
            "WHERE table_schema = 'tpch' and table_name = 'system_tables_test3'");
    assertEquals(actualResults.getTypes(), ImmutableList.of(VARCHAR, BIGINT, new ArrayType(VARCHAR), new ArrayType(VARCHAR), VARCHAR, BOOLEAN));
    assertEquals(actualResults.getMaterializedRows().size(), 1);

    assertUpdate("DROP TABLE system_tables_test0");
    assertUpdate("DROP TABLE system_tables_test1");
    assertUpdate("DROP TABLE system_tables_test2");
    assertUpdate("DROP TABLE system_tables_test3");
    assertUpdate("DROP TABLE system_tables_test4");
    assertUpdate("DROP TABLE system_tables_test5");

    assertEquals(computeActual("SELECT * FROM system.tables WHERE table_schema IN ('foo', 'bar')").getRowCount(), 0);
}
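A side note on the row-indexing idiom above: keying MaterializedRows by a column value is a pattern worth extracting once several tests need it. A minimal sketch of such a helper, assuming access to io.trino.testing's MaterializedResult and MaterializedRow (the name rowsByPrefix is ours, not Trino's):

import static com.google.common.collect.ImmutableMap.toImmutableMap;

import io.trino.testing.MaterializedResult;
import io.trino.testing.MaterializedRow;
import java.util.Map;
import java.util.function.Function;

// Hypothetical helper: index the rows of a MaterializedResult by the string
// value in the given column, keeping only rows whose key starts with the prefix.
static Map<String, MaterializedRow> rowsByPrefix(MaterializedResult result, int column, String prefix) {
    return result.getMaterializedRows().stream()
            .filter(row -> ((String) row.getField(column)).startsWith(prefix))
            .collect(toImmutableMap(row -> (String) row.getField(column), Function.identity()));
}

With it, the map built in the test above becomes rowsByPrefix(actualResults, 1, "system_tables_test").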
Use of io.trino.spi.type.BooleanType.BOOLEAN in project trino by trinodb.
The class H2QueryRunner, method rowMapper.
private static RowMapper<MaterializedRow> rowMapper(List<? extends Type> types) {
    return (resultSet, context) -> {
        int count = resultSet.getMetaData().getColumnCount();
        checkArgument(types.size() == count, "expected types count (%s) does not match actual column count (%s)", types.size(), count);
        List<Object> row = new ArrayList<>(count);
        for (int i = 1; i <= count; i++) {
            Type type = types.get(i - 1);
            if (BOOLEAN.equals(type)) {
                boolean booleanValue = resultSet.getBoolean(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(booleanValue);
                }
            } else if (TINYINT.equals(type)) {
                byte byteValue = resultSet.getByte(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(byteValue);
                }
            } else if (SMALLINT.equals(type)) {
                short shortValue = resultSet.getShort(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(shortValue);
                }
            } else if (INTEGER.equals(type)) {
                int intValue = resultSet.getInt(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(intValue);
                }
            } else if (BIGINT.equals(type)) {
                long longValue = resultSet.getLong(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(longValue);
                }
            } else if (REAL.equals(type)) {
                float floatValue = resultSet.getFloat(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(floatValue);
                }
            } else if (DOUBLE.equals(type)) {
                double doubleValue = resultSet.getDouble(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(doubleValue);
                }
            } else if (JSON.equals(type)) {
                String stringValue = resultSet.getString(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(jsonParse(utf8Slice(stringValue)).toStringUtf8());
                }
            } else if (type instanceof VarcharType) {
                String stringValue = resultSet.getString(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(stringValue);
                }
            } else if (type instanceof CharType) {
                String stringValue = resultSet.getString(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(padSpaces(stringValue, (CharType) type));
                }
            } else if (VARBINARY.equals(type)) {
                byte[] bytes = resultSet.getBytes(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(bytes);
                }
            } else if (DATE.equals(type)) {
                // resultSet.getDate(i) doesn't work if the JVM's zone skipped the day being retrieved (e.g. 2011-12-30 in the Pacific/Apia zone)
                LocalDate dateValue = resultSet.getObject(i, LocalDate.class);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(dateValue);
                }
            } else if (type instanceof TimeType) {
                // resultSet.getTime(i) doesn't work if the JVM's zone had a forward offset change during 1970-01-01 (e.g. the America/Hermosillo zone)
                LocalTime timeValue = resultSet.getObject(i, LocalTime.class);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(timeValue);
                }
            } else if (TIME_WITH_TIME_ZONE.equals(type)) {
                throw new UnsupportedOperationException("H2 does not support TIME WITH TIME ZONE");
            } else if (type instanceof TimestampType) {
                // resultSet.getTimestamp(i) doesn't work if the JVM's zone had a forward offset at the date/time being retrieved
                LocalDateTime timestampValue;
                try {
                    timestampValue = resultSet.getObject(i, LocalDateTime.class);
                } catch (SQLException first) {
                    // H2's JDBC driver cannot convert DATE to LocalDateTime (even though it can convert to java.sql.Timestamp), so we do it manually
                    try {
                        timestampValue = Optional.ofNullable(resultSet.getObject(i, LocalDate.class))
                                .map(LocalDate::atStartOfDay)
                                .orElse(null);
                    } catch (RuntimeException e) {
                        first.addSuppressed(e);
                        throw first;
                    }
                }
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(timestampValue);
                }
            } else if (TIMESTAMP_WITH_TIME_ZONE.equals(type)) {
                // This means H2 is unsuitable for testing TIMESTAMP WITH TIME ZONE-bearing queries. Those need to be tested manually.
                throw new UnsupportedOperationException();
            } else if (UUID.equals(type)) {
                java.util.UUID value = (java.util.UUID) resultSet.getObject(i);
                row.add(value);
            } else if (UNKNOWN.equals(type)) {
                Object objectValue = resultSet.getObject(i);
                checkState(resultSet.wasNull(), "Expected a null value, but got %s", objectValue);
                row.add(null);
            } else if (type instanceof DecimalType) {
                DecimalType decimalType = (DecimalType) type;
                BigDecimal decimalValue = resultSet.getBigDecimal(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(decimalValue
                            .setScale(decimalType.getScale(), BigDecimal.ROUND_HALF_UP)
                            .round(new MathContext(decimalType.getPrecision())));
                }
            } else if (type instanceof ArrayType) {
                Array array = resultSet.getArray(i);
                if (resultSet.wasNull()) {
                    row.add(null);
                } else {
                    row.add(newArrayList((Object[]) array.getArray()));
                }
            } else {
                throw new AssertionError("unhandled type: " + type);
            }
        }
        return new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, row);
    };
}
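The lambda above satisfies JDBI's RowMapper interface, so the result plugs straight into a JDBI query. A minimal usage sketch, assuming an in-memory H2 database and that the (private) mapper is reachable from the call site; the query and type list are illustrative:

import static io.trino.spi.type.BigintType.BIGINT;
import static io.trino.spi.type.BooleanType.BOOLEAN;

import io.trino.testing.MaterializedRow;
import java.util.List;
import org.jdbi.v3.core.Jdbi;

public static void main(String[] args) {
    // Map a two-column (BIGINT, BOOLEAN) result to MaterializedRows
    Jdbi jdbi = Jdbi.create("jdbc:h2:mem:test");
    List<MaterializedRow> rows = jdbi.withHandle(handle ->
            handle.createQuery("SELECT 1 AS id, TRUE AS active")
                    .map(rowMapper(List.of(BIGINT, BOOLEAN)))
                    .list());
    System.out.println(rows); // prints the single mapped row
}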
Use of io.trino.spi.type.BooleanType.BOOLEAN in project trino by trinodb.
The class AbstractTestHive, method doTestTransactionDeleteInsert.
private void doTestTransactionDeleteInsert(
        HiveStorageFormat storageFormat,
        SchemaTableName tableName,
        Domain domainToDrop,
        MaterializedResult insertData,
        MaterializedResult expectedData,
        TransactionDeleteInsertTestTag tag,
        boolean expectQuerySucceed,
        Optional<ConflictTrigger> conflictTrigger)
        throws Exception {
    Path writePath = null;
    Path targetPath = null;
    try (Transaction transaction = newTransaction()) {
        try {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            ConnectorSession session;
            rollbackIfEquals(tag, ROLLBACK_RIGHT_AWAY);

            // Query 1: delete
            session = newSession();
            HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("pk2");
            TupleDomain<ColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of(dsColumnHandle, domainToDrop));
            Constraint constraint = new Constraint(tupleDomain, tupleDomain.asPredicate(), tupleDomain.getDomains().orElseThrow().keySet());
            tableHandle = applyFilter(metadata, tableHandle, constraint);
            tableHandle = metadata.applyDelete(session, tableHandle).get();
            metadata.executeDelete(session, tableHandle);
            rollbackIfEquals(tag, ROLLBACK_AFTER_DELETE);

            // Query 2: insert
            session = newSession();
            ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
            rollbackIfEquals(tag, ROLLBACK_AFTER_BEGIN_INSERT);
            writePath = getStagingPathRoot(insertTableHandle);
            targetPath = getTargetPathRoot(insertTableHandle);
            ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
            sink.appendPage(insertData.toPage());
            rollbackIfEquals(tag, ROLLBACK_AFTER_APPEND_PAGE);
            Collection<Slice> fragments = getFutureValue(sink.finish());
            rollbackIfEquals(tag, ROLLBACK_AFTER_SINK_FINISH);
            metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
            rollbackIfEquals(tag, ROLLBACK_AFTER_FINISH_INSERT);
            assertEquals(tag, COMMIT);

            if (conflictTrigger.isPresent()) {
                JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
                List<PartitionUpdate> partitionUpdates = fragments.stream()
                        .map(Slice::getBytes)
                        .map(partitionUpdateCodec::fromJson)
                        .collect(toList());
                conflictTrigger.get().triggerConflict(session, tableName, insertTableHandle, partitionUpdates);
            }
            transaction.commit();
            if (conflictTrigger.isPresent()) {
                assertTrue(expectQuerySucceed);
                conflictTrigger.get().verifyAndCleanup(session, tableName);
            }
        } catch (TestingRollbackException e) {
            transaction.rollback();
        } catch (TrinoException e) {
            assertFalse(expectQuerySucceed);
            if (conflictTrigger.isPresent()) {
                conflictTrigger.get().verifyAndCleanup(newSession(), tableName);
            }
        }
    }

    // check that temporary files are removed
    if (writePath != null && !writePath.equals(targetPath)) {
        HdfsContext context = new HdfsContext(newSession());
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, writePath);
        assertFalse(fileSystem.exists(writePath));
    }

    try (Transaction transaction = newTransaction()) {
        // verify partitions
        List<String> partitionNames = transaction.getMetastore()
                .getPartitionNames(tableName.getSchemaName(), tableName.getTableName())
                .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
        assertEqualsIgnoreOrder(
                partitionNames,
                expectedData.getMaterializedRows().stream()
                        .map(row -> format("pk1=%s/pk2=%s", row.getField(1), row.getField(2)))
                        .distinct()
                        .collect(toImmutableList()));

        // load the new table
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

        // verify the data
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), expectedData.getMaterializedRows());
    }
}
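The rollbackIfEquals calls are what let a single test body exercise every abort stage: when the current tag matches the stage under test, the helper throws a marker exception that the catch block converts into transaction.rollback(). A sketch of that plumbing as we read it (the actual definitions live elsewhere in AbstractTestHive):

// Abort the transaction at the configured stage by throwing the marker exception
private static void rollbackIfEquals(TransactionDeleteInsertTestTag tag, TransactionDeleteInsertTestTag expectedTag) {
    if (expectedTag == tag) {
        throw new TestingRollbackException();
    }
}

// Unchecked, so it can propagate through the ConnectorMetadata calls above
private static class TestingRollbackException extends RuntimeException {
}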
Use of io.trino.spi.type.BooleanType.BOOLEAN in project trino by trinodb.
The class OrcTester, method preprocessWriteValueHive.
private static Object preprocessWriteValueHive(Type type, Object value) {
    if (value == null) {
        return null;
    }
    if (type.equals(BOOLEAN)) {
        return value;
    }
    if (type.equals(TINYINT)) {
        return ((Number) value).byteValue();
    }
    if (type.equals(SMALLINT)) {
        return ((Number) value).shortValue();
    }
    if (type.equals(INTEGER)) {
        return ((Number) value).intValue();
    }
    if (type.equals(BIGINT)) {
        return ((Number) value).longValue();
    }
    if (type.equals(REAL)) {
        return ((Number) value).floatValue();
    }
    if (type.equals(DOUBLE)) {
        return ((Number) value).doubleValue();
    }
    if (type instanceof VarcharType) {
        return value;
    }
    if (type instanceof CharType) {
        return new HiveChar((String) value, ((CharType) type).getLength());
    }
    if (type.equals(VARBINARY)) {
        return ((SqlVarbinary) value).getBytes();
    }
    if (type.equals(DATE)) {
        return Date.ofEpochDay(((SqlDate) value).getDays());
    }
    if (type.equals(TIMESTAMP_MILLIS) || type.equals(TIMESTAMP_MICROS) || type.equals(TIMESTAMP_NANOS)) {
        LocalDateTime dateTime = ((SqlTimestamp) value).toLocalDateTime();
        return Timestamp.ofEpochSecond(dateTime.toEpochSecond(ZoneOffset.UTC), dateTime.getNano());
    }
    if (type.equals(TIMESTAMP_TZ_MILLIS) || type.equals(TIMESTAMP_TZ_MICROS) || type.equals(TIMESTAMP_TZ_NANOS)) {
        SqlTimestampWithTimeZone timestamp = (SqlTimestampWithTimeZone) value;
        int nanosOfMilli = roundDiv(timestamp.getPicosOfMilli(), PICOSECONDS_PER_NANOSECOND);
        return Timestamp.ofEpochMilli(timestamp.getEpochMillis(), nanosOfMilli);
    }
    if (type instanceof DecimalType) {
        return HiveDecimal.create(((SqlDecimal) value).toBigDecimal());
    }
    if (type instanceof ArrayType) {
        Type elementType = type.getTypeParameters().get(0);
        return ((List<?>) value).stream()
                .map(element -> preprocessWriteValueHive(elementType, element))
                .collect(toList());
    }
    if (type instanceof MapType) {
        Type keyType = type.getTypeParameters().get(0);
        Type valueType = type.getTypeParameters().get(1);
        Map<Object, Object> newMap = new HashMap<>();
        for (Entry<?, ?> entry : ((Map<?, ?>) value).entrySet()) {
            newMap.put(preprocessWriteValueHive(keyType, entry.getKey()), preprocessWriteValueHive(valueType, entry.getValue()));
        }
        return newMap;
    }
    if (type instanceof RowType) {
        List<?> fieldValues = (List<?>) value;
        List<Type> fieldTypes = type.getTypeParameters();
        List<Object> newStruct = new ArrayList<>();
        for (int fieldId = 0; fieldId < fieldValues.size(); fieldId++) {
            newStruct.add(preprocessWriteValueHive(fieldTypes.get(fieldId), fieldValues.get(fieldId)));
        }
        return newStruct;
    }
    throw new IllegalArgumentException("unsupported type: " + type);
}
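Because the conversion recurses through container types, one call handles nested values end to end. A small illustrative example, assuming the (private) method is accessible from a test; the values are arbitrary:

import static io.trino.spi.type.BigintType.BIGINT;

import io.trino.spi.type.ArrayType;
import java.util.List;

static void example() {
    // A Trino ARRAY(BIGINT) value arrives as a List of Numbers; each element
    // is normalized to a Java long for the Hive ORC writer.
    Object hiveValue = preprocessWriteValueHive(new ArrayType(BIGINT), List.of(1, 2, 3));
    // hiveValue is now a List containing 1L, 2L, 3L
}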
Use of io.trino.spi.type.BooleanType.BOOLEAN in project trino by trinodb.
The class DeltaLakeMetadata, method createTable.
@Override
public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting) {
    SchemaTableName schemaTableName = tableMetadata.getTable();
    String schemaName = schemaTableName.getSchemaName();
    String tableName = schemaTableName.getTableName();
    Database schema = metastore.getDatabase(schemaName).orElseThrow(() -> new SchemaNotFoundException(schemaName));
    boolean external = true;
    String location = getLocation(tableMetadata.getProperties());
    if (location == null) {
        Optional<String> schemaLocation = getSchemaLocation(schema);
        if (schemaLocation.isEmpty()) {
            throw new TrinoException(NOT_SUPPORTED, "The 'location' property must be specified either for the table or the schema");
        }
        location = new Path(schemaLocation.get(), tableName).toString();
        checkPathContainsNoFiles(session, new Path(location));
        external = false;
    }
    Path targetPath = new Path(location);
    ensurePathExists(session, targetPath);
    Path deltaLogDirectory = getTransactionLogDir(targetPath);
    Optional<Long> checkpointInterval = DeltaLakeTableProperties.getCheckpointInterval(tableMetadata.getProperties());
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(new HdfsContext(session), targetPath);
        if (!fileSystem.exists(deltaLogDirectory)) {
            validateTableColumns(tableMetadata);
            List<String> partitionColumns = getPartitionedBy(tableMetadata.getProperties());
            List<DeltaLakeColumnHandle> deltaLakeColumns = tableMetadata.getColumns().stream()
                    .map(column -> toColumnHandle(column, partitionColumns))
                    .collect(toImmutableList());
            TransactionLogWriter transactionLogWriter = transactionLogWriterFactory.newWriterWithoutTransactionIsolation(session, targetPath.toString());
            appendInitialTableEntries(transactionLogWriter, deltaLakeColumns, partitionColumns, buildDeltaMetadataConfiguration(checkpointInterval), CREATE_TABLE_OPERATION, session, nodeVersion, nodeId);
            setRollback(() -> deleteRecursivelyIfExists(new HdfsContext(session), hdfsEnvironment, deltaLogDirectory));
            transactionLogWriter.flush();
        }
    } catch (IOException e) {
        throw new TrinoException(DELTA_LAKE_BAD_WRITE, "Unable to access file system for: " + location, e);
    }
    Table.Builder tableBuilder = Table.builder()
            .setDatabaseName(schemaName)
            .setTableName(tableName)
            .setOwner(Optional.of(session.getUser()))
            .setTableType(external ? EXTERNAL_TABLE.name() : MANAGED_TABLE.name())
            .setDataColumns(DUMMY_DATA_COLUMNS)
            .setParameters(deltaTableProperties(session, location, external));
    setDeltaStorageFormat(tableBuilder, location, targetPath);
    Table table = tableBuilder.build();
    PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(table.getOwner().orElseThrow());
    metastore.createTable(session, table, principalPrivileges);
}
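The fileSystem.exists(deltaLogDirectory) probe is what separates registering an existing Delta table from laying down a fresh transaction log. A sketch of the locator it relies on, assuming the standard _delta_log layout at the table root (the real helper lives in Trino's Delta Lake transaction log utilities):

import org.apache.hadoop.fs.Path;

// Every Delta Lake table keeps its commit log in _delta_log under the table root
public static Path getTransactionLogDir(Path tableLocation) {
    return new Path(tableLocation, "_delta_log");
}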