Example 1 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

In class RcFileFileWriterFactory, method createFileWriter:

@Override
public Optional<HiveFileWriter> createFileWriter(Path path, List<String> inputColumnNames, StorageFormat storageFormat, Properties schema, JobConf configuration, ConnectorSession session, Optional<AcidOutputFormat.Options> acidOptions, Optional<HiveACIDWriteType> acidWriteType) {
    if (!RCFileOutputFormat.class.getName().equals(storageFormat.getOutputFormat())) {
        return Optional.empty();
    }
    RcFileEncoding rcFileEncoding;
    if (LazyBinaryColumnarSerDe.class.getName().equals(storageFormat.getSerDe())) {
        rcFileEncoding = new BinaryRcFileEncoding(timeZone);
    } else if (ColumnarSerDe.class.getName().equals(storageFormat.getSerDe())) {
        rcFileEncoding = RcFilePageSourceFactory.createTextVectorEncoding(schema);
    } else {
        return Optional.empty();
    }
    Optional<String> codecName = Optional.ofNullable(configuration.get(FileOutputFormat.COMPRESS_CODEC));
    // existing tables and partitions may have columns in a different order than the writer is providing, so build
    // an index to rearrange columns in the proper order
    List<String> fileColumnNames = getColumnNames(schema);
    List<Type> fileColumnTypes = getColumnTypes(schema).stream().map(hiveType -> hiveType.getType(typeManager)).collect(toList());
    int[] fileInputColumnIndexes = fileColumnNames.stream().mapToInt(inputColumnNames::indexOf).toArray();
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getUser(), path, configuration);
        OutputStream outputStream = fileSystem.create(path);
        Optional<Supplier<RcFileDataSource>> validationInputFactory = Optional.empty();
        if (HiveSessionProperties.isRcfileOptimizedWriterValidate(session)) {
            validationInputFactory = Optional.of(() -> {
                try {
                    return new HdfsRcFileDataSource(path.toString(), fileSystem.open(path), fileSystem.getFileStatus(path).getLen(), stats);
                } catch (IOException e) {
                    throw new PrestoException(HiveErrorCode.HIVE_WRITE_VALIDATION_FAILED, e);
                }
            });
        }
        Callable<Void> rollbackAction = () -> {
            fileSystem.delete(path, false);
            return null;
        };
        return Optional.of(new RcFileFileWriter(
                outputStream,
                rollbackAction,
                rcFileEncoding,
                fileColumnTypes,
                codecName,
                fileInputColumnIndexes,
                ImmutableMap.<String, String>builder()
                        .put(HiveMetadata.PRESTO_VERSION_NAME, nodeVersion.toString())
                        .put(HiveMetadata.PRESTO_QUERY_ID_NAME, session.getQueryId())
                        .build(),
                validationInputFactory));
    } catch (Exception e) {
        throw new PrestoException(HiveErrorCode.HIVE_WRITER_OPEN_ERROR, "Error creating RCFile file", e);
    }
}
Also used : DateTimeZone(org.joda.time.DateTimeZone) StorageFormat(io.prestosql.plugin.hive.metastore.StorageFormat) FileSystem(org.apache.hadoop.fs.FileSystem) LazyBinaryColumnarSerDe(org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe) HiveUtil.getColumnTypes(io.prestosql.plugin.hive.HiveUtil.getColumnTypes) Callable(java.util.concurrent.Callable) RcFilePageSourceFactory(io.prestosql.plugin.hive.rcfile.RcFilePageSourceFactory) Supplier(java.util.function.Supplier) Inject(javax.inject.Inject) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) HiveUtil.getColumnNames(io.prestosql.plugin.hive.HiveUtil.getColumnNames) Objects.requireNonNull(java.util.Objects.requireNonNull) Path(org.apache.hadoop.fs.Path) Type(io.prestosql.spi.type.Type) OutputStream(java.io.OutputStream) PrestoException(io.prestosql.spi.PrestoException) RcFileEncoding(io.prestosql.rcfile.RcFileEncoding) Properties(java.util.Properties) ImmutableMap(com.google.common.collect.ImmutableMap) RCFileOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat) TypeManager(io.prestosql.spi.type.TypeManager) AcidOutputFormat(org.apache.hadoop.hive.ql.io.AcidOutputFormat) IOException(java.io.IOException) BinaryRcFileEncoding(io.prestosql.rcfile.binary.BinaryRcFileEncoding) ColumnarSerDe(org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe) JobConf(org.apache.hadoop.mapred.JobConf) FileOutputFormat(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat) List(java.util.List) Collectors.toList(java.util.stream.Collectors.toList) HdfsRcFileDataSource(io.prestosql.plugin.hive.rcfile.HdfsRcFileDataSource) Optional(java.util.Optional) RcFileDataSource(io.prestosql.rcfile.RcFileDataSource)
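A minimal sketch (not part of hetu-core, assuming StorageFormat's create(serDe, inputFormat, outputFormat) factory) of the StorageFormat values that satisfy or fail the guards above: only an RCFile output format paired with LazyBinaryColumnarSerDe or ColumnarSerDe reaches the writer construction; everything else gets Optional.empty().

// Hypothetical illustration: one StorageFormat the RcFileFileWriterFactory above would accept,
// and one it would reject.
import io.prestosql.plugin.hive.metastore.StorageFormat;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;
import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
import org.apache.hadoop.mapred.TextInputFormat;

public class RcFileStorageFormatExample
{
    public static void main(String[] args)
    {
        // Accepted: RCFile output format with the binary columnar serde
        StorageFormat rcBinary = StorageFormat.create(
                LazyBinaryColumnarSerDe.class.getName(),
                RCFileInputFormat.class.getName(),
                RCFileOutputFormat.class.getName());
        System.out.println(RCFileOutputFormat.class.getName().equals(rcBinary.getOutputFormat())); // true

        // Rejected: a text table fails the first guard, so createFileWriter returns Optional.empty()
        StorageFormat text = StorageFormat.create(
                LazySimpleSerDe.class.getName(),
                TextInputFormat.class.getName(),
                HiveIgnoreKeyTextOutputFormat.class.getName());
        System.out.println(RCFileOutputFormat.class.getName().equals(text.getOutputFormat())); // false
    }
}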

Example 2 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

In class OrcFileWriterFactory, method createFileWriter:

@Override
public Optional<HiveFileWriter> createFileWriter(Path path, List<String> inputColumnNames, StorageFormat storageFormat, Properties schema, JobConf configuration, ConnectorSession session, Optional<AcidOutputFormat.Options> acidOptions, Optional<HiveACIDWriteType> acidWriteType) {
    if (!OrcOutputFormat.class.getName().equals(storageFormat.getOutputFormat())) {
        return Optional.empty();
    }
    CompressionKind compression = getCompression(schema, configuration);
    // existing tables and partitions may have columns in a different order than the writer is providing, so build
    // an index to rearrange columns in the proper order
    List<String> fileColumnNames = getColumnNames(schema);
    List<Type> fileColumnTypes = getColumnTypes(schema).stream().map(hiveType -> hiveType.getType(typeManager)).collect(toList());
    List<Type> dataFileColumnTypes = fileColumnTypes;
    int[] fileInputColumnIndexes = fileColumnNames.stream().mapToInt(inputColumnNames::indexOf).toArray();
    Optional<HiveFileWriter> deleteDeltaWriter = Optional.empty();
    if (AcidUtils.isTablePropertyTransactional(schema) && !AcidUtils.isInsertOnlyTable(schema)) {
        ImmutableList<String> orcFileColumnNames = ImmutableList.of(OrcPageSourceFactory.ACID_COLUMN_OPERATION, OrcPageSourceFactory.ACID_COLUMN_ORIGINAL_TRANSACTION, OrcPageSourceFactory.ACID_COLUMN_BUCKET, OrcPageSourceFactory.ACID_COLUMN_ROW_ID, OrcPageSourceFactory.ACID_COLUMN_CURRENT_TRANSACTION, OrcPageSourceFactory.ACID_COLUMN_ROW_STRUCT);
        ImmutableList.Builder<RowType.Field> fieldsBuilder = ImmutableList.builder();
        for (int i = 0; i < fileColumnNames.size(); i++) {
            fieldsBuilder.add(new RowType.Field(Optional.of(fileColumnNames.get(i)), fileColumnTypes.get(i)));
        }
        ImmutableList<Type> orcFileColumnTypes = ImmutableList.of(INTEGER, BIGINT, INTEGER, BIGINT, BIGINT, RowType.from(fieldsBuilder.build()));
        fileColumnNames = orcFileColumnNames;
        fileColumnTypes = orcFileColumnTypes;
        if (acidWriteType.isPresent() && acidWriteType.get() == HiveACIDWriteType.UPDATE) {
            AcidOutputFormat.Options deleteOptions = acidOptions.get().clone().writingDeleteDelta(true);
            Path deleteDeltaPath = AcidUtils.createFilename(path.getParent().getParent(), deleteOptions);
            deleteDeltaWriter = createFileWriter(deleteDeltaPath, inputColumnNames, storageFormat, schema, configuration, session, Optional.of(deleteOptions), Optional.of(HiveACIDWriteType.DELETE));
        }
    }
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getUser(), path, configuration);
        OrcDataSink orcDataSink = createOrcDataSink(session, fileSystem, path);
        Optional<Supplier<OrcDataSource>> validationInputFactory = Optional.empty();
        if (HiveSessionProperties.isOrcOptimizedWriterValidate(session)) {
            validationInputFactory = Optional.of(() -> {
                try {
                    FileStatus fileStatus = fileSystem.getFileStatus(path);
                    return new HdfsOrcDataSource(new OrcDataSourceId(path.toString()), fileStatus.getLen(), HiveSessionProperties.getOrcMaxMergeDistance(session), HiveSessionProperties.getOrcMaxBufferSize(session), HiveSessionProperties.getOrcStreamBufferSize(session), false, fileSystem.open(path), readStats, fileStatus.getModificationTime());
                } catch (IOException e) {
                    throw new PrestoException(HiveErrorCode.HIVE_WRITE_VALIDATION_FAILED, e);
                }
            });
        }
        Callable<Void> rollbackAction = () -> {
            fileSystem.delete(path, false);
            return null;
        };
        return Optional.of(new OrcFileWriter(
                orcDataSink,
                rollbackAction,
                fileColumnNames,
                fileColumnTypes,
                dataFileColumnTypes,
                compression,
                orcWriterOptions
                        .withStripeMinSize(HiveSessionProperties.getOrcOptimizedWriterMinStripeSize(session))
                        .withStripeMaxSize(HiveSessionProperties.getOrcOptimizedWriterMaxStripeSize(session))
                        .withStripeMaxRowCount(HiveSessionProperties.getOrcOptimizedWriterMaxStripeRows(session))
                        .withDictionaryMaxMemory(HiveSessionProperties.getOrcOptimizedWriterMaxDictionaryMemory(session))
                        .withMaxStringStatisticsLimit(HiveSessionProperties.getOrcStringStatisticsLimit(session)),
                writeLegacyVersion,
                fileInputColumnIndexes,
                ImmutableMap.<String, String>builder()
                        .put(HiveMetadata.PRESTO_VERSION_NAME, nodeVersion.toString())
                        .put(HiveMetadata.PRESTO_QUERY_ID_NAME, session.getQueryId())
                        .put("hive.acid.version", String.valueOf(AcidUtils.OrcAcidVersion.ORC_ACID_VERSION))
                        .build(),
                validationInputFactory,
                HiveSessionProperties.getOrcOptimizedWriterValidateMode(session),
                stats,
                acidOptions,
                acidWriteType,
                deleteDeltaWriter,
                path));
    } catch (IOException e) {
        throw new PrestoException(HiveErrorCode.HIVE_WRITER_OPEN_ERROR, "Error creating ORC file", e);
    }
}
Also used : StorageFormat(io.prestosql.plugin.hive.metastore.StorageFormat) FileSystem(org.apache.hadoop.fs.FileSystem) Flatten(org.weakref.jmx.Flatten) HiveUtil.getColumnTypes(io.prestosql.plugin.hive.HiveUtil.getColumnTypes) Callable(java.util.concurrent.Callable) INTEGER(io.prestosql.spi.type.IntegerType.INTEGER) FileStatus(org.apache.hadoop.fs.FileStatus) Supplier(java.util.function.Supplier) Inject(javax.inject.Inject) ImmutableList(com.google.common.collect.ImmutableList) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) Managed(org.weakref.jmx.Managed) OrcDataSink(io.prestosql.orc.OrcDataSink) OrcConf(org.apache.orc.OrcConf) HiveUtil.getColumnNames(io.prestosql.plugin.hive.HiveUtil.getColumnNames) Objects.requireNonNull(java.util.Objects.requireNonNull) RowType(io.prestosql.spi.type.RowType) OrcPageSourceFactory(io.prestosql.plugin.hive.orc.OrcPageSourceFactory) Path(org.apache.hadoop.fs.Path) Type(io.prestosql.spi.type.Type) BIGINT(io.prestosql.spi.type.BigintType.BIGINT) ENGLISH(java.util.Locale.ENGLISH) PrestoException(io.prestosql.spi.PrestoException) Properties(java.util.Properties) ImmutableMap(com.google.common.collect.ImmutableMap) TypeManager(io.prestosql.spi.type.TypeManager) AcidOutputFormat(org.apache.hadoop.hive.ql.io.AcidOutputFormat) OrcWriterStats(io.prestosql.orc.OrcWriterStats) IOException(java.io.IOException) OrcDataSource(io.prestosql.orc.OrcDataSource) OrcWriterOptions(io.prestosql.orc.OrcWriterOptions) JobConf(org.apache.hadoop.mapred.JobConf) List(java.util.List) Collectors.toList(java.util.stream.Collectors.toList) OutputStreamOrcDataSink(io.prestosql.orc.OutputStreamOrcDataSink) CompressionKind(io.prestosql.orc.metadata.CompressionKind) HdfsOrcDataSource(io.prestosql.plugin.hive.orc.HdfsOrcDataSource) Optional(java.util.Optional) OrcOutputFormat(org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat) AcidUtils(org.apache.hadoop.hive.ql.io.AcidUtils) OrcDataSourceId(io.prestosql.orc.OrcDataSourceId)
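A small usage sketch (assumed, not from the project): the ORC factory above only proceeds when the table's StorageFormat carries the OrcOutputFormat class name, which is exactly what StorageFormat.fromHiveStorageFormat(ORC) produces.

// Hypothetical illustration: the StorageFormat value that makes OrcFileWriterFactory.createFileWriter
// above pass its first guard.
import io.prestosql.plugin.hive.HiveStorageFormat;
import io.prestosql.plugin.hive.metastore.StorageFormat;
import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;

public class OrcStorageFormatExample
{
    public static void main(String[] args)
    {
        StorageFormat orc = StorageFormat.fromHiveStorageFormat(HiveStorageFormat.ORC);
        // true: the factory proceeds; any other output format yields Optional.empty()
        System.out.println(OrcOutputFormat.class.getName().equals(orc.getOutputFormat()));
    }
}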

Example 3 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

In class TestHiveWriterFactory, method testSortingPath:

@Test
public void testSortingPath() {
    setUp();
    String targetPath = "/tmp";
    String writePath = "/tmp/table";
    Optional<WriteIdInfo> writeIdInfo = Optional.of(new WriteIdInfo(1, 1, 0));
    StorageFormat storageFormat = StorageFormat.fromHiveStorageFormat(ORC);
    Storage storage = new Storage(storageFormat, "", Optional.empty(), false, ImmutableMap.of());
    Table table = new Table("schema", "table", "user", "MANAGED_TABLE", storage, ImmutableList.of(new Column("col_1", HiveType.HIVE_INT, Optional.empty())), ImmutableList.of(), ImmutableMap.of("transactional", "true"), Optional.of("original"), Optional.of("expanded"));
    HiveConfig hiveConfig = getHiveConfig();
    HivePageSinkMetadata hivePageSinkMetadata = new HivePageSinkMetadata(new SchemaTableName("schema", "table"), Optional.of(table), ImmutableMap.of());
    PageSorter pageSorter = new PagesIndexPageSorter(new PagesIndex.TestingFactory(false));
    Metadata metadata = createTestMetadataManager();
    TypeManager typeManager = new InternalTypeManager(metadata.getFunctionAndTypeManager());
    HdfsConfiguration hdfsConfiguration = new HiveHdfsConfiguration(new HdfsConfigurationInitializer(hiveConfig), ImmutableSet.of());
    HdfsEnvironment hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, hiveConfig, new NoHdfsAuthentication());
    LocationService locationService = new HiveLocationService(hdfsEnvironment);
    ConnectorSession session = newSession();
    HiveWriterFactory hiveWriterFactory = new HiveWriterFactory(
            getDefaultHiveFileWriterFactories(hiveConfig),
            "schema",
            "table",
            false,
            HiveACIDWriteType.DELETE,
            ImmutableList.of(new HiveColumnHandle("col_1", HiveType.HIVE_INT, new TypeSignature("integer", ImmutableList.of()), 0, HiveColumnHandle.ColumnType.REGULAR, Optional.empty())),
            ORC,
            ORC,
            ImmutableMap.of(),
            OptionalInt.empty(),
            ImmutableList.of(),
            new LocationHandle(targetPath, writePath, false, LocationHandle.WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY, writeIdInfo),
            locationService,
            session.getQueryId(),
            new HivePageSinkMetadataProvider(hivePageSinkMetadata, CachingHiveMetastore.memoizeMetastore(metastore, 1000), new HiveIdentity(session)),
            typeManager,
            hdfsEnvironment,
            pageSorter,
            hiveConfig.getWriterSortBufferSize(),
            hiveConfig.getMaxOpenSortFiles(),
            false,
            UTC,
            session,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            new HiveSessionProperties(hiveConfig, new OrcFileWriterConfig(), new ParquetFileWriterConfig()),
            new HiveWriterStats(),
            getDefaultOrcFileWriterFactory(hiveConfig));
    HiveWriter hiveWriter = hiveWriterFactory.createWriter(ImmutableList.of(), OptionalInt.empty(), Optional.empty());
    assertEquals(((SortingFileWriter) hiveWriter.getFileWriter()).getTempFilePrefix().getName(), ".tmp-sort.bucket_00000");
}
Also used : HivePageSinkMetadataProvider(io.prestosql.plugin.hive.metastore.HivePageSinkMetadataProvider) HivePageSinkMetadata(io.prestosql.plugin.hive.metastore.HivePageSinkMetadata) Metadata(io.prestosql.metadata.Metadata) StorageFormat(io.prestosql.plugin.hive.metastore.StorageFormat) PagesIndex(io.prestosql.operator.PagesIndex) NoHdfsAuthentication(io.prestosql.plugin.hive.authentication.NoHdfsAuthentication) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) PagesIndexPageSorter(io.prestosql.PagesIndexPageSorter) TypeSignature(io.prestosql.spi.type.TypeSignature) Column(io.prestosql.plugin.hive.metastore.Column) TestingNodeManager(io.prestosql.testing.TestingNodeManager) PageSorter(io.prestosql.spi.PageSorter) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) TestingConnectorSession(io.prestosql.testing.TestingConnectorSession) InternalTypeManager(io.prestosql.type.InternalTypeManager) Table(io.prestosql.plugin.hive.metastore.Table) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) Storage(io.prestosql.plugin.hive.metastore.Storage) TypeManager(io.prestosql.spi.type.TypeManager) Test(org.testng.annotations.Test)
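A brief sketch (assumed, not from the project) of the round trip the test relies on: the StorageFormat placed on Storage is what the table metadata reports back, and therefore what the writer factories above inspect.

// Hypothetical illustration: the StorageFormat configured on Storage is exposed unchanged
// through the metastore objects used on the write path.
import static io.prestosql.plugin.hive.HiveStorageFormat.ORC;

import com.google.common.collect.ImmutableMap;
import io.prestosql.plugin.hive.metastore.Storage;
import io.prestosql.plugin.hive.metastore.StorageFormat;
import java.util.Optional;

public class StorageFormatRoundTripExample
{
    public static void main(String[] args)
    {
        StorageFormat format = StorageFormat.fromHiveStorageFormat(ORC);
        Storage storage = new Storage(format, "", Optional.empty(), false, ImmutableMap.of());
        // Prints the same output format class name the OrcFileWriterFactory checks for
        System.out.println(storage.getStorageFormat().getOutputFormat());
    }
}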

Example 4 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

In class AbstractTestHive, method doTestMetadataDelete:

private void doTestMetadataDelete(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
    // creating the table
    doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS_PARTITIONED);
    insertData(tableName, CREATE_TABLE_PARTITIONED_DATA);
    MaterializedResult.Builder expectedResultBuilder = MaterializedResult.resultBuilder(SESSION, CREATE_TABLE_PARTITIONED_DATA.getTypes());
    expectedResultBuilder.rows(CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows());
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        // verify partitions were created
        List<String> partitionNames = transaction.getMetastore(tableName.getSchemaName()).getPartitionNames(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
        assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream().map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)).collect(toList()));
        // verify table directory is not empty
        Set<String> filesAfterInsert = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertFalse(filesAfterInsert.isEmpty());
        // verify the data
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), expectedResultBuilder.build().getMaterializedRows());
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        // get ds column handle
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("ds");
        // delete ds=2015-07-03
        session = newSession();
        TupleDomain<ColumnHandle> tupleDomain = TupleDomain.fromFixedValues(ImmutableMap.of(dsColumnHandle, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2015-07-03"))));
        Constraint constraint = new Constraint(tupleDomain, convertToPredicate(tupleDomain));
        tableHandle = applyFilter(metadata, tableHandle, constraint);
        tableHandle = metadata.applyDelete(session, tableHandle).get();
        metadata.executeDelete(session, tableHandle);
        transaction.commit();
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("ds");
        int dsColumnOrdinalPosition = columnHandles.indexOf(dsColumnHandle);
        // verify the data
        ImmutableList<MaterializedRow> expectedRows = expectedResultBuilder.build().getMaterializedRows().stream().filter(row -> !"2015-07-03".equals(row.getField(dsColumnOrdinalPosition))).collect(toImmutableList());
        MaterializedResult actualAfterDelete = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(actualAfterDelete.getMaterializedRows(), expectedRows);
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("ds");
        // delete ds=2015-07-01 and 2015-07-02
        session = newSession();
        TupleDomain<ColumnHandle> tupleDomain2 = TupleDomain.withColumnDomains(ImmutableMap.of(dsColumnHandle, Domain.create(ValueSet.ofRanges(Range.range(createUnboundedVarcharType(), utf8Slice("2015-07-01"), true, utf8Slice("2015-07-02"), true)), false)));
        Constraint constraint2 = new Constraint(tupleDomain2, convertToPredicate(tupleDomain2));
        tableHandle = applyFilter(metadata, tableHandle, constraint2);
        tableHandle = metadata.applyDelete(session, tableHandle).get();
        metadata.executeDelete(session, tableHandle);
        transaction.commit();
    }
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        // verify the data
        session = newSession();
        MaterializedResult actualAfterDelete2 = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(actualAfterDelete2.getMaterializedRows(), ImmutableList.of());
        // verify table directory is empty
        Set<String> filesAfterDelete = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertTrue(filesAfterDelete.isEmpty());
    }
}
Also used : ROLLBACK_AFTER_FINISH_INSERT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_FINISH_INSERT) HiveType.toHiveType(io.prestosql.plugin.hive.HiveType.toHiveType) TableStatistics(io.prestosql.spi.statistics.TableStatistics) StorageFormat(io.prestosql.plugin.hive.metastore.StorageFormat) Assertions.assertInstanceOf(io.airlift.testing.Assertions.assertInstanceOf) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.testng.annotations.Test) TableAlreadyExistsException(io.prestosql.spi.connector.TableAlreadyExistsException) NullableValue(io.prestosql.spi.predicate.NullableValue) FileStatus(org.apache.hadoop.fs.FileStatus) TEXTFILE(io.prestosql.plugin.hive.HiveStorageFormat.TEXTFILE) TypeSignature.parseTypeSignature(io.prestosql.spi.type.TypeSignature.parseTypeSignature) TableNotFoundException(io.prestosql.spi.connector.TableNotFoundException) Files.createTempDirectory(java.nio.file.Files.createTempDirectory) Map(java.util.Map) RowType(io.prestosql.spi.type.RowType) ENGLISH(java.util.Locale.ENGLISH) Assert.assertFalse(org.testng.Assert.assertFalse) Chars.isCharType(io.prestosql.spi.type.Chars.isCharType) LOCATION_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.LOCATION_PROPERTY) MoreExecutors.directExecutor(com.google.common.util.concurrent.MoreExecutors.directExecutor) RCTEXT(io.prestosql.plugin.hive.HiveStorageFormat.RCTEXT) ConnectorPageSource(io.prestosql.spi.connector.ConnectorPageSource) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) Table(io.prestosql.plugin.hive.metastore.Table) ORC(io.prestosql.plugin.hive.HiveStorageFormat.ORC) SchemaTablePrefix(io.prestosql.spi.connector.SchemaTablePrefix) HiveBasicStatistics.createZeroStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createZeroStatistics) TRANSACTIONAL(io.prestosql.plugin.hive.HiveTableProperties.TRANSACTIONAL) TYPE_MANAGER(io.prestosql.plugin.hive.HiveTestUtils.TYPE_MANAGER) MetastoreLocator(io.prestosql.plugin.hive.metastore.thrift.MetastoreLocator) LocalDateTime(java.time.LocalDateTime) PRESTO_QUERY_ID_NAME(io.prestosql.plugin.hive.HiveMetadata.PRESTO_QUERY_ID_NAME) ThriftHiveMetastoreConfig(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastoreConfig) OptionalLong(java.util.OptionalLong) REGULAR(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.REGULAR) PARTITION_KEY(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.PARTITION_KEY) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) DOUBLE(io.prestosql.spi.type.DoubleType.DOUBLE) ThriftHiveMetastore(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastore) DiscretePredicates(io.prestosql.spi.connector.DiscretePredicates) Assertions.assertGreaterThanOrEqual(io.airlift.testing.Assertions.assertGreaterThanOrEqual) ImmutableMultimap(com.google.common.collect.ImmutableMultimap) PARQUET(io.prestosql.plugin.hive.HiveStorageFormat.PARQUET) ConnectorOutputTableHandle(io.prestosql.spi.connector.ConnectorOutputTableHandle) AfterClass(org.testng.annotations.AfterClass) HiveTestUtils.mapType(io.prestosql.plugin.hive.HiveTestUtils.mapType) FileUtils.makePartName(org.apache.hadoop.hive.common.FileUtils.makePartName) IOException(java.io.IOException) Iterables.getOnlyElement(com.google.common.collect.Iterables.getOnlyElement) ROLLBACK_RIGHT_AWAY(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_RIGHT_AWAY) HostAndPort(com.google.common.net.HostAndPort) 
USER(io.prestosql.spi.security.PrincipalType.USER) ConnectorTableMetadata(io.prestosql.spi.connector.ConnectorTableMetadata) VARBINARY(io.prestosql.spi.type.VarbinaryType.VARBINARY) HiveTestUtils.getDefaultOrcFileWriterFactory(io.prestosql.plugin.hive.HiveTestUtils.getDefaultOrcFileWriterFactory) ConnectorPageSourceProvider(io.prestosql.spi.connector.ConnectorPageSourceProvider) ROLLBACK_AFTER_APPEND_PAGE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_APPEND_PAGE) Varchars.isVarcharType(io.prestosql.spi.type.Varchars.isVarcharType) ConnectorSplitManager(io.prestosql.spi.connector.ConnectorSplitManager) ViewNotFoundException(io.prestosql.spi.connector.ViewNotFoundException) MaterializedResult.materializeSourceDataStream(io.prestosql.testing.MaterializedResult.materializeSourceDataStream) MaterializedResult(io.prestosql.testing.MaterializedResult) Duration(io.airlift.units.Duration) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) ConnectorTableProperties(io.prestosql.spi.connector.ConnectorTableProperties) BOOLEAN(io.prestosql.spi.type.BooleanType.BOOLEAN) Type(io.prestosql.spi.type.Type) RcFilePageSource(io.prestosql.plugin.hive.rcfile.RcFilePageSource) BIGINT(io.prestosql.spi.type.BigintType.BIGINT) DecimalType.createDecimalType(io.prestosql.spi.type.DecimalType.createDecimalType) PrestoException(io.prestosql.spi.PrestoException) HiveBasicStatistics.createEmptyStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createEmptyStatistics) ImmutableSet(com.google.common.collect.ImmutableSet) CachingHiveMetastore(io.prestosql.plugin.hive.metastore.CachingHiveMetastore) MetadataManager.createTestMetadataManager(io.prestosql.metadata.MetadataManager.createTestMetadataManager) ROLLBACK_AFTER_DELETE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_DELETE) HiveUtil.columnExtraInfo(io.prestosql.plugin.hive.HiveUtil.columnExtraInfo) BeforeClass(org.testng.annotations.BeforeClass) Collection(java.util.Collection) UUID(java.util.UUID) TINYINT(io.prestosql.spi.type.TinyintType.TINYINT) Assert.assertNotNull(org.testng.Assert.assertNotNull) HYPER_LOG_LOG(io.prestosql.spi.type.HyperLogLogType.HYPER_LOG_LOG) NOT_SUPPORTED(io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED) JsonCodec(io.airlift.json.JsonCodec) IntStream(java.util.stream.IntStream) NOT_PARTITIONED(io.prestosql.spi.connector.NotPartitionedPartitionHandle.NOT_PARTITIONED) SqlTimestamp(io.prestosql.spi.type.SqlTimestamp) BUCKET_COLUMN_NAME(io.prestosql.plugin.hive.HiveColumnHandle.BUCKET_COLUMN_NAME) Assert.assertNull(org.testng.Assert.assertNull) ConnectorViewDefinition(io.prestosql.spi.connector.ConnectorViewDefinition) SqlDate(io.prestosql.spi.type.SqlDate) ConnectorNewTableLayout(io.prestosql.spi.connector.ConnectorNewTableLayout) OptionalDouble(java.util.OptionalDouble) Assert.assertEquals(org.testng.Assert.assertEquals) BUCKETED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.BUCKETED_BY_PROPERTY) HiveUtil.toPartitionValues(io.prestosql.plugin.hive.HiveUtil.toPartitionValues) OptionalInt(java.util.OptionalInt) HashSet(java.util.HashSet) ImmutableList(com.google.common.collect.ImmutableList) ViewColumn(io.prestosql.spi.connector.ConnectorViewDefinition.ViewColumn) HiveColumnHandle.bucketColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle.bucketColumnHandle) DATE(io.prestosql.spi.type.DateType.DATE) Math.toIntExact(java.lang.Math.toIntExact) 
STORAGE_FORMAT_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.STORAGE_FORMAT_PROPERTY) Block(io.prestosql.spi.block.Block) ExecutorService(java.util.concurrent.ExecutorService) Collections.emptyMap(java.util.Collections.emptyMap) ParquetPageSource(io.prestosql.plugin.hive.parquet.ParquetPageSource) UTF_8(java.nio.charset.StandardCharsets.UTF_8) ColumnMetadata(io.prestosql.spi.connector.ColumnMetadata) TupleDomain(io.prestosql.spi.predicate.TupleDomain) Assert.fail(org.testng.Assert.fail) DateTime(org.joda.time.DateTime) PartitionWithStatistics(io.prestosql.plugin.hive.metastore.PartitionWithStatistics) Page(io.prestosql.spi.Page) HiveTestUtils.getDefaultHiveDataStreamFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveDataStreamFactories) Executors.newFixedThreadPool(java.util.concurrent.Executors.newFixedThreadPool) Hashing.sha256(com.google.common.hash.Hashing.sha256) BUCKETING_V1(io.prestosql.plugin.hive.HiveBucketing.BucketingVersion.BUCKETING_V1) Assertions.assertEqualsIgnoreOrder(io.airlift.testing.Assertions.assertEqualsIgnoreOrder) PARTITIONED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.PARTITIONED_BY_PROPERTY) Collectors.toList(java.util.stream.Collectors.toList) Column(io.prestosql.plugin.hive.metastore.Column) JoinCompiler(io.prestosql.sql.gen.JoinCompiler) Assert.assertTrue(org.testng.Assert.assertTrue) RecordPageSource(io.prestosql.spi.connector.RecordPageSource) ConnectorInsertTableHandle(io.prestosql.spi.connector.ConnectorInsertTableHandle) OrcConcatPageSource(io.prestosql.plugin.hive.orc.OrcConcatPageSource) ROLLBACK_AFTER_BEGIN_INSERT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_BEGIN_INSERT) Arrays(java.util.Arrays) RCBINARY(io.prestosql.plugin.hive.HiveStorageFormat.RCBINARY) NoHdfsAuthentication(io.prestosql.plugin.hive.authentication.NoHdfsAuthentication) ConnectorPageSink(io.prestosql.spi.connector.ConnectorPageSink) ValueSet(io.prestosql.spi.predicate.ValueSet) Maps.uniqueIndex(com.google.common.collect.Maps.uniqueIndex) BigDecimal(java.math.BigDecimal) Sets.difference(com.google.common.collect.Sets.difference) Executors.newScheduledThreadPool(java.util.concurrent.Executors.newScheduledThreadPool) HIVE_STRING(io.prestosql.plugin.hive.HiveType.HIVE_STRING) RowFieldName(io.prestosql.spi.type.RowFieldName) Slices.utf8Slice(io.airlift.slice.Slices.utf8Slice) ConnectorPageSinkProvider(io.prestosql.spi.connector.ConnectorPageSinkProvider) JSON(io.prestosql.plugin.hive.HiveStorageFormat.JSON) HIVE_INT(io.prestosql.plugin.hive.HiveType.HIVE_INT) HIVE_LONG(io.prestosql.plugin.hive.HiveType.HIVE_LONG) ConstraintApplicationResult(io.prestosql.spi.connector.ConstraintApplicationResult) UNGROUPED_SCHEDULING(io.prestosql.spi.connector.ConnectorSplitManager.SplitSchedulingStrategy.UNGROUPED_SCHEDULING) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) Set(java.util.Set) SqlStandardAccessControlMetadata(io.prestosql.plugin.hive.security.SqlStandardAccessControlMetadata) TIMESTAMP(io.prestosql.spi.type.TimestampType.TIMESTAMP) MILLISECONDS(java.util.concurrent.TimeUnit.MILLISECONDS) VarcharType.createVarcharType(io.prestosql.spi.type.VarcharType.createVarcharType) ImmutableMap.toImmutableMap(com.google.common.collect.ImmutableMap.toImmutableMap) Domain(io.prestosql.spi.predicate.Domain) SortingColumn(io.prestosql.plugin.hive.metastore.SortingColumn) TestingNodeManager(io.prestosql.testing.TestingNodeManager) Lists.reverse(com.google.common.collect.Lists.reverse) 
MoreObjects.toStringHelper(com.google.common.base.MoreObjects.toStringHelper) Slice(io.airlift.slice.Slice) Partition(io.prestosql.plugin.hive.metastore.Partition) StandardTypes(io.prestosql.spi.type.StandardTypes) ConnectorSplit(io.prestosql.spi.connector.ConnectorSplit) BUCKET_COUNT_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.BUCKET_COUNT_PROPERTY) MapType(io.prestosql.spi.type.MapType) GroupByHashPageIndexerFactory(io.prestosql.GroupByHashPageIndexerFactory) Float.floatToRawIntBits(java.lang.Float.floatToRawIntBits) VARCHAR(io.prestosql.spi.type.VarcharType.VARCHAR) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) AVRO(io.prestosql.plugin.hive.HiveStorageFormat.AVRO) HiveTestUtils.rowType(io.prestosql.plugin.hive.HiveTestUtils.rowType) RecordCursor(io.prestosql.spi.connector.RecordCursor) SemiTransactionalHiveMetastore(io.prestosql.plugin.hive.metastore.SemiTransactionalHiveMetastore) SESSION(io.prestosql.plugin.hive.HiveTestUtils.SESSION) HiveMetastore(io.prestosql.plugin.hive.metastore.HiveMetastore) LongStream(java.util.stream.LongStream) MULTIDELIMIT(io.prestosql.plugin.hive.HiveStorageFormat.MULTIDELIMIT) MoreFutures.getFutureValue(io.airlift.concurrent.MoreFutures.getFutureValue) PAGE_SORTER(io.prestosql.plugin.hive.HiveTestUtils.PAGE_SORTER) UTC(org.joda.time.DateTimeZone.UTC) MaterializedRow(io.prestosql.testing.MaterializedRow) PrincipalPrivileges(io.prestosql.plugin.hive.metastore.PrincipalPrivileges) IS_EXTERNAL_TABLE(io.prestosql.plugin.hive.HiveTableProperties.IS_EXTERNAL_TABLE) HiveColumnStatistics(io.prestosql.plugin.hive.metastore.HiveColumnStatistics) DateTimeTestingUtils.sqlTimestampOf(io.prestosql.testing.DateTimeTestingUtils.sqlTimestampOf) ColumnHandle(io.prestosql.spi.connector.ColumnHandle) STAGE_AND_MOVE_TO_TARGET_DIRECTORY(io.prestosql.plugin.hive.LocationHandle.WriteMode.STAGE_AND_MOVE_TO_TARGET_DIRECTORY) TableType(org.apache.hadoop.hive.metastore.TableType) ConnectorMetadata(io.prestosql.spi.connector.ConnectorMetadata) OrcPageSource(io.prestosql.plugin.hive.orc.OrcPageSource) HiveTestUtils.getDefaultHiveSelectiveFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveSelectiveFactories) HdfsContext(io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext) Assertions.assertThat(org.assertj.core.api.Assertions.assertThat) Assertions.assertGreaterThan(io.airlift.testing.Assertions.assertGreaterThan) MoreCollectors.onlyElement(com.google.common.collect.MoreCollectors.onlyElement) Iterables.concat(com.google.common.collect.Iterables.concat) HiveWriteUtils.createDirectory(io.prestosql.plugin.hive.HiveWriteUtils.createDirectory) Path(org.apache.hadoop.fs.Path) KILOBYTE(io.airlift.units.DataSize.Unit.KILOBYTE) Constraint(io.prestosql.spi.connector.Constraint) ImmutableMap(com.google.common.collect.ImmutableMap) ArrayType(io.prestosql.spi.type.ArrayType) CharType.createCharType(io.prestosql.spi.type.CharType.createCharType) ConnectorSplitSource(io.prestosql.spi.connector.ConnectorSplitSource) HiveTestUtils.getDefaultHiveFileWriterFactories(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveFileWriterFactories) PRESTO_VERSION_NAME(io.prestosql.plugin.hive.HiveMetadata.PRESTO_VERSION_NAME) String.format(java.lang.String.format) Preconditions.checkState(com.google.common.base.Preconditions.checkState) DataSize(io.airlift.units.DataSize) List(java.util.List) VarcharType.createUnboundedVarcharType(io.prestosql.spi.type.VarcharType.createUnboundedVarcharType) HiveTestUtils.getTypes(io.prestosql.plugin.hive.HiveTestUtils.getTypes) 
ConnectorTransactionHandle(io.prestosql.spi.connector.ConnectorTransactionHandle) Optional(java.util.Optional) SORTED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.SORTED_BY_PROPERTY) Logger(io.airlift.log.Logger) CounterStat(io.airlift.stats.CounterStat) HashMap(java.util.HashMap) INTEGER(io.prestosql.spi.type.IntegerType.INTEGER) HivePrincipal(io.prestosql.plugin.hive.metastore.HivePrincipal) AtomicReference(java.util.concurrent.atomic.AtomicReference) SqlVarbinary(io.prestosql.spi.type.SqlVarbinary) BridgingHiveMetastore(io.prestosql.plugin.hive.metastore.thrift.BridgingHiveMetastore) NamedTypeSignature(io.prestosql.spi.type.NamedTypeSignature) SchemaTableName(io.prestosql.spi.connector.SchemaTableName) COMMIT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.COMMIT) TestingMetastoreLocator(io.prestosql.plugin.hive.metastore.thrift.TestingMetastoreLocator) Verify.verify(com.google.common.base.Verify.verify) Assertions.assertLessThanOrEqual(io.airlift.testing.Assertions.assertLessThanOrEqual) Range(io.prestosql.spi.predicate.Range) Threads.daemonThreadsNamed(io.airlift.concurrent.Threads.daemonThreadsNamed) HivePrivilegeInfo(io.prestosql.plugin.hive.metastore.HivePrivilegeInfo) Objects.requireNonNull(java.util.Objects.requireNonNull) SEQUENCEFILE(io.prestosql.plugin.hive.HiveStorageFormat.SEQUENCEFILE) REAL(io.prestosql.spi.type.RealType.REAL) HiveMetadata.convertToPredicate(io.prestosql.plugin.hive.HiveMetadata.convertToPredicate) ColumnStatistics(io.prestosql.spi.statistics.ColumnStatistics) HiveTestUtils.getNoOpIndexCache(io.prestosql.plugin.hive.HiveTestUtils.getNoOpIndexCache) HiveIdentity(io.prestosql.plugin.hive.authentication.HiveIdentity) TRANSACTION_CONFLICT(io.prestosql.spi.StandardErrorCode.TRANSACTION_CONFLICT) ConnectorTableHandle(io.prestosql.spi.connector.ConnectorTableHandle) CSV(io.prestosql.plugin.hive.HiveStorageFormat.CSV) HiveTestUtils.getDefaultHiveRecordCursorProvider(io.prestosql.plugin.hive.HiveTestUtils.getDefaultHiveRecordCursorProvider) HiveTestUtils.arrayType(io.prestosql.plugin.hive.HiveTestUtils.arrayType) SMALLINT(io.prestosql.spi.type.SmallintType.SMALLINT) Executors.newCachedThreadPool(java.util.concurrent.Executors.newCachedThreadPool) ROLLBACK_AFTER_SINK_FINISH(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_SINK_FINISH) TestingConnectorSession(io.prestosql.testing.TestingConnectorSession)
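A hedged sketch (not from the project) of the single-partition constraint that doTestMetadataDelete above pushes into applyFilter/applyDelete for ds=2015-07-03; the helper name is hypothetical.

// Hypothetical illustration: building the fixed-value TupleDomain used for a metadata delete
// on one partition column value. Metadata delete then drops the whole matching partition.
import static io.airlift.slice.Slices.utf8Slice;
import static io.prestosql.spi.type.VarcharType.createUnboundedVarcharType;

import com.google.common.collect.ImmutableMap;
import io.prestosql.spi.connector.ColumnHandle;
import io.prestosql.spi.predicate.NullableValue;
import io.prestosql.spi.predicate.TupleDomain;

public class MetadataDeleteConstraintExample
{
    public static TupleDomain<ColumnHandle> dsEquals(ColumnHandle dsColumnHandle, String ds)
    {
        return TupleDomain.fromFixedValues(
                ImmutableMap.of(dsColumnHandle, NullableValue.of(createUnboundedVarcharType(), utf8Slice(ds))));
    }
}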

Example 5 with StorageFormat

Use of io.prestosql.plugin.hive.metastore.StorageFormat in project hetu-core by openlookeng.

In class AbstractTestHive, method doTestTransactionDeleteInsert:

private void doTestTransactionDeleteInsert(HiveStorageFormat storageFormat, SchemaTableName tableName, Domain domainToDrop, MaterializedResult insertData, MaterializedResult expectedData, TransactionDeleteInsertTestTag tag, boolean expectQuerySucceed, Optional<ConflictTrigger> conflictTrigger) throws Exception {
    Path writePath = null;
    Path targetPath = null;
    try (Transaction transaction = newTransaction()) {
        try {
            ConnectorMetadata metadata = transaction.getMetadata();
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            ConnectorSession session;
            rollbackIfEquals(tag, ROLLBACK_RIGHT_AWAY);
            // Query 1: delete
            session = newSession();
            HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("pk2");
            TupleDomain<ColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of(dsColumnHandle, domainToDrop));
            Constraint constraint = new Constraint(tupleDomain, convertToPredicate(tupleDomain));
            tableHandle = applyFilter(metadata, tableHandle, constraint);
            tableHandle = metadata.applyDelete(session, tableHandle).get();
            metadata.executeDelete(session, tableHandle);
            rollbackIfEquals(tag, ROLLBACK_AFTER_DELETE);
            // Query 2: insert
            session = newSession();
            ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle);
            rollbackIfEquals(tag, ROLLBACK_AFTER_BEGIN_INSERT);
            writePath = getStagingPathRoot(insertTableHandle);
            targetPath = getTargetPathRoot(insertTableHandle);
            ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
            sink.appendPage(insertData.toPage());
            rollbackIfEquals(tag, ROLLBACK_AFTER_APPEND_PAGE);
            Collection<Slice> fragments = getFutureValue(sink.finish());
            rollbackIfEquals(tag, ROLLBACK_AFTER_SINK_FINISH);
            metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
            rollbackIfEquals(tag, ROLLBACK_AFTER_FINISH_INSERT);
            assertEquals(tag, COMMIT);
            if (conflictTrigger.isPresent()) {
                JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
                List<PartitionUpdate> partitionUpdates = fragments.stream().map(Slice::getBytes).map(partitionUpdateCodec::fromJson).collect(toList());
                conflictTrigger.get().triggerConflict(session, tableName, insertTableHandle, partitionUpdates);
            }
            transaction.commit();
            if (conflictTrigger.isPresent()) {
                assertTrue(expectQuerySucceed);
                conflictTrigger.get().verifyAndCleanup(session, tableName);
            }
        } catch (TestingRollbackException e) {
            transaction.rollback();
        } catch (PrestoException e) {
            assertFalse(expectQuerySucceed);
            if (conflictTrigger.isPresent()) {
                conflictTrigger.get().verifyAndCleanup(newSession(), tableName);
            }
        }
    }
    // check that temporary files are removed
    if (writePath != null && !writePath.equals(targetPath)) {
        HdfsContext context = new HdfsContext(newSession(), tableName.getSchemaName(), tableName.getTableName());
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, writePath);
        assertFalse(fileSystem.exists(writePath));
    }
    try (Transaction transaction = newTransaction()) {
        // verify partitions
        List<String> partitionNames = transaction.getMetastore(tableName.getSchemaName()).getPartitionNames(new HiveIdentity(newSession()), tableName.getSchemaName(), tableName.getTableName()).orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
        assertEqualsIgnoreOrder(partitionNames, expectedData.getMaterializedRows().stream().map(row -> format("pk1=%s/pk2=%s", row.getField(1), row.getField(2))).distinct().collect(toList()));
        // load the new table
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        // verify the data
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), expectedData.getMaterializedRows());
    }
}
Also used : ROLLBACK_AFTER_FINISH_INSERT(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_FINISH_INSERT) HiveType.toHiveType(io.prestosql.plugin.hive.HiveType.toHiveType) TableStatistics(io.prestosql.spi.statistics.TableStatistics) StorageFormat(io.prestosql.plugin.hive.metastore.StorageFormat) Assertions.assertInstanceOf(io.airlift.testing.Assertions.assertInstanceOf) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.testng.annotations.Test) TableAlreadyExistsException(io.prestosql.spi.connector.TableAlreadyExistsException) NullableValue(io.prestosql.spi.predicate.NullableValue) FileStatus(org.apache.hadoop.fs.FileStatus) TEXTFILE(io.prestosql.plugin.hive.HiveStorageFormat.TEXTFILE) TypeSignature.parseTypeSignature(io.prestosql.spi.type.TypeSignature.parseTypeSignature) TableNotFoundException(io.prestosql.spi.connector.TableNotFoundException) Files.createTempDirectory(java.nio.file.Files.createTempDirectory) Map(java.util.Map) RowType(io.prestosql.spi.type.RowType) ENGLISH(java.util.Locale.ENGLISH) Assert.assertFalse(org.testng.Assert.assertFalse) Chars.isCharType(io.prestosql.spi.type.Chars.isCharType) LOCATION_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.LOCATION_PROPERTY) MoreExecutors.directExecutor(com.google.common.util.concurrent.MoreExecutors.directExecutor) RCTEXT(io.prestosql.plugin.hive.HiveStorageFormat.RCTEXT) ConnectorPageSource(io.prestosql.spi.connector.ConnectorPageSource) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) Table(io.prestosql.plugin.hive.metastore.Table) ORC(io.prestosql.plugin.hive.HiveStorageFormat.ORC) SchemaTablePrefix(io.prestosql.spi.connector.SchemaTablePrefix) HiveBasicStatistics.createZeroStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createZeroStatistics) TRANSACTIONAL(io.prestosql.plugin.hive.HiveTableProperties.TRANSACTIONAL) TYPE_MANAGER(io.prestosql.plugin.hive.HiveTestUtils.TYPE_MANAGER) MetastoreLocator(io.prestosql.plugin.hive.metastore.thrift.MetastoreLocator) LocalDateTime(java.time.LocalDateTime) PRESTO_QUERY_ID_NAME(io.prestosql.plugin.hive.HiveMetadata.PRESTO_QUERY_ID_NAME) ThriftHiveMetastoreConfig(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastoreConfig) OptionalLong(java.util.OptionalLong) REGULAR(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.REGULAR) PARTITION_KEY(io.prestosql.plugin.hive.HiveColumnHandle.ColumnType.PARTITION_KEY) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) ImmutableSet.toImmutableSet(com.google.common.collect.ImmutableSet.toImmutableSet) DOUBLE(io.prestosql.spi.type.DoubleType.DOUBLE) ThriftHiveMetastore(io.prestosql.plugin.hive.metastore.thrift.ThriftHiveMetastore) DiscretePredicates(io.prestosql.spi.connector.DiscretePredicates) Assertions.assertGreaterThanOrEqual(io.airlift.testing.Assertions.assertGreaterThanOrEqual) ImmutableMultimap(com.google.common.collect.ImmutableMultimap) PARQUET(io.prestosql.plugin.hive.HiveStorageFormat.PARQUET) ConnectorOutputTableHandle(io.prestosql.spi.connector.ConnectorOutputTableHandle) AfterClass(org.testng.annotations.AfterClass) HiveTestUtils.mapType(io.prestosql.plugin.hive.HiveTestUtils.mapType) FileUtils.makePartName(org.apache.hadoop.hive.common.FileUtils.makePartName) IOException(java.io.IOException) Iterables.getOnlyElement(com.google.common.collect.Iterables.getOnlyElement) ROLLBACK_RIGHT_AWAY(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_RIGHT_AWAY) HostAndPort(com.google.common.net.HostAndPort) 
USER(io.prestosql.spi.security.PrincipalType.USER) ConnectorTableMetadata(io.prestosql.spi.connector.ConnectorTableMetadata) VARBINARY(io.prestosql.spi.type.VarbinaryType.VARBINARY) HiveTestUtils.getDefaultOrcFileWriterFactory(io.prestosql.plugin.hive.HiveTestUtils.getDefaultOrcFileWriterFactory) ConnectorPageSourceProvider(io.prestosql.spi.connector.ConnectorPageSourceProvider) ROLLBACK_AFTER_APPEND_PAGE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_APPEND_PAGE) Varchars.isVarcharType(io.prestosql.spi.type.Varchars.isVarcharType) ConnectorSplitManager(io.prestosql.spi.connector.ConnectorSplitManager) ViewNotFoundException(io.prestosql.spi.connector.ViewNotFoundException) MaterializedResult.materializeSourceDataStream(io.prestosql.testing.MaterializedResult.materializeSourceDataStream) MaterializedResult(io.prestosql.testing.MaterializedResult) Duration(io.airlift.units.Duration) Preconditions.checkArgument(com.google.common.base.Preconditions.checkArgument) ConnectorSession(io.prestosql.spi.connector.ConnectorSession) ConnectorTableProperties(io.prestosql.spi.connector.ConnectorTableProperties) BOOLEAN(io.prestosql.spi.type.BooleanType.BOOLEAN) Type(io.prestosql.spi.type.Type) RcFilePageSource(io.prestosql.plugin.hive.rcfile.RcFilePageSource) BIGINT(io.prestosql.spi.type.BigintType.BIGINT) DecimalType.createDecimalType(io.prestosql.spi.type.DecimalType.createDecimalType) PrestoException(io.prestosql.spi.PrestoException) HiveBasicStatistics.createEmptyStatistics(io.prestosql.plugin.hive.HiveBasicStatistics.createEmptyStatistics) ImmutableSet(com.google.common.collect.ImmutableSet) CachingHiveMetastore(io.prestosql.plugin.hive.metastore.CachingHiveMetastore) MetadataManager.createTestMetadataManager(io.prestosql.metadata.MetadataManager.createTestMetadataManager) ROLLBACK_AFTER_DELETE(io.prestosql.plugin.hive.AbstractTestHive.TransactionDeleteInsertTestTag.ROLLBACK_AFTER_DELETE) HiveUtil.columnExtraInfo(io.prestosql.plugin.hive.HiveUtil.columnExtraInfo) BeforeClass(org.testng.annotations.BeforeClass) Collection(java.util.Collection) UUID(java.util.UUID) TINYINT(io.prestosql.spi.type.TinyintType.TINYINT) Assert.assertNotNull(org.testng.Assert.assertNotNull) HYPER_LOG_LOG(io.prestosql.spi.type.HyperLogLogType.HYPER_LOG_LOG) NOT_SUPPORTED(io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED) JsonCodec(io.airlift.json.JsonCodec) IntStream(java.util.stream.IntStream) NOT_PARTITIONED(io.prestosql.spi.connector.NotPartitionedPartitionHandle.NOT_PARTITIONED) SqlTimestamp(io.prestosql.spi.type.SqlTimestamp) BUCKET_COLUMN_NAME(io.prestosql.plugin.hive.HiveColumnHandle.BUCKET_COLUMN_NAME) Assert.assertNull(org.testng.Assert.assertNull) ConnectorViewDefinition(io.prestosql.spi.connector.ConnectorViewDefinition) SqlDate(io.prestosql.spi.type.SqlDate) ConnectorNewTableLayout(io.prestosql.spi.connector.ConnectorNewTableLayout) OptionalDouble(java.util.OptionalDouble) Assert.assertEquals(org.testng.Assert.assertEquals) BUCKETED_BY_PROPERTY(io.prestosql.plugin.hive.HiveTableProperties.BUCKETED_BY_PROPERTY) HiveUtil.toPartitionValues(io.prestosql.plugin.hive.HiveUtil.toPartitionValues) OptionalInt(java.util.OptionalInt) HashSet(java.util.HashSet) ImmutableList(com.google.common.collect.ImmutableList) ViewColumn(io.prestosql.spi.connector.ConnectorViewDefinition.ViewColumn) HiveColumnHandle.bucketColumnHandle(io.prestosql.plugin.hive.HiveColumnHandle.bucketColumnHandle) DATE(io.prestosql.spi.type.DateType.DATE) Math.toIntExact(java.lang.Math.toIntExact) 
Also used : (cross-reference import list for an example in io.prestosql.plugin.hive.AbstractTestHive; the example code itself was not recovered in this extract)

Aggregations

StorageFormat (io.prestosql.plugin.hive.metastore.StorageFormat): 12 usages
ConnectorSession (io.prestosql.spi.connector.ConnectorSession): 10 usages
Path (org.apache.hadoop.fs.Path): 10 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 9 usages
PrestoException (io.prestosql.spi.PrestoException): 9 usages
Type (io.prestosql.spi.type.Type): 9 usages
IOException (java.io.IOException): 9 usages
List (java.util.List): 9 usages
Objects.requireNonNull (java.util.Objects.requireNonNull): 9 usages
Optional (java.util.Optional): 9 usages
Collectors.toList (java.util.stream.Collectors.toList): 9 usages
ImmutableList (com.google.common.collect.ImmutableList): 8 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 8 usages
Column (io.prestosql.plugin.hive.metastore.Column): 7 usages
Table (io.prestosql.plugin.hive.metastore.Table): 7 usages
Preconditions.checkArgument (com.google.common.base.Preconditions.checkArgument): 6 usages
ImmutableMap.toImmutableMap (com.google.common.collect.ImmutableMap.toImmutableMap): 6 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 6 usages
HdfsContext (io.prestosql.plugin.hive.HdfsEnvironment.HdfsContext): 6 usages
Preconditions.checkState (com.google.common.base.Preconditions.checkState): 5 usages
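
For orientation, here is a minimal sketch of how the StorageFormat type at the top of this list is typically constructed and queried. It assumes the StorageFormat.create(serDe, inputFormat, outputFormat) static factory from the hetu-core metastore module, which is not shown verbatim in the examples on this page; only the getSerDe() and getOutputFormat() accessors appear there.

import io.prestosql.plugin.hive.metastore.StorageFormat;

import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe;

public class StorageFormatSketch
{
    public static void main(String[] args)
    {
        // Assumption: StorageFormat.create(serDe, inputFormat, outputFormat)
        // describes a table's on-disk layout in the Hive metastore model.
        StorageFormat rcBinary = StorageFormat.create(
                LazyBinaryColumnarSerDe.class.getName(),
                RCFileInputFormat.class.getName(),
                RCFileOutputFormat.class.getName());

        // A file writer factory typically accepts a format only when the
        // output format class name matches the one it knows how to write.
        boolean handled = RCFileOutputFormat.class.getName().equals(rcBinary.getOutputFormat());

        // The serde then selects the encoding (binary vs. text columnar for RCFile).
        System.out.println("handled=" + handled + ", serDe=" + rcBinary.getSerDe());
    }
}

Note that StorageFormat is an immutable value object: the Table and Partition types from the same metastore package (both present in the counts above) embed one through their Storage descriptor rather than holding format strings directly.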