Use of io.trino.plugin.hive.HiveConfig in project trino by trinodb.
From the class TestMetastoreHiveStatisticsProvider, the method testGetTableStatisticsValidationFailure:
@Test
public void testGetTableStatisticsValidationFailure()
{
    PartitionStatistics corruptedStatistics = PartitionStatistics.builder()
            .setBasicStatistics(new HiveBasicStatistics(-1, 0, 0, 0))
            .build();
    String partitionName = "p1=string1/p2=1234";
    MetastoreHiveStatisticsProvider statisticsProvider = new MetastoreHiveStatisticsProvider(
            (session, table, hivePartitions) -> ImmutableMap.of(partitionName, corruptedStatistics));
    // With ignoreCorruptedStatistics disabled, the invalid basic statistics cause the query to fail
    assertThatThrownBy(() -> statisticsProvider.getTableStatistics(
            getHiveSession(new HiveConfig().setIgnoreCorruptedStatistics(false)),
            TABLE, ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(partition(partitionName))))
            .isInstanceOf(TrinoException.class)
            .hasFieldOrPropertyWithValue("errorCode", HIVE_CORRUPTED_COLUMN_STATISTICS.toErrorCode());
    // With ignoreCorruptedStatistics enabled, the corrupted statistics are skipped and empty table statistics are returned
    assertEquals(statisticsProvider.getTableStatistics(
            getHiveSession(new HiveConfig().setIgnoreCorruptedStatistics(true)),
            TABLE, ImmutableMap.of(), ImmutableMap.of(), ImmutableList.of(partition(partitionName))),
            TableStatistics.empty());
}
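The pattern above recurs throughout these examples: the HiveConfig setters return the config itself, so a test can build the configuration and wrap it in a session in a single expression. A minimal sketch isolating that pattern, using only the getHiveSession helper and the setter already shown above (the variable names are illustrative):

// A session that fails queries when partition statistics are corrupted
ConnectorSession strictSession = getHiveSession(new HiveConfig().setIgnoreCorruptedStatistics(false));
// A session that ignores corrupted statistics and falls back to empty table statistics
ConnectorSession lenientSession = getHiveSession(new HiveConfig().setIgnoreCorruptedStatistics(true));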
Use of io.trino.plugin.hive.HiveConfig in project trino by trinodb.
From the class TestRubixCaching, the method setup:
@BeforeClass
public void setup()
        throws IOException
{
    cacheStoragePath = getStoragePath("/");
    config = new HdfsConfig();
    // Build session properties from a default HiveConfig with Rubix caching enabled
    List<PropertyMetadata<?>> hiveSessionProperties = getHiveSessionProperties(
            new HiveConfig(), new RubixEnabledConfig().setCacheEnabled(true), new OrcReaderConfig())
            .getSessionProperties();
    context = new HdfsContext(TestingConnectorSession.builder().setPropertyMetadata(hiveSessionProperties).build());
    nonCachingFileSystem = getNonCachingFileSystem();
}
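When several config objects contribute session properties, the snippet uses a two-step wiring: derive the property metadata from the configs, then attach it to a TestingConnectorSession. A minimal sketch of just that wiring, assuming the same getHiveSessionProperties helper used above (the variable names are illustrative):

// Step 1: turn the config objects into session property metadata
List<PropertyMetadata<?>> properties = getHiveSessionProperties(
        new HiveConfig(), new RubixEnabledConfig().setCacheEnabled(true), new OrcReaderConfig())
        .getSessionProperties();
// Step 2: build a connector session that exposes those properties
ConnectorSession session = TestingConnectorSession.builder()
        .setPropertyMetadata(properties)
        .build();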
Use of io.trino.plugin.hive.HiveConfig in project trino by trinodb.
From the class ParquetTester, the method assertMaxReadBytes:
void assertMaxReadBytes(
        List<ObjectInspector> objectInspectors, Iterable<?>[] writeValues, Iterable<?>[] readValues,
        List<String> columnNames, List<Type> columnTypes, Optional<MessageType> parquetSchema, DataSize maxReadBlockSize)
        throws Exception
{
    CompressionCodecName compressionCodecName = UNCOMPRESSED;
    // Session whose Parquet reader is capped at maxReadBlockSize
    HiveSessionProperties hiveSessionProperties = new HiveSessionProperties(
            new HiveConfig().setHiveStorageFormat(HiveStorageFormat.PARQUET).setUseParquetColumnNames(false),
            new OrcReaderConfig(),
            new OrcWriterConfig(),
            new ParquetReaderConfig().setMaxReadBlockSize(maxReadBlockSize),
            new ParquetWriterConfig());
    ConnectorSession session = TestingConnectorSession.builder()
            .setPropertyMetadata(hiveSessionProperties.getSessionProperties())
            .build();
    try (TempFile tempFile = new TempFile("test", "parquet")) {
        JobConf jobConf = new JobConf();
        jobConf.setEnum(COMPRESSION, compressionCodecName);
        jobConf.setBoolean(ENABLE_DICTIONARY, true);
        jobConf.setEnum(WRITER_VERSION, PARQUET_1_0);
        writeParquetColumn(jobConf, tempFile.getFile(), compressionCodecName,
                createTableProperties(columnNames, objectInspectors),
                getStandardStructObjectInspector(columnNames, objectInspectors),
                getIterators(writeValues), parquetSchema, false);
        Iterator<?>[] expectedValues = getIterators(readValues);
        try (ConnectorPageSource pageSource = fileFormat.createFileFormatReader(session, HDFS_ENVIRONMENT, tempFile.getFile(), columnNames, columnTypes)) {
            // Pages produced by the reader are checked against the configured max read block size
            assertPageSource(columnTypes, expectedValues, pageSource, Optional.of(getParquetMaxReadBlockSize(session).toBytes()));
            assertFalse(stream(expectedValues).allMatch(Iterator::hasNext));
        }
    }
}
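The five-argument HiveSessionProperties constructor seen above is where the HiveConfig meets the format-specific reader and writer configs. A minimal sketch of building a session with an explicit Parquet read-block cap, assuming airlift's DataSize.ofBytes factory and an illustrative 1 kB limit:

// Cap the Parquet reader's read block size for this session (1 kB is an illustrative value)
HiveSessionProperties properties = new HiveSessionProperties(
        new HiveConfig().setHiveStorageFormat(HiveStorageFormat.PARQUET).setUseParquetColumnNames(false),
        new OrcReaderConfig(),
        new OrcWriterConfig(),
        new ParquetReaderConfig().setMaxReadBlockSize(DataSize.ofBytes(1024)),
        new ParquetWriterConfig());
ConnectorSession session = TestingConnectorSession.builder()
        .setPropertyMetadata(properties.getSessionProperties())
        .build();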
Use of io.trino.plugin.hive.HiveConfig in project trino by trinodb.
From the class TestTimestamp, the method testTimestampBackedByInt64:
@Test
public void testTimestampBackedByInt64()
        throws Exception
{
    MessageType parquetSchema = parseMessageType("message hive_timestamp { optional int64 test (TIMESTAMP_MILLIS); }");
    ContiguousSet<Long> epochMillisValues = ContiguousSet.create(Range.closedOpen((long) -1_000, (long) 1_000), DiscreteDomain.longs());
    ImmutableList.Builder<SqlTimestamp> timestampsMillis = ImmutableList.builder();
    ImmutableList.Builder<Long> bigints = ImmutableList.builder();
    for (long value : epochMillisValues) {
        timestampsMillis.add(SqlTimestamp.fromMillis(3, value));
        bigints.add(value);
    }
    List<ObjectInspector> objectInspectors = singletonList(javaLongObjectInspector);
    List<String> columnNames = ImmutableList.of("test");
    // A session backed by a default HiveConfig is sufficient for this read path
    ConnectorSession session = getHiveSession(new HiveConfig());
    try (ParquetTester.TempFile tempFile = new ParquetTester.TempFile("test", "parquet")) {
        JobConf jobConf = new JobConf();
        jobConf.setEnum(WRITER_VERSION, PARQUET_1_0);
        ParquetTester.writeParquetColumn(jobConf, tempFile.getFile(), CompressionCodecName.SNAPPY,
                ParquetTester.createTableProperties(columnNames, objectInspectors),
                getStandardStructObjectInspector(columnNames, objectInspectors),
                new Iterator<?>[] {epochMillisValues.iterator()}, Optional.of(parquetSchema), false);
        // The same INT64 (TIMESTAMP_MILLIS) column is read back both as TIMESTAMP(3) and as BIGINT
        testReadingAs(TIMESTAMP_MILLIS, session, tempFile, columnNames, timestampsMillis.build());
        testReadingAs(BIGINT, session, tempFile, columnNames, bigints.build());
    }
}
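The expected values in this test are worth spelling out: SqlTimestamp.fromMillis(3, value) builds a millisecond-precision TIMESTAMP(3) value from an epoch-millis long, so the same longs serve as expected values for both read types. A small worked example of that conversion (the concrete values are illustrative):

// 1_000 ms after the epoch corresponds to 1970-01-01 00:00:01.000 at precision 3
SqlTimestamp oneSecondAfterEpoch = SqlTimestamp.fromMillis(3, 1_000);
// -1_000 ms corresponds to 1969-12-31 23:59:59.000
SqlTimestamp oneSecondBeforeEpoch = SqlTimestamp.fromMillis(3, -1_000);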