Use of io.prestosql.testing.MaterializedRow in project hetu-core by openlookeng.
From the class TestMongoIntegrationSmokeTest, method testInsertWithEveryType:
@Test
public void testInsertWithEveryType()
{
    String createSql = "" +
            "CREATE TABLE test_insert_types_table " +
            "(" +
            " vc varchar" +
            ", vb varbinary" +
            ", bi bigint" +
            ", d double" +
            ", b boolean" +
            ", dt date" +
            ", ts timestamp" +
            ", objid objectid" +
            ")";
    getQueryRunner().execute(getSession(), createSql);

    String insertSql = "" +
            "INSERT INTO test_insert_types_table " +
            "SELECT" +
            " 'foo' _varchar" +
            ", cast('bar' as varbinary) _varbinary" +
            ", cast(1 as bigint) _bigint" +
            ", 3.14E0 _double" +
            ", true _boolean" +
            ", DATE '1980-05-07' _date" +
            ", TIMESTAMP '1980-05-07 11:22:33.456' _timestamp" +
            ", ObjectId('ffffffffffffffffffffffff') _objectid";
    getQueryRunner().execute(getSession(), insertSql);

    // toTestTypes() converts connector-specific values to standard Java types
    // (e.g. timestamps become LocalDateTime), so plain assertEquals works below
    MaterializedResult results = getQueryRunner().execute(getSession(), "SELECT * FROM test_insert_types_table").toTestTypes();
    assertEquals(results.getRowCount(), 1);
    MaterializedRow row = results.getMaterializedRows().get(0);
    assertEquals(row.getField(0), "foo");
    assertEquals(row.getField(1), "bar".getBytes(UTF_8));
    assertEquals(row.getField(2), 1L);
    assertEquals(row.getField(3), 3.14);
    assertEquals(row.getField(4), true);
    assertEquals(row.getField(5), LocalDate.of(1980, 5, 7));
    assertEquals(row.getField(6), LocalDateTime.of(1980, 5, 7, 11, 22, 33, 456_000_000));

    assertUpdate("DROP TABLE test_insert_types_table");
    assertFalse(getQueryRunner().tableExists(getSession(), "test_insert_types_table"));
}
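Besides per-field assertions, whole results can be compared: MaterializedResult implements equals, so an expected result built with MaterializedResult.resultBuilder can be asserted against the query output directly. A minimal sketch against the table above (the two-column projection is illustrative, not part of the original test):

// VARCHAR and BIGINT are the static type constants from io.prestosql.spi.type
MaterializedResult expected = MaterializedResult.resultBuilder(getSession(), VARCHAR, BIGINT)
        .row("foo", 1L)
        .build();
MaterializedResult actual = getQueryRunner()
        .execute(getSession(), "SELECT vc, bi FROM test_insert_types_table")
        .toTestTypes();
assertEquals(actual, expected);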
Use of io.prestosql.testing.MaterializedRow in project TiBigData by tidb-incubator.
From the class PrestoTest, method test:
@Test
public void test()
{
    preCommand();
    try {
        String sql = "SELECT * FROM sample_table";
        List<MaterializedRow> targetRows = ImmutableList.of(
                new MaterializedRow(DEFAULT_PRECISION, 1, "zs"),
                new MaterializedRow(DEFAULT_PRECISION, 2, "ls"));
        tiDBQueryRunner.verifySqlResult(sql, targetRows);

        sql = "SELECT c1,c2 FROM sample_table WHERE c1 = 1";
        targetRows = ImmutableList.of(new MaterializedRow(DEFAULT_PRECISION, 1, "zs"));
        tiDBQueryRunner.verifySqlResult(sql, targetRows);

        sql = "SELECT * FROM sample_table WHERE c1 = 1 OR c1 = 2";
        targetRows = ImmutableList.of(
                new MaterializedRow(DEFAULT_PRECISION, 1, "zs"),
                new MaterializedRow(DEFAULT_PRECISION, 2, "ls"));
        tiDBQueryRunner.verifySqlResult(sql, targetRows);
    }
    finally {
        afterCommand();
    }
}
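Here the expected rows are constructed directly. The MaterializedRow(int precision, Object... values) constructor stores the given values, rounding approximate numeric fields (Double/Float) to the given precision; DEFAULT_PRECISION is the MaterializedResult.DEFAULT_PRECISION constant. A minimal sketch of direct construction and inspection:

MaterializedRow expected = new MaterializedRow(MaterializedResult.DEFAULT_PRECISION, 1, "zs");
assertEquals(expected.getFieldCount(), 2);  // number of values in the row
assertEquals(expected.getField(0), 1);      // fields are addressed by position
assertEquals(expected.getField(1), "zs");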
Use of io.prestosql.testing.MaterializedRow in project boostkit-bigdata by kunpengcompute.
From the class AbstractTestHive, method testGetRecordsUnpartitioned:
@Test
public void testGetRecordsUnpartitioned()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);

        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableUnpartitioned);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());
        Map<String, Integer> columnIndex = indexColumns(columnHandles);

        List<ConnectorSplit> splits = getAllSplits(tableHandle, transaction, session);
        assertThat(splits).hasSameSizeAs(tableUnpartitionedPartitions);

        for (ConnectorSplit split : splits) {
            HiveSplit hiveSplit = HiveSplitWrapper.getOnlyHiveSplit(split);
            assertEquals(hiveSplit.getPartitionKeys(), ImmutableList.of());

            long rowNumber = 0;
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles)) {
                assertPageSourceType(pageSource, TEXTFILE);
                MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
                for (MaterializedRow row : result) {
                    rowNumber++;
                    if (rowNumber % 19 == 0) {
                        assertNull(row.getField(columnIndex.get("t_string")));
                    }
                    else if (rowNumber % 19 == 1) {
                        assertEquals(row.getField(columnIndex.get("t_string")), "");
                    }
                    else {
                        assertEquals(row.getField(columnIndex.get("t_string")), "unpartitioned");
                    }
                    assertEquals(row.getField(columnIndex.get("t_tinyint")), (byte) (1 + rowNumber));
                }
            }
            assertEquals(rowNumber, 100);
        }
    }
}
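The indexColumns helper used above maps each column name to its position in the handle list, so assertions can address fields by name. The original helper isn't shown on this page; a sketch of how such a helper is typically implemented for Hive column handles (assuming HiveColumnHandle exposes getName()):

private static Map<String, Integer> indexColumns(List<ColumnHandle> columnHandles)
{
    ImmutableMap.Builder<String, Integer> index = ImmutableMap.builder();
    int position = 0;
    for (ColumnHandle columnHandle : columnHandles) {
        // the cast is safe in these tests: all handles come from the Hive connector
        index.put(((HiveColumnHandle) columnHandle).getName(), position);
        position++;
    }
    return index.build();
}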
Use of io.prestosql.testing.MaterializedRow in project boostkit-bigdata by kunpengcompute.
From the class AbstractTestHive, method doTestMetadataDelete:
private void doTestMetadataDelete(HiveStorageFormat storageFormat, SchemaTableName tableName)
        throws Exception
{
    // creating the table
    doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS_PARTITIONED);
    insertData(tableName, CREATE_TABLE_PARTITIONED_DATA);

    MaterializedResult.Builder expectedResultBuilder = MaterializedResult.resultBuilder(SESSION, CREATE_TABLE_PARTITIONED_DATA.getTypes());
    expectedResultBuilder.rows(CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows());

    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);

        // verify partitions were created
        List<String> partitionNames = transaction.getMetastore(tableName.getSchemaName())
                .getPartitionNames(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName())
                .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
        assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream()
                .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1))
                .collect(toList()));

        // verify table directory is not empty
        Set<String> filesAfterInsert = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertFalse(filesAfterInsert.isEmpty());

        // verify the data
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(result.getMaterializedRows(), expectedResultBuilder.build().getMaterializedRows());
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();

        // get ds column handle
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("ds");

        // delete ds=2015-07-03
        session = newSession();
        TupleDomain<ColumnHandle> tupleDomain = TupleDomain.fromFixedValues(ImmutableMap.of(dsColumnHandle, NullableValue.of(createUnboundedVarcharType(), utf8Slice("2015-07-03"))));
        Constraint constraint = new Constraint(tupleDomain, convertToPredicate(tupleDomain));
        tableHandle = applyFilter(metadata, tableHandle, constraint);
        tableHandle = metadata.applyDelete(session, tableHandle).get();
        metadata.executeDelete(session, tableHandle);
        transaction.commit();
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("ds");
        int dsColumnOrdinalPosition = columnHandles.indexOf(dsColumnHandle);

        // verify the data
        ImmutableList<MaterializedRow> expectedRows = expectedResultBuilder.build().getMaterializedRows().stream()
                .filter(row -> !"2015-07-03".equals(row.getField(dsColumnOrdinalPosition)))
                .collect(toImmutableList());
        MaterializedResult actualAfterDelete = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(actualAfterDelete.getMaterializedRows(), expectedRows);
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        HiveColumnHandle dsColumnHandle = (HiveColumnHandle) metadata.getColumnHandles(session, tableHandle).get("ds");

        // delete ds=2015-07-01 and 2015-07-02
        session = newSession();
        TupleDomain<ColumnHandle> tupleDomain2 = TupleDomain.withColumnDomains(ImmutableMap.of(
                dsColumnHandle,
                Domain.create(ValueSet.ofRanges(Range.range(createUnboundedVarcharType(), utf8Slice("2015-07-01"), true, utf8Slice("2015-07-02"), true)), false)));
        Constraint constraint2 = new Constraint(tupleDomain2, convertToPredicate(tupleDomain2));
        tableHandle = applyFilter(metadata, tableHandle, constraint2);
        tableHandle = metadata.applyDelete(session, tableHandle).get();
        metadata.executeDelete(session, tableHandle);
        transaction.commit();
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, tableHandle).values());

        // verify the data
        session = newSession();
        MaterializedResult actualAfterDelete2 = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
        assertEqualsIgnoreOrder(actualAfterDelete2.getMaterializedRows(), ImmutableList.of());

        // verify table directory is empty
        Set<String> filesAfterDelete = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertTrue(filesAfterDelete.isEmpty());
    }
}
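Both deletes are metadata-level partition drops driven by TupleDomain constraints, which is why the table directory ends up empty. For reference, the fixed-values tuple domain built for the first delete is equivalent to a single-value Domain; a sketch of the equivalent construction (same effect as the fromFixedValues call above):

TupleDomain<ColumnHandle> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.of(
        dsColumnHandle,
        Domain.singleValue(createUnboundedVarcharType(), utf8Slice("2015-07-03"))));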
Use of io.prestosql.testing.MaterializedRow in project boostkit-bigdata by kunpengcompute.
From the class AbstractTestHive, method assertBucketTableEvolutionResult:
private static void assertBucketTableEvolutionResult(MaterializedResult result, List<ColumnHandle> columnHandles, Set<Integer> bucketIds, int rowCount)
{
    // Assert that only elements in the specified buckets show up, and each element shows up 3 times.
    int bucketCount = 8;
    Set<Long> expectedIds = LongStream.range(0, rowCount)
            .filter(x -> bucketIds.contains(toIntExact(x % bucketCount)))
            .boxed()
            .collect(toImmutableSet());

    // assert that content from all three buckets is the same
    Map<String, Integer> columnIndex = indexColumns(columnHandles);
    OptionalInt idColumnIndex = columnIndex.containsKey("id") ? OptionalInt.of(columnIndex.get("id")) : OptionalInt.empty();
    int nameColumnIndex = columnIndex.get("name");
    int bucketColumnIndex = columnIndex.get(BUCKET_COLUMN_NAME);

    Map<Long, Integer> idCount = new HashMap<>();
    for (MaterializedRow row : result.getMaterializedRows()) {
        String name = (String) row.getField(nameColumnIndex);
        int bucket = (int) row.getField(bucketColumnIndex);
        idCount.compute(Long.parseLong(name), (key, oldValue) -> oldValue == null ? 1 : oldValue + 1);
        assertEquals(bucket, Integer.parseInt(name) % bucketCount);
        if (idColumnIndex.isPresent()) {
            long id = (long) row.getField(idColumnIndex.getAsInt());
            assertEquals(Integer.parseInt(name), id);
        }
    }
    assertEquals((int) idCount.values().stream().distinct().collect(onlyElement()), 3);
    assertEquals(idCount.keySet(), expectedIds);
}
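The onlyElement() collector here is Guava's MoreCollectors.onlyElement(), which throws unless the stream holds exactly one element; applied to the distinct per-id counts, the final assertEquals therefore checks both that every id occurs equally often and that the common count is 3.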