Use of io.trino.plugin.raptor.legacy.metadata.ColumnInfo in project trino by trinodb.
Class RaptorStorageManager, method computeShardStats.
private List<ColumnStats> computeShardStats(File file)
{
    try (OrcDataSource dataSource = fileOrcDataSource(orcReaderOptions, file)) {
        OrcReader reader = OrcReader.createOrcReader(dataSource, orcReaderOptions)
                .orElseThrow(() -> new TrinoException(RAPTOR_ERROR, "Data file is empty: " + file));
        ImmutableList.Builder<ColumnStats> list = ImmutableList.builder();
        for (ColumnInfo info : getColumnInfo(reader)) {
            computeColumnStats(reader, info.getColumnId(), info.getType(), typeManager).ifPresent(list::add);
        }
        return list.build();
    }
    catch (IOException e) {
        throw new TrinoException(RAPTOR_ERROR, "Failed to read file: " + file, e);
    }
}
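For reference, ColumnInfo in these snippets is a simple value object pairing a Raptor column ID with a Trino type. The sketch below reflects only the constructor and getters exercised here; the class name ColumnInfoSketch is illustrative, and the real io.trino.plugin.raptor.legacy.metadata.ColumnInfo may carry additional members.

    // Minimal sketch of the shape of ColumnInfo as used above: a long column ID plus a Trino Type.
    // The real class lives in io.trino.plugin.raptor.legacy.metadata; this standalone copy is for illustration only.
    import io.trino.spi.type.Type;

    public final class ColumnInfoSketch
    {
        private final long columnId;
        private final Type type;

        public ColumnInfoSketch(long columnId, Type type)
        {
            this.columnId = columnId;
            this.type = type;
        }

        public long getColumnId()
        {
            return columnId;
        }

        public Type getType()
        {
            return type;
        }
    }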
Use of io.trino.plugin.raptor.legacy.metadata.ColumnInfo in project trino by trinodb.
Class RaptorStorageManager, method getColumnInfoFromOrcColumnTypes.
private List<ColumnInfo> getColumnInfoFromOrcColumnTypes(List<String> orcColumnNames, ColumnMetadata<OrcType> orcColumnTypes)
{
    Type rowType = getType(orcColumnTypes, ROOT_COLUMN);
    if (orcColumnNames.size() != rowType.getTypeParameters().size()) {
        throw new TrinoException(RAPTOR_ERROR, "Column names and types do not match");
    }
    ImmutableList.Builder<ColumnInfo> list = ImmutableList.builder();
    for (int i = 0; i < orcColumnNames.size(); i++) {
        list.add(new ColumnInfo(Long.parseLong(orcColumnNames.get(i)), rowType.getTypeParameters().get(i)));
    }
    return list.build();
}
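The Long.parseLong call above shows the convention this method relies on: the ORC column names in these files are the Raptor column IDs rendered as decimal strings, while the root row type supplies the Trino type at the same ordinal. A small standalone sketch of that pairing follows; the class name, column IDs 1 and 2, and the BIGINT/VARCHAR types are invented sample data.

    // Illustrative pairing of numeric ORC column names with Trino types,
    // mirroring getColumnInfoFromOrcColumnTypes on hypothetical inputs.
    import static io.trino.spi.type.BigintType.BIGINT;
    import static io.trino.spi.type.VarcharType.VARCHAR;

    import com.google.common.collect.ImmutableList;
    import io.trino.plugin.raptor.legacy.metadata.ColumnInfo;
    import io.trino.spi.type.Type;
    import java.util.List;

    public class OrcColumnMappingExample
    {
        public static void main(String[] args)
        {
            List<String> orcColumnNames = ImmutableList.of("1", "2");
            List<Type> columnTypes = ImmutableList.of(BIGINT, VARCHAR);

            ImmutableList.Builder<ColumnInfo> columns = ImmutableList.builder();
            for (int i = 0; i < orcColumnNames.size(); i++) {
                // The ORC column name is the Raptor column ID as a decimal string.
                columns.add(new ColumnInfo(Long.parseLong(orcColumnNames.get(i)), columnTypes.get(i)));
            }
            for (ColumnInfo column : columns.build()) {
                System.out.println(column.getColumnId() + " -> " + column.getType());
            }
        }
    }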
Use of io.trino.plugin.raptor.legacy.metadata.ColumnInfo in project trino by trinodb.
Class TestShardMetadataRecordCursor, method testSimple.
@Test
public void testSimple()
{
    ShardManager shardManager = createShardManager(dbi);

    // Add shards to the table
    long tableId = 1;
    OptionalInt bucketNumber = OptionalInt.empty();
    UUID uuid1 = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    UUID uuid3 = UUID.randomUUID();
    ShardInfo shardInfo1 = new ShardInfo(uuid1, bucketNumber, ImmutableSet.of("node1"), ImmutableList.of(), 1, 10, 100, 0x1234);
    ShardInfo shardInfo2 = new ShardInfo(uuid2, bucketNumber, ImmutableSet.of("node2"), ImmutableList.of(), 2, 20, 200, 0xCAFEBABEDEADBEEFL);
    ShardInfo shardInfo3 = new ShardInfo(uuid3, bucketNumber, ImmutableSet.of("node3"), ImmutableList.of(), 3, 30, 300, 0xFEDCBA0987654321L);
    List<ShardInfo> shards = ImmutableList.of(shardInfo1, shardInfo2, shardInfo3);

    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, ImmutableList.of(new ColumnInfo(1, BIGINT), new ColumnInfo(2, DATE)), shards, Optional.empty(), 0);

    Slice schema = utf8Slice(DEFAULT_TEST_ORDERS.getSchemaName());
    Slice table = utf8Slice(DEFAULT_TEST_ORDERS.getTableName());

    DateTime date1 = DateTime.parse("2015-01-01T00:00");
    DateTime date2 = DateTime.parse("2015-01-02T00:00");
    TupleDomain<Integer> tupleDomain = TupleDomain.withColumnDomains(ImmutableMap.<Integer, Domain>builder()
            .put(0, Domain.singleValue(createVarcharType(10), schema))
            .put(1, Domain.create(ValueSet.ofRanges(lessThanOrEqual(createVarcharType(10), table)), true))
            .put(8, Domain.create(ValueSet.ofRanges(lessThanOrEqual(BIGINT, date1.getMillis()), greaterThan(BIGINT, date2.getMillis())), true))
            .put(9, Domain.create(ValueSet.ofRanges(lessThanOrEqual(BIGINT, date1.getMillis()), greaterThan(BIGINT, date2.getMillis())), true))
            .buildOrThrow());

    List<MaterializedRow> actual;
    try (RecordCursor cursor = new ShardMetadataSystemTable(dbi).cursor(null, SESSION, tupleDomain)) {
        actual = getMaterializedResults(cursor, SHARD_METADATA.getColumns());
    }
    assertEquals(actual.size(), 3);

    List<MaterializedRow> expected = ImmutableList.of(
            new MaterializedRow(DEFAULT_PRECISION, schema, table, utf8Slice(uuid1.toString()), null, 100L, 10L, 1L, utf8Slice("0000000000001234"), null, null, null, null),
            new MaterializedRow(DEFAULT_PRECISION, schema, table, utf8Slice(uuid2.toString()), null, 200L, 20L, 2L, utf8Slice("cafebabedeadbeef"), null, null, null, null),
            new MaterializedRow(DEFAULT_PRECISION, schema, table, utf8Slice(uuid3.toString()), null, 300L, 30L, 3L, utf8Slice("fedcba0987654321"), null, null, null, null));
    assertEquals(actual, expected);
}
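The test exercises the standard write path: shards are registered by committing them together with the table's ColumnInfo list, which supplies the column IDs and types the shard manager records alongside the shards. Below is a minimal sketch of that commit sequence, assuming a ShardManager, table ID, and ShardInfo list like the ones built above; the class name CommitShardsExample is hypothetical.

    // Sketch of the commit pattern from the test above, factored into a helper.
    // The ShardManager, table ID, and shards are assumed to exist already.
    import static io.trino.spi.type.BigintType.BIGINT;
    import static io.trino.spi.type.DateType.DATE;

    import com.google.common.collect.ImmutableList;
    import io.trino.plugin.raptor.legacy.metadata.ColumnInfo;
    import io.trino.plugin.raptor.legacy.metadata.ShardInfo;
    import io.trino.plugin.raptor.legacy.metadata.ShardManager;
    import java.util.List;
    import java.util.Optional;

    final class CommitShardsExample
    {
        private CommitShardsExample() {}

        // Commits shards for a two-column table (column 1: BIGINT, column 2: DATE), as in testSimple.
        static void commit(ShardManager shardManager, long tableId, List<ShardInfo> shards)
        {
            List<ColumnInfo> columns = ImmutableList.of(new ColumnInfo(1, BIGINT), new ColumnInfo(2, DATE));
            long transactionId = shardManager.beginTransaction();
            // The ColumnInfo list supplies the column IDs and types recorded with the committed shards.
            shardManager.commitShards(transactionId, tableId, columns, shards, Optional.empty(), 0);
        }
    }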
Use of io.trino.plugin.raptor.legacy.metadata.ColumnInfo in project trino by trinodb.
Class TestShardEjector, method testEjector.
@Test(invocationCount = 20)
public void testEjector()
        throws Exception
{
    NodeManager nodeManager = createNodeManager("node1", "node2", "node3", "node4", "node5");
    ShardEjector ejector = new ShardEjector(
            nodeManager.getCurrentNode().getNodeIdentifier(), nodeManager::getWorkerNodes,
            shardManager, storageService, new Duration(1, HOURS), Optional.of(new TestingBackupStore()), "test");

    List<ShardInfo> shards = ImmutableList.<ShardInfo>builder()
            .add(shardInfo("node1", 14)).add(shardInfo("node1", 13)).add(shardInfo("node1", 12)).add(shardInfo("node1", 11))
            .add(shardInfo("node1", 10)).add(shardInfo("node1", 10)).add(shardInfo("node1", 10)).add(shardInfo("node1", 10))
            .add(shardInfo("node2", 5)).add(shardInfo("node2", 5))
            .add(shardInfo("node3", 10)).add(shardInfo("node4", 10)).add(shardInfo("node5", 10))
            .add(shardInfo("node6", 200))
            .build();

    long tableId = createTable("test");
    List<ColumnInfo> columns = ImmutableList.of(new ColumnInfo(1, BIGINT));
    shardManager.createTable(tableId, columns, false, OptionalLong.empty());

    long transactionId = shardManager.beginTransaction();
    shardManager.commitShards(transactionId, tableId, columns, shards, Optional.empty(), 0);

    for (ShardInfo shard : shards.subList(0, 8)) {
        File file = storageService.getStorageFile(shard.getShardUuid());
        storageService.createParents(file);
        assertTrue(file.createNewFile());
    }

    ejector.process();

    shardManager.getShardNodes(tableId, TupleDomain.all());

    Set<UUID> ejectedShards = shards.subList(0, 4).stream().map(ShardInfo::getShardUuid).collect(toSet());
    Set<UUID> keptShards = shards.subList(4, 8).stream().map(ShardInfo::getShardUuid).collect(toSet());
    Set<UUID> remaining = uuids(shardManager.getNodeShards("node1"));

    for (UUID uuid : ejectedShards) {
        assertFalse(remaining.contains(uuid));
        assertFalse(storageService.getStorageFile(uuid).exists());
    }

    assertEquals(remaining, keptShards);

    for (UUID uuid : keptShards) {
        assertTrue(storageService.getStorageFile(uuid).exists());
    }

    Set<UUID> others = ImmutableSet.<UUID>builder()
            .addAll(uuids(shardManager.getNodeShards("node2")))
            .addAll(uuids(shardManager.getNodeShards("node3")))
            .addAll(uuids(shardManager.getNodeShards("node4")))
            .addAll(uuids(shardManager.getNodeShards("node5")))
            .build();
    assertTrue(others.containsAll(ejectedShards));
}
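The shardInfo(node, size) helper is not shown in this snippet. A hypothetical reconstruction appears below, sitting inside the test class and following the eight-argument ShardInfo constructor used in testSimple; every concrete value in it (row count, how the size argument maps to compressed and uncompressed sizes, the hash) is an assumption made purely for illustration.

    // Hypothetical stand-in for the shardInfo(node, size) helper used in the test above.
    private static ShardInfo shardInfo(String node, long size)
    {
        return new ShardInfo(
                UUID.randomUUID(),
                OptionalInt.empty(),
                ImmutableSet.of(node),
                ImmutableList.of(), // no column statistics needed for ejection tests
                1,                  // row count (assumed)
                size,               // compressed size (assumed meaning of the size argument)
                size,               // uncompressed size (assumed equal)
                0);                 // xxhash64 (assumed)
    }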
Use of io.trino.plugin.raptor.legacy.metadata.ColumnInfo in project trino by trinodb.
Class TestBucketBalancer, method createBucketedTable.
private long createBucketedTable(String tableName, long distributionId, DataSize compressedSize)
{
    MetadataDao dao = dbi.onDemand(MetadataDao.class);
    long tableId = dao.insertTable("test", tableName, false, false, distributionId, 0);
    List<ColumnInfo> columnsA = ImmutableList.of(new ColumnInfo(1, BIGINT));
    shardManager.createTable(tableId, columnsA, false, OptionalLong.empty());
    metadataDao.updateTableStats(tableId, 1024, 1024 * 1024 * 1024, compressedSize.toBytes(), compressedSize.toBytes() * 2);
    return tableId;
}
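A hypothetical call site for this helper is sketched below to show the inputs it expects: a table name, an existing distribution ID, and the compressed size that updateTableStats records (DataSize here is airlift's io.airlift.units.DataSize, already implied by the compressedSize.toBytes() call above). The table name, distribution ID, and size are invented values.

    // Hypothetical call site inside the test class; the distribution is assumed
    // to have been created earlier by the test setup.
    long distributionId = 1;
    long tableId = createBucketedTable("bucket_balance_test", distributionId, DataSize.of(32, DataSize.Unit.MEGABYTE));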