Use of com.facebook.presto.spi.SchemaTableName in project presto by prestodb.
The class DeltaMetadata, method listTables.
@Override
public List<SchemaTableName> listTables(ConnectorSession session, Optional<String> schemaName)
{
    List<String> schemaNames = schemaName.<List<String>>map(ImmutableList::of).orElse(listSchemaNames(session));
    ImmutableList.Builder<SchemaTableName> tableNames = ImmutableList.builder();
    for (String schema : schemaNames) {
        for (String tableName : metastore.getAllTables(metastoreContext(session), schema).orElse(emptyList())) {
            tableNames.add(new SchemaTableName(schema, tableName));
        }
    }
    return tableNames.build();
}
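As a quick aside, SchemaTableName itself is a simple value class. A minimal, self-contained sketch of constructing and printing one; the schema and table names are invented, and the lower-casing noted in the comments is our understanding of the prestodb SPI, worth verifying against the version you build against.

import com.facebook.presto.spi.SchemaTableName;

public class SchemaTableNameSketch
{
    public static void main(String[] args)
    {
        // "Sales" and "Orders" are invented names for illustration.
        SchemaTableName name = new SchemaTableName("Sales", "Orders");
        // Assumption: the prestodb SPI normalizes both parts to lower case.
        System.out.println(name.getSchemaName() + " / " + name.getTableName());
        // toString() prints schema.table, e.g. sales.orders
        System.out.println(name);
    }
}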
Use of com.facebook.presto.spi.SchemaTableName in project presto by prestodb.
The class StoragePartitionLoader, method getBucketedSplits.
private List<InternalHiveSplit> getBucketedSplits(
        Path path,
        ExtendedFileSystem fileSystem,
        InternalHiveSplitFactory splitFactory,
        BucketSplitInfo bucketSplitInfo,
        Optional<HiveSplit.BucketConversion> bucketConversion,
        String partitionName,
        boolean splittable,
        PathFilter pathFilter)
{
    int readBucketCount = bucketSplitInfo.getReadBucketCount();
    int tableBucketCount = bucketSplitInfo.getTableBucketCount();
    int partitionBucketCount = bucketConversion.map(HiveSplit.BucketConversion::getPartitionBucketCount).orElse(tableBucketCount);
    int bucketCount = max(readBucketCount, partitionBucketCount);
    checkState(readBucketCount <= tableBucketCount, "readBucketCount(%s) should be less than or equal to tableBucketCount(%s)", readBucketCount, tableBucketCount);

    // list all files in the partition
    List<HiveFileInfo> fileInfos = new ArrayList<>(partitionBucketCount);
    try {
        Iterators.addAll(fileInfos, directoryLister.list(fileSystem, table, path, namenodeStats, pathFilter, new HiveDirectoryContext(FAIL, isUseListDirectoryCache(session))));
    }
    catch (HiveFileIterator.NestedDirectoryNotAllowedException e) {
        // Fail here to be on the safe side. This seems to be the same as what Hive does.
        throw new PrestoException(HIVE_INVALID_BUCKET_FILES, format("Hive table '%s' is corrupt. Found sub-directory in bucket directory for partition: %s", table.getSchemaTableName(), partitionName));
    }

    ListMultimap<Integer, HiveFileInfo> bucketToFileInfo = ArrayListMultimap.create();
    if (!shouldCreateFilesForMissingBuckets(table, session)) {
        fileInfos.stream().forEach(fileInfo -> {
            String fileName = fileInfo.getPath().getName();
            OptionalInt bucket = getBucketNumber(fileName);
            if (bucket.isPresent()) {
                bucketToFileInfo.put(bucket.getAsInt(), fileInfo);
            }
            else {
                throw new PrestoException(HIVE_INVALID_BUCKET_FILES, format("invalid hive bucket file name: %s", fileName));
            }
        });
    }
    else {
        // build mapping of file name to bucket
        for (HiveFileInfo file : fileInfos) {
            String fileName = file.getPath().getName();
            OptionalInt bucket = getBucketNumber(fileName);
            if (bucket.isPresent()) {
                bucketToFileInfo.put(bucket.getAsInt(), file);
                continue;
            }

            // legacy mode requires exactly one file per bucket
            if (fileInfos.size() != partitionBucketCount) {
                throw new PrestoException(HIVE_INVALID_BUCKET_FILES, format(
                        "Hive table '%s' is corrupt. File '%s' does not match the standard naming pattern, and the number " +
                                "of files in the directory (%s) does not match the declared bucket count (%s) for partition: %s",
                        table.getSchemaTableName(), fileName, fileInfos.size(), partitionBucketCount, partitionName));
            }
            if (fileInfos.get(0).getPath().getName().matches("\\d+")) {
                try {
                    // File names are integers if they were created while file_renaming_enabled was set to true
                    fileInfos.sort(Comparator.comparingInt(fileInfo -> Integer.parseInt(fileInfo.getPath().getName())));
                }
                catch (NumberFormatException e) {
                    throw new PrestoException(HIVE_INVALID_FILE_NAMES, format("Hive table '%s' is corrupt. Some of the file names in partition %s are not integers", new SchemaTableName(table.getDatabaseName(), table.getTableName()), partitionName));
                }
            }
            else {
                // Sort FileStatus objects (instead of, e.g., fileStatus.getPath().toString). This matches org.apache.hadoop.hive.ql.metadata.Table.getSortedPaths
                fileInfos.sort(null);
            }

            // Use position in sorted list as the bucket number
            bucketToFileInfo.clear();
            for (int i = 0; i < fileInfos.size(); i++) {
                bucketToFileInfo.put(i, fileInfos.get(i));
            }
            break;
        }
    }

    // convert files to internal splits
    List<InternalHiveSplit> splitList = new ArrayList<>();
    for (int bucketNumber = 0; bucketNumber < bucketCount; bucketNumber++) {
        // Physical bucket #. This determines the file name. It also determines the order of splits in the result.
        int partitionBucketNumber = bucketNumber % partitionBucketCount;
        if (!bucketToFileInfo.containsKey(partitionBucketNumber)) {
            continue;
        }

        // Logical bucket #. Each logical bucket corresponds to a "bucket" from the engine's perspective.
        int readBucketNumber = bucketNumber % readBucketCount;

        boolean containsIneligibleTableBucket = false;
        List<Integer> eligibleTableBucketNumbers = new ArrayList<>();
        for (int tableBucketNumber = bucketNumber % tableBucketCount; tableBucketNumber < tableBucketCount; tableBucketNumber += bucketCount) {
            // table bucket number: this is used for evaluating "$bucket" filters
            if (bucketSplitInfo.isTableBucketEnabled(tableBucketNumber)) {
                eligibleTableBucketNumbers.add(tableBucketNumber);
            }
            else {
                containsIneligibleTableBucket = true;
            }
        }

        if (!eligibleTableBucketNumbers.isEmpty() && containsIneligibleTableBucket) {
            throw new PrestoException(NOT_SUPPORTED,
                    "The bucket filter cannot be satisfied. There are restrictions on the bucket filter when all of the following are true: " +
                            "1. the table has a different bucket count from at least one of its partitions that is read in this query; " +
                            "2. the table has a different but compatible bucket count with another table in the query; " +
                            "3. some buckets of the table are filtered out of the query, most likely using a filter on \"$bucket\". " +
                            "(table name: " + table.getTableName() + ", table bucket count: " + tableBucketCount + ", " +
                            "partition bucket count: " + partitionBucketCount + ", effective reading bucket count: " + readBucketCount + ")");
        }
        if (!eligibleTableBucketNumbers.isEmpty()) {
            for (HiveFileInfo fileInfo : bucketToFileInfo.get(partitionBucketNumber)) {
                eligibleTableBucketNumbers.stream()
                        .map(tableBucketNumber -> splitFactory.createInternalHiveSplit(fileInfo, readBucketNumber, tableBucketNumber, splittable))
                        .forEach(optionalSplit -> optionalSplit.ifPresent(splitList::add));
            }
        }
    }
    return splitList;
}
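The modular arithmetic above maps each physical bucket both to a file (the partition bucket) and to the engine's logical bucket. A minimal standalone sketch of that mapping, using made-up counts (table = 8, partition = 4, read = 8) rather than anything from the source:

public class BucketMappingSketch
{
    public static void main(String[] args)
    {
        // Hypothetical counts, chosen so the partition has fewer buckets than the table.
        int tableBucketCount = 8;
        int partitionBucketCount = 4;
        int readBucketCount = 8;
        int bucketCount = Math.max(readBucketCount, partitionBucketCount);

        for (int bucketNumber = 0; bucketNumber < bucketCount; bucketNumber++) {
            // Same modular mapping as getBucketedSplits above.
            int partitionBucketNumber = bucketNumber % partitionBucketCount; // which file to read
            int readBucketNumber = bucketNumber % readBucketCount;           // the engine's bucket
            System.out.printf("physical=%d -> file bucket=%d, read bucket=%d%n",
                    bucketNumber, partitionBucketNumber, readBucketNumber);
        }
    }
}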
Use of com.facebook.presto.spi.SchemaTableName in project presto by prestodb.
The class MetadataManager, method getViews.
@Override
public Map<QualifiedObjectName, ViewDefinition> getViews(Session session, QualifiedTablePrefix prefix)
{
    requireNonNull(prefix, "prefix is null");
    Optional<CatalogMetadata> catalog = getOptionalCatalogMetadata(session, prefix.getCatalogName());
    Map<QualifiedObjectName, ViewDefinition> views = new LinkedHashMap<>();
    if (catalog.isPresent()) {
        CatalogMetadata catalogMetadata = catalog.get();
        SchemaTablePrefix tablePrefix = prefix.asSchemaTablePrefix();
        for (ConnectorId connectorId : catalogMetadata.listConnectorIds()) {
            ConnectorMetadata metadata = catalogMetadata.getMetadataFor(connectorId);
            ConnectorSession connectorSession = session.toConnectorSession(connectorId);
            for (Entry<SchemaTableName, ConnectorViewDefinition> entry : metadata.getViews(connectorSession, tablePrefix).entrySet()) {
                QualifiedObjectName viewName = new QualifiedObjectName(prefix.getCatalogName(), entry.getKey().getSchemaName(), entry.getKey().getTableName());
                views.put(viewName, deserializeView(entry.getValue().getViewData()));
            }
        }
    }
    return ImmutableMap.copyOf(views);
}
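The inner loop above re-keys each connector view from a SchemaTableName to a catalog-qualified name. A minimal sketch of the same re-keying using a plain String key; the view map contents are invented, and QualifiedObjectName is deliberately omitted to keep the sketch self-contained:

import com.facebook.presto.spi.SchemaTableName;

import java.util.LinkedHashMap;
import java.util.Map;

public class QualifyViewsSketch
{
    public static void main(String[] args)
    {
        // Invented sample data: view name -> view definition (as opaque text).
        Map<SchemaTableName, String> views = new LinkedHashMap<>();
        views.put(new SchemaTableName("web", "daily_hits"), "SELECT ...");

        String catalogName = "hive";
        Map<String, String> qualified = new LinkedHashMap<>();
        for (Map.Entry<SchemaTableName, String> entry : views.entrySet()) {
            // catalog + schema + table, mirroring the QualifiedObjectName construction above
            String name = catalogName + "." + entry.getKey().getSchemaName() + "." + entry.getKey().getTableName();
            qualified.put(name, entry.getValue());
        }
        System.out.println(qualified.keySet()); // [hive.web.daily_hits]
    }
}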
Use of com.facebook.presto.spi.SchemaTableName in project presto by prestodb.
The class PinotMetadata, method getTableMetadata.
@Override
public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table)
{
    PinotTableHandle pinotTableHandle = (PinotTableHandle) table;
    checkArgument(pinotTableHandle.getConnectorId().equals(connectorId), "tableHandle is not for this connector");
    SchemaTableName tableName = new SchemaTableName(pinotTableHandle.getSchemaName(), pinotTableHandle.getTableName());
    return getTableMetadata(tableName);
}
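This downcast-check-rebuild pattern (cast the opaque ConnectorTableHandle, validate the connector id, then rebuild a SchemaTableName) recurs across connectors. A minimal sketch with a hypothetical handle class; MyTableHandle is invented for illustration and is not part of any connector:

import com.facebook.presto.spi.SchemaTableName;

public class HandleToNameSketch
{
    // Hypothetical stand-in for a connector-specific table handle.
    static final class MyTableHandle
    {
        private final String schemaName;
        private final String tableName;

        MyTableHandle(String schemaName, String tableName)
        {
            this.schemaName = schemaName;
            this.tableName = tableName;
        }

        String getSchemaName() { return schemaName; }

        String getTableName() { return tableName; }
    }

    public static void main(String[] args)
    {
        MyTableHandle handle = new MyTableHandle("default", "clicks");
        // Rebuild the engine-facing name from the connector's handle fields.
        SchemaTableName name = new SchemaTableName(handle.getSchemaName(), handle.getTableName());
        System.out.println(name); // default.clicks
    }
}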
Use of com.facebook.presto.spi.SchemaTableName in project presto by prestodb.
The class TestTpchMetadata, method testOrdersOrderStatusPredicatePushdown.
@Test
public void testOrdersOrderStatusPredicatePushdown()
{
    TpchTableHandle tableHandle = tpchMetadata.getTableHandle(session, new SchemaTableName("sf1", ORDERS.getTableName()));
    TupleDomain<ColumnHandle> domain;
    ConnectorTableLayoutResult tableLayout;

    domain = fixedValueTupleDomain(tpchMetadata, ORDER_STATUS, utf8Slice("P"));
    tableLayout = getTableOnlyLayout(tpchMetadata, session, tableHandle, new Constraint<>(domain, convertToPredicate(domain, ORDER_STATUS)));
    assertTupleDomainEquals(tableLayout.getUnenforcedConstraint(), TupleDomain.all(), session);
    assertTupleDomainEquals(tableLayout.getTableLayout().getPredicate(), domain, session);

    domain = fixedValueTupleDomain(tpchMetadata, ORDER_KEY, 42L);
    tableLayout = getTableOnlyLayout(tpchMetadata, session, tableHandle, new Constraint<>(domain, convertToPredicate(domain, ORDER_STATUS)));
    assertTupleDomainEquals(tableLayout.getUnenforcedConstraint(), domain, session);
    // Using a concrete expected value instead of checking TupleDomain::isNone, to make sure the test doesn't pass on some other wrong value.
    assertTupleDomainEquals(
            tableLayout.getTableLayout().getPredicate(),
            TupleDomain.columnWiseUnion(
                    fixedValueTupleDomain(tpchMetadata, ORDER_STATUS, utf8Slice("F")),
                    fixedValueTupleDomain(tpchMetadata, ORDER_STATUS, utf8Slice("O")),
                    fixedValueTupleDomain(tpchMetadata, ORDER_STATUS, utf8Slice("P"))),
            session);
}
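Two details worth noting. First, the TPCH connector encodes the scale factor in the schema name, so "sf1" addresses the scale-factor-1 data set. Second, because o_orderstatus takes exactly the three values "F", "O", and "P", the expected enforced predicate in the second case is the union of all three, i.e. the full ORDER_STATUS domain: the ORDER_KEY filter itself cannot be pushed down, so nothing narrows the pushed-down status domain. A minimal sketch of the naming convention, where the table list is an illustrative subset:

import com.facebook.presto.spi.SchemaTableName;

import java.util.Arrays;
import java.util.List;

public class TpchNamesSketch
{
    public static void main(String[] args)
    {
        // Illustrative subset of the TPCH tables.
        List<String> tables = Arrays.asList("orders", "lineitem", "customer");
        for (String table : tables) {
            // The schema name doubles as the scale factor.
            System.out.println(new SchemaTableName("sf1", table)); // sf1.orders, ...
        }
    }
}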