Use of io.prestosql.spi.connector.SchemaTableName in project carbondata by apache.
From the class CarbondataPageSourceProvider, method getCarbonTable:
/**
 * Looks up the CarbonTable for the given split in the connector's table cache.
 *
 * @param carbonSplit the Hive split that identifies the database, table and tablePath
 * @param configuration the Hadoop configuration used when loading the table metadata
 * @return the cached CarbonTable for the split
 */
private CarbonTable getCarbonTable(HiveSplit carbonSplit, Configuration configuration) {
  CarbonTableCacheModel tableCacheModel = carbonTableReader.getCarbonCache(
      new SchemaTableName(carbonSplit.getDatabase(), carbonSplit.getTable()),
      carbonSplit.getSchema().getProperty("tablePath"), configuration);
  checkNotNull(tableCacheModel, "tableCacheModel should not be null");
  checkNotNull(tableCacheModel.getCarbonTable(), "tableCacheModel.carbonTable should not be null");
  checkNotNull(tableCacheModel.getCarbonTable().getTableInfo(), "tableCacheModel.carbonTable.tableInfo should not be null");
  return tableCacheModel.getCarbonTable();
}
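SchemaTableName is the value object Presto uses to identify a table, which is what makes it a convenient cache key for carbonTableReader.getCarbonCache. A minimal, self-contained sketch of how such a key behaves follows; the example class and literals are illustrative only, and the lower-casing behaviour comes from the io.prestosql SPI rather than anything CarbonData adds.

import io.prestosql.spi.connector.SchemaTableName;

// Illustrative only: shows the cache key that getCarbonTable() builds from the split.
public final class SchemaTableNameExample {
  public static void main(String[] args) {
    // The io.prestosql SPI lower-cases both parts, so lookups are insensitive
    // to how the identifiers were typed.
    SchemaTableName key = new SchemaTableName("Sales_DB", "Orders");
    System.out.println(key.getSchemaName()); // sales_db
    System.out.println(key.getTableName());  // orders
    System.out.println(key);                 // sales_db.orders
  }
}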
Use of io.prestosql.spi.connector.SchemaTableName in project ranger by apache.
From the class RangerSystemAccessControl, method filterTables:
@Override
public Set<SchemaTableName> filterTables(SystemSecurityContext context, String catalogName, Set<SchemaTableName> tableNames) {
  LOG.debug("==> RangerSystemAccessControl.filterTables(" + catalogName + ")");
  Set<SchemaTableName> filteredTableNames = new HashSet<>(tableNames.size());
  for (SchemaTableName tableName : tableNames) {
    RangerPrestoResource res = createResource(catalogName, tableName.getSchemaName(), tableName.getTableName());
    if (hasPermission(res, context, PrestoAccessType.SELECT)) {
      filteredTableNames.add(tableName);
    }
  }
  return filteredTableNames;
}
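The loop keeps only the tables the caller may SELECT from and returns them as a new set. The same pattern, generalized to any permission predicate over SchemaTableName, can be sketched as follows; the class and the allow-list predicate are illustrative stand-ins, not Ranger's hasPermission/createResource machinery.

import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import com.google.common.collect.ImmutableSet;
import io.prestosql.spi.connector.SchemaTableName;

// Illustrative stand-in: any Predicate<SchemaTableName> plays the role of
// hasPermission(createResource(catalog, schema, table), context, SELECT).
public final class FilterTablesSketch {
  static Set<SchemaTableName> filterTables(Set<SchemaTableName> tables, Predicate<SchemaTableName> allowed) {
    return tables.stream().filter(allowed).collect(Collectors.toSet());
  }

  public static void main(String[] args) {
    Set<SchemaTableName> tables = ImmutableSet.of(
        new SchemaTableName("sales", "orders"),
        new SchemaTableName("hr", "salaries"));
    // Pretend policy: this caller may only see the "sales" schema.
    System.out.println(filterTables(tables, t -> t.getSchemaName().equals("sales"))); // [sales.orders]
  }
}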
Use of io.prestosql.spi.connector.SchemaTableName in project ranger by apache.
From the class RangerSystemAccessControlTest, method testTableOperations:
@Test
@SuppressWarnings("PMD")
public void testTableOperations() {
  Set<SchemaTableName> aliceTables = ImmutableSet.of(new SchemaTableName("schema", "table"));
  assertEquals(accessControlManager.filterTables(context(alice), aliceCatalog, aliceTables), aliceTables);
  assertEquals(accessControlManager.filterTables(context(bob), "alice-catalog", aliceTables), ImmutableSet.of());
  accessControlManager.checkCanCreateTable(context(alice), aliceTable);
  accessControlManager.checkCanDropTable(context(alice), aliceTable);
  accessControlManager.checkCanSelectFromColumns(context(alice), aliceTable, ImmutableSet.of());
  accessControlManager.checkCanInsertIntoTable(context(alice), aliceTable);
  accessControlManager.checkCanDeleteFromTable(context(alice), aliceTable);
  accessControlManager.checkCanRenameColumn(context(alice), aliceTable);
  try {
    accessControlManager.checkCanCreateTable(context(bob), aliceTable);
  } catch (AccessDeniedException expected) {
    // expected: bob is not allowed to create tables in alice's catalog
  }
}
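As written, the final try/catch also passes when no exception is thrown at all. If the intent is to require the denial, assertThrows makes that explicit; the line below is only a suggested variant that would replace the try/catch above, assuming JUnit 4.13+ (or the JUnit 5 equivalent) is on the classpath, and is not how the Ranger test is committed.

// Suggested variant of the last check: fails the test unless bob's create is actually denied.
assertThrows(AccessDeniedException.class,
    () -> accessControlManager.checkCanCreateTable(context(bob), aliceTable));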
Use of io.prestosql.spi.connector.SchemaTableName in project carbondata by apache.
From the class CarbondataSplitManager, method getSplits:
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session,
    ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy) {
  HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle;
  SchemaTableName schemaTableName = hiveTableHandle.getSchemaTableName();
  carbonTableReader.setPrestoQueryId(session.getQueryId());
  // get table metadata
  SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transactionHandle);
  Table table = metastore.getTable(new HiveIdentity(session), schemaTableName.getSchemaName(), schemaTableName.getTableName())
      .orElseThrow(() -> new TableNotFoundException(schemaTableName));
  if (!table.getStorage().getStorageFormat().getInputFormat().contains("carbon")) {
    return super.getSplits(transactionHandle, session, tableHandle, splitSchedulingStrategy);
  }
  // for hive metastore, get table location from catalog table's tablePath
  String location = table.getStorage().getSerdeParameters().get("tablePath");
  if (StringUtils.isEmpty(location)) {
    // file metastore case tablePath can be null, so get from location
    location = table.getStorage().getLocation();
  }
  List<PartitionSpec> filteredPartitions = new ArrayList<>();
  if (hiveTableHandle.getPartitionColumns().size() > 0 && hiveTableHandle.getPartitions().isPresent()) {
    List<String> colNames = hiveTableHandle.getPartitionColumns().stream()
        .map(HiveColumnHandle::getName).collect(Collectors.toList());
    for (HivePartition partition : hiveTableHandle.getPartitions().get()) {
      filteredPartitions.add(new PartitionSpec(colNames,
          location + CarbonCommonConstants.FILE_SEPARATOR + partition.getPartitionId()));
    }
  }
  String queryId = System.nanoTime() + "";
  QueryStatistic statistic = new QueryStatistic();
  QueryStatisticsRecorder statisticRecorder = CarbonTimeStatisticsFactory.createDriverRecorder();
  statistic.addStatistics(QueryStatisticsConstants.BLOCK_ALLOCATION, System.currentTimeMillis());
  statisticRecorder.recordStatisticsForDriver(statistic, queryId);
  statistic = new QueryStatistic();
  carbonTableReader.setQueryId(queryId);
  TupleDomain<HiveColumnHandle> predicate = hiveTableHandle.getCompactEffectivePredicate();
  Configuration configuration = this.hdfsEnvironment.getConfiguration(
      new HdfsEnvironment.HdfsContext(session, schemaTableName.getSchemaName(), schemaTableName.getTableName()),
      new Path(location));
  configuration = carbonTableReader.updateS3Properties(configuration);
  for (Map.Entry<String, String> entry : table.getStorage().getSerdeParameters().entrySet()) {
    configuration.set(entry.getKey(), entry.getValue());
  }
  // set the hadoop configuration to thread local, so that FileFactory can use it.
  ThreadLocalSessionInfo.setConfigurationToCurrentThread(configuration);
  CarbonTableCacheModel cache = carbonTableReader.getCarbonCache(schemaTableName, location, configuration);
  Expression filters = PrestoFilterUtil.parseFilterExpression(predicate);
  try {
    List<CarbonLocalMultiBlockSplit> splits = carbonTableReader.getInputSplits(cache, filters, filteredPartitions, configuration);
    ImmutableList.Builder<ConnectorSplit> cSplits = ImmutableList.builder();
    long index = 0;
    for (CarbonLocalMultiBlockSplit split : splits) {
      index++;
      Properties properties = new Properties();
      for (Map.Entry<String, String> entry : table.getStorage().getSerdeParameters().entrySet()) {
        properties.setProperty(entry.getKey(), entry.getValue());
      }
      properties.setProperty("tablePath", cache.getCarbonTable().getTablePath());
      properties.setProperty("carbonSplit", split.getJsonString());
      properties.setProperty("queryId", queryId);
      properties.setProperty("index", String.valueOf(index));
      cSplits.add(new HiveSplit(schemaTableName.getSchemaName(), schemaTableName.getTableName(),
          schemaTableName.getTableName(), cache.getCarbonTable().getTablePath(),
          0, 0, 0, 0, properties, new ArrayList<>(), getHostAddresses(split.getLocations()),
          OptionalInt.empty(), false, TableToPartitionMapping.empty(), Optional.empty(), false, Optional.empty()));
    }
    statisticRecorder.logStatisticsAsTableDriver();
    statistic.addStatistics(QueryStatisticsConstants.BLOCK_IDENTIFICATION, System.currentTimeMillis());
    statisticRecorder.recordStatisticsForDriver(statistic, queryId);
    statisticRecorder.logStatisticsAsTableDriver();
    return new FixedSplitSource(cSplits.build());
  } catch (Exception ex) {
    throw new RuntimeException(ex.getMessage(), ex);
  }
}
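getSplits relies on a getHostAddresses helper that is not shown in this excerpt. Its CarbonData implementation is not reproduced here; a plausible sketch, assuming the split locations are plain "host" or "host:port" strings, maps them onto the Presto SPI's HostAddress type that HiveSplit uses for scheduling locality.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import io.prestosql.spi.HostAddress;

// Hypothetical sketch of the unshown helper: one HostAddress per location string.
final class SplitLocations {
  static List<HostAddress> getHostAddresses(String[] locations) {
    return Arrays.stream(locations)
        .map(HostAddress::fromString)
        .collect(Collectors.toList());
  }
}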
Use of io.prestosql.spi.connector.SchemaTableName in project carbondata by apache.
From the class CarbonDataMetaData, method beginInsert:
@Override
public CarbonDataInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle) {
  HiveInsertTableHandle hiveInsertTableHandle = super.beginInsert(session, tableHandle);
  SchemaTableName tableName = hiveInsertTableHandle.getSchemaTableName();
  Optional<Table> table = this.metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName());
  Path outputPath = new Path(hiveInsertTableHandle.getLocationHandle().getJsonSerializableTargetPath());
  JobConf jobConf = ConfigurationUtils.toJobConf(this.hdfsEnvironment.getConfiguration(
      new HdfsEnvironment.HdfsContext(session, hiveInsertTableHandle.getSchemaName(), hiveInsertTableHandle.getTableName()),
      new Path(hiveInsertTableHandle.getLocationHandle().getJsonSerializableWritePath())));
  jobConf.set("location", outputPath.toString());
  Properties hiveSchema = MetastoreUtil.getHiveSchema(table.get());
  try {
    CarbonLoadModel carbonLoadModel = HiveCarbonUtil.getCarbonLoadModel(hiveSchema, jobConf);
    CarbonTableOutputFormat.setLoadModel(jobConf, carbonLoadModel);
  } catch (IOException ex) {
    LOG.error("Error while creating carbon load model", ex);
    throw new RuntimeException(ex);
  }
  try {
    carbonOutputCommitter = new MapredCarbonOutputCommitter();
    jobContext = new JobContextImpl(jobConf, new JobID());
    carbonOutputCommitter.setupJob(jobContext);
    ThreadLocalSessionInfo.setConfigurationToCurrentThread(jobConf);
  } catch (IOException e) {
    LOG.error("error setting the output committer", e);
    throw new RuntimeException("error setting the output committer");
  }
  return new CarbonDataInsertTableHandle(
      hiveInsertTableHandle.getSchemaTableName().getSchemaName(),
      hiveInsertTableHandle.getTableName(),
      hiveInsertTableHandle.getInputColumns(),
      hiveInsertTableHandle.getPageSinkMetadata(),
      hiveInsertTableHandle.getLocationHandle(),
      hiveInsertTableHandle.getBucketProperty(),
      hiveInsertTableHandle.getTableStorageFormat(),
      hiveInsertTableHandle.getPartitionStorageFormat(),
      ImmutableMap.of(CarbonTableConfig.CARBON_PRESTO_LOAD_MODEL,
          jobContext.getConfiguration().get(CarbonTableOutputFormat.LOAD_MODEL)));
}
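The load model round trip is worth calling out: setLoadModel serializes the CarbonLoadModel into the JobConf, and beginInsert then reads the same serialized string back out of the job configuration and forwards it to workers through the handle's extra property map under CARBON_PRESTO_LOAD_MODEL. A minimal sketch of that round trip, using only calls that appear above (error handling omitted):

// Serialize the load model into the conf, then read the serialized form back
// out; the resulting string is what the insert handle carries to workers.
CarbonLoadModel carbonLoadModel = HiveCarbonUtil.getCarbonLoadModel(hiveSchema, jobConf);
CarbonTableOutputFormat.setLoadModel(jobConf, carbonLoadModel);        // writes the LOAD_MODEL key
String serializedLoadModel = jobConf.get(CarbonTableOutputFormat.LOAD_MODEL);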