Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix: class CatalogThriftHiveMetastore, method get_partitions_by_names.
/**
* {@inheritDoc}
*/
@Override
public List<Partition> get_partitions_by_names(final String dbName, final String tblName,
                                               final List<String> names) throws TException {
    return requestWrapper("get_partitions_by_names", new Object[] { dbName, tblName, names }, () -> {
        final String databaseName = normalizeIdentifier(dbName);
        final String tableName = normalizeIdentifier(tblName);
        final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
        final GetPartitionsRequestDto dto = new GetPartitionsRequestDto(null, names, true, false);
        final List<PartitionDto> metacatPartitions = partV1.getPartitionsForRequest(
            catalogName, databaseName, tableName, null, null, null, null, false, dto);
        final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
        for (PartitionDto partition : metacatPartitions) {
            result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
        }
        return result;
    });
}
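From the caller's side, this Thrift endpoint is what a standard metastore client hits when it looks partitions up by name. A minimal sketch, assuming a reachable thrift URI and illustrative database, table, and partition names (none of these appear in the Metacat source):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class GetPartitionsByNamesExample {
    public static void main(String[] args) throws Exception {
        final HiveConf conf = new HiveConf();
        // Hypothetical endpoint; in a real setup this would point at the Metacat thrift gateway.
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
        final HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
            // Partition names use the key=value[/key=value...] encoding expected by the metastore.
            final List<String> names = Arrays.asList("dateint=20240101/hour=00", "dateint=20240101/hour=01");
            final List<Partition> partitions = client.getPartitionsByNames("mydb", "mytable", names);
            for (Partition p : partitions) {
                System.out.println(p.getSd().getLocation());
            }
        } finally {
            client.close();
        }
    }
}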
Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix: class CatalogThriftHiveMetastore, method get_partitions_ps.
/**
* {@inheritDoc}
*/
@Override
public List<Partition> get_partitions_ps(final String dbName, final String tblName,
                                         final List<String> partVals, final short maxParts) throws TException {
    return requestWrapper("get_partitions_ps", new Object[] { dbName, tblName, partVals, maxParts }, () -> {
        final String databaseName = normalizeIdentifier(dbName);
        final String tableName = normalizeIdentifier(tblName);
        final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
        final String partFilter = partition_values_to_partition_filter(tableDto, partVals);
        final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
        final List<PartitionDto> metacatPartitions =
            partV1.getPartitions(catalogName, dbName, tblName, partFilter, null, null, null, maxValues, false);
        final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
        for (PartitionDto partition : metacatPartitions) {
            result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
        }
        return result;
    });
}
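The same partial-specification lookup can be exercised from a plain HiveMetaStoreClient, where the partVals prefix plays the role that the partition filter does in the server code above. A minimal sketch, assuming a reachable thrift URI and a hypothetical table partitioned by dateint and hour:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class ListPartitionsPsExample {
    public static void main(String[] args) throws Exception {
        final HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
        final HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
            // Partial spec: only the leading partition value(s) are supplied;
            // the server matches every partition whose values start with this prefix.
            final List<String> partVals = Arrays.asList("20240101");
            final List<Partition> partitions =
                client.listPartitions("mydb", "mytable", partVals, (short) 10);
            partitions.forEach(p -> System.out.println(p.getValues()));
        } finally {
            client.close();
        }
    }
}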
Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix: class CatalogThriftHiveMetastore, method get_partitions.
/**
* {@inheritDoc}
*/
@Override
public List<Partition> get_partitions(final String dbName, final String tblName, final short maxParts)
        throws TException {
    return requestWrapper("get_partitions", new Object[] { dbName, tblName, maxParts }, () -> {
        final String databaseName = normalizeIdentifier(dbName);
        final String tableName = normalizeIdentifier(tblName);
        final TableDto tableDto = v1.getTable(catalogName, databaseName, tableName, true, false, false);
        final Integer maxValues = maxParts > 0 ? Short.toUnsignedInt(maxParts) : null;
        final List<PartitionDto> metacatPartitions =
            partV1.getPartitions(catalogName, dbName, tblName, null, null, null, null, maxValues, false);
        final List<Partition> result = Lists.newArrayListWithCapacity(metacatPartitions.size());
        for (PartitionDto partition : metacatPartitions) {
            result.add(hiveConverters.metacatToHivePartition(partition, tableDto));
        }
        return result;
    });
}
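For completeness, the unfiltered listing maps to listPartitions on the client side. A minimal sketch, again with a hypothetical endpoint and table names, showing the usual convention that a non-positive maxParts (typically -1) asks for all partitions, which is what the "maxParts > 0 ? ... : null" handling above caters to:

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

public class ListPartitionsExample {
    public static void main(String[] args) throws Exception {
        final HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
        final HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
            // -1 requests every partition; a positive value caps the result size.
            final List<Partition> all = client.listPartitions("mydb", "mytable", (short) -1);
            final List<Partition> firstFive = client.listPartitions("mydb", "mytable", (short) 5);
            System.out.println(all.size() + " partitions total, capped listing returned " + firstFive.size());
        } finally {
            client.close();
        }
    }
}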
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache: class ImportSemanticAnalyzer, method prepareImport.
/**
 * The same code is used from both "repl load" and "import".
 * "repl load" now supports two modes, "repl load dbName [location]" and
 * "repl load [location]", in which case the database name has to be taken from the table metadata
 * by default and then overridden if something is specified on the command line.
 *
 * Hence, for import to work correctly, we have to pass in the SessionState default DB via the
 * parsedDbName parameter.
 */
public static boolean prepareImport(boolean isImportCmd, boolean isLocationSet, boolean isExternalSet,
        boolean isPartSpecSet, boolean waitOnPrecursor, String parsedLocation, String parsedTableName,
        String overrideDBName, LinkedHashMap<String, String> parsedPartSpec, String fromLocn,
        EximUtil.SemanticAnalyzerWrapperContext x, UpdatedMetaDataTracker updatedMetadata)
        throws IOException, MetaException, HiveException, URISyntaxException {
    // initialize load path
    URI fromURI = EximUtil.getValidatedURI(x.getConf(), stripQuotes(fromLocn));
    Path fromPath = new Path(fromURI.getScheme(), fromURI.getAuthority(), fromURI.getPath());
    FileSystem fs = FileSystem.get(fromURI, x.getConf());
    x.getInputs().add(toReadEntity(fromPath, x.getConf()));
    MetaData rv;
    try {
        rv = EximUtil.readMetaData(fs, new Path(fromPath, EximUtil.METADATA_NAME));
    } catch (IOException e) {
        throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
    }
    if (rv.getTable() == null) {
        // nothing to do here, silently return.
        return false;
    }
    ReplicationSpec replicationSpec = rv.getReplicationSpec();
    if (replicationSpec.isNoop()) {
        // nothing to do here, silently return.
        x.getLOG().debug("Current update with ID:{} is noop", replicationSpec.getCurrentReplicationState());
        return false;
    }
    if (isImportCmd) {
        replicationSpec.setReplSpecType(ReplicationSpec.Type.IMPORT);
    }
    String dbname = rv.getTable().getDbName();
    if ((overrideDBName != null) && (!overrideDBName.isEmpty())) {
        // If the parsed statement contained a db.tablename specification, prefer that.
        dbname = overrideDBName;
    }
    // Create table associated with the import
    // Executed if relevant, and used to contain all the other details about the table if not.
    ImportTableDesc tblDesc;
    try {
        tblDesc = getBaseCreateTableDescFromTable(dbname, rv.getTable());
    } catch (Exception e) {
        throw new HiveException(e);
    }
    boolean isSourceMm = AcidUtils.isInsertOnlyTable(tblDesc.getTblProps());
    if ((replicationSpec != null) && replicationSpec.isInReplicationScope()) {
        tblDesc.setReplicationSpec(replicationSpec);
        StatsSetupConst.setBasicStatsState(tblDesc.getTblProps(), StatsSetupConst.FALSE);
    }
    if (isExternalSet) {
        if (isSourceMm) {
            throw new SemanticException("Cannot import an MM table as external");
        }
        tblDesc.setExternal(isExternalSet);
        // This condition-check could have been avoided, but to honour the old
        // default of not calling if it wasn't set, we retain that behaviour.
        // TODO: cleanup after verification that the outer if isn't really needed here
    }
    if (isLocationSet) {
        tblDesc.setLocation(parsedLocation);
        x.getInputs().add(toReadEntity(new Path(parsedLocation), x.getConf()));
    }
    if ((parsedTableName != null) && (!parsedTableName.isEmpty())) {
        tblDesc.setTableName(parsedTableName);
    }
    List<AddPartitionDesc> partitionDescs = new ArrayList<AddPartitionDesc>();
    Iterable<Partition> partitions = rv.getPartitions();
    for (Partition partition : partitions) {
        // TODO: this should ideally not create AddPartitionDesc per partition
        AddPartitionDesc partsDesc = getBaseAddPartitionDescFromPartition(fromPath, dbname, tblDesc, partition);
        if ((replicationSpec != null) && replicationSpec.isInReplicationScope()) {
            StatsSetupConst.setBasicStatsState(partsDesc.getPartition(0).getPartParams(), StatsSetupConst.FALSE);
        }
        partitionDescs.add(partsDesc);
    }
    if (isPartSpecSet) {
        // The import specification asked for only a particular partition to be loaded.
        // We load only that, and ignore all the others.
        boolean found = false;
        for (Iterator<AddPartitionDesc> partnIter = partitionDescs.listIterator(); partnIter.hasNext(); ) {
            AddPartitionDesc addPartitionDesc = partnIter.next();
            if (!found && addPartitionDesc.getPartition(0).getPartSpec().equals(parsedPartSpec)) {
                found = true;
            } else {
                partnIter.remove();
            }
        }
        if (!found) {
            throw new SemanticException(
                ErrorMsg.INVALID_PARTITION.getMsg(" - Specified partition not found in import directory"));
        }
    }
    if (tblDesc.getTableName() == null) {
        // Either we got the table name from the IMPORT statement (first priority)
        // or from the export dump.
        throw new SemanticException(ErrorMsg.NEED_TABLE_SPECIFICATION.getMsg());
    } else {
        x.getConf().set("import.destination.table", tblDesc.getTableName());
        for (AddPartitionDesc addPartitionDesc : partitionDescs) {
            addPartitionDesc.setTableName(tblDesc.getTableName());
        }
    }
    Warehouse wh = new Warehouse(x.getConf());
    Table table = tableIfExists(tblDesc, x.getHive());
    boolean tableExists = false;
    if (table != null) {
        checkTable(table, tblDesc, replicationSpec, x.getConf());
        x.getLOG().debug("table " + tblDesc.getTableName() + " exists: metadata checked");
        tableExists = true;
    }
    // Initialize with 0 for non-ACID and non-MM tables.
    Long writeId = 0L;
    if (((table != null) && AcidUtils.isTransactionalTable(table))
            || AcidUtils.isTablePropertyTransactional(tblDesc.getTblProps())) {
        // Explain plan doesn't open a txn and hence no need to allocate write id.
        if (x.getCtx().getExplainConfig() == null) {
            writeId = SessionState.get().getTxnMgr().getTableWriteId(tblDesc.getDatabaseName(), tblDesc.getTableName());
        }
    }
    int stmtId = 0;
    /*
    if (isAcid(writeId)) {
        tblDesc.setInitialMmWriteId(writeId);
    }
    */
    if (!replicationSpec.isInReplicationScope()) {
        createRegularImportTasks(tblDesc, partitionDescs, isPartSpecSet, replicationSpec, table,
            fromURI, fs, wh, x, writeId, stmtId, isSourceMm);
    } else {
        createReplImportTasks(tblDesc, partitionDescs, replicationSpec, waitOnPrecursor, table,
            fromURI, fs, wh, x, writeId, stmtId, isSourceMm, updatedMetadata);
    }
    return tableExists;
}
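One detail worth calling out: the single-partition pruning above compares the parsed partition spec against each AddPartitionDesc's spec with Map.equals, so the match depends only on the key/value pairs, not on insertion order. A minimal standalone illustration (the partition column names are hypothetical):

import java.util.LinkedHashMap;
import java.util.Map;

public class PartSpecEqualityExample {
    public static void main(String[] args) {
        final Map<String, String> parsed = new LinkedHashMap<>();
        parsed.put("dateint", "20240101");
        parsed.put("hour", "00");

        final Map<String, String> fromDump = new LinkedHashMap<>();
        fromDump.put("hour", "00");
        fromDump.put("dateint", "20240101");

        // Map.equals compares entry sets, so insertion order does not matter;
        // only the key/value pairs have to line up for the partition to be kept.
        System.out.println(parsed.equals(fromDump)); // true
    }
}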
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache: class TestAddPartitions, method testAddPartitionsNullValue.
@Test
public void testAddPartitionsNullValue() throws Exception {
    createTable();
    Partition partition = buildPartition(DB_NAME, TABLE_NAME, null);
    List<Partition> partitions = new ArrayList<>();
    partitions.add(partition);
    try {
        client.add_partitions(partitions);
    } catch (NullPointerException e) {
        // TODO: This works different in remote and embedded mode.
        // In embedded mode, no exception happens.
    }
}
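The buildPartition helper used by this test is not shown in the excerpt. A minimal sketch of assembling the Thrift Partition by hand, enough to reproduce the null-values case; the column, location, and naming choices here are illustrative, not the test's actual helper:

import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class BuildPartitionSketch {
    // Builds a Partition for the given values; passing null for the values
    // reproduces the case exercised by testAddPartitionsNullValue.
    static Partition buildPartition(String dbName, String tableName, List<String> values) {
        final StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(Collections.singletonList(new FieldSchema("id", "int", null)));
        sd.setLocation("file:/tmp/" + tableName);
        sd.setSerdeInfo(new SerDeInfo());

        final Partition partition = new Partition();
        partition.setDbName(dbName);
        partition.setTableName(tableName);
        partition.setValues(values);
        partition.setSd(sd);
        return partition;
    }
}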