Search in sources:

Example 1 with GetTableRequest

use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.

From the class Hive, method getTable.

/**
 * Returns metadata of the table.
 *
 * @param dbName
 *          the name of the database
 * @param tableName
 *          the name of the table
 * @param metaTableName
 *          the name of the metadata table (null if none is requested)
 * @param throwException
 *          controls whether an exception is thrown or a null value is returned
 *          when the table does not exist
 * @param checkTransactional
 *          checks whether the table stats are valid (i.e. compliant
 *          with the snapshot isolation of) for the current transaction.
 * @param getColumnStats
 *          get column statistics if available
 * @return the table, or if throwException is false, a null value.
 * @throws HiveException
 */
public Table getTable(final String dbName, final String tableName, String metaTableName, boolean throwException, boolean checkTransactional, boolean getColumnStats) throws HiveException {
    if (tableName == null || tableName.equals("")) {
        throw new HiveException("empty table creation??");
    }
    // Get the table from metastore
    org.apache.hadoop.hive.metastore.api.Table tTable = null;
    try {
        // Note: this is currently called w/true from StatsOptimizer only.
        GetTableRequest request = new GetTableRequest(dbName, tableName);
        request.setCatName(getDefaultCatalog(conf));
        request.setGetColumnStats(getColumnStats);
        request.setEngine(Constants.HIVE_ENGINE);
        if (checkTransactional) {
            // Attach the valid write-id list of the current transaction (if any) so the
            // metastore can decide whether the cached stats are still valid under
            // snapshot isolation.
            ValidWriteIdList validWriteIdList = null;
            long txnId = SessionState.get() != null && SessionState.get().getTxnMgr() != null ? SessionState.get().getTxnMgr().getCurrentTxnId() : 0;
            if (txnId > 0) {
                validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf, dbName, tableName);
            }
            request.setValidWriteIdList(validWriteIdList != null ? validWridList(validWriteIdList) : null);
        }
        tTable = getMSC().getTable(request);
    } catch (NoSuchObjectException e) {
        // Table does not exist: either signal the caller or return null, per throwException.
        if (throwException) {
            throw new InvalidTableException(tableName);
        }
        return null;
    } catch (Exception e) {
        throw new HiveException("Unable to fetch table " + tableName + ". " + e.getMessage(), e);
    }
    // For non-views, we need to do some extra fixes
    if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) {
        // Fix the non-printable chars
        Map<String, String> parameters = tTable.getSd().getParameters();
        String sf = parameters != null ? parameters.get(SERIALIZATION_FORMAT) : null;
        if (sf != null) {
            char[] b = sf.toCharArray();
            if ((b.length == 1) && (b[0] < 10)) {
                // ^A, ^B, ^C, ^D, \t  — store the control char as its decimal code instead
                parameters.put(SERIALIZATION_FORMAT, Integer.toString(b[0]));
            }
        }
        // Replace MetadataTypedColumnsetSerDe with LazySimpleSerDe for simple
        // (non-nested) column types; such tables were written by an
        // earlier version of Hive.
        if (org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.class.getName().equals(tTable.getSd().getSerdeInfo().getSerializationLib()) && tTable.getSd().getColsSize() > 0 && tTable.getSd().getCols().get(0).getType().indexOf('<') == -1) {
            tTable.getSd().getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
        }
    }
    Table t = new Table(tTable);
    if (metaTableName != null) {
        // Metadata tables are only valid for storage handlers that support them
        // (and only for names the handler recognizes).
        if (t.getStorageHandler() == null || !t.getStorageHandler().isMetadataTableSupported()) {
            throw new SemanticException(ErrorMsg.METADATA_TABLE_NOT_SUPPORTED, t.getTableName());
        }
        if (!t.getStorageHandler().isValidMetadataTable(metaTableName)) {
            throw new SemanticException(ErrorMsg.INVALID_METADATA_TABLE_NAME, metaTableName);
        }
    }
    t.setMetaTable(metaTableName);
    return t;
}
Also used : HiveMaterializedViewUtils.extractTable(org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable) LazySimpleSerDe(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) ExecutionException(java.util.concurrent.ExecutionException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveMetaException(org.apache.hadoop.hive.metastore.HiveMetaException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) TApplicationException(org.apache.thrift.TApplicationException) TException(org.apache.thrift.TException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) FileNotFoundException(java.io.FileNotFoundException) JDODataStoreException(javax.jdo.JDODataStoreException) GetTableRequest(org.apache.hadoop.hive.metastore.api.GetTableRequest) ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)

Example 2 with GetTableRequest

use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.

From the class SessionHiveMetaStoreClient, method getTable.

// Need to override this one too or dropTable breaks because it doesn't find the table when checks
// before the drop.
@Override
public org.apache.hadoop.hive.metastore.api.Table getTable(String catName, String dbName, String tableName, boolean getColStats, String engine) throws TException {
    GetTableRequest request = new GetTableRequest(dbName, tableName);
    request.setGetColumnStats(getColStats);
    request.setEngine(engine);
    // The default catalog is resolved through this client's own request-based
    // getTable; any other catalog is tagged explicitly and sent to the parent client.
    if (DEFAULT_CATALOG_NAME.equals(catName)) {
        return getTable(request);
    }
    request.setCatName(catName);
    return super.getTable(request);
}
Also used : GetTableRequest(org.apache.hadoop.hive.metastore.api.GetTableRequest)

Example 3 with GetTableRequest

use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.

From the class TestHiveMetaStoreClient, method getTable.

/**
 * Fetches table metadata by building a {@link GetTableRequest}, optionally
 * asking for column statistics computed for the given engine.
 */
public Table getTable(String dbName, String tableName, boolean getColumnStats, String engine) throws MetaException, TException, NoSuchObjectException {
    GetTableRequest req = new GetTableRequest(dbName, tableName);
    req.setGetColumnStats(getColumnStats);
    req.setEngine(engine);
    return getTable(req);
}
Also used : GetTableRequest(org.apache.hadoop.hive.metastore.api.GetTableRequest)

Example 4 with GetTableRequest

use of org.apache.hadoop.hive.metastore.api.GetTableRequest in project hive by apache.

From the class TestCleaner, method NoCleanupAfterMajorCompaction.

@Test
public void NoCleanupAfterMajorCompaction() throws Exception {
    Map<String, String> parameters = new HashMap<>();
    // Create the table with no_cleanup=true: the Cleaner must leave its files alone.
    parameters.put("no_cleanup", "true");
    Table t = newTable("default", "dcamc", false, parameters);
    addBaseFile(t, null, 20L, 20);
    addDeltaFile(t, null, 21L, 22L, 2);
    addDeltaFile(t, null, 23L, 24L, 2);
    addBaseFile(t, null, 25L, 25);
    burnThroughTransactions("default", "dcamc", 25);
    CompactionRequest rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR);
    compactInTxn(rqst);
    startCleaner();
    // The compaction should show as succeeded even though cleanup was skipped.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals(1, rsp.getCompactsSize());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
    // Check that the files are not removed: all 4 directories must still exist.
    List<Path> paths = getDirectories(conf, t, null);
    Assert.assertEquals(4, paths.size());
    // Now flip no_cleanup to false and compact again: this time cleanup should run.
    t = ms.getTable(new GetTableRequest("default", "dcamc"));
    t.getParameters().put("no_cleanup", "false");
    ms.alter_table("default", "dcamc", t);
    rqst = new CompactionRequest("default", "dcamc", CompactionType.MAJOR);
    compactInTxn(rqst);
    startCleaner();
    // A second succeeded compaction should now be recorded.
    rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals(2, rsp.getCompactsSize());
    Assert.assertEquals(TxnStore.SUCCEEDED_RESPONSE, rsp.getCompacts().get(0).getState());
    // Check that the obsolete files WERE removed: only the latest base remains.
    paths = getDirectories(conf, t, null);
    Assert.assertEquals(1, paths.size());
    Assert.assertEquals("base_25", paths.get(0).getName());
}
Also used : Path(org.apache.hadoop.fs.Path) GetTableRequest(org.apache.hadoop.hive.metastore.api.GetTableRequest) Table(org.apache.hadoop.hive.metastore.api.Table) HashMap(java.util.HashMap) ShowCompactResponse(org.apache.hadoop.hive.metastore.api.ShowCompactResponse) ShowCompactRequest(org.apache.hadoop.hive.metastore.api.ShowCompactRequest) CompactionRequest(org.apache.hadoop.hive.metastore.api.CompactionRequest) Test(org.junit.Test)

Aggregations

GetTableRequest (org.apache.hadoop.hive.metastore.api.GetTableRequest)4 FileNotFoundException (java.io.FileNotFoundException)1 IOException (java.io.IOException)1 UnknownHostException (java.net.UnknownHostException)1 HashMap (java.util.HashMap)1 ExecutionException (java.util.concurrent.ExecutionException)1 JDODataStoreException (javax.jdo.JDODataStoreException)1 Path (org.apache.hadoop.fs.Path)1 ValidWriteIdList (org.apache.hadoop.hive.common.ValidWriteIdList)1 HiveMetaException (org.apache.hadoop.hive.metastore.HiveMetaException)1 AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException)1 CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest)1 InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException)1 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)1 NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException)1 ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest)1 ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse)1 Table (org.apache.hadoop.hive.metastore.api.Table)1 LockException (org.apache.hadoop.hive.ql.lockmgr.LockException)1 HiveMaterializedViewUtils.extractTable (org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable)1