Example 16 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.

From class TestInitiator, method noCompactWhenNoCompactSet.

@Test
public void noCompactWhenNoCompactSet() throws Exception {
    // Mark the table so the Initiator must skip it, then lower the aborted-txn threshold.
    Map<String, String> parameters = new HashMap<String, String>(1);
    parameters.put("NO_AUTO_COMPACTION", "true");
    Table t = newTable("default", "ncwncs", false, parameters);
    HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
    for (int i = 0; i < 11; i++) {
        long txnid = openTxn();
        LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
        comp.setTablename("ncwncs");
        comp.setOperationType(DataOperationType.UPDATE);
        List<LockComponent> components = new ArrayList<LockComponent>(1);
        components.add(comp);
        LockRequest req = new LockRequest(components, "me", "localhost");
        req.setTxnid(txnid);
        LockResponse res = txnHandler.lock(req);
        txnHandler.abortTxn(new AbortTxnRequest(txnid));
    }
    startInitiator();
    // Eleven aborted transactions exceed the threshold of ten, yet nothing may be queued
    // because NO_AUTO_COMPACTION is set on the table.
    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
    Assert.assertEquals(0, rsp.getCompactsSize());
}
Also used: Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse), ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), Test (org.junit.Test)
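
The test above hinges on the NO_AUTO_COMPACTION table parameter, which the Initiator consults before queueing any work. Below is a minimal sketch, not taken from the test, of setting that flag on an existing table through the metastore client; the table name and configuration are illustrative assumptions.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class DisableAutoCompaction {
    public static void main(String[] args) throws Exception {
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
            // Fetch the table, add the parameter, and write it back.
            Table existing = client.getTable("default", "ncwncs");
            existing.putToParameters("NO_AUTO_COMPACTION", "true"); // Thrift-generated map helper
            client.alter_table("default", "ncwncs", existing);
        } finally {
            client.close();
        }
    }
}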

Example 17 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.

From class TestInitiator, method cleanEmptyAbortedTxns.

@Test
public void cleanEmptyAbortedTxns() throws Exception {
    // Test that we are cleaning aborted transactions with no components left in txn_components.
    // Put one aborted transaction with an entry in txn_components to make sure we don't
    // accidentally clean it too.
    Table t = newTable("default", "ceat", false);
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ceat");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
    for (int i = 0; i < TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50; i++) {
        txnid = openTxn();
        txnHandler.abortTxn(new AbortTxnRequest(txnid));
    }
    GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
    Assert.assertEquals(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1, openTxns.getOpen_txnsSize());
    startInitiator();
    openTxns = txnHandler.getOpenTxns();
    Assert.assertEquals(1, openTxns.getOpen_txnsSize());
}
Also used: Table (org.apache.hadoop.hive.metastore.api.Table), LockComponent (org.apache.hadoop.hive.metastore.api.LockComponent), LockResponse (org.apache.hadoop.hive.metastore.api.LockResponse), ArrayList (java.util.ArrayList), GetOpenTxnsResponse (org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse), AbortTxnRequest (org.apache.hadoop.hive.metastore.api.AbortTxnRequest), LockRequest (org.apache.hadoop.hive.metastore.api.LockRequest), Test (org.junit.Test)
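
The single transaction left open after cleanup is the one that still has a txn_components entry. A hypothetical extension of the test, assuming the Thrift-generated getOpen_txns() accessor, could pin that down by remembering the first transaction id before the loop reuses the txnid variable:

    // Sketch only: capture the component-bearing txn id before the loop above overwrites txnid,
    // then confirm it is the transaction the Initiator kept.
    long survivingTxnId = txnid;
    // ... open and abort the empty transactions, run startInitiator(), refresh openTxns ...
    Assert.assertTrue(openTxns.getOpen_txns().contains(survivingTxnId));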

Example 18 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.

From class GetColumnsOperation, method runInternal.

@Override
public void runInternal() throws HiveSQLException {
    setState(OperationState.RUNNING);
    try {
        IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
        String schemaPattern = convertSchemaPattern(schemaName);
        String tablePattern = convertIdentifierPattern(tableName, true);
        Pattern columnPattern = null;
        if (columnName != null) {
            columnPattern = Pattern.compile(convertIdentifierPattern(columnName, false));
        }
        List<String> dbNames = metastoreClient.getDatabases(schemaPattern);
        Collections.sort(dbNames);
        Map<String, List<String>> db2Tabs = new HashMap<>();
        for (String dbName : dbNames) {
            List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
            Collections.sort(tableNames);
            db2Tabs.put(dbName, tableNames);
        }
        if (isAuthV2Enabled()) {
            List<HivePrivilegeObject> privObjs = getPrivObjs(db2Tabs);
            String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName + ", tablePattern : " + tableName;
            authorizeMetaGets(HiveOperationType.GET_COLUMNS, privObjs, cmdStr);
        }
        int maxBatchSize = SessionState.get().getConf().getIntVar(ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
        for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
            String dbName = dbTabs.getKey();
            List<String> tableNames = dbTabs.getValue();
            for (Table table : new TableIterable(metastoreClient, dbName, tableNames, maxBatchSize)) {
                TableSchema schema = new TableSchema(metastoreClient.getSchema(dbName, table.getTableName()));
                List<SQLPrimaryKey> primaryKeys = metastoreClient.getPrimaryKeys(new PrimaryKeysRequest(dbName, table.getTableName()));
                Set<String> pkColNames = new HashSet<>();
                for (SQLPrimaryKey key : primaryKeys) {
                    pkColNames.add(key.getColumn_name().toLowerCase());
                }
                for (ColumnDescriptor column : schema.getColumnDescriptors()) {
                    if (columnPattern != null && !columnPattern.matcher(column.getName()).matches()) {
                        continue;
                    }
                    Object[] rowData = new Object[] {
                        null,                                               // TABLE_CAT
                        table.getDbName(),                                  // TABLE_SCHEM
                        table.getTableName(),                               // TABLE_NAME
                        column.getName(),                                   // COLUMN_NAME
                        column.getType().toJavaSQLType(),                   // DATA_TYPE
                        column.getTypeName(),                               // TYPE_NAME
                        column.getTypeDescriptor().getColumnSize(),         // COLUMN_SIZE
                        null,                                               // BUFFER_LENGTH, unused
                        column.getTypeDescriptor().getDecimalDigits(),      // DECIMAL_DIGITS
                        column.getType().getNumPrecRadix(),                 // NUM_PREC_RADIX
                        pkColNames.contains(column.getName().toLowerCase())
                                ? DatabaseMetaData.columnNoNulls
                                : DatabaseMetaData.columnNullable,          // NULLABLE
                        column.getComment(),                                // REMARKS
                        null,                                               // COLUMN_DEF
                        null,                                               // SQL_DATA_TYPE
                        null,                                               // SQL_DATETIME_SUB
                        null,                                               // CHAR_OCTET_LENGTH
                        column.getOrdinalPosition(),                        // ORDINAL_POSITION
                        pkColNames.contains(column.getName().toLowerCase()) ? "NO" : "YES",  // IS_NULLABLE
                        null,                                               // SCOPE_CATALOG
                        null,                                               // SCOPE_SCHEMA
                        null,                                               // SCOPE_TABLE
                        null,                                               // SOURCE_DATA_TYPE
                        "NO"                                                // IS_AUTO_INCREMENT
                    };
                    rowSet.addRow(rowData);
                }
            }
        }
        setState(OperationState.FINISHED);
    } catch (Exception e) {
        setState(OperationState.ERROR);
        throw new HiveSQLException(e);
    }
}
Also used: Pattern (java.util.regex.Pattern), SQLPrimaryKey (org.apache.hadoop.hive.metastore.api.SQLPrimaryKey), Table (org.apache.hadoop.hive.metastore.api.Table), TableSchema (org.apache.hive.service.cli.TableSchema), HashMap (java.util.HashMap), ColumnDescriptor (org.apache.hive.service.cli.ColumnDescriptor), HiveSQLException (org.apache.hive.service.cli.HiveSQLException), HivePrivilegeObject (org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject), TableIterable (org.apache.hadoop.hive.ql.metadata.TableIterable), IMetaStoreClient (org.apache.hadoop.hive.metastore.IMetaStoreClient), PrimaryKeysRequest (org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest), ArrayList (java.util.ArrayList), List (java.util.List), HashSet (java.util.HashSet)
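
GetColumnsOperation is the server-side half of a JDBC metadata lookup: the Hive JDBC driver's DatabaseMetaData.getColumns() call is answered with the rows built in runInternal() above. A plain-JDBC sketch of the client side follows; the connection URL and database name are assumptions.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class ShowHiveColumns {
    public static void main(String[] args) throws Exception {
        // Adjust the HiveServer2 URL for your environment; "default" is used as both schema and pattern here.
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             ResultSet rs = conn.getMetaData().getColumns(null, "default", "%", "%")) {
            while (rs.next()) {
                // These column labels correspond to the rowData layout assembled in runInternal().
                System.out.printf("%s.%s : %s (nullable=%d)%n",
                        rs.getString("TABLE_NAME"), rs.getString("COLUMN_NAME"),
                        rs.getString("TYPE_NAME"), rs.getInt("NULLABLE"));
            }
        }
    }
}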

Example 19 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project storm by apache.

From class HiveSetupUtil, method createDbAndTable.

public static void createDbAndTable(HiveConf conf, String databaseName, String tableName, List<String> partVals, String[] colNames, String[] colTypes, String[] partNames, String dbLocation) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
        Database db = new Database();
        db.setName(databaseName);
        db.setLocationUri(dbLocation);
        client.createDatabase(db);
        Table tbl = new Table();
        tbl.setDbName(databaseName);
        tbl.setTableName(tableName);
        tbl.setTableType(TableType.MANAGED_TABLE.toString());
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(getTableColumns(colNames, colTypes));
        sd.setNumBuckets(1);
        sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
        if (partNames != null && partNames.length != 0) {
            tbl.setPartitionKeys(getPartitionKeys(partNames));
        }
        tbl.setSd(sd);
        sd.setBucketCols(new ArrayList<String>(2));
        sd.setSerdeInfo(new SerDeInfo());
        sd.getSerdeInfo().setName(tbl.getTableName());
        sd.getSerdeInfo().setParameters(new HashMap<String, String>());
        sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
        sd.getSerdeInfo().setSerializationLib(OrcSerde.class.getName());
        sd.setInputFormat(OrcInputFormat.class.getName());
        sd.setOutputFormat(OrcOutputFormat.class.getName());
        Map<String, String> tableParams = new HashMap<String, String>();
        tbl.setParameters(tableParams);
        client.createTable(tbl);
        try {
            if (partVals != null && partVals.size() > 0) {
                addPartition(client, tbl, partVals);
            }
        } catch (AlreadyExistsException e) {
            // The partition already exists; nothing further to do for test setup.
        }
    } finally {
        client.close();
    }
}
Also used: HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient), Table (org.apache.hadoop.hive.metastore.api.Table), AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException), HashMap (java.util.HashMap), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), IMetaStoreClient (org.apache.hadoop.hive.metastore.IMetaStoreClient), OrcOutputFormat (org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat), OrcSerde (org.apache.hadoop.hive.ql.io.orc.OrcSerde), OrcInputFormat (org.apache.hadoop.hive.ql.io.orc.OrcInputFormat), Database (org.apache.hadoop.hive.metastore.api.Database)
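
A hypothetical caller of the helper above, matching the signature shown; the database, table, columns, partition values and warehouse path are made-up values, not part of the Storm test code.

import java.util.Arrays;
import org.apache.hadoop.hive.conf.HiveConf;

public class HiveSetupExample {
    public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        HiveSetupUtil.createDbAndTable(conf,
                "testdb",                          // databaseName
                "stormevents",                     // tableName
                Arrays.asList("2024-01-01"),       // partVals, one value per partition column
                new String[] { "id", "msg" },      // colNames
                new String[] { "int", "string" },  // colTypes
                new String[] { "dt" },             // partNames
                "file:///tmp/warehouse/testdb");   // dbLocation
    }
}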

Example 20 with Table

Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.

From class AcidTableSerializer, method decode.

/** Returns the {@link AcidTable} instance decoded from a base 64 representation. */
public static AcidTable decode(String encoded) throws IOException {
    if (!encoded.startsWith(PROLOG_V1)) {
        throw new IllegalStateException("Unsupported version.");
    }
    encoded = encoded.substring(PROLOG_V1.length());
    byte[] decoded = Base64.decodeBase64(encoded);
    AcidTable table = null;
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(decoded))) {
        String databaseName = in.readUTF();
        String tableName = in.readUTF();
        boolean createPartitions = in.readBoolean();
        long transactionId = in.readLong();
        TableType tableType = TableType.valueOf(in.readByte());
        int thriftLength = in.readInt();
        table = new AcidTable(databaseName, tableName, createPartitions, tableType);
        table.setTransactionId(transactionId);
        Table metaTable = null;
        if (thriftLength > 0) {
            metaTable = new Table();
            try {
                byte[] thriftEncoded = new byte[thriftLength];
                in.readFully(thriftEncoded, 0, thriftLength);
                new TDeserializer(new TCompactProtocol.Factory()).deserialize(metaTable, thriftEncoded);
                table.setTable(metaTable);
            } catch (TException e) {
                throw new IOException("Error deserializing meta store table.", e);
            }
        }
    }
    return table;
}
Also used: TException (org.apache.thrift.TException), Table (org.apache.hadoop.hive.metastore.api.Table), TDeserializer (org.apache.thrift.TDeserializer), LoggerFactory (org.slf4j.LoggerFactory), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), ByteArrayInputStream (java.io.ByteArrayInputStream)
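
A minimal sketch of the guard at the top of decode(): input without the PROLOG_V1 prefix is rejected before any Base64 or Thrift work happens. The package names in the imports are assumptions about where the mutate client classes live.

import org.apache.hive.hcatalog.streaming.mutate.client.AcidTable;           // assumed package
import org.apache.hive.hcatalog.streaming.mutate.client.AcidTableSerializer; // assumed package

public class DecodeVersionCheck {
    public static void main(String[] args) throws Exception {
        try {
            // A payload without the version prolog never reaches the Thrift deserializer.
            AcidTable table = AcidTableSerializer.decode("not-a-v1-payload");
            System.out.println("decoded: " + table);
        } catch (IllegalStateException e) {
            System.out.println("rejected: " + e.getMessage()); // "Unsupported version."
        }
    }
}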

Aggregations

Table (org.apache.hadoop.hive.metastore.api.Table): 355
Test (org.junit.Test): 198
ArrayList (java.util.ArrayList): 169
StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor): 143
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 139
Partition (org.apache.hadoop.hive.metastore.api.Partition): 126
SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo): 123
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 56
HashMap (java.util.HashMap): 55
Path (org.apache.hadoop.fs.Path): 53
Database (org.apache.hadoop.hive.metastore.api.Database): 53
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 48
ShowCompactRequest (org.apache.hadoop.hive.metastore.api.ShowCompactRequest): 48
ShowCompactResponse (org.apache.hadoop.hive.metastore.api.ShowCompactResponse): 48
TException (org.apache.thrift.TException): 41
CompactionRequest (org.apache.hadoop.hive.metastore.api.CompactionRequest): 40
List (java.util.List): 38
ShowCompactResponseElement (org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement): 35
FileSystem (org.apache.hadoop.fs.FileSystem): 31
ColumnStatisticsDesc (org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc): 31