Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.
From the class TestInitiator, method noCompactWhenNoCompactSet.
@Test
public void noCompactWhenNoCompactSet() throws Exception {
  Map<String, String> parameters = new HashMap<String, String>(1);
  parameters.put("NO_AUTO_COMPACTION", "true");
  Table t = newTable("default", "ncwncs", false, parameters);
  HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10);
  // Abort more transactions than the threshold allows; because the table carries
  // NO_AUTO_COMPACTION=true, the Initiator must still not queue a compaction.
  for (int i = 0; i < 11; i++) {
    long txnid = openTxn();
    LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
    comp.setTablename("ncwncs");
    comp.setOperationType(DataOperationType.UPDATE);
    List<LockComponent> components = new ArrayList<LockComponent>(1);
    components.add(comp);
    LockRequest req = new LockRequest(components, "me", "localhost");
    req.setTxnid(txnid);
    LockResponse res = txnHandler.lock(req);
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  startInitiator();
  ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
  Assert.assertEquals(0, rsp.getCompactsSize());
}
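The newTable(...) helper that attaches these parameters is defined elsewhere in TestInitiator. As a rough illustration of what such a helper has to do with the metastore Table API, a minimal sketch follows; the method name, single-column layout, location, and msClient parameter are assumptions for the example, not code from the test.

// Hypothetical helper (not the test's actual newTable): registers a Table whose
// parameters carry the NO_AUTO_COMPACTION flag that the Initiator checks.
private Table createTableWithParameters(IMetaStoreClient msClient, String dbName, String tableName,
    Map<String, String> parameters) throws Exception {
  Table t = new Table();
  t.setDbName(dbName);
  t.setTableName(tableName);
  t.setParameters(parameters); // e.g. NO_AUTO_COMPACTION=true suppresses automatic compaction
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(Collections.singletonList(new FieldSchema("a", "int", null)));
  sd.setSerdeInfo(new SerDeInfo());
  sd.getSerdeInfo().setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setLocation("file:/tmp/" + dbName + "/" + tableName); // illustrative location
  t.setSd(sd);
  msClient.createTable(t);
  return t;
}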
Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.
From the class TestInitiator, method cleanEmptyAbortedTxns.
@Test
public void cleanEmptyAbortedTxns() throws Exception {
  // Test that we are cleaning aborted transactions with no components left in txn_components.
  // Put one aborted transaction with an entry in txn_components to make sure we don't
  // accidentally clean it too.
  Table t = newTable("default", "ceat", false);
  long txnid = openTxn();
  LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
  comp.setTablename("ceat");
  comp.setOperationType(DataOperationType.UPDATE);
  List<LockComponent> components = new ArrayList<LockComponent>(1);
  components.add(comp);
  LockRequest req = new LockRequest(components, "me", "localhost");
  req.setTxnid(txnid);
  LockResponse res = txnHandler.lock(req);
  txnHandler.abortTxn(new AbortTxnRequest(txnid));
  for (int i = 0; i < TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50; i++) {
    txnid = openTxn();
    txnHandler.abortTxn(new AbortTxnRequest(txnid));
  }
  GetOpenTxnsResponse openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(TxnStore.TIMED_OUT_TXN_ABORT_BATCH_SIZE + 50 + 1, openTxns.getOpen_txnsSize());
  startInitiator();
  openTxns = txnHandler.getOpenTxns();
  Assert.assertEquals(1, openTxns.getOpen_txnsSize());
}
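The openTxn() calls above come from a helper defined elsewhere in the test class. Assuming the same txnHandler (a TxnStore) is available, a minimal sketch of such a helper might look like the following; the method name openSingleTxn is illustrative.

// Illustrative helper: opens one transaction through the metastore transaction
// handler and returns its id, mirroring what the test's openTxn() presumably does.
private long openSingleTxn() throws Exception {
  OpenTxnsResponse resp = txnHandler.openTxns(new OpenTxnRequest(1, "me", "localhost"));
  return resp.getTxn_ids().get(0);
}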
Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.
From the class GetColumnsOperation, method runInternal.
@Override
public void runInternal() throws HiveSQLException {
  setState(OperationState.RUNNING);
  try {
    IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
    String schemaPattern = convertSchemaPattern(schemaName);
    String tablePattern = convertIdentifierPattern(tableName, true);
    Pattern columnPattern = null;
    if (columnName != null) {
      columnPattern = Pattern.compile(convertIdentifierPattern(columnName, false));
    }
    List<String> dbNames = metastoreClient.getDatabases(schemaPattern);
    Collections.sort(dbNames);
    Map<String, List<String>> db2Tabs = new HashMap<>();
    for (String dbName : dbNames) {
      List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
      Collections.sort(tableNames);
      db2Tabs.put(dbName, tableNames);
    }
    if (isAuthV2Enabled()) {
      List<HivePrivilegeObject> privObjs = getPrivObjs(db2Tabs);
      String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName + ", tablePattern : " + tableName;
      authorizeMetaGets(HiveOperationType.GET_COLUMNS, privObjs, cmdStr);
    }
    int maxBatchSize = SessionState.get().getConf().getIntVar(ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
    for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
      String dbName = dbTabs.getKey();
      List<String> tableNames = dbTabs.getValue();
      for (Table table : new TableIterable(metastoreClient, dbName, tableNames, maxBatchSize)) {
        TableSchema schema = new TableSchema(metastoreClient.getSchema(dbName, table.getTableName()));
        List<SQLPrimaryKey> primaryKeys = metastoreClient.getPrimaryKeys(new PrimaryKeysRequest(dbName, table.getTableName()));
        Set<String> pkColNames = new HashSet<>();
        for (SQLPrimaryKey key : primaryKeys) {
          pkColNames.add(key.getColumn_name().toLowerCase());
        }
        for (ColumnDescriptor column : schema.getColumnDescriptors()) {
          if (columnPattern != null && !columnPattern.matcher(column.getName()).matches()) {
            continue;
          }
          Object[] rowData = new Object[] {
              null, // TABLE_CAT
              table.getDbName(), // TABLE_SCHEM
              table.getTableName(), // TABLE_NAME
              column.getName(), // COLUMN_NAME
              column.getType().toJavaSQLType(), // DATA_TYPE
              column.getTypeName(), // TYPE_NAME
              column.getTypeDescriptor().getColumnSize(), // COLUMN_SIZE
              null, // BUFFER_LENGTH, unused
              column.getTypeDescriptor().getDecimalDigits(), // DECIMAL_DIGITS
              column.getType().getNumPrecRadix(), // NUM_PREC_RADIX
              pkColNames.contains(column.getName().toLowerCase())
                  ? DatabaseMetaData.columnNoNulls : DatabaseMetaData.columnNullable, // NULLABLE
              column.getComment(), // REMARKS
              null, // COLUMN_DEF
              null, // SQL_DATA_TYPE
              null, // SQL_DATETIME_SUB
              null, // CHAR_OCTET_LENGTH
              column.getOrdinalPosition(), // ORDINAL_POSITION
              pkColNames.contains(column.getName().toLowerCase()) ? "NO" : "YES", // IS_NULLABLE
              null, // SCOPE_CATALOG
              null, // SCOPE_SCHEMA
              null, // SCOPE_TABLE
              null, // SOURCE_DATA_TYPE
              "NO" // IS_AUTO_INCREMENT
          };
          rowSet.addRow(rowData);
        }
      }
    }
    setState(OperationState.FINISHED);
  } catch (Exception e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException(e);
  }
}
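GetColumnsOperation is what HiveServer2 runs behind DatabaseMetaData.getColumns(), so the rows assembled above surface directly in a JDBC client. A small, illustrative consumer follows; the connection URL is an assumption and standard java.sql imports are implied.

// Illustrative JDBC consumer of the rows built by GetColumnsOperation.
try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
     ResultSet columns = conn.getMetaData().getColumns(null, "default", "%", "%")) {
  while (columns.next()) {
    System.out.printf("%s.%s %s (JDBC type %d, nullable=%s)%n",
        columns.getString("TABLE_NAME"), columns.getString("COLUMN_NAME"),
        columns.getString("TYPE_NAME"), columns.getInt("DATA_TYPE"),
        columns.getString("IS_NULLABLE"));
  }
}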
Use of org.apache.hadoop.hive.metastore.api.Table in project storm by apache.
From the class HiveSetupUtil, method createDbAndTable.
public static void createDbAndTable(HiveConf conf, String databaseName, String tableName, List<String> partVals,
    String[] colNames, String[] colTypes, String[] partNames, String dbLocation) throws Exception {
  IMetaStoreClient client = new HiveMetaStoreClient(conf);
  try {
    Database db = new Database();
    db.setName(databaseName);
    db.setLocationUri(dbLocation);
    client.createDatabase(db);
    Table tbl = new Table();
    tbl.setDbName(databaseName);
    tbl.setTableName(tableName);
    tbl.setTableType(TableType.MANAGED_TABLE.toString());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(getTableColumns(colNames, colTypes));
    sd.setNumBuckets(1);
    sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
    if (partNames != null && partNames.length != 0) {
      tbl.setPartitionKeys(getPartitionKeys(partNames));
    }
    tbl.setSd(sd);
    sd.setBucketCols(new ArrayList<String>(2));
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo().setSerializationLib(OrcSerde.class.getName());
    sd.setInputFormat(OrcInputFormat.class.getName());
    sd.setOutputFormat(OrcOutputFormat.class.getName());
    Map<String, String> tableParams = new HashMap<String, String>();
    tbl.setParameters(tableParams);
    client.createTable(tbl);
    try {
      if (partVals != null && partVals.size() > 0) {
        addPartition(client, tbl, partVals);
      }
    } catch (AlreadyExistsException e) {
      // The partition already exists; nothing more to do for this test setup.
    }
  } finally {
    client.close();
  }
}
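The getTableColumns and getPartitionKeys helpers used above are defined elsewhere in HiveSetupUtil. A plausible sketch of both is shown below, under the assumption that partition keys are modeled as string columns; the bodies are illustrative, not the project's actual helpers.

// Hypothetical versions of the helpers referenced by createDbAndTable: both simply
// turn name/type arrays into the metastore's FieldSchema lists.
private static List<FieldSchema> getTableColumns(String[] colNames, String[] colTypes) {
  List<FieldSchema> fields = new ArrayList<FieldSchema>(colNames.length);
  for (int i = 0; i < colNames.length; i++) {
    fields.add(new FieldSchema(colNames[i], colTypes[i], ""));
  }
  return fields;
}

private static List<FieldSchema> getPartitionKeys(String[] partNames) {
  List<FieldSchema> keys = new ArrayList<FieldSchema>(partNames.length);
  for (String name : partNames) {
    // Partition keys are assumed to be string-typed in this setup.
    keys.add(new FieldSchema(name, serdeConstants.STRING_TYPE_NAME, ""));
  }
  return keys;
}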
Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache.
From the class AcidTableSerializer, method decode.
/** Returns the {@link AcidTable} instance decoded from a base 64 representation. */
public static AcidTable decode(String encoded) throws IOException {
  if (!encoded.startsWith(PROLOG_V1)) {
    throw new IllegalStateException("Unsupported version.");
  }
  encoded = encoded.substring(PROLOG_V1.length());
  byte[] decoded = Base64.decodeBase64(encoded);
  AcidTable table = null;
  try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(decoded))) {
    String databaseName = in.readUTF();
    String tableName = in.readUTF();
    boolean createPartitions = in.readBoolean();
    long transactionId = in.readLong();
    TableType tableType = TableType.valueOf(in.readByte());
    int thriftLength = in.readInt();
    table = new AcidTable(databaseName, tableName, createPartitions, tableType);
    table.setTransactionId(transactionId);
    Table metaTable = null;
    if (thriftLength > 0) {
      metaTable = new Table();
      try {
        byte[] thriftEncoded = new byte[thriftLength];
        in.readFully(thriftEncoded, 0, thriftLength);
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(metaTable, thriftEncoded);
        table.setTable(metaTable);
      } catch (TException e) {
        throw new IOException("Error deserializing meta store table.", e);
      }
    }
  }
  return table;
}
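The read order above fixes the layout an encoder must produce. The project's actual encode method is not shown here; the following is only a sketch of a matching writer, assuming AcidTable exposes getters mirroring the setters used in decode (getDatabaseName, getTableName, createPartitions, getTransactionId, getTableType, getTable) and that TableType exposes its byte code via value().

// Illustrative counterpart to decode(): writes fields in the exact order the reader
// consumes them, then prefixes the Base64 payload with PROLOG_V1. Assumed getters only.
public static String encodeSketch(AcidTable table) throws IOException {
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  try (DataOutputStream out = new DataOutputStream(bytes)) {
    out.writeUTF(table.getDatabaseName());
    out.writeUTF(table.getTableName());
    out.writeBoolean(table.createPartitions());
    out.writeLong(table.getTransactionId());
    out.writeByte(table.getTableType().value());
    byte[] thrift = new byte[0];
    if (table.getTable() != null) {
      try {
        thrift = new TSerializer(new TCompactProtocol.Factory()).serialize(table.getTable());
      } catch (TException e) {
        throw new IOException("Error serializing meta store table.", e);
      }
    }
    out.writeInt(thrift.length);
    out.write(thrift);
  }
  return PROLOG_V1 + Base64.encodeBase64String(bytes.toByteArray());
}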