Use of org.apache.hadoop.hive.metastore.api.SQLPrimaryKey in project hive by apache.
The class MetaStoreDirectSql, method getPrimaryKeys.
public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name) throws MetaException {
  List<SQLPrimaryKey> ret = new ArrayList<SQLPrimaryKey>();
  // The key column is either a regular column (COLUMNS_V2) or, for constraints on
  // partition columns, a partition key (PARTITION_KEYS); the CASE expression picks
  // whichever side of the two outer joins matched.
  String queryText = "SELECT " + DBS + ".\"NAME\", " + TBLS + ".\"TBL_NAME\", "
      + "CASE WHEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" IS NOT NULL THEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" "
      + "ELSE " + PARTITION_KEYS + ".\"PKEY_NAME\" END, "
      + KEY_CONSTRAINTS + ".\"POSITION\", "
      + KEY_CONSTRAINTS + ".\"CONSTRAINT_NAME\", "
      + KEY_CONSTRAINTS + ".\"ENABLE_VALIDATE_RELY\" "
      + " from " + TBLS + " "
      + " INNER JOIN " + KEY_CONSTRAINTS + " ON " + TBLS + ".\"TBL_ID\" = " + KEY_CONSTRAINTS + ".\"PARENT_TBL_ID\" "
      + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
      + " LEFT OUTER JOIN " + COLUMNS_V2 + " ON " + COLUMNS_V2 + ".\"CD_ID\" = " + KEY_CONSTRAINTS + ".\"PARENT_CD_ID\" AND "
      + " " + COLUMNS_V2 + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" "
      + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND "
      + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" "
      + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = " + MConstraint.PRIMARY_KEY_CONSTRAINT + " AND"
      + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND")
      + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? ");
  queryText = queryText.trim();
  // When tbl_name is null the statement ends with a dangling "AND"; strip it.
  if (queryText.endsWith("AND")) {
    queryText = queryText.substring(0, queryText.length() - 3);
  }
  // Bind only the parameters whose filters were actually emitted.
  List<String> pms = new ArrayList<String>();
  if (db_name != null) {
    pms.add(db_name);
  }
  if (tbl_name != null) {
    pms.add(tbl_name);
  }
  Query queryParams = pm.newQuery("javax.jdo.query.SQL", queryText);
  List<Object[]> sqlResult = ensureList(executeWithArray(queryParams, pms.toArray(), queryText));
  if (!sqlResult.isEmpty()) {
    for (Object[] line : sqlResult) {
      // ENABLE_VALIDATE_RELY packs three flags into one int:
      // bit 2 = ENABLE, bit 1 = VALIDATE, bit 0 = RELY.
      int enableValidateRely = extractSqlInt(line[5]);
      boolean enable = (enableValidateRely & 4) != 0;
      boolean validate = (enableValidateRely & 2) != 0;
      boolean rely = (enableValidateRely & 1) != 0;
      SQLPrimaryKey currKey = new SQLPrimaryKey(extractSqlString(line[0]), extractSqlString(line[1]),
          extractSqlString(line[2]), extractSqlInt(line[3]), extractSqlString(line[4]), enable, validate, rely);
      ret.add(currKey);
    }
  }
  return ret;
}
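Both this method and the JDO variant further down decode the ENABLE_VALIDATE_RELY value, which packs the three constraint flags into a single integer. A minimal self-contained sketch of that encoding; the ConstraintFlags helper is illustrative and not part of Hive:

// Illustrative helper mirroring the bit layout decoded above:
// bit 2 = ENABLE, bit 1 = VALIDATE, bit 0 = RELY.
final class ConstraintFlags {

  static int pack(boolean enable, boolean validate, boolean rely) {
    return (enable ? 4 : 0) | (validate ? 2 : 0) | (rely ? 1 : 0);
  }

  static boolean enable(int packed) {
    return (packed & 4) != 0;
  }

  static boolean validate(int packed) {
    return (packed & 2) != 0;
  }

  static boolean rely(int packed) {
    return (packed & 1) != 0;
  }
}

For example, pack(true, false, true) yields 5, which the decoding loop above turns back into enable = true, validate = false, rely = true.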
Use of org.apache.hadoop.hive.metastore.api.SQLPrimaryKey in project hive by apache.
The class TestObjectStore, method testTableOps.
/**
 * Test table operations.
 */
@Test
public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
  Database db1 = new Database(DB1, "description", "locationurl", null);
  objectStore.createDatabase(db1);
  StorageDescriptor sd1 = new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)),
      "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
  HashMap<String, String> params = new HashMap<>();
  params.put("EXTERNAL", "false");
  Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE");
  objectStore.createTable(tbl1);
  List<String> tables = objectStore.getAllTables(DB1);
  Assert.assertEquals(1, tables.size());
  Assert.assertEquals(TABLE1, tables.get(0));
  StorageDescriptor sd2 = new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)),
      "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
  Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, "MANAGED_TABLE");
  objectStore.alterTable(DB1, TABLE1, newTbl1);
  tables = objectStore.getTables(DB1, "new*");
  Assert.assertEquals(1, tables.size());
  Assert.assertEquals("new" + TABLE1, tables.get(0));
  objectStore.createTable(tbl1);
  tables = objectStore.getAllTables(DB1);
  Assert.assertEquals(2, tables.size());
  List<SQLForeignKey> foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null);
  Assert.assertEquals(0, foreignKeys.size());
  SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1, "pk_const_1", false, false, false);
  objectStore.addPrimaryKeys(ImmutableList.of(pk));
  SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col", DB1, "new" + TABLE1, "fk_col", 1, 0, 0,
      "fk_const_1", "pk_const_1", false, false, false);
  objectStore.addForeignKeys(ImmutableList.of(fk));
  // Retrieve from PK side
  foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1);
  Assert.assertEquals(1, foreignKeys.size());
  List<SQLForeignKey> fks = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1);
  if (fks != null) {
    for (SQLForeignKey fkcol : fks) {
      objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), fkcol.getFk_name());
    }
  }
  // Retrieve from FK side
  foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null);
  Assert.assertEquals(0, foreignKeys.size());
  // Retrieve from PK side
  foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1);
  Assert.assertEquals(0, foreignKeys.size());
  objectStore.dropTable(DB1, TABLE1);
  tables = objectStore.getAllTables(DB1);
  Assert.assertEquals(1, tables.size());
  objectStore.dropTable(DB1, "new" + TABLE1);
  tables = objectStore.getAllTables(DB1);
  Assert.assertEquals(0, tables.size());
  objectStore.dropDatabase(DB1);
}
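A possible follow-up check on the constraints created above, as a hedged sketch: assuming ObjectStore implements RawStore's getPrimaryKeys(String, String) (the JDO variant backing it appears in the next snippet), the primary key could be read back before the tables are dropped:

// Sketch only, not part of the original test; assumes
// ObjectStore.getPrimaryKeys(String, String) from the RawStore interface,
// and would need to run before the dropTable calls above.
List<SQLPrimaryKey> pks = objectStore.getPrimaryKeys(DB1, TABLE1);
Assert.assertEquals(1, pks.size());
Assert.assertEquals("pk_const_1", pks.get(0).getPk_name());
Assert.assertEquals("pk_col", pks.get(0).getColumn_name());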
Use of org.apache.hadoop.hive.metastore.api.SQLPrimaryKey in project hive by apache.
The class ObjectStore, method getPrimaryKeysViaJdo.
private List<SQLPrimaryKey> getPrimaryKeysViaJdo(String db_name, String tbl_name) throws MetaException {
  boolean commited = false;
  List<SQLPrimaryKey> primaryKeys = null;
  Query query = null;
  try {
    openTransaction();
    query = pm.newQuery(MConstraint.class,
        "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&"
            + " constraintType == MConstraint.PRIMARY_KEY_CONSTRAINT");
    query.declareParameters("java.lang.String tbl_name, java.lang.String db_name");
    Collection<?> constraints = (Collection<?>) query.execute(tbl_name, db_name);
    pm.retrieveAll(constraints);
    primaryKeys = new ArrayList<>();
    for (Iterator<?> i = constraints.iterator(); i.hasNext(); ) {
      MConstraint currPK = (MConstraint) i.next();
      // The key column is either a regular column or, for constraints on
      // partition columns, one of the table's partition keys.
      List<MFieldSchema> cols = currPK.getParentColumn() != null
          ? currPK.getParentColumn().getCols()
          : currPK.getParentTable().getPartitionKeys();
      // Same bit layout as in the direct-SQL path:
      // bit 2 = ENABLE, bit 1 = VALIDATE, bit 0 = RELY.
      int enableValidateRely = currPK.getEnableValidateRely();
      boolean enable = (enableValidateRely & 4) != 0;
      boolean validate = (enableValidateRely & 2) != 0;
      boolean rely = (enableValidateRely & 1) != 0;
      primaryKeys.add(new SQLPrimaryKey(db_name, tbl_name,
          cols.get(currPK.getParentIntegerIndex()).getName(), currPK.getPosition(),
          currPK.getConstraintName(), enable, validate, rely));
    }
    commited = commitTransaction();
  } finally {
    rollbackAndCleanup(commited, query);
  }
  return primaryKeys;
}
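The direct-SQL method shown first and this JDO method answer the same request. In ObjectStore the two paths are typically unified by a helper that tries the faster hand-written SQL first and falls back to JDO; a minimal sketch of that dispatch, with illustrative names (directSql stands in for a MetaStoreDirectSql instance and is an assumption, not Hive's exact code):

// Sketch only: illustrates the try-SQL-then-fall-back-to-JDO pattern.
// The directSql field, method name, and broad catch are illustrative.
private List<SQLPrimaryKey> getPrimaryKeysSketch(String dbName, String tblName) throws MetaException {
  try {
    // Fast path: one hand-written SQL statement (MetaStoreDirectSql.getPrimaryKeys above).
    return directSql.getPrimaryKeys(dbName, tblName);
  } catch (Exception e) {
    // Portable fallback: the JDO query in getPrimaryKeysViaJdo above.
    return getPrimaryKeysViaJdo(dbName, tblName);
  }
}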