Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
The class ReplLoadTask, method createReplLoadCompleteAckTask.
private void createReplLoadCompleteAckTask() {
  if (!work.hasBootstrapLoadTasks()
      && (work.isIncrementalLoad() ? !work.incrementalLoadTasksBuilder().hasMoreWork() : true)) {
    // All repl load tasks are executed and status is 0, create the task to add the acknowledgement
    List<PreAckTask> listOfPreAckTasks = new LinkedList<>();
    listOfPreAckTasks.add(new PreAckTask() {
      @Override
      public void run() throws SemanticException {
        try {
          HiveMetaStoreClient metaStoreClient = new HiveMetaStoreClient(conf);
          long currentNotificationID = metaStoreClient.getCurrentNotificationEventId().getEventId();
          Path loadMetadataFilePath = new Path(work.dumpDirectory, LOAD_METADATA.toString());
          Utils.writeOutput(String.valueOf(currentNotificationID), loadMetadataFilePath, conf);
          LOG.info("Created LOAD Metadata file : {} with NotificationID : {}", loadMetadataFilePath, currentNotificationID);
        } catch (TException ex) {
          throw new SemanticException(ex);
        }
      }
    });
    if (work.shouldFailover()) {
      listOfPreAckTasks.add(new PreAckTask() {
        @Override
        public void run() throws SemanticException {
          try {
            Database db = getHive().getDatabase(work.getTargetDatabase());
            if (MetaStoreUtils.isDbBeingFailedOverAtEndpoint(db, MetaStoreUtils.FailoverEndpoint.TARGET)) {
              return;
            }
            Map<String, String> params = db.getParameters();
            if (params == null) {
              params = new HashMap<>();
              db.setParameters(params);
            }
            LOG.info("Setting failover endpoint:{} to TARGET for database: {}", ReplConst.REPL_FAILOVER_ENDPOINT, db.getName());
            params.put(ReplConst.REPL_FAILOVER_ENDPOINT, MetaStoreUtils.FailoverEndpoint.TARGET.toString());
            getHive().alterDatabase(work.getTargetDatabase(), db);
          } catch (HiveException e) {
            throw new SemanticException(e);
          }
        }
      });
    }
    if (work.isSecondFailover) {
      // If it is the second load of optimised bootstrap, this is the end of the cycle; add tasks to sort
      // out the database properties.
      listOfPreAckTasks.add(new PreAckTask() {
        @Override
        public void run() throws SemanticException {
          try {
            Hive hiveDb = getHive();
            Database db = hiveDb.getDatabase(work.getTargetDatabase());
            LinkedHashMap<String, String> params = new LinkedHashMap<>(db.getParameters());
            LOG.debug("Database {} properties before removal {}", work.getTargetDatabase(), params);
            params.remove(SOURCE_OF_REPLICATION);
            db.setParameters(params);
            LOG.info("Removed {} property from database {} after successful optimised bootstrap load.", SOURCE_OF_REPLICATION, work.getTargetDatabase());
            hiveDb.alterDatabase(work.getTargetDatabase(), db);
            LOG.debug("Database {} properties after removal {}", work.getTargetDatabase(), params);
          } catch (HiveException e) {
            throw new SemanticException(e);
          }
        }
      });
    }
    AckWork replLoadAckWork = new AckWork(new Path(work.dumpDirectory, LOAD_ACKNOWLEDGEMENT.toString()), work.getMetricCollector(), listOfPreAckTasks);
    Task<AckWork> loadAckWorkTask = TaskFactory.get(replLoadAckWork, conf);
    if (childTasks.isEmpty()) {
      childTasks.add(loadAckWorkTask);
    } else {
      DAGTraversal.traverse(childTasks, new AddDependencyToLeaves(Collections.singletonList(loadAckWorkTask)));
    }
  }
}
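The pre-ack task above creates a client, reads the current notification event id, and writes it to the load-metadata file. A minimal standalone sketch of that metastore call, with the client explicitly closed; the class and method names here are illustrative, not from the Hive source:

// A minimal sketch (not taken from the Hive codebase): fetch the latest notification
// event id from the metastore and always close the client in a finally block.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.thrift.TException;

public class NotificationIdProbe {
  public static long currentNotificationId(HiveConf conf) throws TException {
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // getCurrentNotificationEventId() returns a small thrift struct; getEventId() is the numeric id.
      return client.getCurrentNotificationEventId().getEventId();
    } finally {
      client.close();
    }
  }
}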
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
The class TestTxnCommands, method prepareParallelTest.
private IMetaStoreClient prepareParallelTest(String tableName, int val)
    throws Exception, MetaException, TException, NoSuchObjectException {
  hiveConf.setBoolean("hive.stats.autogather", true);
  hiveConf.setBoolean("hive.stats.column.autogather", true);
  // Need to close the thread-local Hive object so that the configuration change is reflected to HMS.
  Hive.closeCurrent();
  runStatementOnDriver("drop table if exists " + tableName);
  runStatementOnDriver(String.format("create table %s (a int) stored as orc "
      + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
  runStatementOnDriver(String.format("insert into %s (a) values (%d)", tableName, val));
  runStatementOnDriver(String.format("insert into %s (a) values (%d)", tableName, val));
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
  // Stats should be valid after serial inserts.
  List<ColumnStatisticsObj> stats = getTxnTableStats(msClient, tableName);
  Assert.assertEquals(1, stats.size());
  return msClient;
}
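getTxnTableStats is a helper defined elsewhere in TestTxnCommands and is not shown here; the objects it returns are plain thrift structs. A small illustrative sketch (class and method names below are not from the Hive source) of reading the long-column statistics that the metastore reports for the int column a:

// Illustrative only: inspect the thrift statistics objects returned for an int column.
import java.util.List;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

public class ColumnStatsPrinter {
  static void printIntColumnStats(List<ColumnStatisticsObj> stats) {
    for (ColumnStatisticsObj obj : stats) {
      ColumnStatisticsData data = obj.getStatsData();
      // ColumnStatisticsData is a thrift union; int columns are reported as "long stats".
      if (data.isSetLongStats()) {
        System.out.printf("%s: numNulls=%d, numDVs=%d, low=%d, high=%d%n",
            obj.getColName(),
            data.getLongStats().getNumNulls(),
            data.getLongStats().getNumDVs(),
            data.getLongStats().getLowValue(),
            data.getLongStats().getHighValue());
      }
    }
  }
}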
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
The class TestTxnCommands, method testMmExim.
@Test
public void testMmExim() throws Exception {
  String tableName = "mm_table", importName = tableName + "_import";
  runStatementOnDriver("drop table if exists " + tableName);
  runStatementOnDriver(String.format("create table %s (a int, b int) stored as orc "
      + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
  // Regular insert: export some MM deltas, then import into a new table.
  int[][] rows1 = { { 1, 2 }, { 3, 4 } };
  runStatementOnDriver(String.format("insert into %s (a,b) %s", tableName, makeValuesClause(rows1)));
  runStatementOnDriver(String.format("insert into %s (a,b) %s", tableName, makeValuesClause(rows1)));
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
  org.apache.hadoop.hive.metastore.api.Table table = msClient.getTable("default", tableName);
  FileSystem fs = FileSystem.get(hiveConf);
  Path exportPath = new Path(table.getSd().getLocation() + "_export");
  fs.delete(exportPath, true);
  runStatementOnDriver(String.format("export table %s to '%s'", tableName, exportPath));
  List<String> paths = listPathsRecursive(fs, exportPath);
  verifyMmExportPaths(paths, 2);
  runStatementOnDriver(String.format("import table %s from '%s'", importName, exportPath));
  org.apache.hadoop.hive.metastore.api.Table imported = msClient.getTable("default", importName);
  Assert.assertEquals(imported.toString(), "insert_only", imported.getParameters().get("transactional_properties"));
  Path importPath = new Path(imported.getSd().getLocation());
  FileStatus[] stat = fs.listStatus(importPath, AcidUtils.hiddenFileFilter);
  Assert.assertEquals(Arrays.toString(stat), 1, stat.length);
  assertIsDelta(stat[0]);
  List<String> allData = stringifyValues(rows1);
  allData.addAll(stringifyValues(rows1));
  Collections.sort(allData);
  List<String> rs = runStatementOnDriver(String.format("select a,b from %s order by a,b", importName));
  Assert.assertEquals("After import: " + rs, allData, rs);
  runStatementOnDriver("drop table if exists " + importName);
  // Do insert overwrite to create some invalid deltas, and import into a non-MM table.
  int[][] rows2 = { { 5, 6 }, { 7, 8 } };
  runStatementOnDriver(String.format("insert overwrite table %s %s", tableName, makeValuesClause(rows2)));
  fs.delete(exportPath, true);
  runStatementOnDriver(String.format("export table %s to '%s'", tableName, exportPath));
  paths = listPathsRecursive(fs, exportPath);
  verifyMmExportPaths(paths, 1);
  runStatementOnDriver(String.format("create table %s (a int, b int) stored as orc "
      + "TBLPROPERTIES ('transactional'='false')", importName));
  runStatementOnDriver(String.format("import table %s from '%s'", importName, exportPath));
  imported = msClient.getTable("default", importName);
  Assert.assertNull(imported.toString(), imported.getParameters().get("transactional"));
  Assert.assertNull(imported.toString(), imported.getParameters().get("transactional_properties"));
  importPath = new Path(imported.getSd().getLocation());
  stat = fs.listStatus(importPath, AcidUtils.hiddenFileFilter);
  allData = stringifyValues(rows2);
  Collections.sort(allData);
  rs = runStatementOnDriver(String.format("select a,b from %s order by a,b", importName));
  Assert.assertEquals("After import: " + rs, allData, rs);
  runStatementOnDriver("drop table if exists " + importName);
  runStatementOnDriver("drop table if exists " + tableName);
  msClient.close();
}
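listPathsRecursive and verifyMmExportPaths are helpers defined elsewhere in TestTxnCommands and are not shown here. A plausible sketch of the recursive listing, assuming only file paths under the export directory are needed (the real helper may differ, for example by also collecting directories):

// Hypothetical reconstruction, not the actual TestTxnCommands helper: collect every file
// path under a directory using FileSystem.listFiles with recursion enabled.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class FsListingHelper {
  static List<String> listFilePathsRecursive(FileSystem fs, Path root) throws IOException {
    List<String> result = new ArrayList<>();
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true);
    while (it.hasNext()) {
      result.add(it.next().getPath().toString());
    }
    return result;
  }
}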
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
The class TestTxnCommands, method truncateTableAdvancingWriteId.
@Test
public void truncateTableAdvancingWriteId() throws Exception {
  runStatementOnDriver("create database IF NOT EXISTS trunc_db");
  String tableName = "trunc_db.trunc_table";
  IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
  runStatementOnDriver(String.format("CREATE TABLE %s (f1 string) PARTITIONED BY (ds STRING) "
      + "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')", tableName));
  String validWriteIds = msClient.getValidWriteIds(tableName).toString();
  LOG.info("ValidWriteIds before truncate table::" + validWriteIds);
  Assert.assertEquals("trunc_db.trunc_table:0:9223372036854775807::", validWriteIds);
  runStatementOnDriver("TRUNCATE TABLE trunc_db.trunc_table");
  validWriteIds = msClient.getValidWriteIds(tableName).toString();
  LOG.info("ValidWriteIds after truncate table::" + validWriteIds);
  Assert.assertEquals("trunc_db.trunc_table:1:9223372036854775807::", validWriteIds);
}
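The asserted string is the serialized form of a ValidWriteIdList: table name, high-water mark, minimum open write id (Long.MAX_VALUE when none are open), then the open and aborted write id lists, both empty here. A hedged sketch of reading the same information through the API instead of comparing serialized strings; the wrapper class and method are illustrative:

// Illustrative only: read the write-id high-water mark via the ValidWriteIdList API
// rather than comparing the serialized string form.
import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.thrift.TException;

public class WriteIdProbe {
  static long highWatermark(IMetaStoreClient msClient, String fullTableName) throws TException {
    ValidWriteIdList writeIds = msClient.getValidWriteIds(fullTableName);
    // 0 for a freshly created table; the TRUNCATE in the test above advances it to 1.
    return writeIds.getHighWatermark();
  }
}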
Use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
The class TestHiveMetaStoreClientApiArgumentsChecker, method setUp.
@Before
public void setUp() throws Exception {
  client = new TestHiveMetaStoreClient(new HiveConf(Hive.class));
  hive = Hive.get(client);
  hive.getConf().set(MetastoreConf.ConfVars.FS_HANDLER_THREADS_COUNT.getVarname(), "15");
  hive.getConf().set(MetastoreConf.ConfVars.MSCK_PATH_VALIDATION.getVarname(), "throw");
  msc = new HiveMetaStoreClient(hive.getConf());
  hive.getConf().setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
      "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
  HiveConf.setBoolVar(hive.getConf(), HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  hive.getConf().set(ValidTxnList.VALID_TXNS_KEY, "1:");
  hive.getConf().set(ValidWriteIdList.VALID_WRITEIDS_KEY, TABLE_NAME + ":1:");
  hive.getConf().setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.TestTxnManager");
  SessionState.start(hive.getConf());
  SessionState.get().initTxnMgr(hive.getConf());
  Context ctx = new Context(hive.getConf());
  SessionState.get().getTxnMgr().openTxn(ctx, USER_NAME);
  t = new Table();
  org.apache.hadoop.hive.metastore.api.Table tTable = new org.apache.hadoop.hive.metastore.api.Table();
  tTable.setId(Long.MAX_VALUE);
  t.setTTable(tTable);
  Map<String, String> parameters = new HashMap<>();
  parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
  tTable.setParameters(parameters);
  tTable.setTableType(TableType.MANAGED_TABLE.toString());
  t.setTableName(TABLE_NAME);
  t.setDbName(DB_NAME);
  List<FieldSchema> partCols = new ArrayList<>();
  partCols.add(new FieldSchema());
  t.setPartCols(partCols);
}
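setUp opens a raw HiveMetaStoreClient and binds a thread-local Hive object to the mocked client, but the matching cleanup is not shown here. A hedged sketch of an @After method, assuming the test class does not already define one (requires import org.junit.After):

// Hedged sketch of matching cleanup; msc and Hive refer to the fields and class used in setUp above.
@After
public void tearDown() throws Exception {
  if (msc != null) {
    msc.close();        // close the raw HiveMetaStoreClient opened in setUp
  }
  Hive.closeCurrent();  // drop the thread-local Hive handle bound to the mocked client
}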