Example usage of org.smartdata.metastore.MetaStoreException in project SSM (Intel-bigdata):
class MetaStoreUtils, method getTableSetNum.
/**
 * Counts how many of the given table names already exist in the database
 * behind {@code conn}. Supports MySQL (via INFORMATION_SCHEMA) and SQLite
 * (via sqlite_master); any other JDBC URL is rejected.
 *
 * <p>Note: the connection is closed in {@code finally}, so the caller must
 * not reuse {@code conn} afterwards.
 *
 * @param conn     open JDBC connection; closed by this method
 * @param tableSet table names to look for
 * @return the number of tables from {@code tableSet} present in the database
 * @throws MetaStoreException if the URL is unsupported or the query fails
 */
public static int getTableSetNum(Connection conn, String[] tableSet) throws MetaStoreException {
// Table names come from internal SSM table lists, not user input, so
// inlining them into the IN clause is acceptable here.
String tables = "('" + StringUtils.join(tableSet, "','") + "')";
try {
String url = conn.getMetaData().getURL();
String query;
if (url.startsWith(MetaStoreUtils.MYSQL_URL_PREFIX)) {
String dbName = getMysqlDBName(url);
query = String.format("SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES " + "WHERE TABLE_SCHEMA='%s' AND TABLE_NAME IN %s", dbName, tables);
} else if (url.startsWith(MetaStoreUtils.SQLITE_URL_PREFIX)) {
query = String.format("SELECT COUNT(*) FROM sqlite_master " + "WHERE TYPE='table' AND NAME IN %s", tables);
} else {
throw new MetaStoreException("The jdbc url is not valid for SSM use.");
}
// try-with-resources closes the Statement and ResultSet even on error;
// the original leaked both.
try (Statement s = conn.createStatement();
     ResultSet rs = s.executeQuery(query)) {
return rs.next() ? rs.getInt(1) : 0;
}
} catch (MetaStoreException e) {
// Rethrow as-is instead of double-wrapping our own exception type.
throw e;
} catch (Exception e) {
throw new MetaStoreException(e);
} finally {
closeConnection(conn);
}
}
Example usage of org.smartdata.metastore.MetaStoreException in project SSM (Intel-bigdata):
class CmdletManager, method updateStorageIfNeeded.
// Todo: remove this implementation
// Todo: remove this implementation
/**
 * Records the new storage policy of a moved file in the metastore, unless the
 * action's result explicitly contains "UpdateStoragePolicy=false".
 * Only applies to move-type actions; anything else is ignored.
 *
 * @param info finished action whose effect may need to be persisted
 * @throws ActionException if the action instance cannot be created
 */
private void updateStorageIfNeeded(ActionInfo info) throws ActionException {
SmartAction action = ActionRegistry.createAction(info.getActionName());
if (!(action instanceof AbstractMoveFileAction)) {
return;
}
Map<String, String> args = info.getArgs();
// Prefer the policy baked into the action type; fall back to the argument.
String storagePolicy = ((AbstractMoveFileAction) action).getStoragePolicy();
if (storagePolicy == null) {
storagePolicy = args.get(AbstractMoveFileAction.STORAGE_POLICY);
}
String filePath = args.get(AbstractMoveFileAction.FILE_PATH);
try {
String actionResult = info.getResult();
if (actionResult == null) {
actionResult = "";
}
// The action can opt out of the metastore update via its result string.
if (!actionResult.contains("UpdateStoragePolicy=false")) {
metaStore.updateFileStoragePolicy(filePath, storagePolicy);
}
} catch (MetaStoreException e) {
LOG.error("Failed to update storage policy {} for file {}", storagePolicy, filePath, e);
}
}
Example usage of org.smartdata.metastore.MetaStoreException in project SSM (Intel-bigdata):
class CompressionScheduler, method onSubmit.
@Override
public boolean onSubmit(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
if (!actions.contains(actionInfo.getActionName())) {
return false;
}
if (fileLock.contains(srcPath)) {
return false;
}
try {
if (actionInfo.getActionName().equals(COMPRESSION_ACTION_ID) && !supportCompression(srcPath)) {
return false;
}
if (actionInfo.getActionName().equals(DECOMPRESSION_ACTION_ID) && !supportDecompression(srcPath)) {
return false;
}
// TODO remove this part
CompressionFileState fileState = new CompressionFileState(srcPath, FileState.FileStage.PROCESSING);
metaStore.insertUpdateFileState(fileState);
return true;
} catch (MetaStoreException e) {
LOG.error("Failed to submit action due to metastore exception!", e);
return false;
} catch (IOException e) {
LOG.error(e.getMessage());
return false;
}
}
Example usage of org.smartdata.metastore.MetaStoreException in project SSM (Intel-bigdata):
class ErasureCodingScheduler, method onSchedule.
@Override
public ScheduleResult onSchedule(CmdletInfo cmdletInfo, ActionInfo actionInfo, LaunchCmdlet cmdlet, LaunchAction action, int actionIndex) {
if (!actions.contains(action.getActionType())) {
return ScheduleResult.SUCCESS;
}
if (actionInfo.getActionName().equals(LIST_EC_ACTION_ID)) {
return ScheduleResult.SUCCESS;
}
String srcPath = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
if (srcPath == null) {
actionInfo.appendLog("No file is given in this action!");
return ScheduleResult.FAIL;
}
if (actionInfo.getActionName().equals(CHECK_EC_ACTION_ID)) {
return ScheduleResult.SUCCESS;
}
try {
// use the default EC policy if an ec action has not been given an EC policy
if (actionInfo.getActionName().equals(EC_ACTION_ID)) {
String ecPolicy = actionInfo.getArgs().get(EC_POLICY);
if (ecPolicy == null || ecPolicy.isEmpty()) {
String defaultEcPolicy = conf.getTrimmed("dfs.namenode.ec.system.default.policy", "RS-6-3-1024k");
actionInfo.getArgs().put(EC_POLICY, defaultEcPolicy);
action.getArgs().put(EC_POLICY, defaultEcPolicy);
}
}
FileInfo fileinfo = metaStore.getFile(srcPath);
if (fileinfo != null && fileinfo.isdir()) {
return ScheduleResult.SUCCESS;
}
// The below code is just for ec or unec action with file as argument, not directory
if (isLimitedByThrottle(srcPath)) {
if (LOG.isDebugEnabled()) {
LOG.debug("Failed to schedule {} due to the limitation of throttle!", actionInfo);
}
return ScheduleResult.RETRY;
}
// For ec or unec, add ecTmp argument
String tmpName = createTmpName(action);
action.getArgs().put(EC_TMP, EC_DIR + tmpName);
actionInfo.getArgs().put(EC_TMP, EC_DIR + tmpName);
} catch (MetaStoreException ex) {
LOG.error("Error occurred for getting file info", ex);
actionInfo.appendLog(ex.getMessage());
return ScheduleResult.FAIL;
}
afterSchedule(actionInfo);
return ScheduleResult.SUCCESS;
}
Example usage of org.smartdata.metastore.MetaStoreException in project SSM (Intel-bigdata):
class Copy2S3Scheduler, method onActionFinished.
@Override
public void onActionFinished(CmdletInfo cmdletInfo, ActionInfo actionInfo, int actionIndex) {
String path = actionInfo.getArgs().get(HdfsAction.FILE_PATH);
if (actionInfo.isFinished() && actionInfo.isSuccessful()) {
// Insert fileState
try {
metaStore.insertUpdateFileState(new S3FileState(path));
} catch (MetaStoreException e) {
LOG.error("Failed to insert file state.", e);
}
}
// unlock filelock
if (ifLocked(path)) {
unLockTheFile(path);
LOG.debug("unlocked copy2s3 file {}", path);
}
}
Aggregations