
Example 31 with MetaStoreException

Use of org.smartdata.metastore.MetaStoreException in project SSM by Intel-bigdata.

The class RuleInfoRepo, method updateRuleInfo.

public boolean updateRuleInfo(RuleState rs, long lastCheckTime, long checkedCount, int cmdletsGen) throws IOException {
    lockWrite();
    try {
        boolean ret = true;
        changeRuleState(rs, false);
        ruleInfo.updateRuleInfo(rs, lastCheckTime, checkedCount, cmdletsGen);
        if (metaStore != null) {
            try {
                ret = metaStore.updateRuleInfo(ruleInfo.getId(), rs, lastCheckTime, ruleInfo.getNumChecked(), (int) ruleInfo.getNumCmdsGen());
            } catch (MetaStoreException e) {
                throw new IOException(ruleInfo.toString(), e);
            }
        }
        return ret;
    } finally {
        unlockWrite();
    }
}
Also used: MetaStoreException (org.smartdata.metastore.MetaStoreException), IOException (java.io.IOException)
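
For context, a minimal caller sketch (not part of SSM) of the method above: the wrapping IOException carries ruleInfo.toString() as its message and keeps the original MetaStoreException as its cause, so a caller can still tell metastore failures apart. The packages of RuleInfoRepo and RuleState in the imports, and the RuleState.ACTIVE constant, are assumptions based on the SSM source layout.

import java.io.IOException;
import org.smartdata.metastore.MetaStoreException;
// The two packages below are assumptions about the SSM source layout.
import org.smartdata.model.RuleState;
import org.smartdata.server.engine.rule.RuleInfoRepo;

public class UpdateRuleInfoCaller {
    // Persist one checked round for an active rule and report metastore failures separately.
    public static void persistRuleState(RuleInfoRepo repo) {
        try {
            repo.updateRuleInfo(RuleState.ACTIVE, System.currentTimeMillis(), 1, 0);
        } catch (IOException e) {
            if (e.getCause() instanceof MetaStoreException) {
                // The message is ruleInfo.toString(); the cause is the metastore error.
                System.err.println("MetaStore failure for rule: " + e.getMessage());
            } else {
                System.err.println("Rule update failed: " + e);
            }
        }
    }
}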

Example 32 with MetaStoreException

Use of org.smartdata.metastore.MetaStoreException in project SSM by Intel-bigdata.

The class MetaStoreUtils, method executeSql.

public static void executeSql(Connection conn, String sql) throws MetaStoreException {
    try {
        Statement s = conn.createStatement();
        s.execute(sql);
    } catch (Exception e) {
        LOG.error("SQL execution error " + sql);
        throw new MetaStoreException(e);
    }
}
Also used: MetaStoreException (org.smartdata.metastore.MetaStoreException), Statement (java.sql.Statement), InvalidPropertiesFormatException (java.util.InvalidPropertiesFormatException), SQLException (java.sql.SQLException), IOException (java.io.IOException)
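
Note that the snippet above never closes the Statement, so it leaks a resource on the success path. A minimal leak-free sketch (not the project's code, and with the LOG call omitted) that keeps the same contract and uses only the MetaStoreException(Throwable) constructor shown above:

import java.sql.Connection;
import java.sql.Statement;
import org.smartdata.metastore.MetaStoreException;

public class SqlRunner {
    // Same behavior as MetaStoreUtils.executeSql, but the Statement is always closed.
    public static void executeSql(Connection conn, String sql) throws MetaStoreException {
        try (Statement s = conn.createStatement()) {
            s.execute(sql);
        } catch (Exception e) {
            throw new MetaStoreException(e);
        }
    }
}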

Example 33 with MetaStoreException

Use of org.smartdata.metastore.MetaStoreException in project SSM by Intel-bigdata.

The class MetaStoreUtils, method initializeDataBase.

public static void initializeDataBase(Connection conn) throws MetaStoreException {
    ArrayList<String> tableList = new ArrayList<>();
    for (String table : TABLESET) {
        tableList.add("DROP TABLE IF EXISTS " + table);
    }
    String[] deleteExistingTables = tableList.toArray(new String[tableList.size()]);
    String password = StringUtil.toSHA512String(defaultPassword);
    String[] createEmptyTables = new String[] { "CREATE TABLE access_count_table (\n" + "  table_name varchar(255) PRIMARY KEY,\n" + "  start_time bigint(20) NOT NULL,\n" + "  end_time bigint(20) NOT NULL\n" + ") ;", "CREATE TABLE blank_access_count_info (\n" + "  fid bigint(20) NOT NULL,\n" + "  count bigint(20) NOT NULL\n" + ");", "CREATE TABLE cached_file (\n" + "  fid bigint(20) NOT NULL,\n" + "  path varchar(1000) NOT NULL,\n" + "  from_time bigint(20) NOT NULL,\n" + "  last_access_time bigint(20) NOT NULL,\n" + "  accessed_num int(11) NOT NULL\n" + ");", "CREATE INDEX cached_file_fid_idx ON cached_file (fid);", "CREATE INDEX cached_file_path_idx ON cached_file (path);", "CREATE TABLE ec_policy (\n" + "  id tinyint(1) NOT NULL PRIMARY KEY,\n" + "  policy_name varchar(255) NOT NULL\n" + ");", "CREATE TABLE file (\n" + "  path varchar(1000) NOT NULL,\n" + "  fid bigint(20) NOT NULL,\n" + "  length bigint(20) DEFAULT NULL,\n" + "  block_replication smallint(6) DEFAULT NULL,\n" + "  block_size bigint(20) DEFAULT NULL,\n" + "  modification_time bigint(20) DEFAULT NULL,\n" + "  access_time bigint(20) DEFAULT NULL,\n" + "  is_dir tinyint(1) DEFAULT NULL,\n" + "  sid tinyint(4) DEFAULT NULL,\n" + "  owner varchar(255) DEFAULT NULL,\n" + "  owner_group varchar(255) DEFAULT NULL,\n" + "  permission smallint(6) DEFAULT NULL,\n" + "  ec_policy_id tinyint(1) DEFAULT NULL\n" + ");", "CREATE INDEX file_fid_idx ON file (fid);", "CREATE INDEX file_path_idx ON file (path);", "CREATE TABLE storage (\n" + "  type varchar(32) PRIMARY KEY,\n" + "  time_stamp bigint(20) DEFAULT NULL,\n" + "  capacity bigint(20) NOT NULL,\n" + "  free bigint(20) NOT NULL\n" + ");", // Keep this compatible with Table 'storage'
    "CREATE TABLE storage_hist (\n" + "  type varchar(64),\n" + "  time_stamp bigint(20) DEFAULT NULL,\n" + "  capacity bigint(20) NOT NULL,\n" + "  free bigint(20) NOT NULL\n" + ");", "CREATE INDEX type_idx ON storage_hist (type);", "CREATE INDEX time_stamp_idx ON storage_hist (time_stamp);", "CREATE TABLE storage_policy (\n" + "  sid tinyint(4) PRIMARY KEY,\n" + "  policy_name varchar(64) DEFAULT NULL\n" + ");", "INSERT INTO storage_policy VALUES ('0', 'UNDEF');", "INSERT INTO storage_policy VALUES ('2', 'COLD');", "INSERT INTO storage_policy VALUES ('5', 'WARM');", "INSERT INTO storage_policy VALUES ('7', 'HOT');", "INSERT INTO storage_policy VALUES ('10', 'ONE_SSD');", "INSERT INTO storage_policy VALUES ('12', 'ALL_SSD');", "INSERT INTO storage_policy VALUES ('15', 'LAZY_PERSIST');", "CREATE TABLE xattr (\n" + "  fid bigint(20) NOT NULL,\n" + "  namespace varchar(255) NOT NULL,\n" + "  name varchar(255) NOT NULL,\n" + "  value blob NOT NULL\n" + ");", "CREATE INDEX xattr_fid_idx ON xattr (fid);", "CREATE TABLE datanode_info (\n" + "  uuid varchar(64) PRIMARY KEY,\n" + "  hostname varchar(255) NOT NULL,\n" + // DatanodeInfo
    "  rpcAddress varchar(21) DEFAULT NULL,\n" + "  cache_capacity bigint(20) DEFAULT NULL,\n" + "  cache_used bigint(20) DEFAULT NULL,\n" + "  location varchar(255) DEFAULT NULL\n" + ");", "CREATE TABLE datanode_storage_info (\n" + "  uuid varchar(64) NOT NULL,\n" + "  sid tinyint(4) NOT NULL,\n" + // storage type
    "  state tinyint(4) NOT NULL,\n" + // DatanodeStorage.state
    "  storage_id varchar(64) NOT NULL,\n" + // StorageReport ...
    "  failed tinyint(1) DEFAULT NULL,\n" + "  capacity bigint(20) DEFAULT NULL,\n" + "  dfs_used bigint(20) DEFAULT NULL,\n" + "  remaining bigint(20) DEFAULT NULL,\n" + "  block_pool_used bigint(20) DEFAULT NULL\n" + ");", "CREATE TABLE rule (\n" + "  id INTEGER PRIMARY KEY AUTOINCREMENT,\n" + "  name varchar(255) DEFAULT NULL,\n" + "  state tinyint(4) NOT NULL,\n" + "  rule_text varchar(4096) NOT NULL,\n" + "  submit_time bigint(20) NOT NULL,\n" + "  last_check_time bigint(20) DEFAULT NULL,\n" + "  checked_count int(11) NOT NULL,\n" + "  generated_cmdlets int(11) NOT NULL\n" + ");", "CREATE TABLE cmdlet (\n" + "  cid INTEGER PRIMARY KEY,\n" + "  rid INTEGER NOT NULL,\n" + "  aids varchar(4096) NOT NULL,\n" + "  state tinyint(4) NOT NULL,\n" + "  parameters varchar(4096) NOT NULL,\n" + "  generate_time bigint(20) NOT NULL,\n" + "  state_changed_time bigint(20) NOT NULL\n" + ");", "CREATE TABLE action (\n" + "  aid INTEGER PRIMARY KEY,\n" + "  cid INTEGER NOT NULL,\n" + "  action_name varchar(4096) NOT NULL,\n" + "  args text NOT NULL,\n" + "  result mediumtext NOT NULL,\n" + "  log longtext NOT NULL,\n" + "  successful tinyint(4) NOT NULL,\n" + "  create_time bigint(20) NOT NULL,\n" + "  finished tinyint(4) NOT NULL,\n" + "  finish_time bigint(20) NOT NULL,\n" + "  exec_host varchar(255),\n" + "  progress float NOT NULL\n" + ");", "CREATE TABLE file_diff (\n" + "  did INTEGER PRIMARY KEY AUTOINCREMENT,\n" + "  rid INTEGER NOT NULL,\n" + "  diff_type varchar(4096) NOT NULL,\n" + "  src varchar(1000) NOT NULL,\n" + "  parameters varchar(4096) NOT NULL,\n" + "  state tinyint(4) NOT NULL,\n" + "  create_time bigint(20) NOT NULL\n" + ");", "CREATE INDEX file_diff_idx ON file_diff (src);", "CREATE TABLE global_config (\n" + " cid INTEGER PRIMARY KEY AUTOINCREMENT,\n" + " property_name varchar(512) NOT NULL UNIQUE,\n" + " property_value varchar(3072) NOT NULL\n" + ");", "CREATE TABLE cluster_config (\n" + " cid INTEGER PRIMARY KEY AUTOINCREMENT,\n" + " node_name varchar(512) NOT NULL UNIQUE,\n" + " config_path varchar(3072) NOT NULL\n" + ");", "CREATE TABLE sys_info (\n" + "  property varchar(512) PRIMARY KEY,\n" + "  value varchar(4096) NOT NULL\n" + ");", "CREATE TABLE user_info (\n" + "  user_name varchar(20) PRIMARY KEY,\n" + "  user_password varchar(256) NOT NULL\n" + ");", "INSERT INTO user_info VALUES('admin','" + password + "');", "CREATE TABLE cluster_info (\n" + "  cid INTEGER PRIMARY KEY AUTOINCREMENT,\n" + "  name varchar(512) NOT NULL UNIQUE,\n" + "  url varchar(4096) NOT NULL,\n" + "  conf_path varchar(4096) NOT NULL,\n" + "  state varchar(64) NOT NULL,\n" + // ClusterState
    "  type varchar(64) NOT NULL\n" + // ClusterType
    ");", "CREATE TABLE backup_file (\n" + " rid bigint(20) NOT NULL,\n" + " src varchar(4096) NOT NULL,\n" + " dest varchar(4096) NOT NULL,\n" + " period bigint(20) NOT NULL\n" + ");", "CREATE INDEX backup_file_rid_idx ON backup_file (rid);", "CREATE TABLE file_state (\n" + " path varchar(512) PRIMARY KEY,\n" + " type tinyint(4) NOT NULL,\n" + " stage tinyint(4) NOT NULL\n" + ");", "CREATE TABLE compression_file (\n" + " path varchar(512) PRIMARY KEY,\n" + " buffer_size int(11) NOT NULL,\n" + " compression_impl varchar(64) NOT NULL,\n" + " original_length bigint(20) NOT NULL,\n" + " compressed_length bigint(20) NOT NULL,\n" + " originalPos text NOT NULL,\n" + " compressedPos text NOT NULL\n" + ");", "CREATE TABLE small_file (\n" + "path varchar(1000) NOT NULL PRIMARY KEY,\n" + "container_file_path varchar(4096) NOT NULL,\n" + "offset bigint(20) NOT NULL,\n" + "length bigint(20) NOT NULL\n" + ");", "CREATE TABLE whitelist (\n" + "last_fetched_dirs varchar(4096) NOT NULL\n" + ");", "INSERT INTO whitelist VALUES( '' );" };
    try {
        for (String s : deleteExistingTables) {
            // Drop table if exists
            LOG.debug(s);
            executeSql(conn, s);
        }
        // Handle mysql related features
        String url = conn.getMetaData().getURL();
        boolean mysql = url.startsWith(MetaStoreUtils.MYSQL_URL_PREFIX);
        boolean mysqlOldRelease = false;
        if (mysql) {
            // Mysql version number
            double mysqlVersion = conn.getMetaData().getDatabaseMajorVersion() + conn.getMetaData().getDatabaseMinorVersion() * 0.1;
            LOG.debug("Mysql Version Number {}", mysqlVersion);
            if (mysqlVersion < 5.5) {
                LOG.error("Required Mysql version >= 5.5, but current is " + mysqlVersion);
                throw new MetaStoreException("Mysql version " + mysqlVersion + " is below requirement!");
            } else if (mysqlVersion < 5.7 && mysqlVersion >= 5.5) {
                mysqlOldRelease = true;
            }
        }
        if (mysqlOldRelease) {
            // Enable dynamic file format to avoid index length limit 767
            executeSql(conn, "SET GLOBAL innodb_file_format=barracuda;");
            executeSql(conn, "SET GLOBAL innodb_file_per_table=true;");
            executeSql(conn, "SET GLOBAL innodb_large_prefix = ON;");
        }
        for (String s : createEmptyTables) {
            // Solve mysql and sqlite sql difference
            s = sqlCompatibility(mysql, mysqlOldRelease, s);
            LOG.debug(s);
            executeSql(conn, s);
        }
    } catch (Exception e) {
        throw new MetaStoreException(e);
    }
}
Also used: MetaStoreException (org.smartdata.metastore.MetaStoreException), ArrayList (java.util.ArrayList), InvalidPropertiesFormatException (java.util.InvalidPropertiesFormatException), SQLException (java.sql.SQLException), IOException (java.io.IOException)
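
A hedged bootstrap sketch showing how initializeDataBase might be driven: it opens an in-memory SQLite connection and lets the method drop and recreate the SSM schema shown above. The jdbc:sqlite URL (which requires the sqlite-jdbc driver on the classpath) and the org.smartdata.metastore.utils package for MetaStoreUtils are assumptions, not taken from this page.

import java.sql.Connection;
import java.sql.DriverManager;
import org.smartdata.metastore.MetaStoreException;
// Package assumed from the SSM source layout.
import org.smartdata.metastore.utils.MetaStoreUtils;

public class SchemaBootstrap {
    public static void main(String[] args) throws Exception {
        // In-memory SQLite database; assumes the sqlite-jdbc driver is on the classpath.
        try (Connection conn = DriverManager.getConnection("jdbc:sqlite::memory:")) {
            try {
                // Drops any pre-existing SSM tables, then creates the empty schema.
                MetaStoreUtils.initializeDataBase(conn);
            } catch (MetaStoreException e) {
                System.err.println("Schema initialization failed: " + e.getMessage());
            }
        }
    }
}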

Example 34 with MetaStoreException

Use of org.smartdata.metastore.MetaStoreException in project SSM by Intel-bigdata.

The class FileStatusIngester, method run.

@Override
public void run() {
    FileInfoBatch batch = IngestionTask.pollBatch();
    try {
        if (batch != null) {
            FileInfo[] statuses = batch.getFileInfos();
            if (statuses.length == batch.actualSize()) {
                this.dbAdapter.insertFiles(batch.getFileInfos());
                IngestionTask.numPersisted.addAndGet(statuses.length);
            } else {
                FileInfo[] actual = new FileInfo[batch.actualSize()];
                System.arraycopy(statuses, 0, actual, 0, batch.actualSize());
                this.dbAdapter.insertFiles(actual);
                IngestionTask.numPersisted.addAndGet(actual.length);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Consumer " + id + " " + batch.actualSize() + " files insert into table 'files'.");
            }
        }
    } catch (MetaStoreException e) {
        // TODO: handle this issue
        LOG.error("Consumer {} error", id);
    }
    if (id == 0) {
        long curr = System.currentTimeMillis();
        if (curr - lastUpdateTime >= 5000) {
            long total = IngestionTask.numDirectoriesFetched.get() + IngestionTask.numFilesFetched.get();
            if (total > 0) {
                LOG.info(String.format("%d sec, %d%% persisted into database", (curr - startTime) / 1000, IngestionTask.numPersisted.get() * 100 / total));
            } else {
                LOG.info(String.format("%d sec, 0%% persisted into database", (curr - startTime) / 1000));
            }
            lastUpdateTime = curr;
        }
    }
}
Also used: FileInfoBatch (org.smartdata.model.FileInfoBatch), MetaStoreException (org.smartdata.metastore.MetaStoreException), FileInfo (org.smartdata.model.FileInfo)
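
As an aside, the trimming branch above (allocate a smaller array, then System.arraycopy) can be written more compactly with Arrays.copyOf, which does exactly that internally. A minimal sketch, with FileInfo taken from the example's own imports:

import java.util.Arrays;
import org.smartdata.model.FileInfo;

public class BatchTrim {
    // Keep only the first actualSize entries of a partially filled batch before persisting it.
    static FileInfo[] trim(FileInfo[] statuses, int actualSize) {
        return Arrays.copyOf(statuses, actualSize);
    }
}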

Example 35 with MetaStoreException

Use of org.smartdata.metastore.MetaStoreException in project SSM by Intel-bigdata.

The class LoginRestApi, method postAddUser.

/**
 * Adds a new user. Only the admin user has permission to do this.
 *
 * @param adminPassword the admin user's password, used to authorize the request
 * @param userName the new user's name to be added
 * @param password1 the new user's password
 * @param password2 the new user's password, repeated for verification
 * @return an HTTP response: OK on success, otherwise the reason for failure
 */
@POST
@Path("adduser")
@ZeppelinApi
public Response postAddUser(@FormParam("adminPassword") String adminPassword, @FormParam("userName") String userName, @FormParam("password1") String password1, @FormParam("password2") String password2) {
    Subject currentUser = org.apache.shiro.SecurityUtils.getSubject();
    if (!password1.equals(password2)) {
        String msg = "Unmatched password typed in two times!";
        LOG.warn(msg);
        return new JsonResponse(Response.Status.BAD_REQUEST, msg, "").build();
    }
    String password = StringUtil.toSHA512String(adminPassword);
    try {
        boolean hasCredential = engine.getCmdletManager().authentic(new UserInfo(SSM_ADMIN, password));
        if (hasCredential && currentUser.isAuthenticated()) {
            engine.getCmdletManager().addNewUser(new UserInfo(userName, password1));
        } else {
            String msg = "The typed admin password is not correct!";
            LOG.warn(msg + " Failed to register new user!");
            return new JsonResponse(Response.Status.FORBIDDEN, msg, "").build();
        }
    } catch (MetaStoreException e) {
        LOG.warn(e.getMessage());
        return new JsonResponse(Response.Status.BAD_REQUEST, e.getMessage(), "").build();
    }
    return new JsonResponse(Response.Status.OK, "", "").build();
}
Also used: MetaStoreException (org.smartdata.metastore.MetaStoreException), UserInfo (org.smartdata.model.UserInfo), Subject (org.apache.shiro.subject.Subject), JsonResponse (org.apache.zeppelin.server.JsonResponse), Path (javax.ws.rs.Path), ZeppelinApi (org.apache.zeppelin.annotation.ZeppelinApi), POST (javax.ws.rs.POST)
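
A hedged client-side sketch of calling this endpoint with java.net.http (Java 11+). The host, port, and context path are assumptions, not taken from this page, and the handler additionally checks currentUser.isAuthenticated(), so a real call would also need to carry an authenticated session cookie, which is omitted here.

import java.net.URI;
import java.net.URLEncoder;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;

public class AddUserClient {
    public static void main(String[] args) throws Exception {
        // Endpoint URL is an assumption; adjust host, port, and path to your SSM web server.
        String endpoint = "http://localhost:7045/api/v1/login/adduser";
        String form = "adminPassword=" + URLEncoder.encode("adminSecret", StandardCharsets.UTF_8)
                + "&userName=" + URLEncoder.encode("alice", StandardCharsets.UTF_8)
                + "&password1=" + URLEncoder.encode("alicePwd", StandardCharsets.UTF_8)
                + "&password2=" + URLEncoder.encode("alicePwd", StandardCharsets.UTF_8);
        HttpRequest request = HttpRequest.newBuilder(URI.create(endpoint))
                .header("Content-Type", "application/x-www-form-urlencoded")
                .POST(HttpRequest.BodyPublishers.ofString(form))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // Per the handler above: 200 on success, 400 for mismatched passwords or metastore
        // errors, 403 for a wrong admin password or an unauthenticated session.
        System.out.println(response.statusCode() + ": " + response.body());
    }
}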

Aggregations

MetaStoreException (org.smartdata.metastore.MetaStoreException): 35 usages
IOException (java.io.IOException): 19 usages
FileInfo (org.smartdata.model.FileInfo): 9 usages
ArrayList (java.util.ArrayList): 8 usages
SQLException (java.sql.SQLException): 7 usages
InvalidPropertiesFormatException (java.util.InvalidPropertiesFormatException): 7 usages
HashMap (java.util.HashMap): 5 usages
Statement (java.sql.Statement): 4 usages
SmartFilePermission (org.smartdata.SmartFilePermission): 4 usages
ActionInfo (org.smartdata.model.ActionInfo): 4 usages
Gson (com.google.gson.Gson): 3 usages
ResultSet (java.sql.ResultSet): 3 usages
LinkedHashMap (java.util.LinkedHashMap): 3 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 3 usages
TypeToken (com.google.gson.reflect.TypeToken): 2 usages
File (java.io.File): 2 usages
ParseException (java.text.ParseException): 2 usages
Map (java.util.Map): 2 usages
CmdletDescriptor (org.smartdata.model.CmdletDescriptor): 2 usages