Example 11 with DBAdapter

Use of org.smartdata.server.metastore.DBAdapter in project SSM by Intel-bigdata.

The class TestNamespaceFetcher, method testNamespaceFetcher.

@Test
public void testNamespaceFetcher() throws IOException, InterruptedException, MissingEventsException, SQLException {
    final Configuration conf = new SmartConf();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.mkdir(new Path("/user"), new FsPermission("777"));
    dfs.create(new Path("/user/user1"));
    dfs.create(new Path("/user/user2"));
    dfs.mkdir(new Path("/tmp"), new FsPermission("777"));
    DFSClient client = dfs.getClient();
    // Mock the metastore so the test can verify what the fetcher inserts.
    DBAdapter adapter = mock(DBAdapter.class);
    NamespaceFetcher fetcher = new NamespaceFetcher(client, adapter, 100);
    fetcher.startFetch();
    List<String> expected = Arrays.asList("/", "/user", "/user/user1", "/user/user2", "/tmp");
    // Give the fetcher time to traverse the namespace before verifying.
    Thread.sleep(1000);
    verify(adapter).insertFiles(argThat(new FileStatusArgMatcher(expected)));
    fetcher.stop();
    cluster.shutdown();
}
Also used: Path(org.apache.hadoop.fs.Path) DFSClient(org.apache.hadoop.hdfs.DFSClient) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DBAdapter(org.smartdata.server.metastore.DBAdapter) SmartConf(org.smartdata.conf.SmartConf) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
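
The verify call above relies on a custom FileStatusArgMatcher that is not shown in this example. Below is a minimal sketch of what such a matcher could look like, assuming Mockito 1.x (where ArgumentMatcher is an abstract class matching on Object) and a hypothetical FileStatusInternal element type with a getPath() accessor; the real matcher in the SSM test sources may differ (imports: org.mockito.ArgumentMatcher, java.util.HashSet, java.util.List, java.util.Set).

// Hypothetical sketch only: accepts the insertFiles argument when the set of
// fetched paths equals the expected set, ignoring order. FileStatusInternal
// and getPath() are assumptions about the types insertFiles receives.
class FileStatusArgMatcher extends ArgumentMatcher<FileStatusInternal[]> {
    private final List<String> expected;

    FileStatusArgMatcher(List<String> expected) {
        this.expected = expected;
    }

    @Override
    public boolean matches(Object argument) {
        FileStatusInternal[] statuses = (FileStatusInternal[]) argument;
        Set<String> paths = new HashSet<>();
        for (FileStatusInternal s : statuses) {
            paths.add(s.getPath());
        }
        return paths.equals(new HashSet<>(expected));
    }
}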

Example 12 with DBAdapter

Use of org.smartdata.server.metastore.DBAdapter in project SSM by Intel-bigdata.

The class SmartServer, method runSSMDaemons.

/**
   * Bring up all the daemon threads needed.
   *
   * @throws Exception
   */
public void runSSMDaemons() throws Exception {
    String nnRpcAddr = conf.get(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY);
    if (nnRpcAddr == null) {
        throw new IOException("Can not find NameNode RPC server address. " + "Please configure it through '" + SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY + "'.");
    }
    namenodeURI = new URI(nnRpcAddr);
    this.fs = (DistributedFileSystem) FileSystem.get(namenodeURI, conf);
    outSSMIdFile = checkAndMarkRunning();
    if (outSSMIdFile == null) {
        // Exit if there is another one running.
        throw new IOException("Another SmartServer is running");
    }
    // Init and start RPC server and REST server
    rpcServer.start();
    httpServer.start();
    DBAdapter dbAdapter = getDBAdapter();
    // Two-phase bring-up: initialize every module before any module starts.
    for (ModuleSequenceProto m : modules) {
        m.init(dbAdapter);
    }
    for (ModuleSequenceProto m : modules) {
        m.start();
    }
    // TODO: kept simple for now; refine later
    ssmServiceState = SmartServiceState.ACTIVE;
}
Also used: DBAdapter(org.smartdata.server.metastore.DBAdapter) IOException(java.io.IOException) URI(java.net.URI)
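
The two loops imply a two-phase lifecycle for modules: every module is initialized with the shared DBAdapter before any module starts its threads. Judging only from the call sites above, the contract presumably looks roughly like the sketch below; the exact signatures and the stop counterpart are assumptions, not the real SSM definition:

// Hypothetical sketch of the module lifecycle contract inferred from runSSMDaemons.
public interface ModuleSequenceProto {
    // Phase 1: wire up metastore access; no daemon threads run yet.
    boolean init(DBAdapter dbAdapter) throws IOException;

    // Phase 2: spawn the module's daemon threads.
    boolean start() throws IOException;

    // Assumed shutdown counterpart.
    void stop() throws IOException;
}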

Example 13 with DBAdapter

Use of org.smartdata.server.metastore.DBAdapter in project SSM by Intel-bigdata.

The class SmartServer, method getDBAdapter.

public DBAdapter getDBAdapter() throws Exception {
    String fileName = "druid.xml";
    URL urlPoolConf = getClass().getResource(fileName);
    if (urlPoolConf != null) {
        LOG.info("Using pool configure file: " + urlPoolConf.getFile());
        Properties p = new Properties();
        p.loadFromXML(getClass().getResourceAsStream(fileName));
        return new DBAdapter(new DruidPool(p));
    } else {
        LOG.info(fileName + " NOT found.");
    }
    // TODO: kept for testing; remove later.
    Connection conn = getDBConnection();
    return new DBAdapter(conn);
}
Also used: DBAdapter(org.smartdata.server.metastore.DBAdapter) Connection(java.sql.Connection) Properties(java.util.Properties) URL(java.net.URL) DruidPool(org.smartdata.server.metastore.DruidPool)
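
Properties.loadFromXML expects the standard java.util.Properties XML format. As a rough illustration of what a druid.xml on the classpath might contain, the sketch below writes an equivalent file with storeToXML; the keys follow Alibaba Druid's usual DataSource property names (driverClassName, url, initialSize, maxActive) and the values are placeholders, not settings taken from the SSM project (imports: java.io.FileOutputStream, java.io.OutputStream, java.util.Properties).

// Hypothetical sketch: emit a druid.xml in the format loadFromXML expects.
Properties p = new Properties();
p.setProperty("driverClassName", "org.sqlite.JDBC");
// Placeholder location, not an SSM default.
p.setProperty("url", "jdbc:sqlite:/tmp/ssm-metastore.db");
p.setProperty("initialSize", "4");
p.setProperty("maxActive", "16");
try (OutputStream out = new FileOutputStream("druid.xml")) {
    p.storeToXML(out, "SSM metastore pool configuration");
}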

Example 14 with DBAdapter

Use of org.smartdata.server.metastore.DBAdapter in project SSM by Intel-bigdata.

The class TestRulesTable, method testRuleInsert.

/**
   * Insert rules into the table and retrieve them back.
   * @throws Exception
   */
@Test
public void testRuleInsert() throws Exception {
    String dbFile = TestDBUtil.getUniqueDBFilePath();
    Connection conn = null;
    try {
        conn = Util.createSqliteConnection(dbFile);
        Util.initializeDataBase(conn);
        String rule = "file : accessCountX(10m) > 20 \n\n" + "and length() > 3 | cachefile";
        long submitTime = System.currentTimeMillis();
        RuleInfo info1 = new RuleInfo(0, submitTime, rule, RuleState.ACTIVE, 0, 0, 0);
        DBAdapter adapter = new DBAdapter(conn);
        Assert.assertTrue(adapter.insertNewRule(info1));
        RuleInfo info1_1 = adapter.getRuleInfo(info1.getId());
        Assert.assertTrue(info1.equals(info1_1));
        RuleInfo info2 = new RuleInfo(0, submitTime, rule, RuleState.ACTIVE, 0, 0, 0);
        Assert.assertTrue(adapter.insertNewRule(info2));
        RuleInfo info2_1 = adapter.getRuleInfo(info2.getId());
        // The two rules are identical except for the ids assigned at insert time.
        Assert.assertFalse(info1_1.equals(info2_1));
        List<RuleInfo> infos = adapter.getRuleInfo();
        // Use a JUnit assertion: plain `assert` is a no-op unless the JVM runs with -ea.
        Assert.assertEquals(2, infos.size());
    } finally {
        if (conn != null) {
            conn.close();
        }
        File file = new File(dbFile);
        file.deleteOnExit();
    }
}
Also used: DBAdapter(org.smartdata.server.metastore.DBAdapter) Connection(java.sql.Connection) RuleInfo(org.smartdata.common.rule.RuleInfo) File(java.io.File) Test(org.junit.Test)
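
The same round trip can be written more compactly with try-with-resources, which closes the connection even when an assertion fails. A sketch reusing the helpers shown above (it assumes, as the original assertions imply, that insertNewRule assigns the generated id back onto the RuleInfo):

String dbFile = TestDBUtil.getUniqueDBFilePath();
try (Connection conn = Util.createSqliteConnection(dbFile)) {
    Util.initializeDataBase(conn);
    DBAdapter adapter = new DBAdapter(conn);
    RuleInfo info = new RuleInfo(0, System.currentTimeMillis(),
        "file : accessCount(10m) > 20 | cachefile", RuleState.ACTIVE, 0, 0, 0);
    Assert.assertTrue(adapter.insertNewRule(info));
    // Retrieval by the assigned id should yield an equal RuleInfo.
    Assert.assertEquals(info, adapter.getRuleInfo(info.getId()));
} finally {
    new File(dbFile).deleteOnExit();
}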

Example 15 with DBAdapter

Use of org.smartdata.server.metastore.DBAdapter in project SSM by Intel-bigdata.

The class TestSqliteDB, method testSqliteDBBlankStatements.

@Test
public void testSqliteDBBlankStatements() throws Exception {
    String dbFile = TestDBUtil.getUniqueDBFilePath();
    Connection conn = null;
    try {
        conn = Util.createSqliteConnection(dbFile);
        Util.initializeDataBase(conn);
        DBAdapter adapter = new DBAdapter(conn);
        // Seed one rule row; the VIR_ACC_CNT_TAB_1_* table name below presumably refers to rule id 1.
        String[] presqls = new String[] { "INSERT INTO rules (state, rule_text, submit_time, checked_count, " + "commands_generated) VALUES (0, 'file: every 1s \n" + " | " + "accessCount(5s) > 3 | cachefile', 1494903787619, 0, 0);" };
        for (int i = 0; i < presqls.length; i++) {
            adapter.execute(presqls[i]);
        }
        String[] sqls = new String[] { "DROP TABLE IF EXISTS 'VIR_ACC_CNT_TAB_1_accessCount_5000';", "CREATE TABLE 'VIR_ACC_CNT_TAB_1_accessCount_5000' " + "AS SELECT * FROM 'blank_access_count_info';", "SELECT fid from 'VIR_ACC_CNT_TAB_1_accessCount_5000';", "SELECT path FROM files WHERE (fid IN (SELECT fid FROM " + "'VIR_ACC_CNT_TAB_1_accessCount_5000' WHERE ((count > 3))));" };
        // Cycle through the drop/create/select statements three times to check they stay valid on reuse.
        for (int i = 0; i < sqls.length * 3; i++) {
            adapter.execute(sqls[i % sqls.length]);
        }
    } finally {
        if (conn != null) {
            conn.close();
        }
        File file = new File(dbFile);
        file.deleteOnExit();
    }
}
Also used: DBAdapter(org.smartdata.server.metastore.DBAdapter) Connection(java.sql.Connection) File(java.io.File) Test(org.junit.Test)
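
Since 'blank_access_count_info' acts as an empty template, the CREATE TABLE ... AS SELECT above should always produce a zero-row table. A fragment that could be dropped into the try block to check this with plain JDBC (java.sql.Statement and java.sql.ResultSet; not part of the original test):

// Hypothetical sanity check over the same `conn` used above.
try (Statement st = conn.createStatement();
     ResultSet rs = st.executeQuery(
         "SELECT COUNT(*) FROM 'VIR_ACC_CNT_TAB_1_accessCount_5000';")) {
    rs.next();
    // Cloned from the empty template, so the virtual table should hold no rows.
    Assert.assertEquals(0, rs.getInt(1));
}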

Aggregations

DBAdapter (org.smartdata.server.metastore.DBAdapter): 17
Test (org.junit.Test): 11
Connection (java.sql.Connection): 5
DBTest (org.smartdata.server.metastore.DBTest): 4
File (java.io.File): 3
IOException (java.io.IOException): 3
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3
IDataSet (org.dbunit.dataset.IDataSet): 3
XmlDataSet (org.dbunit.dataset.xml.XmlDataSet): 3
InputStream (java.io.InputStream): 2
ArrayList (java.util.ArrayList): 2
Properties (java.util.Properties): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
Path (org.apache.hadoop.fs.Path): 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2
Event (org.apache.hadoop.hdfs.inotify.Event): 2
ITable (org.dbunit.dataset.ITable): 2
SortedTable (org.dbunit.dataset.SortedTable): 2
CommandInfo (org.smartdata.common.command.CommandInfo): 2