Search in sources :

Example 6 with SmartConf

Use of org.smartdata.conf.SmartConf in the project SSM by Intel-bigdata.

From the class TestEmptyMiniSmartCluster, method setUp.

@Before
public void setUp() throws Exception {
    // Fresh SSM configuration, adjusted by the test-specific initConf hook.
    conf = new SmartConf();
    initConf(conf);
    // Mini HDFS cluster: 3 datanodes, each exposing one DISK, SSD and ARCHIVE volume.
    cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)
            .storagesPerDatanode(3)
            .storageTypes(new StorageType[] { StorageType.DISK, StorageType.SSD, StorageType.ARCHIVE })
            .build();
    // Point both the HTTP and the SSM RPC-server keys at the first namenode URI.
    List<URI> nnUris = new ArrayList<>(DFSUtil.getInternalNsRpcUris(conf));
    String firstNamenode = nnUris.get(0).toString();
    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, firstNamenode);
    conf.set(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY, firstNamenode);
    // Back the metastore with a unique, empty on-disk SQLite database.
    dbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
    dbUrl = Util.SQLITE_URL_PREFIX + dbFile;
    conf.set(SmartConfKeys.DFS_SSM_DEFAULT_DB_URL_KEY, dbUrl);
    // The RPC server is started inside SmartServer.createSSM.
    ssm = SmartServer.createSSM(null, conf);
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ArrayList(java.util.ArrayList) SmartConf(org.smartdata.conf.SmartConf) URI(java.net.URI) Before(org.junit.Before)

Example 7 with SmartConf

Use of org.smartdata.conf.SmartConf in the project SSM by Intel-bigdata.

From the class TestSmartAdmin, method test.

@Test
public void test() throws Exception {
    final SmartConf conf = new SmartConf();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    // dfs not used , but datanode.ReplicaNotFoundException throws without dfs
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    List<URI> uriList = new ArrayList<>(namenodes);
    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, uriList.get(0).toString());
    conf.set(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY, uriList.get(0).toString());
    // Set db used
    String dbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
    String dbUrl = Util.SQLITE_URL_PREFIX + dbFile;
    conf.set(SmartConfKeys.DFS_SSM_DEFAULT_DB_URL_KEY, dbUrl);
    // rpcServer start in SmartServer
    SmartServer server = SmartServer.createSSM(null, conf);
    SmartAdmin ssmClient = new SmartAdmin(conf);
    while (true) {
        //test getServiceStatus
        String state = ssmClient.getServiceState().getName();
        if ("ACTIVE".equals(state)) {
            break;
        }
        Thread.sleep(1000);
    }
    //test listRulesInfo and submitRule
    List<RuleInfo> ruleInfos = ssmClient.listRulesInfo();
    int ruleCounts0 = ruleInfos.size();
    long ruleId = ssmClient.submitRule("file: every 5s | path matches \"/foo*\"| cachefile", RuleState.DRYRUN);
    ruleInfos = ssmClient.listRulesInfo();
    int ruleCounts1 = ruleInfos.size();
    assertEquals(1, ruleCounts1 - ruleCounts0);
    //test checkRule
    //if success ,no Exception throw
    ssmClient.checkRule("file: every 5s | path matches \"/foo*\"| cachefile");
    boolean caughtException = false;
    try {
        ssmClient.checkRule("file.path");
    } catch (IOException e) {
        caughtException = true;
    }
    assertTrue(caughtException);
    //test getRuleInfo
    RuleInfo ruleInfo = ssmClient.getRuleInfo(ruleId);
    assertNotEquals(null, ruleInfo);
    //test disableRule
    ssmClient.disableRule(ruleId, true);
    assertEquals(RuleState.DISABLED, ssmClient.getRuleInfo(ruleId).getState());
    //test activateRule
    ssmClient.activateRule(ruleId);
    assertEquals(RuleState.ACTIVE, ssmClient.getRuleInfo(ruleId).getState());
    //test deleteRule
    ssmClient.deleteRule(ruleId, true);
    assertEquals(RuleState.DELETED, ssmClient.getRuleInfo(ruleId).getState());
    //test single SSM
    caughtException = false;
    try {
        conf.set(SmartConfKeys.DFS_SSM_RPC_ADDRESS_KEY, "localhost:8043");
        SmartServer.createSSM(null, conf);
    } catch (IOException e) {
        assertEquals("java.io.IOException: Another SmartServer is running", e.toString());
        caughtException = true;
    }
    assertTrue(caughtException);
    //test client close
    caughtException = false;
    ssmClient.close();
    try {
        ssmClient.getRuleInfo(ruleId);
    } catch (IOException e) {
        caughtException = true;
    }
    assertEquals(true, caughtException);
    server.shutdown();
    cluster.shutdown();
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) SmartAdmin(org.smartdata.admin.SmartAdmin) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) SmartConf(org.smartdata.conf.SmartConf) RuleInfo(org.smartdata.common.rule.RuleInfo) Test(org.junit.Test)

Example 8 with SmartConf

Use of org.smartdata.conf.SmartConf in the project SSM by Intel-bigdata.

From the class TestSmartServerCli, method testConfNameNodeRPCAddr.

@Test
public void testConfNameNodeRPCAddr() throws Exception {
    SmartConf conf = new SmartConf();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
    List<URI> uriList = new ArrayList<>(namenodes);
    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, uriList.get(0).toString());
    // Set db used
    String dbFile = TestDBUtil.getUniqueEmptySqliteDBFile();
    String dbUrl = Util.SQLITE_URL_PREFIX + dbFile;
    conf.set(SmartConfKeys.DFS_SSM_DEFAULT_DB_URL_KEY, dbUrl);
    // rpcServer start in SmartServer
    try {
        SmartServer.createSSM(null, conf);
        Assert.fail("Should not work without specifying " + SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY);
    } catch (Exception e) {
        Assert.assertTrue(e.getMessage().contains(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY));
    }
    conf.set(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY, uriList.get(0).toString());
    String[] args = new String[] { "-D", SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY + "=" + uriList.get(0).toString() };
    SmartServer s = SmartServer.createSSM(args, conf);
    s.shutdown();
    String[] argsHelp = new String[] { "-h" };
    s = SmartServer.createSSM(argsHelp, conf);
    Assert.assertTrue(s == null);
    cluster.shutdown();
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ArrayList(java.util.ArrayList) SmartConf(org.smartdata.conf.SmartConf) URI(java.net.URI) Test(org.junit.Test)

Example 9 with SmartConf

Use of org.smartdata.conf.SmartConf in the project SSM by Intel-bigdata.

From the class RuleCommands, method newSSMClient.

/**
 * Builds a SmartAdmin client for the given command, falling back to a
 * default {@link SmartConf} when the command carries no configuration.
 */
private static SmartAdmin newSSMClient(Command cmd) throws IOException {
    Configuration conf = cmd.getConf();
    if (conf == null) {
        conf = new SmartConf();
    }
    return new SmartAdmin(conf);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) SmartAdmin(org.smartdata.admin.SmartAdmin) SmartConf(org.smartdata.conf.SmartConf)

Aggregations

SmartConf (org.smartdata.conf.SmartConf)9 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)6 URI (java.net.URI)4 ArrayList (java.util.ArrayList)4 Configuration (org.apache.hadoop.conf.Configuration)3 Before (org.junit.Before)3 Test (org.junit.Test)3 IOException (java.io.IOException)2 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)2 SmartAdmin (org.smartdata.admin.SmartAdmin)2 URISyntaxException (java.net.URISyntaxException)1 ParseException (org.apache.commons.cli.ParseException)1 Path (org.apache.hadoop.fs.Path)1 FsPermission (org.apache.hadoop.fs.permission.FsPermission)1 DFSClient (org.apache.hadoop.hdfs.DFSClient)1 AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException)1 RemoteException (org.apache.hadoop.ipc.RemoteException)1 SmartContext (org.smartdata.SmartContext)1 RuleInfo (org.smartdata.common.rule.RuleInfo)1 DBAdapter (org.smartdata.server.metastore.DBAdapter)1