Search in sources :

Example 16 with DBAdapter

Use of org.smartdata.server.metastore.DBAdapter in project SSM by Intel-bigdata.

From the class TestInotifyFetcher, method testFetcher.

@Test(timeout = 60000)
public void testFetcher() throws IOException, InterruptedException {
    // Spin up a 2-datanode mini cluster, perform a fixed sequence of HDFS
    // namespace operations, and verify that InotifyEventFetcher delivers the
    // corresponding inotify events, in order, to the test applier.
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // so that we can get an atime change
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.numDataNodes(2);
    MiniDFSCluster cluster = builder.build();
    try {
        cluster.waitActive();
        DFSClient client = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(), conf);
        FileSystem fs = cluster.getFileSystem(0);
        // Seed files the operation sequence below will rename/append/delete.
        DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/truncate_file"), BLOCK_SIZE * 2, (short) 1, 0L);
        fs.mkdirs(new Path("/tmp"), new FsPermission("777"));
        DBAdapter adapter = mock(DBAdapter.class);
        EventApplierForTest applierForTest = new EventApplierForTest(adapter, client);
        final InotifyEventFetcher fetcher = new InotifyEventFetcher(client, adapter, Executors.newScheduledThreadPool(2), applierForTest);
        // Run the fetcher in the background; fetcher.start() blocks until stopped.
        Thread thread = new Thread() {

            @Override
            public void run() {
                try {
                    fetcher.start();
                } catch (IOException e) {
                    e.printStackTrace();
                } catch (InterruptedException e) {
                    // Restore the interrupt flag instead of silently dropping it.
                    Thread.currentThread().interrupt();
                    e.printStackTrace();
                }
            }
        };
        thread.start();
        // Give the fetcher time to attach to the inotify stream before we
        // start generating events.
        Thread.sleep(2000);
        /**
       * Code copy from {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream}
       */
        // RenameOp -> RenameEvent
        client.rename("/file", "/file4", null);
        // RenameOldOp -> RenameEvent
        client.rename("/file4", "/file2");
        // DeleteOp, AddOp -> UnlinkEvent, CreateEvent
        OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // AddOp -> AppendEvent
        os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND), null, null);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // so that the atime will get updated on the next line
        Thread.sleep(10);
        // TimesOp -> MetadataUpdateEvent
        client.open("/file2").read(new byte[1]);
        // SetReplicationOp -> MetadataUpdateEvent
        client.setReplication("/file2", (short) 1);
        // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
        client.concat("/file2", new String[] { "/file3" });
        // DeleteOp -> UnlinkEvent
        client.delete("/file2", false);
        // MkdirOp -> CreateEvent
        client.mkdirs("/dir", null, false);
        // SetPermissionsOp -> MetadataUpdateEvent
        client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
        // SetOwnerOp -> MetadataUpdateEvent
        Thread.sleep(2000);
        client.setOwner("/dir", "username", "groupname");
        // SymlinkOp -> CreateEvent
        client.createSymlink("/dir", "/dir2", false);
        client.setXAttr("/file5", "user.field", "value".getBytes(), EnumSet.of(// SetXAttrOp -> MetadataUpdateEvent
        XAttrSetFlag.CREATE));
        // RemoveXAttrOp -> MetadataUpdateEvent
        client.removeXAttr("/file5", "user.field");
        // SetAclOp -> MetadataUpdateEvent
        client.setAcl("/file5", AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));
        // SetAclOp -> MetadataUpdateEvent
        client.removeAcl("/file5");
        // RenameOldOp -> RenameEvent
        client.rename("/file5", "/dir");
        //TruncateOp -> TruncateEvent
        client.truncate("/truncate_file", BLOCK_SIZE);
        // Poll until all 21 expected events have been applied; the @Test
        // timeout (60s) bounds this loop if events never arrive.
        while (applierForTest.getEvents().size() != 21) {
            Thread.sleep(100);
        }
        /**
       * Refer {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream} for more detail
       */
        // Verify event ordering. assertEquals (rather than assertTrue on ==)
        // reports the actual event type on failure.
        List<Event> events = applierForTest.getEvents();
        Assert.assertEquals(Event.EventType.RENAME, events.get(0).getEventType());
        Assert.assertEquals(Event.EventType.RENAME, events.get(1).getEventType());
        Assert.assertEquals(Event.EventType.CREATE, events.get(2).getEventType());
        Assert.assertEquals(Event.EventType.CLOSE, events.get(3).getEventType());
        Assert.assertEquals(Event.EventType.APPEND, events.get(4).getEventType());
        Assert.assertEquals(Event.EventType.CLOSE, events.get(5).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(6).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(7).getEventType());
        Assert.assertEquals(Event.EventType.APPEND, events.get(8).getEventType());
        Assert.assertEquals(Event.EventType.UNLINK, events.get(9).getEventType());
        Assert.assertEquals(Event.EventType.CLOSE, events.get(10).getEventType());
        Assert.assertEquals(Event.EventType.UNLINK, events.get(11).getEventType());
        Assert.assertEquals(Event.EventType.CREATE, events.get(12).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(13).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(14).getEventType());
        Assert.assertEquals(Event.EventType.CREATE, events.get(15).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(16).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(17).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(18).getEventType());
        Assert.assertEquals(Event.EventType.METADATA, events.get(19).getEventType());
        Assert.assertEquals(Event.EventType.RENAME, events.get(20).getEventType());
        //      Assert.assertTrue(events.get(21).getEventType() == Event.EventType.TRUNCATE);
        fetcher.stop();
    } finally {
        cluster.shutdown();
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) OutputStream(java.io.OutputStream) IOException(java.io.IOException) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DBAdapter(org.smartdata.server.metastore.DBAdapter) FileSystem(org.apache.hadoop.fs.FileSystem) Event(org.apache.hadoop.hdfs.inotify.Event) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 17 with DBAdapter

Use of org.smartdata.server.metastore.DBAdapter in project SSM by Intel-bigdata.

From the class TestRuleManager, method init.

@Before
public void init() throws Exception {
    // Back each test with a fresh, fully initialized SQLite database.
    String dbPath = TestDBUtil.getUniqueDBFilePath();
    try {
        Connection connection = Util.createSqliteConnection(dbPath);
        Util.initializeDataBase(connection);
        dbAdapter = new DBAdapter(connection);
        // TODO: to be fixed
        ruleManager = new RuleManager(null, null, dbAdapter);
    } finally {
        // Mark the temporary database file for removal when the JVM exits,
        // whether or not setup succeeded.
        new File(dbPath).deleteOnExit();
    }
}
Also used : DBAdapter(org.smartdata.server.metastore.DBAdapter) Connection(java.sql.Connection) File(java.io.File) Before(org.junit.Before)

Aggregations

DBAdapter (org.smartdata.server.metastore.DBAdapter)17 Test (org.junit.Test)11 Connection (java.sql.Connection)5 DBTest (org.smartdata.server.metastore.DBTest)4 File (java.io.File)3 IOException (java.io.IOException)3 FsPermission (org.apache.hadoop.fs.permission.FsPermission)3 DFSClient (org.apache.hadoop.hdfs.DFSClient)3 IDataSet (org.dbunit.dataset.IDataSet)3 XmlDataSet (org.dbunit.dataset.xml.XmlDataSet)3 InputStream (java.io.InputStream)2 ArrayList (java.util.ArrayList)2 Properties (java.util.Properties)2 Configuration (org.apache.hadoop.conf.Configuration)2 Path (org.apache.hadoop.fs.Path)2 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)2 Event (org.apache.hadoop.hdfs.inotify.Event)2 ITable (org.dbunit.dataset.ITable)2 SortedTable (org.dbunit.dataset.SortedTable)2 CommandInfo (org.smartdata.common.command.CommandInfo)2