
Example 1 with MetaStore

Use of org.smartdata.metastore.MetaStore in project SSM by Intel-bigdata.

Class TestInotifyFetcher, method testFetcher.

@Test(timeout = 60000)
public void testFetcher() throws Exception {
    initDao();
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // so that we can get an atime change
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
    MiniDFSCluster cluster = MiniClusterFactory.get().create(2, conf);
    try {
        cluster.waitActive();
        DFSClient client = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(), conf);
        MetaStore metaStore = new MetaStore(druidPool);
        EventApplierForTest applierForTest = new EventApplierForTest(metaStore, client);
        final InotifyEventFetcher fetcher = new InotifyEventFetcher(client, metaStore, Executors.newScheduledThreadPool(2), applierForTest, new Callable() {

            @Override
            public Object call() throws Exception {
                // Do nothing
                return null;
            }
        });
        Assert.assertFalse(InotifyEventFetcher.canContinueFromLastTxid(client, 1024L));
        FileSystem fs = cluster.getFileSystem(0);
        DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/truncate_file"), BLOCK_SIZE * 2, (short) 1, 0L);
        fs.mkdirs(new Path("/tmp"), new FsPermission("777"));
        Thread thread = new Thread() {

            public void run() {
                try {
                    fetcher.start();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        };
        thread.start();
        Thread.sleep(2000);
        /**
         * Code copied from {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream}
         */
        // RenameOp -> RenameEvent
        client.rename("/file", "/file4", null);
        // RenameOldOp -> RenameEvent
        client.rename("/file4", "/file2");
        // DeleteOp, AddOp -> UnlinkEvent, CreateEvent
        OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // AddOp -> AppendEvent
        os = append(client, "/file2", BLOCK_SIZE);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // so that the atime will get updated on the next line
        Thread.sleep(10);
        // TimesOp -> MetadataUpdateEvent
        client.open("/file2").read(new byte[1]);
        // SetReplicationOp -> MetadataUpdateEvent
        client.setReplication("/file2", (short) 1);
        // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
        client.concat("/file2", new String[] { "/file3" });
        // DeleteOp -> UnlinkEvent
        client.delete("/file2", false);
        // MkdirOp -> CreateEvent
        client.mkdirs("/dir", null, false);
        // SetPermissionsOp -> MetadataUpdateEvent
        client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
        Thread.sleep(2000);
        // SetOwnerOp -> MetadataUpdateEvent
        client.setOwner("/dir", "username", "groupname");
        // SymlinkOp -> CreateEvent
        client.createSymlink("/dir", "/dir2", false);
        // SetXAttrOp -> MetadataUpdateEvent
        client.setXAttr("/file5", "user.field", "value".getBytes(), EnumSet.of(XAttrSetFlag.CREATE));
        // RemoveXAttrOp -> MetadataUpdateEvent
        client.removeXAttr("/file5", "user.field");
        // SetAclOp -> MetadataUpdateEvent
        client.setAcl("/file5", AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));
        // SetAclOp -> MetadataUpdateEvent
        client.removeAcl("/file5");
        // RenameOldOp -> RenameEvent
        client.rename("/file5", "/dir");
        while (applierForTest.getEvents().size() != 21) {
            Thread.sleep(100);
        }
        /**
         * Refer to {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream} for more detail
         */
        List<Event> events = applierForTest.getEvents();
        Assert.assertTrue(events.get(0).getEventType() == Event.EventType.RENAME);
        Assert.assertTrue(events.get(1).getEventType() == Event.EventType.RENAME);
        Assert.assertTrue(events.get(2).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(3).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(4).getEventType() == Event.EventType.APPEND);
        Assert.assertTrue(events.get(5).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(6).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(7).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(8).getEventType() == Event.EventType.APPEND);
        Assert.assertTrue(events.get(9).getEventType() == Event.EventType.UNLINK);
        Assert.assertTrue(events.get(10).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(11).getEventType() == Event.EventType.UNLINK);
        Assert.assertTrue(events.get(12).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(13).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(14).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(15).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(16).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(17).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(18).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(19).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(20).getEventType() == Event.EventType.RENAME);
        // Assert.assertTrue(events.get(21).getEventType() == Event.EventType.TRUNCATE);
        fetcher.stop();
        Assert.assertTrue(metaStore.containSystemInfo(SmartConstants.SMART_HDFS_LAST_INOTIFY_TXID));
        Assert.assertTrue(InotifyEventFetcher.canContinueFromLastTxid(client, Long.parseLong(metaStore.getSystemInfoByProperty(SmartConstants.SMART_HDFS_LAST_INOTIFY_TXID).getValue())));
    } finally {
        cluster.shutdown();
        closeDao();
    }
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) OutputStream(java.io.OutputStream) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) IOException(java.io.IOException) Callable(java.util.concurrent.Callable) MetaStore(org.smartdata.metastore.MetaStore) FileSystem(org.apache.hadoop.fs.FileSystem) Event(org.apache.hadoop.hdfs.inotify.Event) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
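
The loop above that waits for applierForTest.getEvents() to reach 21 entries has no bound of its own; if an event is dropped, the test spins until the surrounding @Test(timeout = 60000) fires with no message about what was missing. A bounded wait helper is a small, self-contained improvement. The sketch below is illustrative only (the helper name and timeout are not part of the SSM test); it relies only on the EventApplierForTest.getEvents() accessor already used above.

private static void waitForEventCount(EventApplierForTest applier, int expected,
        long timeoutMs) throws InterruptedException {
    // Poll until the applier has collected the expected number of events,
    // failing with a descriptive message instead of spinning until the
    // test-level timeout kills the run.
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (applier.getEvents().size() != expected) {
        if (System.currentTimeMillis() > deadline) {
            Assert.fail("Expected " + expected + " inotify events, got "
                + applier.getEvents().size());
        }
        Thread.sleep(100);
    }
}

The busy-wait in the test would then become a single call such as waitForEventCount(applierForTest, 21, 30000).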

Example 2 with MetaStore

Use of org.smartdata.metastore.MetaStore in project SSM by Intel-bigdata.

Class TestNamespaceFetcher, method init.

NamespaceFetcher init(MiniDFSCluster cluster, SmartConf conf) throws IOException, InterruptedException, MissingEventsException, MetaStoreException {
    final DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.mkdir(new Path("/user"), new FsPermission("777"));
    dfs.create(new Path("/user/user1"));
    dfs.create(new Path("/user/user2"));
    dfs.mkdir(new Path("/tmp"), new FsPermission("777"));
    DFSClient client = dfs.getClient();
    MetaStore adapter = Mockito.mock(MetaStore.class);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocationOnMock) {
            try {
                Object[] objects = invocationOnMock.getArguments();
                for (FileInfo fileInfo : (FileInfo[]) objects[0]) {
                    pathesInDB.add(fileInfo.getPath());
                }
            } catch (Throwable t) {
                t.printStackTrace();
            }
            return null;
        }
    }).when(adapter).insertFiles(any(FileInfo[].class));
    NamespaceFetcher fetcher;
    if (conf != null) {
        fetcher = new NamespaceFetcher(client, adapter, 100, conf);
    } else {
        fetcher = new NamespaceFetcher(client, adapter, 100);
    }
    return fetcher;
}
Also used : Path(org.apache.hadoop.fs.Path) DFSClient(org.apache.hadoop.hdfs.DFSClient) MetaStore(org.smartdata.metastore.MetaStore) FileInfo(org.smartdata.model.FileInfo) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FsPermission(org.apache.hadoop.fs.permission.FsPermission) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem)
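
The doAnswer callback above records each inserted path as NamespaceFetcher calls insertFiles on the mocked MetaStore, which suits assertions that poll pathesInDB while fetching is still running. If checking the arguments after the fetch completes is sufficient, Mockito's ArgumentCaptor (org.mockito.ArgumentCaptor) is a more compact alternative; a minimal sketch, assuming the same insertFiles(FileInfo[]) signature and that the fetcher has already been run:

// Capture every FileInfo[] batch passed to the mocked MetaStore and
// collect the paths from each batch after fetching has finished.
ArgumentCaptor<FileInfo[]> captor = ArgumentCaptor.forClass(FileInfo[].class);
Mockito.verify(adapter, Mockito.atLeastOnce()).insertFiles(captor.capture());
List<String> insertedPaths = new ArrayList<>();
for (FileInfo[] batch : captor.getAllValues()) {
    for (FileInfo fileInfo : batch) {
        insertedPaths.add(fileInfo.getPath());
    }
}

The trade-off is timing: the doAnswer approach collects paths as they arrive, while the captor only inspects calls that have already happened.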

Example 3 with MetaStore

Use of org.smartdata.metastore.MetaStore in project SSM by Intel-bigdata.

Class TestDataNodeInfoFetcher, method init.

@Before
public void init() throws Exception {
    initDao();
    conf = new SmartConf();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    dfsClient = dfs.getClient();
    scheduledExecutorService = Executors.newScheduledThreadPool(2);
    metaStore = new MetaStore(druidPool);
    fetcher = new DataNodeInfoFetcher(dfsClient, metaStore, scheduledExecutorService, conf);
}
Also used : MetaStore(org.smartdata.metastore.MetaStore) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) SmartConf(org.smartdata.conf.SmartConf) Before(org.junit.Before)
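
The @Before above allocates a MiniDFSCluster, a scheduled executor, and a DAO-backed MetaStore, so a matching @After should release them. The teardown is not part of this excerpt; a minimal sketch, reusing the closeDao() counterpart to initDao() seen in Example 1 and releasing resources in roughly reverse order of allocation:

@After
public void shutdown() throws Exception {
    // Stop the scheduled executor before tearing down the cluster it polls.
    if (scheduledExecutorService != null) {
        scheduledExecutorService.shutdownNow();
    }
    if (cluster != null) {
        cluster.shutdown();
    }
    // Close the DAO/druid pool opened by initDao().
    closeDao();
}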

Example 4 with MetaStore

Use of org.smartdata.metastore.MetaStore in project SSM by Intel-bigdata.

Class TestCachedListFetcher, method init.

@Before
public void init() throws Exception {
    initDao();
    SmartConf conf = new SmartConf();
    initConf(conf);
    fid = 0L;
    cluster = MiniClusterFactory.get().create(5, conf);
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    dfsClient = dfs.getClient();
    smartContext = new SmartContext(conf);
    metaStore = new MetaStore(druidPool);
    cachedListFetcher = new CachedListFetcher(600L, dfsClient, metaStore);
}
Also used : MetaStore(org.smartdata.metastore.MetaStore) SmartContext(org.smartdata.SmartContext) SmartConf(org.smartdata.conf.SmartConf) Before(org.junit.Before)

Example 5 with MetaStore

Use of org.smartdata.metastore.MetaStore in project SSM by Intel-bigdata.

Class TestAlluxioNamespaceFetcher, method setUp.

@Before
public void setUp() throws Exception {
    mLocalAlluxioCluster = new LocalAlluxioCluster(2);
    mLocalAlluxioCluster.initConfiguration();
    Configuration.set(PropertyKey.WEB_RESOURCES, PathUtils.concatPath(System.getProperty("user.dir"), "src/test/webapp"));
    mLocalAlluxioCluster.start();
    fs = mLocalAlluxioCluster.getClient();
    initDao();
    metaStore = new MetaStore(druidPool);
}
Also used : MetaStore(org.smartdata.metastore.MetaStore) LocalAlluxioCluster(alluxio.master.LocalAlluxioCluster) Before(org.junit.Before)
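
setUp above starts an embedded LocalAlluxioCluster and opens the DAO behind the MetaStore, so the test needs a matching teardown. A minimal sketch, assuming LocalAlluxioCluster exposes a stop() method and reusing the closeDao() counterpart to initDao() seen in the other examples:

@After
public void tearDown() throws Exception {
    // Stop the embedded Alluxio cluster started in setUp().
    if (mLocalAlluxioCluster != null) {
        mLocalAlluxioCluster.stop();
    }
    // Close the DAO/druid pool opened by initDao().
    closeDao();
}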

Aggregations

MetaStore (org.smartdata.metastore.MetaStore): 29
Test (org.junit.Test): 17
Before (org.junit.Before): 7
DBTest (org.smartdata.metastore.DBTest): 7
Path (org.apache.hadoop.fs.Path): 5
ActionInfo (org.smartdata.model.ActionInfo): 5
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 4
SmartConf (org.smartdata.conf.SmartConf): 4
TimeGranularity (org.smartdata.metastore.utils.TimeGranularity): 4
IOException (java.io.IOException): 3
HashMap (java.util.HashMap): 3
IDataSet (org.dbunit.dataset.IDataSet): 3
XmlDataSet (org.dbunit.dataset.xml.XmlDataSet): 3
SmartAdmin (org.smartdata.admin.SmartAdmin): 3
CmdletInfo (org.smartdata.model.CmdletInfo): 3
LocalAlluxioCluster (alluxio.master.LocalAlluxioCluster): 2
ArrayList (java.util.ArrayList): 2
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2
DFSClient (org.apache.hadoop.hdfs.DFSClient): 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2