Example 6 with Event

Use of org.apache.hadoop.hdfs.inotify.Event in project SSM by Intel-bigdata.

From the class EventBatchSerializer, the method serialize:

// Code copied from PBHelperClient.java
public static byte[] serialize(EventBatch eventBatch) {
    List<InotifyProtos.EventProto> events = Lists.newArrayList();
    for (Event e : eventBatch.getEvents()) {
        switch(e.getEventType()) {
            case CLOSE:
                Event.CloseEvent ce = (Event.CloseEvent) e;
                events.add(InotifyProtos.EventProto.newBuilder()
                    .setType(InotifyProtos.EventType.EVENT_CLOSE)
                    .setContents(
                        InotifyProtos.CloseEventProto.newBuilder()
                            .setPath(ce.getPath())
                            .setFileSize(ce.getFileSize())
                            .setTimestamp(ce.getTimestamp()).build().toByteString()
                    ).build());
                break;
            case CREATE:
                Event.CreateEvent ce2 = (Event.CreateEvent) e;
                events.add(InotifyProtos.EventProto.newBuilder()
                    .setType(InotifyProtos.EventType.EVENT_CREATE)
                    .setContents(
                        InotifyProtos.CreateEventProto.newBuilder()
                            .setType(createTypeConvert(ce2.getiNodeType()))
                            .setPath(ce2.getPath())
                            .setCtime(ce2.getCtime())
                            .setOwnerName(ce2.getOwnerName())
                            .setGroupName(ce2.getGroupName())
                            .setPerms(convert(ce2.getPerms()))
                            .setReplication(ce2.getReplication())
                            .setSymlinkTarget(ce2.getSymlinkTarget() == null
                                ? "" : ce2.getSymlinkTarget())
                            .setDefaultBlockSize(ce2.getDefaultBlockSize())
                            .setOverwrite(ce2.getOverwrite()).build().toByteString()
                    ).build());
                break;
            case METADATA:
                Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
                InotifyProtos.MetadataUpdateEventProto.Builder metaB =
                    InotifyProtos.MetadataUpdateEventProto.newBuilder()
                        .setPath(me.getPath())
                        .setType(metadataUpdateTypeConvert(me.getMetadataType()))
                        .setMtime(me.getMtime())
                        .setAtime(me.getAtime())
                        .setReplication(me.getReplication())
                        .setOwnerName(me.getOwnerName() == null ? "" : me.getOwnerName())
                        .setGroupName(me.getGroupName() == null ? "" : me.getGroupName())
                        .addAllAcls(me.getAcls() == null
                            ? Lists.<AclProtos.AclEntryProto>newArrayList()
                            : convertAclEntryProto(me.getAcls()))
                        .addAllXAttrs(me.getxAttrs() == null
                            ? Lists.<XAttrProtos.XAttrProto>newArrayList()
                            : convertXAttrProto(me.getxAttrs()))
                        .setXAttrsRemoved(me.isxAttrsRemoved());
                if (me.getPerms() != null) {
                    metaB.setPerms(convert(me.getPerms()));
                }
                events.add(InotifyProtos.EventProto.newBuilder()
                    .setType(InotifyProtos.EventType.EVENT_METADATA)
                    .setContents(metaB.build().toByteString()).build());
                break;
            case RENAME:
                Event.RenameEvent re = (Event.RenameEvent) e;
                events.add(InotifyProtos.EventProto.newBuilder()
                    .setType(InotifyProtos.EventType.EVENT_RENAME)
                    .setContents(
                        InotifyProtos.RenameEventProto.newBuilder()
                            .setSrcPath(re.getSrcPath())
                            .setDestPath(re.getDstPath())
                            .setTimestamp(re.getTimestamp()).build().toByteString()
                    ).build());
                break;
            case APPEND:
                Event.AppendEvent re2 = (Event.AppendEvent) e;
                events.add(InotifyProtos.EventProto.newBuilder()
                    .setType(InotifyProtos.EventType.EVENT_APPEND)
                    .setContents(
                        InotifyProtos.AppendEventProto.newBuilder()
                            .setPath(re2.getPath())
                            .setNewBlock(re2.toNewBlock()).build().toByteString()
                    ).build());
                break;
            case UNLINK:
                Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
                events.add(InotifyProtos.EventProto.newBuilder()
                    .setType(InotifyProtos.EventType.EVENT_UNLINK)
                    .setContents(
                        InotifyProtos.UnlinkEventProto.newBuilder()
                            .setPath(ue.getPath())
                            .setTimestamp(ue.getTimestamp()).build().toByteString()
                    ).build());
                break;
            /*
            case TRUNCATE:
                Event.TruncateEvent te = (Event.TruncateEvent) e;
                events.add(InotifyProtos.EventProto.newBuilder()
                    .setType(InotifyProtos.EventType.EVENT_TRUNCATE)
                    .setContents(
                        InotifyProtos.TruncateEventProto.newBuilder()
                            .setPath(te.getPath())
                            .setFileSize(te.getFileSize())
                            .setTimestamp(te.getTimestamp()).build().toByteString()
                    ).build());
                break;
            */
            default:
                throw new RuntimeException("Unexpected inotify event: " + e);
        }
    }
    return InotifyProtos.EventBatchProto.newBuilder()
        .setTxid(eventBatch.getTxid())
        .addAllEvents(events).build().toByteArray();
}
Also used: XAttrProtos(org.apache.hadoop.hdfs.protocol.proto.XAttrProtos) AclEntryProto(org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto) Event(org.apache.hadoop.hdfs.inotify.Event)
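
For context, a minimal sketch of the inverse operation: every protobuf-generated message exposes a static parseFrom for the byte-array form, so reading the batch back is one call. The per-event conversion back to org.apache.hadoop.hdfs.inotify.Event would mirror the switch above in reverse; convertEventProto below is a hypothetical helper standing in for that logic (the real conversion lives in PBHelperClient), and the EventBatch(long, Event[]) constructor is assumed from the HDFS inotify API.

// A sketch only, not code from the SSM project.
// Assumes com.google.protobuf.InvalidProtocolBufferException is imported.
public static EventBatch deserialize(byte[] bytes) throws InvalidProtocolBufferException {
    // parseFrom is generated by protoc for every message type.
    InotifyProtos.EventBatchProto proto = InotifyProtos.EventBatchProto.parseFrom(bytes);
    List<Event> events = Lists.newArrayList();
    for (InotifyProtos.EventProto p : proto.getEventsList()) {
        // Hypothetical helper: inverts the per-type switch in serialize().
        events.add(convertEventProto(p));
    }
    return new EventBatch(proto.getTxid(), events.toArray(new Event[0]));
}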

Example 7 with Event

Use of org.apache.hadoop.hdfs.inotify.Event in project SSM by Intel-bigdata.

From the class TestInotifyFetcher, the method testFetcher:

@Test(timeout = 60000)
public void testFetcher() throws IOException, InterruptedException {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    // so that we can get an atime change
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.numDataNodes(2);
    MiniDFSCluster cluster = builder.build();
    try {
        cluster.waitActive();
        DFSClient client = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(), conf);
        FileSystem fs = cluster.getFileSystem(0);
        DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
        DFSTestUtil.createFile(fs, new Path("/truncate_file"), BLOCK_SIZE * 2, (short) 1, 0L);
        fs.mkdirs(new Path("/tmp"), new FsPermission("777"));
        DBAdapter adapter = mock(DBAdapter.class);
        EventApplierForTest applierForTest = new EventApplierForTest(adapter, client);
        final InotifyEventFetcher fetcher = new InotifyEventFetcher(client, adapter, Executors.newScheduledThreadPool(2), applierForTest);
        Thread thread = new Thread() {

            public void run() {
                try {
                    fetcher.start();
                } catch (IOException | InterruptedException e) {
                    e.printStackTrace();
                }
            }
        };
        thread.start();
        Thread.sleep(2000);
        /**
         * Code copied from {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream}.
         */
        // RenameOp -> RenameEvent
        client.rename("/file", "/file4", null);
        // RenameOldOp -> RenameEvent
        client.rename("/file4", "/file2");
        // DeleteOp, AddOp -> UnlinkEvent, CreateEvent
        OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // AddOp -> AppendEvent
        os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND), null, null);
        os.write(new byte[BLOCK_SIZE]);
        // CloseOp -> CloseEvent
        os.close();
        // so that the atime will get updated on the next line
        Thread.sleep(10);
        // TimesOp -> MetadataUpdateEvent
        client.open("/file2").read(new byte[1]);
        // SetReplicationOp -> MetadataUpdateEvent
        client.setReplication("/file2", (short) 1);
        // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
        client.concat("/file2", new String[] { "/file3" });
        // DeleteOp -> UnlinkEvent
        client.delete("/file2", false);
        // MkdirOp -> CreateEvent
        client.mkdirs("/dir", null, false);
        // SetPermissionsOp -> MetadataUpdateEvent
        client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
        Thread.sleep(2000);
        // SetOwnerOp -> MetadataUpdateEvent
        client.setOwner("/dir", "username", "groupname");
        // SymlinkOp -> CreateEvent
        client.createSymlink("/dir", "/dir2", false);
        // SetXAttrOp -> MetadataUpdateEvent
        client.setXAttr("/file5", "user.field", "value".getBytes(),
            EnumSet.of(XAttrSetFlag.CREATE));
        // RemoveXAttrOp -> MetadataUpdateEvent
        client.removeXAttr("/file5", "user.field");
        // SetAclOp -> MetadataUpdateEvent
        client.setAcl("/file5", AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));
        // SetAclOp -> MetadataUpdateEvent
        client.removeAcl("/file5");
        // RenameOldOp -> RenameEvent
        client.rename("/file5", "/dir");
        // TruncateOp -> TruncateEvent
        client.truncate("/truncate_file", BLOCK_SIZE);
        // Wait until all 21 expected events have been applied.
        while (applierForTest.getEvents().size() != 21) {
            Thread.sleep(100);
        }
        /**
         * Refer to {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream} for more detail.
         */
        List<Event> events = applierForTest.getEvents();
        Assert.assertTrue(events.get(0).getEventType() == Event.EventType.RENAME);
        Assert.assertTrue(events.get(1).getEventType() == Event.EventType.RENAME);
        Assert.assertTrue(events.get(2).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(3).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(4).getEventType() == Event.EventType.APPEND);
        Assert.assertTrue(events.get(5).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(6).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(7).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(8).getEventType() == Event.EventType.APPEND);
        Assert.assertTrue(events.get(9).getEventType() == Event.EventType.UNLINK);
        Assert.assertTrue(events.get(10).getEventType() == Event.EventType.CLOSE);
        Assert.assertTrue(events.get(11).getEventType() == Event.EventType.UNLINK);
        Assert.assertTrue(events.get(12).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(13).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(14).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(15).getEventType() == Event.EventType.CREATE);
        Assert.assertTrue(events.get(16).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(17).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(18).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(19).getEventType() == Event.EventType.METADATA);
        Assert.assertTrue(events.get(20).getEventType() == Event.EventType.RENAME);
        // Assert.assertTrue(events.get(21).getEventType() == Event.EventType.TRUNCATE);
        fetcher.stop();
    } finally {
        cluster.shutdown();
    }
}
Also used: DFSClient(org.apache.hadoop.hdfs.DFSClient) Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) OutputStream(java.io.OutputStream) IOException(java.io.IOException) DBAdapter(org.smartdata.server.metastore.DBAdapter) FileSystem(org.apache.hadoop.fs.FileSystem) Event(org.apache.hadoop.hdfs.inotify.Event) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
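
One fragility in the test above: the while loop blocks indefinitely if fewer than 21 events ever arrive, leaving only the @Test timeout as a safety net. A minimal sketch of a bounded wait with a clearer failure message, reusing applierForTest and the JUnit Assert already imported by the test; waitForEvents is a hypothetical helper, not part of the project:

// A sketch only, not code from the SSM project.
private static void waitForEvents(EventApplierForTest applier, int expected)
        throws InterruptedException {
    // Poll until the expected count arrives or a local deadline passes,
    // failing with a descriptive message instead of a bare timeout.
    long deadline = System.currentTimeMillis() + 30_000;
    while (applier.getEvents().size() < expected) {
        if (System.currentTimeMillis() > deadline) {
            Assert.fail("Expected " + expected + " events, got "
                + applier.getEvents().size());
        }
        Thread.sleep(100);
    }
}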

Aggregations

Event (org.apache.hadoop.hdfs.inotify.Event) 7
ArrayList (java.util.ArrayList) 3
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 3
EventBatch (org.apache.hadoop.hdfs.inotify.EventBatch) 3
Test (org.junit.Test) 3
IOException (java.io.IOException) 2
DFSClient (org.apache.hadoop.hdfs.DFSClient) 2
AclEntryProto (org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto) 2
DBAdapter (org.smartdata.server.metastore.DBAdapter) 2
OutputStream (java.io.OutputStream) 1
Connection (java.sql.Connection) 1
ResultSet (java.sql.ResultSet) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
FileSystem (org.apache.hadoop.fs.FileSystem) 1
Path (org.apache.hadoop.fs.Path) 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 1
EventBatchList (org.apache.hadoop.hdfs.inotify.EventBatchList) 1
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) 1
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus) 1