Example 1 with Event

Use of org.apache.hadoop.hdfs.inotify.Event in project nifi by apache.

From the class TestGetHDFSEvents, method makeSureExpressionLanguageIsWorkingProperlyWithinTheHdfsPathToWatch:

@Test
public void makeSureExpressionLanguageIsWorkingProperlyWithinTheHdfsPathToWatch() throws Exception {
    Event[] events = new Event[] {
            new Event.CreateEvent.Builder().path("/some/path/1/2/3/t.txt").build(),
            new Event.CreateEvent.Builder().path("/some/path/1/2/4/t.txt").build(),
            new Event.CreateEvent.Builder().path("/some/path/1/2/3/.t.txt").build()
    };
    EventBatch eventBatch = mock(EventBatch.class);
    when(eventBatch.getEvents()).thenReturn(events);
    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(eventBatch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);
    when(eventBatch.getTxid()).thenReturn(100L);
    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);
    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path/${literal(1)}/${literal(2)}/${literal(3)}/.*.txt");
    runner.setProperty(GetHDFSEvents.EVENT_TYPES, "create");
    runner.setProperty(GetHDFSEvents.IGNORE_HIDDEN_FILES, "true");
    runner.run();
    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(1, successfulFlowFiles.size());
    for (MockFlowFile f : successfulFlowFiles) {
        String eventType = f.getAttribute(EventAttributes.EVENT_TYPE);
        assertEquals("CREATE", eventType);
    }
    verify(eventBatch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
Also used: MockFlowFile (org.apache.nifi.util.MockFlowFile), TestRunner (org.apache.nifi.util.TestRunner), Event (org.apache.hadoop.hdfs.inotify.Event), EventBatch (org.apache.hadoop.hdfs.inotify.EventBatch), Test (org.junit.Test)
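
Of the three stubbed events, only /some/path/1/2/3/t.txt should survive: the /1/2/4/ path fails the resolved watch pattern, and .t.txt is dropped because hidden files are ignored, hence the single flowfile. Both NiFi tests also reference fields this listing does not show (inotifyEventInputStream, hdfsAdmin, kerberosProperties) and a TestableGetHDFSEvents subclass that hands the mocked HdfsAdmin to the processor. A minimal sketch of that scaffolding, reconstructed from the calls above rather than copied from the NiFi codebase:

import static org.mockito.Mockito.mock;

import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.nifi.hadoop.KerberosProperties;
import org.junit.Before;

public class TestGetHDFSEvents {

    // Mocked collaborators shared by every test in the class; no live
    // NameNode connection is involved.
    private DFSInotifyEventInputStream inotifyEventInputStream;
    private HdfsAdmin hdfsAdmin;
    private KerberosProperties kerberosProperties;

    @Before
    public void setup() {
        inotifyEventInputStream = mock(DFSInotifyEventInputStream.class);
        hdfsAdmin = mock(HdfsAdmin.class);
        kerberosProperties = mock(KerberosProperties.class);
    }
}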

Example 2 with Event

Use of org.apache.hadoop.hdfs.inotify.Event in project nifi by apache.

From the class TestGetHDFSEvents, method eventsProcessorShouldProperlyFilterEventTypes:

@Test
public void eventsProcessorShouldProperlyFilterEventTypes() throws Exception {
    Event[] events = getEvents();
    EventBatch eventBatch = mock(EventBatch.class);
    when(eventBatch.getEvents()).thenReturn(events);
    when(inotifyEventInputStream.poll(1000000L, TimeUnit.MICROSECONDS)).thenReturn(eventBatch);
    when(hdfsAdmin.getInotifyEventStream()).thenReturn(inotifyEventInputStream);
    when(eventBatch.getTxid()).thenReturn(100L);
    GetHDFSEvents processor = new TestableGetHDFSEvents(kerberosProperties, hdfsAdmin);
    TestRunner runner = TestRunners.newTestRunner(processor);
    runner.setProperty(GetHDFSEvents.HDFS_PATH_TO_WATCH, "/some/path(/.*)?");
    runner.setProperty(GetHDFSEvents.EVENT_TYPES, "create, metadata");
    runner.run();
    List<MockFlowFile> successfulFlowFiles = runner.getFlowFilesForRelationship(GetHDFSEvents.REL_SUCCESS);
    assertEquals(2, successfulFlowFiles.size());
    List<String> expectedEventTypes = Arrays.asList("CREATE", "METADATA");
    for (MockFlowFile f : successfulFlowFiles) {
        String eventType = f.getAttribute(EventAttributes.EVENT_TYPE);
        assertTrue(expectedEventTypes.contains(eventType));
    }
    verify(eventBatch).getTxid();
    assertEquals("100", runner.getProcessContext().getStateManager().getState(Scope.CLUSTER).get("last.tx.id"));
}
Also used: MockFlowFile (org.apache.nifi.util.MockFlowFile), TestRunner (org.apache.nifi.util.TestRunner), Event (org.apache.hadoop.hdfs.inotify.Event), EventBatch (org.apache.hadoop.hdfs.inotify.EventBatch), Test (org.junit.Test)
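
The getEvents() helper is not part of this listing. For the assertions to hold (two flowfiles after filtering to "create, metadata"), it must return one CREATE and one METADATA event under the watched path, plus at least one event of another type. A plausible reconstruction, not the actual NiFi helper:

    private Event[] getEvents() {
        return new Event[] {
                // Matches the "create" filter.
                new Event.CreateEvent.Builder().path("/some/path/create_file.txt").build(),
                // Matches the "metadata" filter.
                new Event.MetadataUpdateEvent.Builder()
                        .path("/some/path/meta_file.txt")
                        .metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER)
                        .build(),
                // Dropped: "append" is not listed in EVENT_TYPES.
                new Event.AppendEvent.Builder().path("/some/path/append_file.txt").build()
        };
    }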

Example 3 with Event

Use of org.apache.hadoop.hdfs.inotify.Event in project SSM by Intel-bigdata.

From the class InotifyEventApplier, method apply:

public void apply(List<Event> events) throws IOException, SQLException {
    List<String> statements = new ArrayList<>();
    for (Event event : events) {
        String statement = getSqlStatement(event);
        if (statement != null && !statement.isEmpty()) {
            statements.add(statement);
        }
    }
    this.adapter.execute(statements);
}
Also used: ArrayList (java.util.ArrayList), Event (org.apache.hadoop.hdfs.inotify.Event)
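
The getSqlStatement method is not shown here; it presumably switches on Event.getEventType() and renders one SQL statement per event, returning null or an empty string for events the apply loop above should skip. A hedged sketch of that dispatch, with illustrative SQL rather than SSM's actual schema handling:

    private String getSqlStatement(Event event) {
        switch (event.getEventType()) {
            case CLOSE:
                // CloseEvent carries the final file length and modification time.
                Event.CloseEvent close = (Event.CloseEvent) event;
                return String.format(
                        "UPDATE files SET length = %d, modification_time = %d WHERE path = '%s'",
                        close.getFileSize(), close.getTimestamp(), close.getPath());
            case RENAME:
                Event.RenameEvent rename = (Event.RenameEvent) event;
                return String.format(
                        "UPDATE files SET path = '%s' WHERE path = '%s'",
                        rename.getDstPath(), rename.getSrcPath());
            default:
                // Unhandled event types produce no statement.
                return null;
        }
    }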

Example 4 with Event

Use of org.apache.hadoop.hdfs.inotify.Event in project SSM by Intel-bigdata.

From the class TestEventBatchSerializer, method testSerializer:

@Test
public void testSerializer() throws InvalidProtocolBufferException {
    Event close = new Event.CloseEvent("/user1", 1024, 0);
    Event create = new Event.CreateEvent.Builder()
            .iNodeType(Event.CreateEvent.INodeType.FILE)
            .ctime(1)
            .defaultBlockSize(1024)
            .groupName("cg1")
            .overwrite(true)
            .ownerName("user1")
            .path("/file1")
            .perms(new FsPermission("777"))
            .replication(3)
            .build();
    Event meta = new Event.MetadataUpdateEvent.Builder()
            .path("/file2")
            .metadataType(Event.MetadataUpdateEvent.MetadataType.OWNER)
            .mtime(2)
            .atime(3)
            .replication(4)
            .ownerName("user2")
            .groupName("cg2")
            .build();
    Event rename = new Event.RenameEvent.Builder().dstPath("/file4").srcPath("/file3").timestamp(5).build();
    Event append = new Event.AppendEvent.Builder().newBlock(true).path("/file5").build();
    Event unlink = new Event.UnlinkEvent.Builder().path("/file6").timestamp(6).build();
    //    Event truncate = new Event.TruncateEvent("/file7", 1024, 16);
    List<Event> events = Arrays.asList(close, create, meta, rename, append, unlink);
    EventBatch batch = new EventBatch(1023, events.toArray(new Event[0]));
    List<String> expected = new ArrayList<>();
    for (Event event : events) {
        expected.add(event.toString());
    }
    byte[] bytes = EventBatchSerializer.serialize(batch);
    EventBatch result = EventBatchSerializer.deserialize(bytes);
    List<String> actual = new ArrayList<>();
    for (Event event : result.getEvents()) {
        actual.add(event.toString());
    }
    Assert.assertEquals(batch.getTxid(), result.getTxid());
    Assert.assertEquals(expected.size(), actual.size());
//    Assert.assertTrue(expected.containsAll(actual));
}
Also used: ArrayList (java.util.ArrayList), Event (org.apache.hadoop.hdfs.inotify.Event), FsPermission (org.apache.hadoop.fs.permission.FsPermission), EventBatch (org.apache.hadoop.hdfs.inotify.EventBatch), Test (org.junit.Test)
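
The Hadoop inotify Event classes do not appear to override equals(), which is why the test compares string renderings and sizes rather than the event objects themselves. Since serialization should preserve batch order, a stricter order-sensitive check (an alternative sketch, not from the SSM source) would simply be:

    Assert.assertEquals(expected, actual);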

Example 5 with Event

Use of org.apache.hadoop.hdfs.inotify.Event in project SSM by Intel-bigdata.

From the class TestInotifyEventApplier, method testApplier:

@Test
public void testApplier() throws Exception {
    DFSClient client = mock(DFSClient.class);
    Connection connection = databaseTester.getConnection().getConnection();
    Util.initializeDataBase(connection);
    DBAdapter adapter = new DBAdapter(connection);
    InotifyEventApplier applier = new InotifyEventApplier(adapter, client);
    Event.CreateEvent createEvent = new Event.CreateEvent.Builder()
            .iNodeType(Event.CreateEvent.INodeType.FILE)
            .ctime(1)
            .defaultBlockSize(1024)
            .groupName("cg1")
            .overwrite(true)
            .ownerName("user1")
            .path("/file")
            .perms(new FsPermission("777"))
            .replication(3)
            .build();
    HdfsFileStatus status1 = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
            new FsPermission((short) 777), "owner", "group",
            new byte[0], new byte[0], 1010, 0, null, (byte) 0);
    when(client.getFileInfo(anyString())).thenReturn(status1);
    applier.apply(new Event[] { createEvent });
    ResultSet result1 = adapter.executeQuery("SELECT * FROM files");
    // Assumes the adapter-returned ResultSet is already positioned on the first row.
    Assert.assertEquals("/file", result1.getString("path"));
    Assert.assertEquals(1010L, result1.getLong("fid"));
    // 511 decimal is 0777 octal, i.e. the FsPermission("777") set on the create event.
    Assert.assertEquals(511, result1.getShort("permission"));
    Event close = new Event.CloseEvent("/file", 1024, 0);
    applier.apply(new Event[] { close });
    ResultSet result2 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertEquals(1024L, result2.getLong("length"));
    Assert.assertEquals(0L, result2.getLong("modification_time"));
    //    Event truncate = new Event.TruncateEvent("/file", 512, 16);
    //    applier.apply(new Event[] {truncate});
    //    ResultSet result3 = adapter.executeQuery("SELECT * FROM files");
    //    Assert.assertEquals(result3.getLong("length"), 512);
    //    Assert.assertEquals(result3.getLong("modification_time"), 16L);
    Event meta = new Event.MetadataUpdateEvent.Builder()
            .path("/file")
            .metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES)
            .mtime(2)
            .atime(3)
            .replication(4)
            .ownerName("user2")
            .groupName("cg2")
            .build();
    applier.apply(new Event[] { meta });
    ResultSet result4 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertEquals(3L, result4.getLong("access_time"));
    Assert.assertEquals(2L, result4.getLong("modification_time"));
    Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder()
            .iNodeType(Event.CreateEvent.INodeType.DIRECTORY)
            .ctime(1)
            .groupName("cg1")
            .overwrite(true)
            .ownerName("user1")
            .path("/dir")
            .perms(new FsPermission("777"))
            .replication(3)
            .build();
    Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder()
            .iNodeType(Event.CreateEvent.INodeType.FILE)
            .ctime(1)
            .groupName("cg1")
            .overwrite(true)
            .ownerName("user1")
            .path("/dir/file")
            .perms(new FsPermission("777"))
            .replication(3)
            .build();
    Event rename = new Event.RenameEvent.Builder().dstPath("/dir2").srcPath("/dir").timestamp(5).build();
    applier.apply(new Event[] { createEvent2, createEvent3, rename });
    ResultSet result5 = adapter.executeQuery("SELECT * FROM files");
    List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
    List<String> actualPaths = new ArrayList<>();
    while (result5.next()) {
        actualPaths.add(result5.getString("path"));
    }
    Collections.sort(actualPaths);
    Assert.assertEquals(3, actualPaths.size());
    Assert.assertTrue(actualPaths.containsAll(expectedPaths));
    Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
    applier.apply(new Event[] { unlink });
    ResultSet result6 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertFalse(result6.next());
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), Connection (java.sql.Connection), ArrayList (java.util.ArrayList), Matchers.anyString (org.mockito.Matchers.anyString), DBAdapter (org.smartdata.server.metastore.DBAdapter), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), ResultSet (java.sql.ResultSet), Event (org.apache.hadoop.hdfs.inotify.Event), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test), DBTest (org.smartdata.server.metastore.DBTest)
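
Taken together, the examples mirror the production pattern: open the NameNode's inotify stream, poll for batches, and hand each batch's events to a consumer such as SSM's InotifyEventApplier. A hedged sketch of that loop; the URI is a placeholder, and checkpointing of the last transaction id is left out:

import java.net.URI;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyPollLoop {
    public static void main(String[] args) throws Exception {
        // hdfs://namenode:8020 is a placeholder for the target cluster.
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
        DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
        while (true) {
            // Same poll interval the NiFi tests stub: one second, expressed in microseconds.
            EventBatch batch = stream.poll(1000000L, TimeUnit.MICROSECONDS);
            if (batch == null) {
                continue;
            }
            // A real consumer would persist batch.getTxid() so polling can
            // resume after a restart (cf. the "last.tx.id" state above).
            for (Event event : batch.getEvents()) {
                System.out.println(event.getEventType() + ": " + event);
            }
        }
    }
}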

Aggregations

Event (org.apache.hadoop.hdfs.inotify.Event): 21
Test (org.junit.Test): 12
EventBatch (org.apache.hadoop.hdfs.inotify.EventBatch): 10
ArrayList (java.util.ArrayList): 9
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 7
DFSClient (org.apache.hadoop.hdfs.DFSClient): 6
IOException (java.io.IOException): 5
MockFlowFile (org.apache.nifi.util.MockFlowFile): 4
TestRunner (org.apache.nifi.util.TestRunner): 4
OutputStream (java.io.OutputStream): 3
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
AlluxioURI (alluxio.AlluxioURI): 2
InvalidPathException (alluxio.exception.InvalidPathException): 2
LockResource (alluxio.resource.LockResource): 2
Callable (java.util.concurrent.Callable): 2
AclEntryProto (org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto): 2
FileInfo (org.smartdata.model.FileInfo): 2
Constants (alluxio.Constants): 1
SyncInfo (alluxio.SyncInfo): 1
ConcurrentHashSet (alluxio.collections.ConcurrentHashSet): 1