Usage example of org.smartdata.server.metastore.DBAdapter in the Intel-bigdata SSM project:
class TestInotifyFetcher, method testFetcher.
@Test(timeout = 60000)
public void testFetcher() throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  // Access-time precision of 1 ms so that reading a file produces an atime
  // change (and hence a MetadataUpdateEvent) during the test.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1);
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.numDataNodes(2);
  MiniDFSCluster cluster = builder.build();
  try {
    cluster.waitActive();
    DFSClient client = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(), conf);
    FileSystem fs = cluster.getFileSystem(0);
    DFSTestUtil.createFile(fs, new Path("/file"), BLOCK_SIZE, (short) 1, 0L);
    DFSTestUtil.createFile(fs, new Path("/file3"), BLOCK_SIZE, (short) 1, 0L);
    DFSTestUtil.createFile(fs, new Path("/file5"), BLOCK_SIZE, (short) 1, 0L);
    DFSTestUtil.createFile(fs, new Path("/truncate_file"), BLOCK_SIZE * 2, (short) 1, 0L);
    fs.mkdirs(new Path("/tmp"), new FsPermission("777"));
    DBAdapter adapter = mock(DBAdapter.class);
    EventApplierForTest applierForTest = new EventApplierForTest(adapter, client);
    final InotifyEventFetcher fetcher = new InotifyEventFetcher(client, adapter,
        Executors.newScheduledThreadPool(2), applierForTest);
    // Run the fetcher on its own thread; the main thread drives namespace
    // changes and then waits for the expected events to be applied.
    Thread thread = new Thread() {
      public void run() {
        try {
          fetcher.start();
        } catch (IOException e) {
          e.printStackTrace();
        } catch (InterruptedException e) {
          // Restore the interrupt status instead of swallowing it, so the
          // thread terminates cleanly when interrupted.
          Thread.currentThread().interrupt();
        }
      }
    };
    thread.start();
    // Give the fetcher time to register before generating edit-log events.
    Thread.sleep(2000);
    try {
      /**
       * Code copy from {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream}
       */
      // RenameOp -> RenameEvent
      client.rename("/file", "/file4", null);
      // RenameOldOp -> RenameEvent
      client.rename("/file4", "/file2");
      // DeleteOp, AddOp -> UnlinkEvent, CreateEvent
      OutputStream os = client.create("/file2", true, (short) 2, BLOCK_SIZE);
      os.write(new byte[BLOCK_SIZE]);
      // CloseOp -> CloseEvent
      os.close();
      // AddOp -> AppendEvent
      os = client.append("/file2", BLOCK_SIZE, EnumSet.of(CreateFlag.APPEND), null, null);
      os.write(new byte[BLOCK_SIZE]);
      // CloseOp -> CloseEvent
      os.close();
      // so that the atime will get updated on the next line
      Thread.sleep(10);
      // TimesOp -> MetadataUpdateEvent
      client.open("/file2").read(new byte[1]);
      // SetReplicationOp -> MetadataUpdateEvent
      client.setReplication("/file2", (short) 1);
      // ConcatDeleteOp -> AppendEvent, UnlinkEvent, CloseEvent
      client.concat("/file2", new String[] { "/file3" });
      // DeleteOp -> UnlinkEvent
      client.delete("/file2", false);
      // MkdirOp -> CreateEvent
      client.mkdirs("/dir", null, false);
      // SetPermissionsOp -> MetadataUpdateEvent
      client.setPermission("/dir", FsPermission.valueOf("-rw-rw-rw-"));
      // SetOwnerOp -> MetadataUpdateEvent
      Thread.sleep(2000);
      client.setOwner("/dir", "username", "groupname");
      // SymlinkOp -> CreateEvent
      client.createSymlink("/dir", "/dir2", false);
      // SetXAttrOp -> MetadataUpdateEvent
      client.setXAttr("/file5", "user.field", "value".getBytes(),
          EnumSet.of(XAttrSetFlag.CREATE));
      // RemoveXAttrOp -> MetadataUpdateEvent
      client.removeXAttr("/file5", "user.field");
      // SetAclOp -> MetadataUpdateEvent
      client.setAcl("/file5",
          AclEntry.parseAclSpec("user::rwx,user:foo:rw-,group::r--,other::---", true));
      // SetAclOp -> MetadataUpdateEvent
      client.removeAcl("/file5");
      // RenameOldOp -> RenameEvent
      client.rename("/file5", "/dir");
      // TruncateOp -> TruncateEvent
      client.truncate("/truncate_file", BLOCK_SIZE);
      // Poll until the 21 expected events have been delivered; the
      // @Test(timeout = 60000) guards against an infinite wait.
      while (applierForTest.getEvents().size() != 21) {
        Thread.sleep(100);
      }
      /**
       * Refer {@link org.apache.hadoop.hdfs.TestDFSInotifyEventInputStream} for more detail
       */
      List<Event> events = applierForTest.getEvents();
      // assertEquals gives an informative message on mismatch, unlike
      // assertTrue(a == b).
      Assert.assertEquals(Event.EventType.RENAME, events.get(0).getEventType());
      Assert.assertEquals(Event.EventType.RENAME, events.get(1).getEventType());
      Assert.assertEquals(Event.EventType.CREATE, events.get(2).getEventType());
      Assert.assertEquals(Event.EventType.CLOSE, events.get(3).getEventType());
      Assert.assertEquals(Event.EventType.APPEND, events.get(4).getEventType());
      Assert.assertEquals(Event.EventType.CLOSE, events.get(5).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(6).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(7).getEventType());
      Assert.assertEquals(Event.EventType.APPEND, events.get(8).getEventType());
      Assert.assertEquals(Event.EventType.UNLINK, events.get(9).getEventType());
      Assert.assertEquals(Event.EventType.CLOSE, events.get(10).getEventType());
      Assert.assertEquals(Event.EventType.UNLINK, events.get(11).getEventType());
      Assert.assertEquals(Event.EventType.CREATE, events.get(12).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(13).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(14).getEventType());
      Assert.assertEquals(Event.EventType.CREATE, events.get(15).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(16).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(17).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(18).getEventType());
      Assert.assertEquals(Event.EventType.METADATA, events.get(19).getEventType());
      Assert.assertEquals(Event.EventType.RENAME, events.get(20).getEventType());
      // Assert.assertEquals(Event.EventType.TRUNCATE, events.get(21).getEventType());
    } finally {
      // Stop the fetcher even when an assertion above fails, so its
      // threads do not outlive the test and block cluster shutdown.
      fetcher.stop();
    }
  } finally {
    cluster.shutdown();
  }
}
Usage example of org.smartdata.server.metastore.DBAdapter in the Intel-bigdata SSM project:
class TestRuleManager, method init.
/**
 * Sets up a fresh SQLite-backed {@code DBAdapter} and {@code RuleManager}
 * before each test. The database file is created at a unique path and
 * scheduled for deletion when the JVM exits.
 *
 * @throws Exception if the connection cannot be opened or the schema
 *     cannot be initialized
 */
@Before
public void init() throws Exception {
  String dbFile = TestDBUtil.getUniqueDBFilePath();
  Connection conn = null;
  try {
    conn = Util.createSqliteConnection(dbFile);
    Util.initializeDataBase(conn);
    dbAdapter = new DBAdapter(conn);
    // TODO: to be fixed
    ruleManager = new RuleManager(null, null, dbAdapter);
  } catch (Exception e) {
    // On setup failure the connection is not owned by a DBAdapter yet;
    // close it here so the SQLite handle does not leak.
    if (conn != null) {
      try {
        conn.close();
      } catch (Exception suppressed) {
        e.addSuppressed(suppressed);
      }
    }
    throw e;
  } finally {
    // Best-effort cleanup of the temporary database file.
    new File(dbFile).deleteOnExit();
  }
}
End of aggregated DBAdapter usage examples.