Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.
Class TestInotifyEventApplier, method testApplier:
@Test
public void testApplier() throws Exception {
  DFSClient client = mock(DFSClient.class);
  Connection connection = databaseTester.getConnection().getConnection();
  Util.initializeDataBase(connection);
  DBAdapter adapter = new DBAdapter(connection);
  InotifyEventApplier applier = new InotifyEventApplier(adapter, client);
  Event.CreateEvent createEvent = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.FILE)
      .ctime(1).defaultBlockSize(1024).groupName("cg1").overwrite(true)
      .ownerName("user1").path("/file").perms(new FsPermission("777"))
      .replication(3).build();
  // 0777 (octal) == 511 decimal, matching the permission assertion below.
  HdfsFileStatus status1 = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 0777), "owner", "group", new byte[0], new byte[0],
      1010, 0, null, (byte) 0);
  when(client.getFileInfo(anyString())).thenReturn(status1);
  applier.apply(new Event[] { createEvent });
  ResultSet result1 = adapter.executeQuery("SELECT * FROM files");
  // Move the JDBC cursor onto the first row before reading columns.
  Assert.assertTrue(result1.next());
  Assert.assertEquals("/file", result1.getString("path"));
  Assert.assertEquals(1010L, result1.getLong("fid"));
  Assert.assertEquals(511, result1.getShort("permission"));
  Event close = new Event.CloseEvent("/file", 1024, 0);
  applier.apply(new Event[] { close });
  ResultSet result2 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertTrue(result2.next());
  Assert.assertEquals(1024L, result2.getLong("length"));
  Assert.assertEquals(0L, result2.getLong("modification_time"));
  // Event truncate = new Event.TruncateEvent("/file", 512, 16);
  // applier.apply(new Event[] { truncate });
  // ResultSet result3 = adapter.executeQuery("SELECT * FROM files");
  // Assert.assertEquals(512L, result3.getLong("length"));
  // Assert.assertEquals(16L, result3.getLong("modification_time"));
  Event meta = new Event.MetadataUpdateEvent.Builder()
      .path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES)
      .mtime(2).atime(3).replication(4).ownerName("user2").groupName("cg2").build();
  applier.apply(new Event[] { meta });
  ResultSet result4 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertTrue(result4.next());
  Assert.assertEquals(3L, result4.getLong("access_time"));
  Assert.assertEquals(2L, result4.getLong("modification_time"));
  Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.DIRECTORY)
      .ctime(1).groupName("cg1").overwrite(true).ownerName("user1")
      .path("/dir").perms(new FsPermission("777")).replication(3).build();
  Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.FILE)
      .ctime(1).groupName("cg1").overwrite(true).ownerName("user1")
      .path("/dir/file").perms(new FsPermission("777")).replication(3).build();
  Event rename = new Event.RenameEvent.Builder()
      .dstPath("/dir2").srcPath("/dir").timestamp(5).build();
  applier.apply(new Event[] { createEvent2, createEvent3, rename });
  ResultSet result5 = adapter.executeQuery("SELECT * FROM files");
  List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
  List<String> actualPaths = new ArrayList<>();
  while (result5.next()) {
    actualPaths.add(result5.getString("path"));
  }
  Collections.sort(actualPaths);
  Assert.assertEquals(3, actualPaths.size());
  Assert.assertTrue(actualPaths.containsAll(expectedPaths));
  Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
  applier.apply(new Event[] { unlink });
  ResultSet result6 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertFalse(result6.next());
}
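The events above are constructed by hand; in production they originate from the NameNode's inotify stream. Below is a minimal sketch, not taken from the SSM sources, of tailing that stream with a DFSClient. It assumes Hadoop 2.7+, where getInotifyEventStream() yields EventBatch objects; the URI and the println consumer are placeholders, and a real consumer would hand batch.getEvents() to an applier just as the test does.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;

public class InotifyTail {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode URI; point this at a real cluster.
    DFSClient client = new DFSClient(new URI("hdfs://localhost:9000"), new Configuration());
    DFSInotifyEventInputStream stream = client.getInotifyEventStream();
    while (true) {
      EventBatch batch = stream.take(); // blocks until the NameNode has new edits
      for (Event event : batch.getEvents()) {
        // A real consumer would call something like applier.apply(batch.getEvents());
        // here we just log the event types.
        System.out.println(event.getEventType());
      }
    }
  }
}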
Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.
Class TestNamespaceFetcher, method testNamespaceFetcher:
@Test
public void testNamespaceFetcher() throws IOException, InterruptedException,
    MissingEventsException, SQLException {
  final Configuration conf = new SmartConf();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  dfs.mkdir(new Path("/user"), new FsPermission("777"));
  dfs.create(new Path("/user/user1"));
  dfs.create(new Path("/user/user2"));
  dfs.mkdir(new Path("/tmp"), new FsPermission("777"));
  DFSClient client = dfs.getClient();
  DBAdapter adapter = mock(DBAdapter.class);
  NamespaceFetcher fetcher = new NamespaceFetcher(client, adapter, 100);
  fetcher.startFetch();
  List<String> expected = Arrays.asList("/", "/user", "/user/user1", "/user/user2", "/tmp");
  Thread.sleep(1000);
  verify(adapter).insertFiles(argThat(new FileStatusArgMatcher(expected)));
  fetcher.stop();
  cluster.shutdown();
}
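The FileStatusArgMatcher used in the verify(...) call is not shown in this snippet. A plausible reconstruction follows, assuming Mockito 2.x and that DBAdapter.insertFiles accepts a FileStatus[] (both assumptions; the real SSM class may differ). It matches when the fetched statuses cover exactly the expected paths.

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.FileStatus;
import org.mockito.ArgumentMatcher;

class FileStatusArgMatcher implements ArgumentMatcher<FileStatus[]> {
  private final Set<String> expected;

  FileStatusArgMatcher(List<String> paths) {
    this.expected = new HashSet<>(paths);
  }

  @Override
  public boolean matches(FileStatus[] statuses) {
    Set<String> actual = new HashSet<>();
    for (FileStatus s : statuses) {
      actual.add(s.getPath().toUri().getPath());
    }
    return actual.equals(expected);
  }
}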
Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.
Class StatesManager, method init:
/**
 * Load configuration/data to initialize.
 *
 * @return true if initialized successfully
 */
public boolean init(DBAdapter dbAdapter) throws IOException {
  LOG.info("Initializing ...");
  this.cleanFileTableContents(dbAdapter);
  String nnUri = conf.get(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY);
  try {
    this.client = new DFSClient(new URI(nnUri), conf);
  } catch (URISyntaxException e) {
    throw new IOException(e);
  }
  this.executorService = Executors.newScheduledThreadPool(4);
  this.accessCountTableManager = new AccessCountTableManager(dbAdapter, executorService);
  this.fileAccessEventSource = MetricsFactory.createAccessEventSource(conf);
  this.accessEventFetcher = new AccessEventFetcher(
      conf, accessCountTableManager, executorService, fileAccessEventSource.getCollector());
  this.inotifyEventFetcher = new InotifyEventFetcher(client, dbAdapter, executorService);
  LOG.info("Initialized.");
  return true;
}
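A sketch of how a caller might drive init(): the StatesManager(SmartConf) constructor and the example RPC address are assumptions based on this snippet, not confirmed SSM API.

// Hypothetical wiring, not taken from the SSM sources:
public StatesManager startStatesManager(DBAdapter dbAdapter) throws IOException {
  SmartConf conf = new SmartConf();
  // This key resolves to the NameNode RPC endpoint the DFSClient will dial.
  conf.set(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY, "hdfs://namenode-host:9000");
  StatesManager statesManager = new StatesManager(conf); // constructor is assumed
  if (!statesManager.init(dbAdapter)) {
    throw new IOException("StatesManager failed to initialize");
  }
  return statesManager;
}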
Use of org.apache.hadoop.hdfs.DFSClient in project hbase by apache.
Class FanOutOneBlockAsyncDFSOutputHelper, method createDFSClientAdaptor:
private static DFSClientAdaptor createDFSClientAdaptor() throws NoSuchMethodException {
  // DFSClient.isClientRunning() is not public, so it is reached reflectively.
  Method isClientRunningMethod = DFSClient.class.getDeclaredMethod("isClientRunning");
  isClientRunningMethod.setAccessible(true);
  return new DFSClientAdaptor() {

    @Override
    public boolean isClientRunning(DFSClient client) {
      try {
        return (Boolean) isClientRunningMethod.invoke(client);
      } catch (IllegalAccessException | InvocationTargetException e) {
        throw new RuntimeException(e);
      }
    }
  };
}
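A usage sketch for the adaptor; the dfs variable stands for an open DistributedFileSystem and the exception policy is illustrative, not HBase's:

DFSClientAdaptor adaptor = createDFSClientAdaptor();
DFSClient client = dfs.getClient(); // dfs: an open DistributedFileSystem (assumed)
if (!adaptor.isClientRunning(client)) {
  throw new IOException("DFSClient has been closed; cannot create output");
}

Note the design choice: the reflective Method lookup runs once, when the adaptor is created, so each later isClientRunning call pays only the cost of Method.invoke.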
Use of org.apache.hadoop.hdfs.DFSClient in project hbase by apache.
Class FanOutOneBlockAsyncDFSOutputHelper, method createOutput:
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    EventLoop eventLoop) throws IOException {
  Configuration conf = dfs.getConf();
  FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
  DFSClient client = dfs.getClient();
  String clientName = client.getClientName();
  ClientProtocol namenode = client.getNamenode();
  HdfsFileStatus stat;
  try {
    stat = namenode.create(src,
        FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
        new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
        createParent, replication, blockSize, CryptoProtocolVersion.supported());
  } catch (Exception e) {
    if (e instanceof RemoteException) {
      throw (RemoteException) e;
    } else {
      throw new NameNodeException(e);
    }
  }
  beginFileLease(client, stat.getFileId());
  boolean succ = false;
  LocatedBlock locatedBlock = null;
  List<Future<Channel>> futureList = null;
  try {
    DataChecksum summer = createChecksum(client);
    locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, null,
        stat.getFileId(), null);
    List<Channel> datanodeList = new ArrayList<>();
    futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
        PIPELINE_SETUP_CREATE, summer, eventLoop);
    for (Future<Channel> future : futureList) {
      // fail the creation if there are connection failures since we are fail-fast. The upper
      // layer should retry itself if needed.
      datanodeList.add(future.syncUninterruptibly().getNow());
    }
    Encryptor encryptor = createEncryptor(conf, stat, client);
    FanOutOneBlockAsyncDFSOutput output =
        new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs, client, namenode, clientName, src,
            stat.getFileId(), locatedBlock, encryptor, eventLoop, datanodeList, summer, ALLOC);
    succ = true;
    return output;
  } finally {
    if (!succ) {
      if (futureList != null) {
        // On failure, close any datanode connections that do eventually succeed.
        for (Future<Channel> f : futureList) {
          f.addListener(new FutureListener<Channel>() {

            @Override
            public void operationComplete(Future<Channel> future) throws Exception {
              if (future.isSuccess()) {
                future.getNow().close();
              }
            }
          });
        }
      }
      endFileLease(client, stat.getFileId());
      fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
    }
  }
}
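For orientation, callers normally reach this private method through the helper's public createOutput overload rather than directly. The sketch below is an assumption against this era of HBase (the overload's exact signature and the write()/close() semantics may differ); the path, payload, and event loop setup are illustrative.

EventLoop eventLoop = new NioEventLoopGroup(1).next();
FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(
    dfs, new Path("/demo/fanout"), true /* overwrite */, false /* createParent */,
    (short) 3, dfs.getDefaultBlockSize(), eventLoop);
try {
  out.write(Bytes.toBytes("hello")); // buffered client-side until flushed
} finally {
  out.close(); // finishes the file; the lease taken in createOutput is released
}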