Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class TestRetryCacheWithHA, method testSetXAttr.
@Test(timeout = 60000)
public void testSetXAttr() throws Exception {
  DFSClient client = genClientWithDummyHandler();
  AtMostOnceOp op = new SetXAttrOp(client, "/setxattr");
  testClientRetryWithFailover(op);
}
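The SetXAttrOp wrapper ultimately exercises the extended-attribute call on DFSClient. A minimal sketch of that underlying call, using the client from the test above and assuming the standard DFSClient.setXAttr signature; the attribute name and value are illustrative only.

import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.fs.XAttrSetFlag;

// Hypothetical direct call equivalent to what SetXAttrOp drives through the retry cache.
client.setXAttr("/setxattr", "user.example.tag",
    "value".getBytes(StandardCharsets.UTF_8),
    EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));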
Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.
The class TestInotifyEventApplier, method testApplier.
@Test
public void testApplier() throws Exception {
  DFSClient client = mock(DFSClient.class);
  Connection connection = databaseTester.getConnection().getConnection();
  Util.initializeDataBase(connection);
  DBAdapter adapter = new DBAdapter(connection);
  InotifyEventApplier applier = new InotifyEventApplier(adapter, client);

  Event.CreateEvent createEvent = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.FILE)
      .ctime(1).defaultBlockSize(1024).groupName("cg1").overwrite(true)
      .ownerName("user1").path("/file").perms(new FsPermission("777"))
      .replication(3).build();
  HdfsFileStatus status1 = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
      new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0],
      1010, 0, null, (byte) 0);
  when(client.getFileInfo(anyString())).thenReturn(status1);
  applier.apply(new Event[] { createEvent });

  ResultSet result1 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertEquals(result1.getString("path"), "/file");
  Assert.assertEquals(result1.getLong("fid"), 1010L);
  Assert.assertEquals(result1.getShort("permission"), 511);

  Event close = new Event.CloseEvent("/file", 1024, 0);
  applier.apply(new Event[] { close });
  ResultSet result2 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertEquals(result2.getLong("length"), 1024);
  Assert.assertEquals(result2.getLong("modification_time"), 0L);

  // Event truncate = new Event.TruncateEvent("/file", 512, 16);
  // applier.apply(new Event[] {truncate});
  // ResultSet result3 = adapter.executeQuery("SELECT * FROM files");
  // Assert.assertEquals(result3.getLong("length"), 512);
  // Assert.assertEquals(result3.getLong("modification_time"), 16L);

  Event meta = new Event.MetadataUpdateEvent.Builder()
      .path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES)
      .mtime(2).atime(3).replication(4).ownerName("user2").groupName("cg2")
      .build();
  applier.apply(new Event[] { meta });
  ResultSet result4 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertEquals(result4.getLong("access_time"), 3);
  Assert.assertEquals(result4.getLong("modification_time"), 2);

  Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.DIRECTORY)
      .ctime(1).groupName("cg1").overwrite(true).ownerName("user1")
      .path("/dir").perms(new FsPermission("777")).replication(3).build();
  Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder()
      .iNodeType(Event.CreateEvent.INodeType.FILE)
      .ctime(1).groupName("cg1").overwrite(true).ownerName("user1")
      .path("/dir/file").perms(new FsPermission("777")).replication(3).build();
  Event rename = new Event.RenameEvent.Builder()
      .dstPath("/dir2").srcPath("/dir").timestamp(5).build();
  applier.apply(new Event[] { createEvent2, createEvent3, rename });

  ResultSet result5 = adapter.executeQuery("SELECT * FROM files");
  List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
  List<String> actualPaths = new ArrayList<>();
  while (result5.next()) {
    actualPaths.add(result5.getString("path"));
  }
  Collections.sort(actualPaths);
  Assert.assertTrue(actualPaths.size() == 3);
  Assert.assertTrue(actualPaths.containsAll(expectedPaths));

  Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
  applier.apply(new Event[] { unlink });
  ResultSet result6 = adapter.executeQuery("SELECT * FROM files");
  Assert.assertFalse(result6.next());
}
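The rename assertions above imply that the applier rewrites the stored path prefix for the renamed directory and everything beneath it. A rough illustration of that effect in SQL; this is an assumption about the expected behavior, not the actual statement issued by InotifyEventApplier.

// Hypothetical prefix rewrite matching the expectations checked above; the real
// implementation and SQL dialect may differ.
String src = "/dir";
String dst = "/dir2";
String sql = "UPDATE files SET path = CONCAT('" + dst + "', SUBSTRING(path, "
    + (src.length() + 1) + ")) WHERE path = '" + src + "' OR path LIKE '" + src + "/%'";
adapter.execute(sql);  // execute(String) is assumed here, not a confirmed DBAdapter method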
Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.
The class TestNamespaceFetcher, method testNamespaceFetcher.
@Test
public void testNamespaceFetcher() throws IOException, InterruptedException,
    MissingEventsException, SQLException {
  final Configuration conf = new SmartConf();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  dfs.mkdir(new Path("/user"), new FsPermission("777"));
  dfs.create(new Path("/user/user1"));
  dfs.create(new Path("/user/user2"));
  dfs.mkdir(new Path("/tmp"), new FsPermission("777"));

  DFSClient client = dfs.getClient();
  DBAdapter adapter = mock(DBAdapter.class);
  NamespaceFetcher fetcher = new NamespaceFetcher(client, adapter, 100);
  fetcher.startFetch();

  List<String> expected = Arrays.asList("/", "/user", "/user/user1", "/user/user2", "/tmp");
  Thread.sleep(1000);
  verify(adapter).insertFiles(argThat(new FileStatusArgMatcher(expected)));
  fetcher.stop();
  cluster.shutdown();
}
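FileStatusArgMatcher is referenced above but its body is not shown. A plausible shape, sketched against Mockito's ArgumentMatcher interface and assuming insertFiles receives an array of FileStatus objects; both are assumptions, not the SSM source.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.fs.FileStatus;
import org.mockito.ArgumentMatcher;

class FileStatusArgMatcher implements ArgumentMatcher<FileStatus[]> {
  private final List<String> expected;

  FileStatusArgMatcher(List<String> expected) {
    this.expected = expected;
  }

  @Override
  public boolean matches(FileStatus[] statuses) {
    // Compare the fetched namespace paths against the expected list, ignoring order.
    List<String> actual = new ArrayList<>();
    for (FileStatus s : statuses) {
      actual.add(s.getPath().toUri().getPath());
    }
    List<String> sortedExpected = new ArrayList<>(expected);
    Collections.sort(actual);
    Collections.sort(sortedExpected);
    return actual.equals(sortedExpected);
  }
}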
Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.
The class StatesManager, method init.
/**
 * Load configuration and data needed for initialization.
 *
 * @return true if initialized successfully
 */
public boolean init(DBAdapter dbAdapter) throws IOException {
  LOG.info("Initializing ...");
  this.cleanFileTableContents(dbAdapter);
  String nnUri = conf.get(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY);
  try {
    this.client = new DFSClient(new URI(nnUri), conf);
  } catch (URISyntaxException e) {
    throw new IOException(e);
  }
  this.executorService = Executors.newScheduledThreadPool(4);
  this.accessCountTableManager = new AccessCountTableManager(dbAdapter, executorService);
  this.fileAccessEventSource = MetricsFactory.createAccessEventSource(conf);
  this.accessEventFetcher = new AccessEventFetcher(
      conf, accessCountTableManager, executorService, fileAccessEventSource.getCollector());
  this.inotifyEventFetcher = new InotifyEventFetcher(client, dbAdapter, executorService);
  LOG.info("Initialized.");
  return true;
}
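For init() to succeed, the NameNode RPC address has to be present in the configuration before the DFSClient is constructed. A minimal wiring sketch, where the host and port are placeholders and the StatesManager constructor shape is assumed:

// Placeholder NameNode address; replace with the real RPC endpoint.
SmartConf conf = new SmartConf();
conf.set(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY, "hdfs://namenode-host:9000");

StatesManager statesManager = new StatesManager(conf);  // constructor signature assumed
if (!statesManager.init(dbAdapter)) {                    // dbAdapter prepared elsewhere
  throw new IOException("StatesManager initialization failed");
}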
Use of org.apache.hadoop.hdfs.DFSClient in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutputHelper, method createOutput.
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
    StreamSlowMonitor monitor) throws IOException {
  Configuration conf = dfs.getConf();
  DFSClient client = dfs.getClient();
  String clientName = client.getClientName();
  ClientProtocol namenode = client.getNamenode();
  int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES,
      DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
  ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
  Set<DatanodeInfo> toExcludeNodes =
      new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
  for (int retry = 0; ; retry++) {
    LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src,
        toExcludeNodes, retry);
    HdfsFileStatus stat;
    try {
      stat = FILE_CREATOR.create(namenode, src,
          FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
          getCreateFlags(overwrite), createParent, replication, blockSize,
          CryptoProtocolVersion.supported());
    } catch (Exception e) {
      if (e instanceof RemoteException) {
        throw (RemoteException) e;
      } else {
        throw new NameNodeException(e);
      }
    }
    beginFileLease(client, stat.getFileId());
    boolean succ = false;
    LocatedBlock locatedBlock = null;
    List<Future<Channel>> futureList = null;
    try {
      DataChecksum summer = createChecksum(client);
      locatedBlock = namenode.addBlock(src, client.getClientName(), null,
          toExcludeNodes.toArray(new DatanodeInfo[0]), stat.getFileId(), null, null);
      Map<Channel, DatanodeInfo> datanodes = new IdentityHashMap<>();
      futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
          PIPELINE_SETUP_CREATE, summer, eventLoopGroup, channelClass);
      for (int i = 0, n = futureList.size(); i < n; i++) {
        DatanodeInfo datanodeInfo = locatedBlock.getLocations()[i];
        try {
          datanodes.put(futureList.get(i).syncUninterruptibly().getNow(), datanodeInfo);
        } catch (Exception e) {
          // exclude the broken DN next time
          toExcludeNodes.add(datanodeInfo);
          excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "connect error");
          throw e;
        }
      }
      Encryptor encryptor = createEncryptor(conf, stat, client);
      FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, dfs, client,
          namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor, datanodes,
          summer, ALLOC, monitor);
      succ = true;
      return output;
    } catch (RemoteException e) {
      LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
      if (shouldRetryCreate(e)) {
        if (retry >= createMaxRetries) {
          throw e.unwrapRemoteException();
        }
      } else {
        throw e.unwrapRemoteException();
      }
    } catch (IOException e) {
      LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
      if (retry >= createMaxRetries) {
        throw e;
      }
      // overwrite the old broken file.
      overwrite = true;
      try {
        Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
      } catch (InterruptedException ie) {
        throw new InterruptedIOException();
      }
    } finally {
      if (!succ) {
        if (futureList != null) {
          for (Future<Channel> f : futureList) {
            f.addListener(new FutureListener<Channel>() {

              @Override
              public void operationComplete(Future<Channel> future) throws Exception {
                if (future.isSuccess()) {
                  future.getNow().close();
                }
              }
            });
          }
        }
        endFileLease(client, stat.getFileId());
      }
    }
  }
}
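The Thread.sleep(ConnectionUtils.getPauseTime(100, retry)) call above paces retries with a backoff that grows from a 100 ms base pause. A standalone sketch of that shape; the multipliers below are assumed for illustration and are not necessarily HBase's exact RETRY_BACKOFF table, and HBase's implementation may also add a small random jitter.

// Illustrative backoff: base pause scaled by a per-retry multiplier, capped at the
// last table entry.
static long pauseTimeMs(long basePauseMs, int retry) {
  long[] backoff = { 1, 2, 3, 5, 10, 20, 40, 100 };  // assumed multipliers
  int idx = Math.min(retry, backoff.length - 1);
  return basePauseMs * backoff[idx];
}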