
Example 46 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.

The class TestInotifyEventApplier, method testApplier:

@Test
public void testApplier() throws Exception {
    DFSClient client = mock(DFSClient.class);
    Connection connection = databaseTester.getConnection().getConnection();
    Util.initializeDataBase(connection);
    DBAdapter adapter = new DBAdapter(connection);
    InotifyEventApplier applier = new InotifyEventApplier(adapter, client);
    Event.CreateEvent createEvent = new Event.CreateEvent.Builder()
        .iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1).defaultBlockSize(1024)
        .groupName("cg1").overwrite(true).ownerName("user1").path("/file")
        .perms(new FsPermission("777")).replication(3).build();
    HdfsFileStatus status1 = new HdfsFileStatus(0, false, 1, 1024, 0, 0,
        new FsPermission((short) 777), "owner", "group", new byte[0], new byte[0],
        1010, 0, null, (byte) 0);
    when(client.getFileInfo(anyString())).thenReturn(status1);
    applier.apply(new Event[] { createEvent });
    ResultSet result1 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertEquals(result1.getString("path"), "/file");
    Assert.assertEquals(result1.getLong("fid"), 1010L);
    Assert.assertEquals(result1.getShort("permission"), 511);
    Event close = new Event.CloseEvent("/file", 1024, 0);
    applier.apply(new Event[] { close });
    ResultSet result2 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertEquals(result2.getLong("length"), 1024);
    Assert.assertEquals(result2.getLong("modification_time"), 0L);
    //    Event truncate = new Event.TruncateEvent("/file", 512, 16);
    //    applier.apply(new Event[] {truncate});
    //    ResultSet result3 = adapter.executeQuery("SELECT * FROM files");
    //    Assert.assertEquals(result3.getLong("length"), 512);
    //    Assert.assertEquals(result3.getLong("modification_time"), 16L);
    Event meta = new Event.MetadataUpdateEvent.Builder()
        .path("/file").metadataType(Event.MetadataUpdateEvent.MetadataType.TIMES)
        .mtime(2).atime(3).replication(4).ownerName("user2").groupName("cg2").build();
    applier.apply(new Event[] { meta });
    ResultSet result4 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertEquals(result4.getLong("access_time"), 3);
    Assert.assertEquals(result4.getLong("modification_time"), 2);
    Event.CreateEvent createEvent2 = new Event.CreateEvent.Builder()
        .iNodeType(Event.CreateEvent.INodeType.DIRECTORY).ctime(1).groupName("cg1")
        .overwrite(true).ownerName("user1").path("/dir")
        .perms(new FsPermission("777")).replication(3).build();
    Event.CreateEvent createEvent3 = new Event.CreateEvent.Builder()
        .iNodeType(Event.CreateEvent.INodeType.FILE).ctime(1).groupName("cg1")
        .overwrite(true).ownerName("user1").path("/dir/file")
        .perms(new FsPermission("777")).replication(3).build();
    Event rename = new Event.RenameEvent.Builder().dstPath("/dir2").srcPath("/dir").timestamp(5).build();
    applier.apply(new Event[] { createEvent2, createEvent3, rename });
    ResultSet result5 = adapter.executeQuery("SELECT * FROM files");
    List<String> expectedPaths = Arrays.asList("/dir2", "/dir2/file", "/file");
    List<String> actualPaths = new ArrayList<>();
    while (result5.next()) {
        actualPaths.add(result5.getString("path"));
    }
    Collections.sort(actualPaths);
    Assert.assertTrue(actualPaths.size() == 3);
    Assert.assertTrue(actualPaths.containsAll(expectedPaths));
    Event unlink = new Event.UnlinkEvent.Builder().path("/").timestamp(6).build();
    applier.apply(new Event[] { unlink });
    ResultSet result6 = adapter.executeQuery("SELECT * FROM files");
    Assert.assertFalse(result6.next());
}
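Note on the ResultSet reads above: a plain java.sql.ResultSet starts positioned before the first row, so column getters are only valid after next() has been called; whether the assertions on result1, result2, and result4 work as written depends on what DBAdapter.executeQuery actually returns. A minimal sketch of the defensive pattern, reusing the adapter from the test (only the explicit next() call is added):

ResultSet rs = adapter.executeQuery("SELECT * FROM files");
// Advance to the first row before reading columns; on a standard ResultSet that is still
// positioned before the first row, getString/getLong would throw an SQLException.
Assert.assertTrue(rs.next());
Assert.assertEquals("/file", rs.getString("path"));
Assert.assertEquals(1010L, rs.getLong("fid"));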
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Connection(java.sql.Connection) ArrayList(java.util.ArrayList) Matchers.anyString(org.mockito.Matchers.anyString) DBAdapter(org.smartdata.server.metastore.DBAdapter) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ResultSet(java.sql.ResultSet) Event(org.apache.hadoop.hdfs.inotify.Event) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test) DBTest(org.smartdata.server.metastore.DBTest)

Example 47 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.

The class TestNamespaceFetcher, method testNamespaceFetcher:

@Test
public void testNamespaceFetcher() throws IOException, InterruptedException, MissingEventsException, SQLException {
    final Configuration conf = new SmartConf();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.mkdir(new Path("/user"), new FsPermission("777"));
    dfs.create(new Path("/user/user1"));
    dfs.create(new Path("/user/user2"));
    dfs.mkdir(new Path("/tmp"), new FsPermission("777"));
    DFSClient client = dfs.getClient();
    DBAdapter adapter = mock(DBAdapter.class);
    NamespaceFetcher fetcher = new NamespaceFetcher(client, adapter, 100);
    fetcher.startFetch();
    List<String> expected = Arrays.asList("/", "/user", "/user/user1", "/user/user2", "/tmp");
    Thread.sleep(1000);
    verify(adapter).insertFiles(argThat(new FileStatusArgMatcher(expected)));
    fetcher.stop();
    cluster.shutdown();
}
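One timing note: the fixed Thread.sleep(1000) makes the verification race against the fetcher thread. A hedged alternative sketch, assuming Mockito's timeout() verification mode (org.mockito.Mockito.timeout) is available in the Mockito version on the classpath, polls instead of sleeping:

verify(adapter, timeout(5000)).insertFiles(argThat(new FileStatusArgMatcher(expected)));
// timeout(5000) keeps retrying the verification for up to five seconds, so the test waits
// only as long as the NamespaceFetcher actually needs to push the namespace into the adapter.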
Also used : Path(org.apache.hadoop.fs.Path) DFSClient(org.apache.hadoop.hdfs.DFSClient) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DBAdapter(org.smartdata.server.metastore.DBAdapter) SmartConf(org.smartdata.conf.SmartConf) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 48 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project SSM by Intel-bigdata.

The class StatesManager, method init:

/**
   * Load configuration/data to initialize.
   *
   * @return true if initialized successfully
   */
public boolean init(DBAdapter dbAdapter) throws IOException {
    LOG.info("Initializing ...");
    this.cleanFileTableContents(dbAdapter);
    String nnUri = conf.get(SmartConfKeys.DFS_SSM_NAMENODE_RPCSERVER_KEY);
    try {
        this.client = new DFSClient(new URI(nnUri), conf);
    } catch (URISyntaxException e) {
        throw new IOException(e);
    }
    this.executorService = Executors.newScheduledThreadPool(4);
    this.accessCountTableManager = new AccessCountTableManager(dbAdapter, executorService);
    this.fileAccessEventSource = MetricsFactory.createAccessEventSource(conf);
    this.accessEventFetcher = new AccessEventFetcher(conf, accessCountTableManager, executorService, fileAccessEventSource.getCollector());
    this.inotifyEventFetcher = new InotifyEventFetcher(client, dbAdapter, executorService);
    LOG.info("Initialized.");
    return true;
}
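The NameNode RPC address is read from SSM's own configuration key, and the DFSClient is built directly from that URI. As a standalone illustration, the same construction looks like the sketch below; the hdfs:// address is a placeholder, not a value taken from the project:

// Minimal sketch: open a DFSClient against a NameNode RPC address and issue a sanity call.
Configuration conf = new Configuration();
DFSClient client = new DFSClient(new URI("hdfs://namenode-host:8020"), conf);
HdfsFileStatus rootStatus = client.getFileInfo("/");   // returns null if the path does not exist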
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) AccessEventFetcher(org.smartdata.server.metric.fetcher.AccessEventFetcher) AccessCountTableManager(org.smartdata.server.metastore.tables.AccessCountTableManager) URISyntaxException(java.net.URISyntaxException) IOException(java.io.IOException) InotifyEventFetcher(org.smartdata.server.metric.fetcher.InotifyEventFetcher) URI(java.net.URI)

Example 49 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hbase by apache.

The class FanOutOneBlockAsyncDFSOutputHelper, method createDFSClientAdaptor:

private static DFSClientAdaptor createDFSClientAdaptor() throws NoSuchMethodException {
    Method isClientRunningMethod = DFSClient.class.getDeclaredMethod("isClientRunning");
    isClientRunningMethod.setAccessible(true);
    return new DFSClientAdaptor() {

        @Override
        public boolean isClientRunning(DFSClient client) {
            try {
                return (Boolean) isClientRunningMethod.invoke(client);
            } catch (IllegalAccessException | InvocationTargetException e) {
                throw new RuntimeException(e);
            }
        }
    };
}
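DFSClient#isClientRunning is not accessible outside the hdfs package, which is why the adaptor reaches it through reflection (getDeclaredMethod plus setAccessible) and captures the Method once. A minimal usage sketch; dfs is an assumed existing DistributedFileSystem, and the exception message is illustrative:

// Sketch: guard work on a DFSClient behind the reflective isClientRunning check.
DFSClientAdaptor adaptor = createDFSClientAdaptor();
DFSClient client = dfs.getClient();
if (!adaptor.isClientRunning(client)) {
    throw new IOException("DFSClient is already closed");
}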
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Method(java.lang.reflect.Method) InvocationTargetException(java.lang.reflect.InvocationTargetException)

Example 50 with DFSClient

Use of org.apache.hadoop.hdfs.DFSClient in project hbase by apache.

The class FanOutOneBlockAsyncDFSOutputHelper, method createOutput:

private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
        boolean overwrite, boolean createParent, short replication, long blockSize,
        EventLoop eventLoop) throws IOException {
    Configuration conf = dfs.getConf();
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    HdfsFileStatus stat;
    try {
        stat = namenode.create(src,
            FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
            new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
            createParent, replication, blockSize, CryptoProtocolVersion.supported());
    } catch (Exception e) {
        if (e instanceof RemoteException) {
            throw (RemoteException) e;
        } else {
            throw new NameNodeException(e);
        }
    }
    beginFileLease(client, stat.getFileId());
    boolean succ = false;
    LocatedBlock locatedBlock = null;
    List<Future<Channel>> futureList = null;
    try {
        DataChecksum summer = createChecksum(client);
        locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, null, stat.getFileId(), null);
        List<Channel> datanodeList = new ArrayList<>();
        futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L, PIPELINE_SETUP_CREATE, summer, eventLoop);
        for (Future<Channel> future : futureList) {
            // fail the creation if there are connection failures since we are fail-fast. The upper
            // layer should retry itself if needed.
            datanodeList.add(future.syncUninterruptibly().getNow());
        }
        Encryptor encryptor = createEncryptor(conf, stat, client);
        FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs,
            client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor, eventLoop,
            datanodeList, summer, ALLOC);
        succ = true;
        return output;
    } finally {
        if (!succ) {
            if (futureList != null) {
                for (Future<Channel> f : futureList) {
                    f.addListener(new FutureListener<Channel>() {

                        @Override
                        public void operationComplete(Future<Channel> future) throws Exception {
                            if (future.isSuccess()) {
                                future.getNow().close();
                            }
                        }
                    });
                }
            }
            endFileLease(client, stat.getFileId());
            fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
        }
    }
}
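A hedged caller sketch for createOutput: dfs and eventLoop are assumed to be an existing DistributedFileSystem and Netty EventLoop, the path, replication factor, and block size are illustrative values, and FanOutOneBlockAsyncDFSOutput is assumed to expose a close() that completes the file on the success path (the fail-fast cleanup above only runs when creation did not succeed):

// Sketch: obtain the fan-out output for a new file and always close it; per the assumption
// above, close() completes the file at the NameNode and releases the lease taken here.
FanOutOneBlockAsyncDFSOutput out = createOutput(dfs, "/hbase/WALs/example.wal",
    true /* overwrite */, false /* createParent */, (short) 3, 128 * 1024 * 1024L, eventLoop);
try {
    // write and flush WAL entries through 'out' here
} finally {
    out.close();
}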
Also used : Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor(org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor) Encryptor(org.apache.hadoop.crypto.Encryptor) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSUtils(org.apache.hadoop.hbase.util.FSUtils) DFSClient(org.apache.hadoop.hdfs.DFSClient) EnumSetWritable(org.apache.hadoop.io.EnumSetWritable) Path(org.apache.hadoop.fs.Path) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) Channel(io.netty.channel.Channel) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException) InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException) InvocationTargetException(java.lang.reflect.InvocationTargetException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) DataChecksum(org.apache.hadoop.util.DataChecksum) ChannelFuture(io.netty.channel.ChannelFuture) Future(io.netty.util.concurrent.Future) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) RemoteException(org.apache.hadoop.ipc.RemoteException)

Aggregations

DFSClient (org.apache.hadoop.hdfs.DFSClient): 97 usages
Test (org.junit.Test): 53 usages
IOException (java.io.IOException): 35 usages
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 27 usages
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 26 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 18 usages
Path (org.apache.hadoop.fs.Path): 18 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 17 usages
InetSocketAddress (java.net.InetSocketAddress): 13 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 13 usages
Configuration (org.apache.hadoop.conf.Configuration): 12 usages
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 12 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 11 usages
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 11 usages
HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 9 usages
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 9 usages
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7 usages
ArrayList (java.util.ArrayList): 6 usages