
Example 1 with CacheDirectiveEntry

Use of org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry in project hadoop by apache.

From the class TestFsDatasetCache, method testReCacheAfterUncache.

@Test(timeout = 60000)
public void testReCacheAfterUncache() throws Exception {
    final int TOTAL_BLOCKS_PER_CACHE = Ints.checkedCast(CACHE_CAPACITY / BLOCK_SIZE);
    BlockReaderTestUtil.enableHdfsCachingTracing();
    Assert.assertEquals(0, CACHE_CAPACITY % BLOCK_SIZE);
    // Create a small file
    final Path SMALL_FILE = new Path("/smallFile");
    DFSTestUtil.createFile(fs, SMALL_FILE, BLOCK_SIZE, (short) 1, 0xcafe);
    // Create a file that will take up the whole cache
    final Path BIG_FILE = new Path("/bigFile");
    DFSTestUtil.createFile(fs, BIG_FILE, TOTAL_BLOCKS_PER_CACHE * BLOCK_SIZE, (short) 1, 0xbeef);
    final DistributedFileSystem dfs = cluster.getFileSystem();
    dfs.addCachePool(new CachePoolInfo("pool"));
    final long bigCacheDirectiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(BIG_FILE).setReplication((short) 1).build());
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
            long blocksCached = MetricsAsserts.getLongCounter("BlocksCached", dnMetrics);
            if (blocksCached != TOTAL_BLOCKS_PER_CACHE) {
                LOG.info("waiting for " + TOTAL_BLOCKS_PER_CACHE + " to " + "be cached.   Right now only " + blocksCached + " blocks are cached.");
                return false;
            }
            LOG.info(TOTAL_BLOCKS_PER_CACHE + " blocks are now cached.");
            return true;
        }
    }, 1000, 30000);
    // Try to cache a smaller file.  It should fail.
    final long shortCacheDirectiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPool("pool").setPath(SMALL_FILE).setReplication((short) 1).build());
    Thread.sleep(10000);
    MetricsRecordBuilder dnMetrics = getMetrics(dn.getMetrics().name());
    Assert.assertEquals(TOTAL_BLOCKS_PER_CACHE, MetricsAsserts.getLongCounter("BlocksCached", dnMetrics));
    // Uncache the big file and verify that the small file can now be
    // cached (regression test for HDFS-6107)
    dfs.removeCacheDirective(bigCacheDirectiveId);
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            RemoteIterator<CacheDirectiveEntry> iter;
            try {
                iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().build());
                CacheDirectiveEntry entry;
                do {
                    entry = iter.next();
                } while (entry.getInfo().getId() != shortCacheDirectiveId);
                if (entry.getStats().getFilesCached() != 1) {
                    LOG.info("waiting for directive " + shortCacheDirectiveId + " to be cached.  stats = " + entry.getStats());
                    return false;
                }
                LOG.info("directive " + shortCacheDirectiveId + " has been cached.");
            } catch (IOException e) {
                Assert.fail("unexpected exception" + e.toString());
            }
            return true;
        }
    }, 1000, 30000);
    dfs.removeCacheDirective(shortCacheDirectiveId);
}
Also used: Path (org.apache.hadoop.fs.Path), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RemoteIterator (org.apache.hadoop.fs.RemoteIterator), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), Matchers.anyBoolean (org.mockito.Matchers.anyBoolean), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
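Distilled from the test above, the end-to-end caching workflow is: create a pool, attach a directive, poll the directive's stats, then remove it. A minimal sketch reusing the test's cluster and LOG handles; the pool name and path are placeholders and error handling is omitted:

DistributedFileSystem dfs = cluster.getFileSystem();
dfs.addCachePool(new CachePoolInfo("pool"));
// Ask the DataNodes to cache every block of the file.
long directiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPool("pool")
    .setPath(new Path("/data/hot"))   // placeholder path
    .setReplication((short) 1)
    .build());
// Each CacheDirectiveEntry pairs the directive's info with live stats.
RemoteIterator<CacheDirectiveEntry> it =
    dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().build());
while (it.hasNext()) {
    CacheDirectiveEntry entry = it.next();
    if (entry.getInfo().getId() == directiveId) {
        LOG.info("files cached so far: " + entry.getStats().getFilesCached());
    }
}
// Removing the directive lets the DataNodes uncache the blocks.
dfs.removeCacheDirective(directiveId);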

Example 2 with CacheDirectiveEntry

Use of org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry in project hadoop by apache.

From the class CacheManager, method listCacheDirectives.

public BatchedListEntries<CacheDirectiveEntry> listCacheDirectives(long prevId, CacheDirectiveInfo filter, FSPermissionChecker pc) throws IOException {
    assert namesystem.hasReadLock();
    final int NUM_PRE_ALLOCATED_ENTRIES = 16;
    String filterPath = null;
    if (filter.getPath() != null) {
        filterPath = validatePath(filter);
    }
    if (filter.getReplication() != null) {
        throw new InvalidRequestException("Filtering by replication is unsupported.");
    }
    // Querying for a single ID
    final Long id = filter.getId();
    if (id != null) {
        if (!directivesById.containsKey(id)) {
            throw new InvalidRequestException("Did not find requested id " + id);
        }
        // Since we use a tailMap on directivesById, setting prev to id-1 gets
        // us the directive with the id (if present)
        prevId = id - 1;
    }
    ArrayList<CacheDirectiveEntry> replies = new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
    int numReplies = 0;
    SortedMap<Long, CacheDirective> tailMap = directivesById.tailMap(prevId + 1);
    for (Entry<Long, CacheDirective> cur : tailMap.entrySet()) {
        if (numReplies >= maxListCacheDirectivesNumResponses) {
            return new BatchedListEntries<CacheDirectiveEntry>(replies, true);
        }
        CacheDirective curDirective = cur.getValue();
        CacheDirectiveInfo info = curDirective.toInfo();
        // When listing by a single ID, the tailMap starts at the requested
        // directive; if the first entry's ID does not match, the requested
        // item is absent and we should break out.
        if (id != null && !(info.getId().equals(id))) {
            break;
        }
        if (filter.getPool() != null && !info.getPool().equals(filter.getPool())) {
            continue;
        }
        if (filterPath != null && !info.getPath().toUri().getPath().equals(filterPath)) {
            continue;
        }
        boolean hasPermission = true;
        if (pc != null) {
            try {
                pc.checkPermission(curDirective.getPool(), FsAction.READ);
            } catch (AccessControlException e) {
                hasPermission = false;
            }
        }
        if (hasPermission) {
            replies.add(new CacheDirectiveEntry(info, curDirective.toStats()));
            numReplies++;
        }
    }
    return new BatchedListEntries<CacheDirectiveEntry>(replies, false);
}
Also used: ArrayList (java.util.ArrayList), AccessControlException (org.apache.hadoop.security.AccessControlException), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirective (org.apache.hadoop.hdfs.protocol.CacheDirective), BatchedListEntries (org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException)
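The prevId / hasMore() pair defines the paging contract: the caller passes the highest directive ID it has already seen and keeps fetching while the server reports more results. A hedged client-side sketch of that loop, assuming a server handle exposing the same listCacheDirectives(prevId, filter) signature as the translator in Example 4, and a hypothetical process() consumer:

CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().build();
long prevId = 0;  // directive IDs start at 1 (see Example 3), so 0 means "from the beginning"
BatchedEntries<CacheDirectiveEntry> batch;
do {
    batch = server.listCacheDirectives(prevId, filter);
    for (int i = 0, n = batch.size(); i < n; i++) {
        CacheDirectiveEntry entry = batch.get(i);
        prevId = entry.getInfo().getId();  // resume point for the next batch
        process(entry);                    // hypothetical consumer
    }
} while (batch.hasMore());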

Example 3 with CacheDirectiveEntry

Use of org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry in project hadoop by apache.

From the class TestCacheDirectives, method testCacheManagerRestart.

@Test(timeout = 60000)
public void testCacheManagerRestart() throws Exception {
    SecondaryNameNode secondary = null;
    try {
        // Start a secondary namenode
        conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
        secondary = new SecondaryNameNode(conf);
        // Create and validate a pool
        final String pool = "poolparty";
        String groupName = "partygroup";
        FsPermission mode = new FsPermission((short) 0777);
        long limit = 747;
        dfs.addCachePool(new CachePoolInfo(pool).setGroupName(groupName).setMode(mode).setLimit(limit));
        RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        CachePoolInfo info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        // Create some cache entries
        int numEntries = 10;
        String entryPrefix = "/party-";
        long prevId = -1;
        final Date expiry = new Date();
        for (int i = 0; i < numEntries; i++) {
            prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path(entryPrefix + i)).setPool(pool).setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).build());
        }
        RemoteIterator<CacheDirectiveEntry> dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        // Checkpoint once to set some cache pools and directives on 2NN side
        secondary.doCheckpoint();
        // Add some more CacheManager state
        final String imagePool = "imagePool";
        dfs.addCachePool(new CachePoolInfo(imagePool));
        prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/image")).setPool(imagePool).build());
        // Save a new image to force a fresh fsimage download
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        dfs.saveNamespace();
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Checkpoint again forcing a reload of FSN state
        boolean fetchImage = secondary.doCheckpoint();
        assertTrue("Secondary should have fetched a new fsimage from NameNode", fetchImage);
        // Remove temp pool and directive
        dfs.removeCachePool(imagePool);
        // Restart namenode
        cluster.restartNameNode();
        // Check that state came back up
        pit = dfs.listCachePools();
        assertTrue("No cache pools found", pit.hasNext());
        info = pit.next().getInfo();
        assertEquals(pool, info.getPoolName());
        assertEquals(groupName, info.getGroupName());
        assertEquals(mode, info.getMode());
        assertEquals(limit, (long) info.getLimit());
        assertFalse("Unexpected # of cache pools found", pit.hasNext());
        dit = dfs.listCacheDirectives(null);
        for (int i = 0; i < numEntries; i++) {
            assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
            CacheDirectiveInfo cd = dit.next().getInfo();
            assertEquals(i + 1, cd.getId().longValue());
            assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
            assertEquals(pool, cd.getPool());
            assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
        }
        assertFalse("Unexpected # of cache directives found", dit.hasNext());
        long nextId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/foobar")).setPool(pool).build());
        assertEquals(prevId + 1, nextId);
    } finally {
        if (secondary != null) {
            secondary.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Date (java.util.Date), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry), Test (org.junit.Test)
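The test pins every directive to an absolute expiration (Expiration.newAbsolute) precisely so it can assert that the timestamp survives checkpointing and a NameNode restart. For completeness, a hedged sketch of setting an expiration relative to now instead, assuming the companion newRelative factory on CacheDirectiveInfo.Expiration; the path and pool reuse the test's names:

// Cache /party-0 for one hour from now.
long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPath(new Path("/party-0"))
    .setPool("poolparty")
    .setExpiration(CacheDirectiveInfo.Expiration.newRelative(60L * 60 * 1000))
    .build());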

Example 4 with CacheDirectiveEntry

Use of org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry in project hadoop by apache.

From the class ClientNamenodeProtocolServerSideTranslatorPB, method listCacheDirectives.

@Override
public ListCacheDirectivesResponseProto listCacheDirectives(RpcController controller, ListCacheDirectivesRequestProto request) throws ServiceException {
    try {
        CacheDirectiveInfo filter = PBHelperClient.convert(request.getFilter());
        BatchedEntries<CacheDirectiveEntry> entries = server.listCacheDirectives(request.getPrevId(), filter);
        ListCacheDirectivesResponseProto.Builder builder = ListCacheDirectivesResponseProto.newBuilder();
        builder.setHasMore(entries.hasMore());
        for (int i = 0, n = entries.size(); i < n; i++) {
            builder.addElements(PBHelperClient.convert(entries.get(i)));
        }
        return builder.build();
    } catch (IOException e) {
        throw new ServiceException(e);
    }
}
Also used: CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), ServiceException (com.google.protobuf.ServiceException), ListCacheDirectivesResponseProto (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry), IOException (java.io.IOException)

Example 5 with CacheDirectiveEntry

Use of org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry in project hadoop by apache.

From the class DistributedFileSystem, method listCacheDirectives.

/**
   * List cache directives.  Incrementally fetches results from the server.
   *
   * @param filter Filter parameters to use when listing the directives, null to
   *               list all directives visible to us.
   * @return A RemoteIterator which returns CacheDirectiveEntry objects.
   */
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(CacheDirectiveInfo filter) throws IOException {
    if (filter == null) {
        filter = new CacheDirectiveInfo.Builder().build();
    }
    if (filter.getPath() != null) {
        filter = new CacheDirectiveInfo.Builder(filter).setPath(new Path(getPathName(fixRelativePart(filter.getPath())))).build();
    }
    final RemoteIterator<CacheDirectiveEntry> iter = dfs.listCacheDirectives(filter);
    return new RemoteIterator<CacheDirectiveEntry>() {

        @Override
        public boolean hasNext() throws IOException {
            return iter.hasNext();
        }

        @Override
        public CacheDirectiveEntry next() throws IOException {
            // Although the paths we get back from the NameNode should always be
            // absolute, we call makeQualified to add the scheme and authority of
            // this DistributedFileSystem.
            CacheDirectiveEntry desc = iter.next();
            CacheDirectiveInfo info = desc.getInfo();
            Path p = info.getPath().makeQualified(getUri(), getWorkingDirectory());
            return new CacheDirectiveEntry(new CacheDirectiveInfo.Builder(info).setPath(p).build(), desc.getStats());
        }
    };
}
Also used: Path (org.apache.hadoop.fs.Path), RemoteIterator (org.apache.hadoop.fs.RemoteIterator), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry)
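A typical call site filters by pool and lets the returned iterator fetch batches from the NameNode lazily. A minimal sketch; the pool name is a placeholder, and getFilesCached comes from the directive stats shown in Example 1:

CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
    .setPool("pool")   // placeholder pool name
    .build();
RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
while (it.hasNext()) {
    CacheDirectiveEntry entry = it.next();
    // Paths come back fully qualified with this filesystem's scheme/authority.
    System.out.println(entry.getInfo().getPath() + " files cached: "
        + entry.getStats().getFilesCached());
}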

Aggregations

CacheDirectiveEntry (org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry): 12 usages
CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo): 10 usages
Path (org.apache.hadoop.fs.Path): 8 usages
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 6 usages
Test (org.junit.Test): 6 usages
InvalidRequestException (org.apache.hadoop.fs.InvalidRequestException): 4 usages
IOException (java.io.IOException): 3 usages
Date (java.util.Date): 3 usages
ArrayList (java.util.ArrayList): 2 usages
RemoteIterator (org.apache.hadoop.fs.RemoteIterator): 2 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
CachePoolEntry (org.apache.hadoop.hdfs.protocol.CachePoolEntry): 2 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 2 usages
ServiceException (com.google.protobuf.ServiceException): 1 usage
HashSet (java.util.HashSet): 1 usage
LinkedList (java.util.LinkedList): 1 usage
BatchedListEntries (org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries): 1 usage
CacheFlag (org.apache.hadoop.fs.CacheFlag): 1 usage
FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper): 1 usage