Example 1 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From class BucketCache, method iterator:

@Override
public Iterator<CachedBlock> iterator() {
    // Don't bother with ramcache since stuff is in here only a little while.
    final Iterator<Map.Entry<BlockCacheKey, BucketEntry>> i = this.backingMap.entrySet().iterator();
    return new Iterator<CachedBlock>() {

        private final long now = System.nanoTime();

        @Override
        public boolean hasNext() {
            return i.hasNext();
        }

        @Override
        public CachedBlock next() {
            final Map.Entry<BlockCacheKey, BucketEntry> e = i.next();
            return new CachedBlock() {

                @Override
                public String toString() {
                    return BlockCacheUtil.toString(this, now);
                }

                @Override
                public BlockPriority getBlockPriority() {
                    return e.getValue().getPriority();
                }

                @Override
                public BlockType getBlockType() {
                    // Not held by BucketEntry.  Could add it if wanted on BucketEntry creation.
                    return null;
                }

                @Override
                public long getOffset() {
                    return e.getKey().getOffset();
                }

                @Override
                public long getSize() {
                    return e.getValue().getLength();
                }

                @Override
                public long getCachedTime() {
                    return e.getValue().getCachedTime();
                }

                @Override
                public String getFilename() {
                    return e.getKey().getHfileName();
                }

                @Override
                public int compareTo(CachedBlock other) {
                    int diff = this.getFilename().compareTo(other.getFilename());
                    if (diff != 0)
                        return diff;
                    diff = Long.compare(this.getOffset(), other.getOffset());
                    if (diff != 0)
                        return diff;
                    if (other.getCachedTime() < 0 || this.getCachedTime() < 0) {
                        throw new IllegalStateException("" + this.getCachedTime() + ", " + other.getCachedTime());
                    }
                    return Long.compare(other.getCachedTime(), this.getCachedTime());
                }

                @Override
                public int hashCode() {
                    return e.getKey().hashCode();
                }

                @Override
                public boolean equals(Object obj) {
                    if (obj instanceof CachedBlock) {
                        CachedBlock cb = (CachedBlock) obj;
                        return compareTo(cb) == 0;
                    } else {
                        return false;
                    }
                }
            };
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), Iterator (java.util.Iterator), Map (java.util.Map), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey)
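A quick usage sketch of the view this iterator exposes: BlockCache extends Iterable<CachedBlock>, so a caller can walk any cache implementation and rebuild each block's BlockCacheKey from its HFile name and offset, the same two fields the test examples below pass to the key's constructor. The class and method names here are mine, not part of HBase.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;

public class CacheKeyLister {

    // Collect a BlockCacheKey for every block currently visible through the
    // cache's CachedBlock iterator.
    public static List<BlockCacheKey> listKeys(BlockCache cache) {
        List<BlockCacheKey> keys = new ArrayList<>();
        for (CachedBlock block : cache) {
            keys.add(new BlockCacheKey(block.getFilename(), block.getOffset()));
        }
        return keys;
    }
}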

Example 2 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From class TestAvoidCellReferencesIntoShippedBlocks, method testHBASE16372InReadPath:

@Test
public void testHBASE16372InReadPath() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    // Create a table with block size as 1024
    final Table table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, null);
    try {
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        final BlockCache cache = cacheConf.getBlockCache();
        // insert data: 6 rows (ROW through ROW5), two columns each
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW2);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW3);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW4);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW4);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        put = new Put(ROW5);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW5);
        put.addColumn(FAMILY, QUALIFIER1, data);
        table.put(put);
        // flush the memstore; with cacheDataOnWrite the new HFile's blocks are cached
        region.flush(true);
        // Load cache
        Scan s = new Scan();
        s.setMaxResultSize(1000);
        ResultScanner scanner = table.getScanner(s);
        int count = 0;
        for (Result result : scanner) {
            count++;
        }
        assertEquals("Count all the rows ", count, 6);
        // Scan from cache
        s = new Scan();
        // Start a scan from ROW1
        s.setCaching(1);
        s.setStartRow(ROW1);
        // allow partial results so the scan can return a row's cells split across several Results
        s.setAllowPartialResults(true);
        s.setMaxResultSize(1000);
        scanner = table.getScanner(s);
        Thread evictorThread = new Thread() {

            @Override
            public void run() {
                List<BlockCacheKey> cacheList = new ArrayList<>();
                Iterator<CachedBlock> iterator = cache.iterator();
                // evict all the blocks
                while (iterator.hasNext()) {
                    CachedBlock next = iterator.next();
                    BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
                    cacheList.add(cacheKey);
                    cache.evictBlock(cacheKey);
                }
                try {
                    Thread.sleep(1);
                } catch (InterruptedException e1) {
                    // best-effort pause; restore the interrupt flag and continue
                    Thread.currentThread().interrupt();
                }
                iterator = cache.iterator();
                int refBlockCount = 0;
                while (iterator.hasNext()) {
                    iterator.next();
                    refBlockCount++;
                }
                assertEquals("One block should be there ", refBlockCount, 1);
                // Rescan to repopulate the block cache with this data.
                Scan s1 = new Scan();
                // This scan covers ROW3 up to (but not including) ROW5.
                s1.setStartRow(ROW3);
                s1.setStopRow(ROW5);
                s1.setCaching(1);
                ResultScanner scanner;
                try {
                    scanner = table.getScanner(s1);
                    int count = 0;
                    for (Result result : scanner) {
                        count++;
                    }
                    assertEquals("Count the rows", count, 2);
                    iterator = cache.iterator();
                    List<BlockCacheKey> newCacheList = new ArrayList<>();
                    while (iterator.hasNext()) {
                        CachedBlock next = iterator.next();
                        BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
                        newCacheList.add(cacheKey);
                    }
                    int newBlockRefCount = 0;
                    for (BlockCacheKey key : cacheList) {
                        if (newCacheList.contains(key)) {
                            newBlockRefCount++;
                        }
                    }
                    assertEquals("old blocks should still be found ", newBlockRefCount, 6);
                    latch.countDown();
                } catch (IOException e) {
                    // Note: swallowing this leaves the latch un-counted and the
                    // main scan loop blocked; a failure here will hang the test.
                }
            }
        };
        count = 0;
        for (Result result : scanner) {
            count++;
            if (count == 2) {
                evictorThread.start();
                latch.await();
            }
        }
        assertEquals("Count should give all rows ", count, 10);
    } finally {
        table.close();
    }
}
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), ArrayList (java.util.ArrayList), Store (org.apache.hadoop.hbase.regionserver.Store), IOException (java.io.IOException), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), Region (org.apache.hadoop.hbase.regionserver.Region), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Test (org.junit.Test)
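As the evictor thread above shows, eviction needs nothing more than a key rebuilt from filename and offset. Here is a hedged sketch of the same idea factored into a per-file helper; the class and method names are mine, and note that BlockCache also exposes evictBlocksByHfileName(String), which covers the whole-file case in a single call.

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CachedBlock;

public class HFileEvictor {

    // Evict every cached block belonging to the given HFile by walking the
    // iterator view and rebuilding each block's key, as the test above does.
    // Returns how many blocks were actually evicted.
    public static int evictFile(BlockCache cache, String hfileName) {
        int evicted = 0;
        for (CachedBlock block : cache) {
            if (hfileName.equals(block.getFilename())
                    && cache.evictBlock(new BlockCacheKey(block.getFilename(), block.getOffset()))) {
                evicted++;
            }
        }
        return evicted;
    }
}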

Example 3 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From class TestBlockEvictionFromClient, method testMultiGets:

@Test
public void testMultiGets() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(2);
        // Check that get() releases its cached blocks when it closes
        getLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserver.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        region.flush(true);
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        region.flush(true);
        // The three flushes above created three HFiles; with cacheDataOnWrite
        // their blocks were written straight into the block cache
        System.out.println("Flushed the data");
        CustomInnerRegionObserver.waitForGets.set(true);
        // Create three sets of gets
        MultiGetThread[] getThreads = initiateMultiGet(table);
        Thread.sleep(200);
        int refCount;
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean foundNonZeroBlock = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                assertEquals(NO_OF_THREADS, refCount);
                foundNonZeroBlock = true;
            }
        }
        assertTrue("Should have found nonzero ref count block", foundNonZeroBlock);
        CustomInnerRegionObserver.getCdl().get().countDown();
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (MultiGetThread thread : getThreads) {
            thread.join();
        }
        // Verify that the gets have released the blocks they were holding
        CustomInnerRegionObserver.waitForGets.set(true);
        // give the ref counts some time to be decremented, then re-check the cache
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        getLatch.countDown();
        System.out.println("Gets should have returned the bloks");
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), Store (org.apache.hadoop.hbase.regionserver.Store), CountDownLatch (java.util.concurrent.CountDownLatch), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region (org.apache.hadoop.hbase.regionserver.Region), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Test (org.junit.Test)
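Examples 3 through 5 repeat the same instanceof dispatch to read a block's ref count, because getRefCount(BlockCacheKey) is defined on BucketCache and CombinedBlockCache but not on the BlockCache interface. A small sketch of that pattern pulled into a helper; the class and method names are mine.

import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;

public class RefCounts {

    // Return the ref count of the given block, or -1 when the cache
    // implementation does not track per-block ref counts.
    public static int refCountOf(BlockCache cache, BlockCacheKey key) {
        if (cache instanceof BucketCache) {
            return ((BucketCache) cache).getRefCount(key);
        } else if (cache instanceof CombinedBlockCache) {
            return ((CombinedBlockCache) cache).getRefCount(key);
        }
        return -1;
    }
}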

Example 4 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From class TestBlockEvictionFromClient, method testScanWithCompactionInternals:

private void testScanWithCompactionInternals(String tableNameStr, boolean reversed) throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        compactionLatch = new CountDownLatch(1);
        TableName tableName = TableName.valueOf(tableNameStr);
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // insert data. 2 Rows are added
        Put put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        put = new Put(ROW1);
        put.addColumn(FAMILY, QUALIFIER, data);
        table.put(put);
        assertTrue(Bytes.equals(table.get(new Get(ROW)).value(), data));
        // Should create one HFile with 2 blocks
        region.flush(true);
        // read the data and expect the same blocks: one new hit, no misses
        int refCount = 0;
        // Check how this miss is happening
        // insert a second column, then read the row: no new blocks, 3 new hits
        byte[] QUALIFIER2 = Bytes.add(QUALIFIER, QUALIFIER);
        byte[] data2 = Bytes.add(data, data);
        put = new Put(ROW);
        put.addColumn(FAMILY, QUALIFIER2, data2);
        table.put(put);
        // flush, one new block
        System.out.println("Flushing cache");
        region.flush(true);
        Iterator<CachedBlock> iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        // Create three sets of scan
        ScanThread[] scanThreads = initiateScan(table, reversed);
        Thread.sleep(100);
        iterator = cache.iterator();
        boolean usedBlocksFound = false;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // In-use blocks should have a ref count equal to NO_OF_THREADS
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        usedBlocksFound = false;
        System.out.println("Compacting");
        assertEquals(2, store.getStorefilesCount());
        store.triggerMajorCompaction();
        region.compact(true);
        // wait 10 seconds max
        waitForStoreFileCount(store, 1, 10000);
        assertEquals(1, store.getStorefilesCount());
        // Even after compaction is done, some blocks cannot be evicted
        // because the scans are still referencing them
        iterator = cache.iterator();
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // Ref counts are still NO_OF_THREADS; the scanners have not released these blocks yet
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue("Blocks with non zero ref count should be found ", usedBlocksFound);
        // Should not throw exception
        compactionLatch.countDown();
        latch.countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        // by this time all blocks should have been evicted
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
        Result r = table.get(new Get(ROW));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER), data));
        assertTrue(Bytes.equals(r.getValue(FAMILY, QUALIFIER2), data2));
        // The gets would be working on new blocks
        iterator = cache.iterator();
        iterateBlockCache(cache, iterator);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), Store (org.apache.hadoop.hbase.regionserver.Store), CountDownLatch (java.util.concurrent.CountDownLatch), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region (org.apache.hadoop.hbase.regionserver.Region), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig)
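The compaction step above blocks on waitForStoreFileCount, a private helper of this test class whose body is not shown here. Below is a minimal reconstruction under the assumption that it simply polls the store until the expected file count appears or the timeout elapses; treat it as a sketch, not the test's actual code.

import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hbase.regionserver.Store;

public class CompactionWaiter {

    // Poll the store until it holds the expected number of store files,
    // failing the assertion if the timeout elapses first.
    static void waitForStoreFileCount(Store store, int expected, long timeoutMillis)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (store.getStorefilesCount() != expected
                && System.currentTimeMillis() < deadline) {
            Thread.sleep(100);
        }
        assertEquals(expected, store.getStorefilesCount());
    }
}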

Example 5 with BlockCacheKey

Use of org.apache.hadoop.hbase.io.hfile.BlockCacheKey in project hbase by apache.

From class TestBlockEvictionFromClient, method testScanWithException:

@Test
public void testScanWithException() throws IOException, InterruptedException {
    Table table = null;
    try {
        latch = new CountDownLatch(1);
        exceptionLatch = new CountDownLatch(1);
        final TableName tableName = TableName.valueOf(name.getMethodName());
        // Create KV that will give you two blocks
        // Create a table with block size as 1024
        table = TEST_UTIL.createTable(tableName, FAMILIES_1, 1, 1024, CustomInnerRegionObserverWrapper.class.getName());
        // get the block cache and region
        RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName);
        String regionName = locator.getAllRegionLocations().get(0).getRegionInfo().getEncodedName();
        Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName);
        Store store = region.getStores().iterator().next();
        CacheConfig cacheConf = store.getCacheConfig();
        cacheConf.setCacheDataOnWrite(true);
        cacheConf.setEvictOnClose(true);
        BlockCache cache = cacheConf.getBlockCache();
        // insert data. 2 Rows are added
        insertData(table);
        // flush the data
        System.out.println("Flushing cache");
        // Should create one HFile with 2 blocks
        region.flush(true);
        // CustomInnerRegionObserver.sleepTime.set(5000);
        CustomInnerRegionObserver.throwException.set(true);
        ScanThread[] scanThreads = initiateScan(table, false);
        // For the scan case the block's ref count is decremented when the scanner is
        // wrapped, before the postNext hook even runs; give the decrement a moment.
        Thread.sleep(100);
        Iterator<CachedBlock> iterator = cache.iterator();
        boolean usedBlocksFound = false;
        int refCount = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // In-use blocks should have a ref count equal to NO_OF_THREADS
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertTrue(usedBlocksFound);
        exceptionLatch.countDown();
        // countdown the latch
        CustomInnerRegionObserver.getCdl().get().countDown();
        for (ScanThread thread : scanThreads) {
            thread.join();
        }
        iterator = cache.iterator();
        usedBlocksFound = false;
        refCount = 0;
        while (iterator.hasNext()) {
            CachedBlock next = iterator.next();
            BlockCacheKey cacheKey = new BlockCacheKey(next.getFilename(), next.getOffset());
            if (cache instanceof BucketCache) {
                refCount = ((BucketCache) cache).getRefCount(cacheKey);
            } else if (cache instanceof CombinedBlockCache) {
                refCount = ((CombinedBlockCache) cache).getRefCount(cacheKey);
            } else {
                continue;
            }
            if (refCount != 0) {
                // In-use blocks should have a ref count equal to NO_OF_THREADS
                assertEquals(NO_OF_THREADS, refCount);
                usedBlocksFound = true;
            }
        }
        assertFalse(usedBlocksFound);
        // The ref count should always be 0: since HBASE-16604 the scanner is always recreated
        assertEquals(0, refCount);
    } finally {
        if (table != null) {
            table.close();
        }
    }
}
Also used: CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock), Store (org.apache.hadoop.hbase.regionserver.Store), CountDownLatch (java.util.concurrent.CountDownLatch), BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), TableName (org.apache.hadoop.hbase.TableName), CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache), BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache), BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache), Region (org.apache.hadoop.hbase.regionserver.Region), CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig), Test (org.junit.Test)

Aggregations

BlockCacheKey (org.apache.hadoop.hbase.io.hfile.BlockCacheKey): 16
CachedBlock (org.apache.hadoop.hbase.io.hfile.CachedBlock): 11
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 10
BlockCache (org.apache.hadoop.hbase.io.hfile.BlockCache): 9
CombinedBlockCache (org.apache.hadoop.hbase.io.hfile.CombinedBlockCache): 9
BucketCache (org.apache.hadoop.hbase.io.hfile.bucket.BucketCache): 9
TableName (org.apache.hadoop.hbase.TableName): 8
Region (org.apache.hadoop.hbase.regionserver.Region): 8
Test (org.junit.Test): 8
CountDownLatch (java.util.concurrent.CountDownLatch): 7
CacheConfig (org.apache.hadoop.hbase.io.hfile.CacheConfig): 6
Store (org.apache.hadoop.hbase.regionserver.Store): 5
IOException (java.io.IOException): 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
File (java.io.File): 1
FileInputStream (java.io.FileInputStream): 1
ObjectInputStream (java.io.ObjectInputStream): 1
ArrayList (java.util.ArrayList): 1
Iterator (java.util.Iterator): 1
Map (java.util.Map): 1