
Example 21 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestHASafeMode method testOpenFileWhenNNAndClientCrashAfterAddBlock.

/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
    cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
    String testData = "testData";
    // Set bytes-per-checksum to the data length so the full block is written before the dummy block is added at the NN.
    cluster.getConfiguration(0).setInt("io.bytes.per.checksum", testData.length());
    cluster.restartNameNode(0);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        cluster.transitionToStandby(1);
        DistributedFileSystem dfs = cluster.getFileSystem(0);
        String pathString = "/tmp1.txt";
        Path filePath = new Path(pathString);
        FSDataOutputStream create = dfs.create(filePath, FsPermission.getDefault(), true, 1024, (short) 3, testData.length(), null);
        create.write(testData.getBytes());
        create.hflush();
        long fileId = ((DFSOutputStream) create.getWrappedStream()).getFileId();
        FileStatus fileStatus = dfs.getFileStatus(filePath);
        DFSClient client = DFSClientAdapter.getClient(dfs);
        // Add one dummy block at the NN, but do not write it to any DataNode.
        ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client, fileId);
        DFSClientAdapter.getNamenode(client).addBlock(pathString,
                client.getClientName(), new ExtendedBlock(previousBlock),
                new DatanodeInfo[0],
                DFSClientAdapter.getFileId((DFSOutputStream) create.getWrappedStream()),
                null, null);
        cluster.restartNameNode(0, true);
        cluster.restartDataNode(0);
        cluster.transitionToActive(0);
        // let the block reports be processed.
        Thread.sleep(2000);
        FSDataInputStream is = dfs.open(filePath);
        is.close();
        // initiate recovery
        dfs.recoverLease(filePath);
        assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DFSClient(org.apache.hadoop.hdfs.DFSClient) FileStatus(org.apache.hadoop.fs.FileStatus) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DFSOutputStream(org.apache.hadoop.hdfs.DFSOutputStream) Test(org.junit.Test)
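
The test initiates lease recovery and asserts that a second recoverLease() call succeeds. As a rough illustration of the same API outside a test harness, here is a minimal sketch (the class name, timeout, and polling interval are hypothetical, not part of the Hadoop code above) that polls recoverLease() until the NameNode reports the lease as released:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryPoller {

    /**
     * Polls recoverLease() until it returns true (file closed, lease released)
     * or the timeout expires; returns the last value observed.
     */
    public static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path path,
            long timeoutMillis) throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        boolean recovered = dfs.recoverLease(path);
        while (!recovered && System.currentTimeMillis() < deadline) {
            // recoverLease() triggers recovery and returns false while recovery
            // is still in progress, so pause briefly before asking again.
            Thread.sleep(1000);
            recovered = dfs.recoverLease(path);
        }
        return recovered;
    }
}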

Example 22 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class FsDatasetImpl method checkAndUpdate.

/**
   * Reconcile the difference between blocks on the disk and blocks in
   * volumeMap
   *
   * Check the given block for inconsistencies. Look at the
   * current state of the block and reconcile the differences as follows:
   * <ul>
   * <li>If the block file is missing, delete the block from volumeMap</li>
   * <li>If the block file exists and the block is missing in volumeMap,
   * add the block to volumeMap</li>
   * <li>If generation stamp does not match, then update the block with right
   * generation stamp</li>
   * <li>If the block length in memory does not match the actual block file length
   * then mark the block as corrupt and update the block length in memory</li>
   * <li>If the file in {@link ReplicaInfo} does not match the file on
   * the disk, update {@link ReplicaInfo} with the correct file</li>
   * </ul>
   *
   * @param blockId Block that differs
   * @param diskFile Block file on the disk
   * @param diskMetaFile Metadata file on the disk
   * @param vol Volume of the block file
   */
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile, File diskMetaFile, FsVolumeSpi vol) throws IOException {
    Block corruptBlock = null;
    ReplicaInfo memBlockInfo;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        memBlockInfo = volumeMap.get(bpid, blockId);
        if (memBlockInfo != null && memBlockInfo.getState() != ReplicaState.FINALIZED) {
            // Block is not finalized - ignore the difference
            return;
        }
        final FileIoProvider fileIoProvider = datanode.getFileIoProvider();
        final boolean diskMetaFileExists = diskMetaFile != null && fileIoProvider.exists(vol, diskMetaFile);
        final boolean diskFileExists = diskFile != null && fileIoProvider.exists(vol, diskFile);
        final long diskGS = diskMetaFileExists ? Block.getGenerationStamp(diskMetaFile.getName()) : HdfsConstants.GRANDFATHER_GENERATION_STAMP;
        if (!diskFileExists) {
            if (memBlockInfo == null) {
                // If metadata file exists then delete it
                if (diskMetaFileExists && fileIoProvider.delete(vol, diskMetaFile)) {
                    LOG.warn("Deleted a metadata file without a block " + diskMetaFile.getAbsolutePath());
                }
                return;
            }
            if (!memBlockInfo.blockDataExists()) {
                // Block is in memory and not on the disk
                // Remove the block from volumeMap
                volumeMap.remove(bpid, blockId);
                if (vol.isTransientStorage()) {
                    ramDiskReplicaTracker.discardReplica(bpid, blockId, true);
                }
                LOG.warn("Removed block " + blockId + " from memory with missing block file on the disk");
                // Finally remove the metadata file
                if (diskMetaFileExists && fileIoProvider.delete(vol, diskMetaFile)) {
                    LOG.warn("Deleted a metadata file for the deleted block " + diskMetaFile.getAbsolutePath());
                }
            }
            return;
        }
        /*
       * Block file exists on the disk
       */
        if (memBlockInfo == null) {
            // Block is missing in memory - add the block to volumeMap
            ReplicaInfo diskBlockInfo = new ReplicaBuilder(ReplicaState.FINALIZED)
                    .setBlockId(blockId)
                    .setLength(diskFile.length())
                    .setGenerationStamp(diskGS)
                    .setFsVolume(vol)
                    .setDirectoryToUse(diskFile.getParentFile())
                    .build();
            volumeMap.add(bpid, diskBlockInfo);
            if (vol.isTransientStorage()) {
                long lockedBytesReserved = cacheManager.reserve(diskBlockInfo.getNumBytes()) > 0 ? diskBlockInfo.getNumBytes() : 0;
                ramDiskReplicaTracker.addReplica(bpid, blockId, (FsVolumeImpl) vol, lockedBytesReserved);
            }
            LOG.warn("Added missing block to memory " + diskBlockInfo);
            return;
        }
        // Compare block files
        if (memBlockInfo.blockDataExists()) {
            if (memBlockInfo.getBlockURI().compareTo(diskFile.toURI()) != 0) {
                if (diskMetaFileExists) {
                    if (memBlockInfo.metadataExists()) {
                        // We have two sets of block+meta files. Decide which one to
                        // keep.
                        ReplicaInfo diskBlockInfo = new ReplicaBuilder(ReplicaState.FINALIZED)
                                .setBlockId(blockId)
                                .setLength(diskFile.length())
                                .setGenerationStamp(diskGS)
                                .setFsVolume(vol)
                                .setDirectoryToUse(diskFile.getParentFile())
                                .build();
                        ((FsVolumeImpl) vol).resolveDuplicateReplicas(bpid, memBlockInfo, diskBlockInfo, volumeMap);
                    }
                } else {
                    if (!fileIoProvider.delete(vol, diskFile)) {
                        LOG.warn("Failed to delete " + diskFile);
                    }
                }
            }
        } else {
            // Block refers to a block file that does not exist.
            // Update the block with the file found on the disk. Since the block
            // file and metadata file are found as a pair on the disk, update
            // the block based on the metadata file found on the disk
            LOG.warn("Block file in replica " + memBlockInfo.getBlockURI() + " does not exist. Updating it to the file found during scan " + diskFile.getAbsolutePath());
            memBlockInfo.updateWithReplica(StorageLocation.parse(diskFile.toString()));
            LOG.warn("Updating generation stamp for block " + blockId + " from " + memBlockInfo.getGenerationStamp() + " to " + diskGS);
            memBlockInfo.setGenerationStamp(diskGS);
        }
        // Compare generation stamp
        if (memBlockInfo.getGenerationStamp() != diskGS) {
            File memMetaFile = FsDatasetUtil.getMetaFile(diskFile, memBlockInfo.getGenerationStamp());
            if (fileIoProvider.exists(vol, memMetaFile)) {
                String warningPrefix = "Metadata file in memory " + memMetaFile.getAbsolutePath() + " does not match file found by scan ";
                if (!diskMetaFileExists) {
                    LOG.warn(warningPrefix + "null");
                } else if (memMetaFile.compareTo(diskMetaFile) != 0) {
                    LOG.warn(warningPrefix + diskMetaFile.getAbsolutePath());
                }
            } else {
                // The metadata file recorded in memory is missing. If the
                // metadata file found on disk is in the same directory as the
                // block file, use its generation stamp.
                try {
                    File memFile = new File(memBlockInfo.getBlockURI());
                    long gs = diskMetaFileExists && diskMetaFile.getParent().equals(memFile.getParent()) ? diskGS : HdfsConstants.GRANDFATHER_GENERATION_STAMP;
                    LOG.warn("Updating generation stamp for block " + blockId + " from " + memBlockInfo.getGenerationStamp() + " to " + gs);
                    memBlockInfo.setGenerationStamp(gs);
                } catch (IllegalArgumentException e) {
                    //exception arises because the URI cannot be converted to a file
                    LOG.warn("Block URI could not be resolved to a file", e);
                }
            }
        }
        // Compare block size
        if (memBlockInfo.getNumBytes() != memBlockInfo.getBlockDataLength()) {
            // Update the length based on the block file
            corruptBlock = new Block(memBlockInfo);
            LOG.warn("Updating size of block " + blockId + " from " + memBlockInfo.getNumBytes() + " to " + memBlockInfo.getBlockDataLength());
            memBlockInfo.setNumBytes(memBlockInfo.getBlockDataLength());
        }
    }
    // Send corrupt block report outside the lock
    if (corruptBlock != null) {
        LOG.warn("Reporting the block " + corruptBlock + " as corrupt due to length mismatch");
        try {
            datanode.reportBadBlocks(new ExtendedBlock(bpid, corruptBlock), memBlockInfo.getVolume());
        } catch (IOException e) {
            LOG.warn("Failed to repot bad block " + corruptBlock, e);
        }
    }
}
Also used : ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaBuilder(org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) FileIoProvider(org.apache.hadoop.hdfs.server.datanode.FileIoProvider) File(java.io.File)
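
The javadoc of checkAndUpdate() lists the reconciliation rules in prose. As a self-contained illustration of that decision tree, here is a simplified sketch that works on plain values instead of the real ReplicaInfo/volumeMap types; MemReplica and Action are hypothetical names introduced only for this example, and the sketch ignores metadata-file handling and transient storage:

import java.io.File;

public class ReconcileSketch {

    /** Hypothetical, stripped-down view of an in-memory replica. */
    static class MemReplica {
        long numBytes;
        long generationStamp;
    }

    /** What the caller should do with the in-memory entry. */
    enum Action { REMOVE_FROM_MAP, ADD_TO_MAP, UPDATE_GENSTAMP, MARK_CORRUPT_AND_FIX_LENGTH, NONE }

    static Action reconcile(MemReplica mem, File diskFile, long diskGenStamp) {
        boolean diskFileExists = diskFile != null && diskFile.exists();
        if (!diskFileExists) {
            // Rule 1: block file missing on disk -> drop the in-memory entry.
            return mem != null ? Action.REMOVE_FROM_MAP : Action.NONE;
        }
        if (mem == null) {
            // Rule 2: block file on disk but unknown in memory -> add it.
            return Action.ADD_TO_MAP;
        }
        if (mem.generationStamp != diskGenStamp) {
            // Rule 3: generation stamp mismatch -> adopt the on-disk stamp.
            return Action.UPDATE_GENSTAMP;
        }
        if (mem.numBytes != diskFile.length()) {
            // Rule 4: length mismatch -> report the block as corrupt and fix
            // the in-memory length.
            return Action.MARK_CORRUPT_AND_FIX_LENGTH;
        }
        return Action.NONE;
    }
}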

Example 23 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class FsDatasetImpl method invalidate.

/**
   * Invalidate a block without deleting the actual on-disk block file.
   *
   * It should only be used when deactivating disks.
   *
   * @param bpid the block pool ID.
   * @param block The block to be invalidated.
   */
public void invalidate(String bpid, ReplicaInfo block) {
    // If a DFSClient has the replica in its cache of short-circuit file
    // descriptors (and the client is using ShortCircuitShm), invalidate it.
    datanode.getShortCircuitRegistry().processBlockInvalidation(new ExtendedBlockId(block.getBlockId(), bpid));
    // If the block is cached, start uncaching it.
    cacheManager.uncacheBlock(bpid, block.getBlockId());
    datanode.notifyNamenodeDeletedBlock(new ExtendedBlock(bpid, block), block.getStorageUuid());
}
Also used : ExtendedBlockId(org.apache.hadoop.hdfs.ExtendedBlockId) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock)
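
Both invalidate() variants build an ExtendedBlock by pairing the block-pool ID with a pool-local block before notifying the NameNode or the short-circuit registry. A minimal sketch of that pairing, using made-up pool and block values purely for illustration:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ExtendedBlockPairingSketch {
    public static void main(String[] args) {
        // Block id, length and generation stamp are arbitrary example values.
        Block local = new Block(1073741825L, 134217728L, 1001L);
        ExtendedBlock eb = new ExtendedBlock("BP-1234-10.0.0.1-1700000000000", local);
        System.out.println(eb.getBlockPoolId()); // the pool the block belongs to
        System.out.println(eb.getLocalBlock());  // the wrapped pool-local Block
    }
}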

Example 24 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class FsDatasetImpl method invalidate.

/**
   * We're informed that a block is no longer valid. Delete it.
   */
// FsDatasetSpi
@Override
public void invalidate(String bpid, Block[] invalidBlks) throws IOException {
    final List<String> errors = new ArrayList<String>();
    for (int i = 0; i < invalidBlks.length; i++) {
        final ReplicaInfo removing;
        final FsVolumeImpl v;
        try (AutoCloseableLock lock = datasetLock.acquire()) {
            final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]);
            if (info == null) {
                ReplicaInfo infoByBlockId = volumeMap.get(bpid, invalidBlks[i].getBlockId());
                if (infoByBlockId == null) {
                    // It is okay if the block is not found -- it
                    // may have been deleted earlier.
                    LOG.info("Failed to delete replica " + invalidBlks[i] + ": ReplicaInfo not found.");
                } else {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ": GenerationStamp not matched, existing replica is " + Block.toString(infoByBlockId));
                }
                continue;
            }
            v = (FsVolumeImpl) info.getVolume();
            if (v == null) {
                errors.add("Failed to delete replica " + invalidBlks[i] + ". No volume for replica " + info);
                continue;
            }
            try {
                File blockFile = new File(info.getBlockURI());
                if (blockFile != null && blockFile.getParentFile() == null) {
                    errors.add("Failed to delete replica " + invalidBlks[i] + ". Parent not found for block file: " + blockFile);
                    continue;
                }
            } catch (IllegalArgumentException e) {
                LOG.warn("Parent directory check failed; replica " + info + " is not backed by a local file");
            }
            removing = volumeMap.remove(bpid, invalidBlks[i]);
            addDeletingBlock(bpid, removing.getBlockId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Block file " + removing.getBlockURI() + " is to be deleted");
            }
            if (removing instanceof ReplicaInPipeline) {
                ((ReplicaInPipeline) removing).releaseAllBytesReserved();
            }
        }
        if (v.isTransientStorage()) {
            RamDiskReplica replicaInfo = ramDiskReplicaTracker.getReplica(bpid, invalidBlks[i].getBlockId());
            if (replicaInfo != null) {
                if (!replicaInfo.getIsPersisted()) {
                    datanode.getMetrics().incrRamDiskBlocksDeletedBeforeLazyPersisted();
                }
                ramDiskReplicaTracker.discardReplica(replicaInfo.getBlockPoolId(), replicaInfo.getBlockId(), true);
            }
        }
        // If a DFSClient has the replica in its cache of short-circuit file
        // descriptors (and the client is using ShortCircuitShm), invalidate it.
        datanode.getShortCircuitRegistry().processBlockInvalidation(new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
        // If the block is cached, start uncaching it.
        cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
        // Delete the block asynchronously; it is okay if the block file is
        // unlinked before the uncache operation above finishes.
        try {
            asyncDiskService.deleteAsync(v.obtainReference(), removing, new ExtendedBlock(bpid, invalidBlks[i]), dataStorage.getTrashDirectoryForReplica(bpid, removing));
        } catch (ClosedChannelException e) {
            LOG.warn("Volume " + v + " is closed, ignore the deletion task for " + "block " + invalidBlks[i]);
        }
    }
    if (!errors.isEmpty()) {
        StringBuilder b = new StringBuilder("Failed to delete ").append(errors.size()).append(" (out of ").append(invalidBlks.length).append(") replica(s):");
        for (int i = 0; i < errors.size(); i++) {
            b.append("\n").append(i).append(") ").append(errors.get(i));
        }
        throw new IOException(b.toString());
    }
}
Also used : ClosedChannelException(java.nio.channels.ClosedChannelException) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) ExtendedBlockId(org.apache.hadoop.hdfs.ExtendedBlockId) ArrayList(java.util.ArrayList) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) ReplicaInPipeline(org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline) File(java.io.File) RamDiskReplica(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica)
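
The batch invalidate() above deliberately keeps going after per-block failures and throws a single summary IOException at the end. A stripped-down sketch of the same pattern, where deleteOne() is a hypothetical stand-in for the real per-block deletion:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class BatchDeleteSketch {

    static void deleteAll(long[] blockIds) throws IOException {
        final List<String> errors = new ArrayList<>();
        for (long id : blockIds) {
            try {
                deleteOne(id);
            } catch (IOException e) {
                // Record the failure and continue with the remaining blocks.
                errors.add("Failed to delete block " + id + ": " + e.getMessage());
            }
        }
        if (!errors.isEmpty()) {
            StringBuilder b = new StringBuilder("Failed to delete ")
                .append(errors.size()).append(" (out of ").append(blockIds.length)
                .append(") block(s):");
            for (int i = 0; i < errors.size(); i++) {
                b.append("\n").append(i).append(") ").append(errors.get(i));
            }
            throw new IOException(b.toString());
        }
    }

    private static void deleteOne(long id) throws IOException {
        // Placeholder for the real deletion work.
    }
}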

Example 25 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class FSNamesystem method commitBlockSynchronization.

void commitBlockSynchronization(ExtendedBlock oldBlock, long newgenerationstamp, long newlength, boolean closeFile, boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages) throws IOException {
    LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", newgenerationstamp=" + newgenerationstamp + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ", closeFile=" + closeFile + ", deleteBlock=" + deleteblock + ")");
    checkOperation(OperationCategory.WRITE);
    final String src;
    writeLock();
    boolean copyTruncate = false;
    BlockInfo truncatedBlock = null;
    try {
        checkOperation(OperationCategory.WRITE);
        // If a DN tries to commit to the standby, the recovery will
        // fail, and the next retry will succeed on the new NN.
        checkNameNodeSafeMode("Cannot commitBlockSynchronization while in safe mode");
        final BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(oldBlock));
        if (storedBlock == null) {
            if (deleteblock) {
                // This may be a retry of a previous request, so it is okay
                // to fail to locate the block.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Block (=" + oldBlock + ") not found");
                }
                return;
            } else {
                throw new IOException("Block (=" + oldBlock + ") not found");
            }
        }
        final long oldGenerationStamp = storedBlock.getGenerationStamp();
        final long oldNumBytes = storedBlock.getNumBytes();
        // If the block collection is gone, the owning file was deleted and its
        // block removal has been delayed; fail here rather than proceeding.
        if (storedBlock.isDeleted()) {
            throw new IOException("The blockCollection of " + storedBlock + " is null, likely because the file owning this block was" + " deleted and the block removal is delayed");
        }
        final INodeFile iFile = getBlockCollection(storedBlock);
        src = iFile.getFullPathName();
        if (isFileDeleted(iFile)) {
            throw new FileNotFoundException("File not found: " + src + ", likely due to delayed block removal");
        }
        if ((!iFile.isUnderConstruction() || storedBlock.isComplete()) && iFile.getLastBlock().isComplete()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Unexpected block (=" + oldBlock + ") since the file (=" + iFile.getLocalName() + ") is not under construction");
            }
            return;
        }
        truncatedBlock = iFile.getLastBlock();
        final long recoveryId = truncatedBlock.getUnderConstructionFeature().getBlockRecoveryId();
        copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
        if (recoveryId != newgenerationstamp) {
            throw new IOException("The recovery id " + newgenerationstamp + " does not match current recovery id " + recoveryId + " for block " + oldBlock);
        }
        if (deleteblock) {
            Block blockToDel = ExtendedBlock.getLocalBlock(oldBlock);
            boolean remove = iFile.removeLastBlock(blockToDel) != null;
            if (remove) {
                blockManager.removeBlock(storedBlock);
            }
        } else {
            // update last block
            if (!copyTruncate) {
                storedBlock.setGenerationStamp(newgenerationstamp);
                storedBlock.setNumBytes(newlength);
            }
            // Find the target DatanodeStorageInfos. If not found because of invalid
            // or empty DatanodeID/StorageID, the slot at the same offset in dsInfos
            // is null.
            final DatanodeStorageInfo[] dsInfos = blockManager.getDatanodeManager()
                    .getDatanodeStorageInfos(newtargets, newtargetstorages,
                            "src=%s, oldBlock=%s, newgenerationstamp=%d, newlength=%d",
                            src, oldBlock, newgenerationstamp, newlength);
            if (closeFile && dsInfos != null) {
                // Add the block locations now so the blocks are not reported
                // as missing if blocksReceived from the DataNodes take a long
                // time to arrive.
                for (int i = 0; i < dsInfos.length; i++) {
                    if (dsInfos[i] != null) {
                        if (copyTruncate) {
                            dsInfos[i].addBlock(truncatedBlock, truncatedBlock);
                        } else {
                            Block bi = new Block(storedBlock);
                            if (storedBlock.isStriped()) {
                                bi.setBlockId(bi.getBlockId() + i);
                            }
                            dsInfos[i].addBlock(storedBlock, bi);
                        }
                    }
                }
            }
            // add pipeline locations into the INodeUnderConstruction
            if (copyTruncate) {
                iFile.convertLastBlockToUC(truncatedBlock, dsInfos);
            } else {
                iFile.convertLastBlockToUC(storedBlock, dsInfos);
                if (closeFile) {
                    blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(), storedBlock, oldGenerationStamp, oldNumBytes, dsInfos);
                }
            }
        }
        if (closeFile) {
            if (copyTruncate) {
                closeFileCommitBlocks(src, iFile, truncatedBlock);
                if (!iFile.isBlockInLatestSnapshot(storedBlock)) {
                    blockManager.removeBlock(storedBlock);
                }
            } else {
                closeFileCommitBlocks(src, iFile, storedBlock);
            }
        } else {
            // If this commit does not want to close the file, persist blocks
            FSDirWriteFileOp.persistBlocks(dir, src, iFile, false);
        }
    } finally {
        writeUnlock("commitBlockSynchronization");
    }
    getEditLog().logSync();
    if (closeFile) {
        LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", file=" + src + (copyTruncate ? ", newBlock=" + truncatedBlock : ", newgenerationstamp=" + newgenerationstamp) + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ") successful");
    } else {
        LOG.info("commitBlockSynchronization(" + oldBlock + ") successful");
    }
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileNotFoundException(java.io.FileNotFoundException) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException)
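
commitBlockSynchronization() mostly works with the pool-local Block unwrapped from the incoming ExtendedBlock via ExtendedBlock.getLocalBlock(), since the NameNode keys its own structures by the local block. A minimal sketch of that unwrapping, with made-up pool ID and block values:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class LocalBlockLookupSketch {
    public static void main(String[] args) {
        ExtendedBlock oldBlock = new ExtendedBlock("BP-1234-10.0.0.1-1700000000000",
                1073741825L, 134217728L, 1001L);
        // Static helper that null-checks and returns the wrapped pool-local Block.
        Block local = ExtendedBlock.getLocalBlock(oldBlock);
        System.out.println(local); // prints blk_1073741825_1001
    }
}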

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)208 Test (org.junit.Test)124 Path (org.apache.hadoop.fs.Path)91 Configuration (org.apache.hadoop.conf.Configuration)71 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)63 FileSystem (org.apache.hadoop.fs.FileSystem)62 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)55 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)53 IOException (java.io.IOException)41 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)41 Block (org.apache.hadoop.hdfs.protocol.Block)38 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)34 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)32 File (java.io.File)22 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)20 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)20 DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID)18 FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)18 InetSocketAddress (java.net.InetSocketAddress)17 ArrayList (java.util.ArrayList)17