
Example 81 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestGetBlocks, method testBlockKey.

@Test
public void testBlockKey() {
    Map<Block, Long> map = new HashMap<Block, Long>();
    final Random RAN = new Random();
    final long seed = RAN.nextLong();
    System.out.println("seed=" + seed);
    RAN.setSeed(seed);
    long[] blkids = new long[10];
    for (int i = 0; i < blkids.length; i++) {
        blkids[i] = 1000L + RAN.nextInt(100000);
        map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
    }
    System.out.println("map=" + map.toString().replace(",", "\n  "));
    for (int i = 0; i < blkids.length; i++) {
        Block b = new Block(blkids[i], 0, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
        Long v = map.get(b);
        System.out.println(b + " => " + v);
        assertEquals(blkids[i], v.longValue());
    }
}
Also used : Random(java.util.Random) HashMap(java.util.HashMap) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
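
The lookup in the second loop only succeeds because Block is hashed and compared by block ID alone, ignoring the generation stamp. A minimal sketch of that property (same Block and HdfsConstants classes as above, values illustrative):

Map<Block, Long> m = new HashMap<>();
// Store under generation stamp 42...
m.put(new Block(1234L, 0, 42L), 1234L);
// ...and probe with GRANDFATHER_GENERATION_STAMP; only the block ID is compared.
Block probe = new Block(1234L, 0, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
assertEquals(Long.valueOf(1234L), m.get(probe));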

Example 82 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestJournalNodeSync, method testRandomJournalMissingLogs.

// Test JournalNode Sync by randomly deleting edit logs from one or two of
// the journals.
@Test(timeout = 60000)
public void testRandomJournalMissingLogs() throws Exception {
    Random randomJournal = new Random();
    List<File> journalCurrentDirs = Lists.newArrayList();
    for (int i = 0; i < 3; i++) {
        journalCurrentDirs.add(new StorageDirectory(jCluster.getJournalDir(i, jid)).getCurrentDir());
    }
    int count = 0;
    long lastStartTxId;
    int journalIndex;
    List<File> missingLogs = Lists.newArrayList();
    while (count < 5) {
        lastStartTxId = generateEditLog();
        // Delete the last edit log segment from randomly selected journal node
        journalIndex = randomJournal.nextInt(3);
        missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex), lastStartTxId));
        // Delete the last edit log segment from two journals for some logs
        if (count % 2 == 0) {
            journalIndex = (journalIndex + 1) % 3;
            missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex), lastStartTxId));
        }
        count++;
    }
    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
}
Also used : Random(java.util.Random) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) File(java.io.File) FileJournalManager.getLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.getLogFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) Test(org.junit.Test)
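
testRandomJournalMissingLogs draws its journal indices from an unseeded Random, so a failing deletion pattern cannot be replayed directly. A sketch (not part of the original test) of applying the seed-logging pattern from Example 81 to make it reproducible:

// Log the seed so a failing run can be repeated with the same nextInt(3) sequence.
final long seed = new Random().nextLong();
System.out.println("journal-selection seed=" + seed);
Random randomJournal = new Random(seed);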

Example 83 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestBlockListAsLongs, method testFuzz.

@Test
public void testFuzz() throws InterruptedException {
    Replica[] replicas = new Replica[100000];
    Random rand = new Random(0);
    for (int i = 0; i < replicas.length; i++) {
        Block b = new Block(rand.nextLong(), i, i << 4);
        switch (rand.nextInt(3)) {
            case 0:
                replicas[i] = new FinalizedReplica(b, null, null);
                break;
            case 1:
                replicas[i] = new ReplicaBeingWritten(b, null, null, null);
                break;
            case 2:
                replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
                break;
        }
    }
    checkReport(replicas);
}
Also used : ReplicaWaitingToBeRecovered(org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered) ReplicaBeingWritten(org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten) Random(java.util.Random) FinalizedReplica(org.apache.hadoop.hdfs.server.datanode.FinalizedReplica) BlockReportReplica(org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica) Replica(org.apache.hadoop.hdfs.server.datanode.Replica) Test(org.junit.Test)
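
Because the generator is seeded with new Random(0), the 100,000 fuzzed replicas are identical on every run, so any failure in checkReport is reproducible. A minimal sketch of the java.util.Random guarantee this relies on:

// Equal seeds yield identical sequences, so the fuzzed block report is deterministic.
Random a = new Random(0);
Random b = new Random(0);
for (int i = 0; i < 1000; i++) {
    assertEquals(a.nextLong(), b.nextLong());
}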

Example 84 with Random

Use of java.util.Random in project hadoop by apache.

From the class DiskBalancerTestUtil, method newImbalancedCluster.

public static MiniDFSCluster newImbalancedCluster(
        final Configuration conf, final int numDatanodes,
        final long[] storageCapacities, final int defaultBlockSize,
        final int fileLen, final StartupOption dnOption)
        throws IOException, InterruptedException, TimeoutException {
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final String fileName = "/" + UUID.randomUUID().toString();
    final Path filePath = new Path(fileName);
    Preconditions.checkNotNull(storageCapacities);
    Preconditions.checkArgument(storageCapacities.length == 2, "need to specify capacities for two storages.");
    // Write a file and restart the cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(numDatanodes)
            .storageCapacities(storageCapacities)
            .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
            .storagesPerDatanode(2)
            .dnStartupOption(dnOption)
            .build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    cluster.waitActive();
    Random r = new Random();
    FileSystem fs = cluster.getFileSystem(0);
    TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    cluster.restartDataNodes();
    cluster.waitActive();
    // Get the data node and move all data to one disk.
    for (int i = 0; i < numDatanodes; i++) {
        DataNode dnNode = cluster.getDataNodes().get(i);
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    return cluster;
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Random(java.util.Random) FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)
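
A hedged usage sketch of the helper above; the capacities, block size, file length, and startup option are illustrative values, not taken from the source:

// Two equal capacities satisfy the precondition checked in newImbalancedCluster.
long[] capacities = new long[] { 300 * 1024L, 300 * 1024L };
MiniDFSCluster cluster = DiskBalancerTestUtil.newImbalancedCluster(
        new HdfsConfiguration(), 1, capacities, 1024, 50 * 1024, StartupOption.REGULAR);
try {
    // ... run disk-balancer assertions against the now-imbalanced datanode ...
} finally {
    cluster.shutdown();
}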

Example 85 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestDiskBalancerRPC, method testMoveBlockAcrossVolume.

@Test
public void testMoveBlockAcrossVolume() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final int defaultBlockSize = 100;
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    String fileName = "/tmp.txt";
    Path filePath = new Path(fileName);
    final int numDatanodes = 1;
    final int dnIndex = 0;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    try {
        cluster.waitActive();
        Random r = new Random();
        FileSystem fs = cluster.getFileSystem(dnIndex);
        DFSTestUtil.createFile(fs, filePath, 10 * 1024, (short) 1, r.nextLong());
        DataNode dnNode = cluster.getDataNodes().get(dnIndex);
        FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences();
        try {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        } finally {
            refs.close();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Random(java.util.Random) FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
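
The only use of Random in this test is r.nextLong(), which seeds the pseudo-random contents written by DFSTestUtil.createFile; a fresh seed per run means different block data each time. A hedged sketch of pinning that seed for reproducible contents (the constant is illustrative):

// Hypothetical fixed seed: the same seed makes createFile write identical data every run.
final long contentSeed = 0xC0FFEEL;
DFSTestUtil.createFile(fs, filePath, 10 * 1024, (short) 1, contentSeed);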

Aggregations

Random (java.util.Random): 4728
Test (org.junit.Test): 1273
ArrayList (java.util.ArrayList): 602
IOException (java.io.IOException): 313
HashMap (java.util.HashMap): 242
File (java.io.File): 209
List (java.util.List): 154
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 151
ByteArrayInputStream (java.io.ByteArrayInputStream): 134
HashSet (java.util.HashSet): 129
ByteBuffer (java.nio.ByteBuffer): 123
Test (org.testng.annotations.Test): 121
Path (org.apache.hadoop.fs.Path): 116
Map (java.util.Map): 106
QuickTest (com.hazelcast.test.annotation.QuickTest): 99
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 94
CountDownLatch (java.util.concurrent.CountDownLatch): 93
Configuration (org.apache.hadoop.conf.Configuration): 88
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 79
Before (org.junit.Before): 78