
Example 71 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestBlockReaderRemote, method testSkip.

@Test(timeout = 60000)
public void testSkip() throws IOException {
    Random random = new Random();
    byte[] buf = new byte[1];
    for (int pos = 0; pos < blockData.length; ) {
        // Skip a random number of bytes (1..100) and verify the reported count.
        long skip = random.nextInt(100) + 1;
        long skipped = reader.skip(skip);
        if (pos + skip >= blockData.length) {
            // A skip past the end should stop exactly at the end of the block.
            assertEquals(blockData.length, pos + skipped);
            break;
        } else {
            assertEquals(skip, skipped);
            pos += skipped;
            // Read one byte to confirm the reader landed at the expected offset.
            assertEquals(1, reader.read(buf, 0, 1));
            assertEquals(blockData[pos], buf[0]);
            pos += 1;
        }
    }
}
Also used : Random(java.util.Random) Test(org.junit.Test)
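
BlockReader.skip, like java.io.InputStream.skip, is allowed to skip fewer bytes than requested, which is why the test checks the returned count instead of assuming the request was honored. A minimal, self-contained sketch of the same defensive pattern against a plain InputStream (the skipFully helper below is illustrative, not a Hadoop API):

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

public class SkipFullySketch {

    // Keep calling skip() until n bytes are consumed; a single call may
    // legitimately skip fewer bytes (even zero) without hitting end of stream.
    static void skipFully(InputStream in, long n) throws IOException {
        while (n > 0) {
            long skipped = in.skip(n);
            if (skipped <= 0) {
                // skip() returning 0 does not imply EOF; probe with read().
                if (in.read() == -1) {
                    throw new EOFException("stream ended before skipping completed");
                }
                skipped = 1;
            }
            n -= skipped;
        }
    }

    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[64]);
        skipFully(in, 40);
        System.out.println("bytes remaining: " + in.available()); // prints 24
    }
}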

Example 72 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestSetTimes, method writeFile.

private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl) throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) repl, blockSize);
    byte[] buffer = new byte[fileSize];
    // A fixed seed makes the file contents reproducible for later verification.
    Random rand = new Random(seed);
    rand.nextBytes(buffer);
    stm.write(buffer);
    return stm;
}
Also used : Random(java.util.Random) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
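
Seeding Random with a fixed value makes the generated file contents deterministic, so a verifier can later regenerate the identical buffer instead of storing a copy of it. A self-contained sketch of that write/verify pairing (the seed and size constants are illustrative):

import java.util.Arrays;
import java.util.Random;

public class SeededDataSketch {

    static final long SEED = 0xDEADBEEFL; // illustrative fixed seed
    static final int SIZE = 16384;        // illustrative payload size

    // The same seed always yields the same byte sequence.
    static byte[] generate() {
        byte[] buf = new byte[SIZE];
        new Random(SEED).nextBytes(buf);
        return buf;
    }

    public static void main(String[] args) {
        byte[] written = generate();  // what the writer produced
        byte[] expected = generate(); // regenerated independently by the verifier
        System.out.println("match: " + Arrays.equals(written, expected)); // true
    }
}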

Example 73 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestSmallBlock, method checkFile.

private void checkFile(DistributedFileSystem fileSys, Path name) throws IOException {
    BlockLocation[] locations = fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
    // The test writes with a tiny block size, so the block count equals the file size.
    assertEquals("Number of blocks", fileSize, locations.length);
    FSDataInputStream stm = fileSys.open(name);
    byte[] expected = new byte[fileSize];
    if (simulatedStorage) {
        LocatedBlocks lbs = fileSys.getClient().getLocatedBlocks(name.toString(), 0, fileSize);
        DFSTestUtil.fillExpectedBuf(lbs, expected);
    } else {
        Random rand = new Random(seed);
        rand.nextBytes(expected);
    }
    // Sanity check: read the whole file back and compare it with the expected bytes.
    byte[] actual = new byte[fileSize];
    stm.readFully(0, actual);
    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
    stm.close();
}
Also used : Random(java.util.Random) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) BlockLocation(org.apache.hadoop.fs.BlockLocation)
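
checkAndEraseData is a helper defined elsewhere in the test class. A plausible minimal version, shown here purely as an illustration of the pattern (compare each byte against the expected buffer, then zero it so a stale value cannot satisfy a later check), might look like:

public class CheckAndEraseSketch {

    static void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
        for (int i = 0; i < actual.length; i++) {
            if (actual[i] != expected[from + i]) {
                throw new AssertionError(message + ": byte mismatch at offset " + (from + i));
            }
            actual[i] = 0; // erase so the buffer can be safely reused
        }
    }

    public static void main(String[] args) {
        byte[] expected = { 1, 2, 3, 4 };
        byte[] actual = { 2, 3 }; // bytes read starting at offset 1
        checkAndEraseData(actual, 1, expected, "Read Sanity Test");
        System.out.println("sanity check passed");
    }
}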

Example 74 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestAvailableSpaceVolumeChoosingPolicy, method doRandomizedTest.

/*
   * Ensure that we randomly select the lesser-used volumes with appropriate
   * frequency.
   */
public void doRandomizedTest(float preferencePercent, int lowSpaceVolumes, int highSpaceVolumes) throws Exception {
    Random random = new Random(123L);
    final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy = new AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi>(random);
    List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
    // Volumes with 1MB free space
    for (int i = 0; i < lowSpaceVolumes; i++) {
        FsVolumeSpi volume = Mockito.mock(FsVolumeSpi.class);
        Mockito.when(volume.getAvailable()).thenReturn(1024L * 1024L);
        volumes.add(volume);
    }
    // Volumes with 3MB free space
    for (int i = 0; i < highSpaceVolumes; i++) {
        FsVolumeSpi volume = Mockito.mock(FsVolumeSpi.class);
        Mockito.when(volume.getAvailable()).thenReturn(1024L * 1024L * 3);
        volumes.add(volume);
    }
    initPolicy(policy, preferencePercent);
    long lowAvailableSpaceVolumeSelected = 0;
    long highAvailableSpaceVolumeSelected = 0;
    for (int i = 0; i < RANDOMIZED_ITERATIONS; i++) {
        FsVolumeSpi volume = policy.chooseVolume(volumes, 100);
        for (int j = 0; j < volumes.size(); j++) {
            // Note how many times the first low available volume was selected
            if (volume == volumes.get(j) && j == 0) {
                lowAvailableSpaceVolumeSelected++;
            }
            // Note how many times the first high available volume was selected
            if (volume == volumes.get(j) && j == lowSpaceVolumes) {
                highAvailableSpaceVolumeSelected++;
                break;
            }
        }
    }
    // The policy should prefer the high-available-space volumes with probability
    // preferencePercent, so the expected high/low selection ratio is p / (1 - p).
    float expectedSelectionRatio = preferencePercent / (1 - preferencePercent);
    GenericTestUtils.assertValueNear((long) (lowAvailableSpaceVolumeSelected * expectedSelectionRatio), highAvailableSpaceVolumeSelected, RANDOMIZED_ALLOWED_ERROR);
}
Also used : Random(java.util.Random) ArrayList(java.util.ArrayList)
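
The assertion rests on a simple identity: if the policy picks a high-available-space volume with probability p = preferencePercent and a low one with probability 1 - p, then over many iterations the ratio high/low converges to p / (1 - p). A standalone simulation of that weighted choice (the constants are illustrative, not the test's actual values):

import java.util.Random;

public class PreferenceRatioSketch {

    public static void main(String[] args) {
        float p = 0.75f;          // illustrative preferencePercent
        int iterations = 100_000; // illustrative iteration count
        Random random = new Random(123L);
        long high = 0;
        long low = 0;
        for (int i = 0; i < iterations; i++) {
            // Choose "high" with probability p, "low" otherwise.
            if (random.nextFloat() < p) {
                high++;
            } else {
                low++;
            }
        }
        float expectedRatio = p / (1 - p); // 3.0 for p = 0.75
        System.out.printf("observed high/low = %.3f, expected = %.3f%n",
                (float) high / low, expectedRatio);
    }
}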

Example 75 with Random

Use of java.util.Random in project hadoop by apache.

From the class TestOfflineImageViewer, method createOriginalFSImage.

// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
// We only want to generate the fsimage file once and use it for
// multiple tests.
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    tempDir = Files.createTempDir();
    MiniDFSCluster cluster = null;
    try {
        final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
        Configuration conf = new Configuration();
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
        conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace
        for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
            Path dir = new Path("/dir" + i);
            hdfs.mkdirs(dir);
            writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
            for (int j = 0; j < FILES_PER_DIR; j++) {
                Path file = new Path(dir, "file" + j);
                FSDataOutputStream o = hdfs.create(file);
                o.write(23);
                o.close();
                writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
            }
        }
        // Create an empty directory
        Path emptydir = new Path("/emptydir");
        hdfs.mkdirs(emptydir);
        dirCount++;
        writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
        // Create a directory whose name should be escaped in XML
        Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0089here");
        hdfs.mkdirs(invalidXMLDir);
        dirCount++;
        // Create a directory with the sticky bit set
        Path stickyBitDir = new Path("/stickyBit");
        hdfs.mkdirs(stickyBitDir);
        hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true));
        dirCount++;
        writtenFiles.put(stickyBitDir.toString(), hdfs.getFileStatus(stickyBitDir));
        // Get delegation tokens so we log the delegation token op
        Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
        for (Token<?> t : delegationTokens) {
            LOG.debug("got token " + t);
        }
        // Create INodeReference
        final Path src = new Path("/src");
        hdfs.mkdirs(src);
        dirCount++;
        writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
        // Create snapshot and snapshotDiff.
        final Path orig = new Path("/src/orig");
        hdfs.mkdirs(orig);
        final Path file1 = new Path("/src/file");
        FSDataOutputStream o = hdfs.create(file1);
        o.write(23);
        o.write(45);
        o.close();
        hdfs.allowSnapshot(src);
        hdfs.createSnapshot(src, "snapshot");
        final Path dst = new Path("/dst");
        // Rename a directory in the snapshot directory to add snapshotCopy
        // field to the dirDiff entry.
        hdfs.rename(orig, dst);
        dirCount++;
        writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
        // Truncate a file in the snapshot directory to add snapshotCopy and
        // blocks fields to the fileDiff entry.
        hdfs.truncate(file1, 1);
        writtenFiles.put(file1.toString(), hdfs.getFileStatus(file1));
        // Set XAttrs so the fsimage contains XAttr ops
        final Path xattr = new Path("/xattr");
        hdfs.mkdirs(xattr);
        dirCount++;
        hdfs.setXAttr(xattr, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
        hdfs.setXAttr(xattr, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
        // OIV should be able to handle empty value XAttrs
        hdfs.setXAttr(xattr, "user.a3", null);
        // OIV should be able to handle XAttr values that can't be expressed
        // as UTF8
        hdfs.setXAttr(xattr, "user.a4", new byte[] { -0x3d, 0x28 });
        writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
        // Set ACLs
        hdfs.setAcl(xattr, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
        // Create an Erasure Coded dir
        Path ecDir = new Path("/ec");
        hdfs.mkdirs(ecDir);
        dirCount++;
        hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
        // Create an empty Erasure Coded file
        Path emptyECFile = new Path(ecDir, "EmptyECFile.txt");
        hdfs.create(emptyECFile).close();
        writtenFiles.put(emptyECFile.toString(), pathToFileEntry(hdfs, emptyECFile.toString()));
        filesECCount++;
        // Create a small Erasure Coded file
        Path smallECFile = new Path(ecDir, "SmallECFile.txt");
        FSDataOutputStream out = hdfs.create(smallECFile);
        Random r = new Random();
        byte[] bytes = new byte[1024 * 10];
        r.nextBytes(bytes);
        out.write(bytes);
        // Close the stream so the file is complete before the image is saved.
        out.close();
        writtenFiles.put(smallECFile.toString(), pathToFileEntry(hdfs, smallECFile.toString()));
        filesECCount++;
        // Write results to the fsimage file
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        // Determine location of fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Token(org.apache.hadoop.security.token.Token) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Random(java.util.Random) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) BeforeClass(org.junit.BeforeClass)
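
The saved fsimage is what the rest of the test class feeds to the offline image viewer. As a rough sketch of that later step, assuming OfflineImageViewerPB is on the classpath and that the paths below (hypothetical placeholders) point at a real image, an XML dump could be produced like this:

import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB;

public class DumpFsimageSketch {

    public static void main(String[] args) throws Exception {
        String fsimagePath = "/tmp/fsimage_0000000000000000024"; // hypothetical path
        String outputPath = "/tmp/fsimage.xml";                  // hypothetical path
        // Run the protobuf-based offline image viewer in XML mode.
        int status = OfflineImageViewerPB.run(
                new String[] { "-p", "XML", "-i", fsimagePath, "-o", outputPath });
        System.out.println("oiv exit status: " + status);
    }
}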

Aggregations

Random (java.util.Random): 4728
Test (org.junit.Test): 1273
ArrayList (java.util.ArrayList): 602
IOException (java.io.IOException): 313
HashMap (java.util.HashMap): 242
File (java.io.File): 209
List (java.util.List): 154
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 151
ByteArrayInputStream (java.io.ByteArrayInputStream): 134
HashSet (java.util.HashSet): 129
ByteBuffer (java.nio.ByteBuffer): 123
Test (org.testng.annotations.Test): 121
Path (org.apache.hadoop.fs.Path): 116
Map (java.util.Map): 106
QuickTest (com.hazelcast.test.annotation.QuickTest): 99
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 94
CountDownLatch (java.util.concurrent.CountDownLatch): 93
Configuration (org.apache.hadoop.conf.Configuration): 88
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 79
Before (org.junit.Before): 78