Use of java.util.Random in project hadoop by apache: class TestPread, method datanodeRestartTest.
// test pread can survive datanode restarts
private void datanodeRestartTest(MiniDFSCluster cluster, FileSystem fileSys, Path name) throws IOException {
  // skip this test with simulated storage, since simulated blocks
  // don't survive datanode restarts.
  if (simulatedStorage) {
    return;
  }
  int numBlocks = 1;
  assertTrue(numBlocks <= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
  byte[] expected = new byte[numBlocks * blockSize];
  Random rand = new Random(seed);
  rand.nextBytes(expected);
  byte[] actual = new byte[numBlocks * blockSize];
  FSDataInputStream stm = fileSys.open(name);
  // read a block and get block locations cached as a result
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Setup");
  // restart all datanodes. it is expected that they will
  // restart on different ports, hence, cached block locations
  // will no longer work.
  assertTrue(cluster.restartDataNodes());
  cluster.waitActive();
  // verify the block can be read again using the same InputStream
  // (via re-fetching of block locations from namenode). there is a
  // 3 sec sleep in chooseDataNode(), which can be shortened for
  // this test if configurable.
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
}
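The helper checkAndEraseData is not shown in the snippet. A minimal sketch of what such a helper typically does in TestPread, assuming JUnit's assertEquals is statically imported: verify the bytes just read against the expected buffer, then zero the actual buffer so the next read cannot pass on stale data.

// Hypothetical sketch of the checkAndEraseData helper used above (not shown in
// the snippet): compare what was read against the expected data, then erase
// the buffer so the next readFully cannot succeed on leftover bytes.
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
  for (int idx = 0; idx < actual.length; idx++) {
    // include the absolute file offset in the failure message
    assertEquals(message + " byte " + (from + idx) + " differs",
        expected[from + idx], actual[idx]);
    actual[idx] = 0;
  }
}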
Use of java.util.Random in project hadoop by apache: class TestCachedBlocksList, method testMultipleLists.
@Test(timeout = 60000)
public void testMultipleLists() {
  DatanodeDescriptor[] datanodes = new DatanodeDescriptor[] {
      new DatanodeDescriptor(new DatanodeID("127.0.0.1", "localhost", "abcd", 5000, 5001, 5002, 5003)),
      new DatanodeDescriptor(new DatanodeID("127.0.1.1", "localhost", "efgh", 6000, 6001, 6002, 6003)) };
  CachedBlocksList[] lists = new CachedBlocksList[] {
      datanodes[0].getPendingCached(),
      datanodes[0].getCached(),
      datanodes[1].getPendingCached(),
      datanodes[1].getCached(),
      datanodes[1].getPendingUncached() };
  final int NUM_BLOCKS = 8000;
  CachedBlock[] blocks = new CachedBlock[NUM_BLOCKS];
  for (int i = 0; i < NUM_BLOCKS; i++) {
    blocks[i] = new CachedBlock(i, (short) i, true);
  }
  Random r = new Random(654);
  for (CachedBlocksList list : lists) {
    testAddElementsToList(list, blocks);
  }
  for (CachedBlocksList list : lists) {
    testRemoveElementsFromList(r, list, blocks);
  }
}
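The two helpers called above are not part of the snippet. A rough sketch of their likely shape, assuming CachedBlocksList supports the usual add/remove/iterator operations and that JUnit assertions plus java.util.Arrays and java.util.Collections are available; the seeded Random only drives the order in which blocks are removed, which is why the test fixes the seed (654) for reproducibility.

// Hypothetical sketch of the helpers referenced above.
private void testAddElementsToList(CachedBlocksList list, CachedBlock[] blocks) {
  assertFalse("expected the list to start empty", list.iterator().hasNext());
  for (CachedBlock block : blocks) {
    assertTrue(list.add(block));
  }
}

private void testRemoveElementsFromList(Random r, CachedBlocksList list,
    CachedBlock[] blocks) {
  // remove the blocks in a pseudo-random order driven by the seeded Random
  CachedBlock[] shuffled = Arrays.copyOf(blocks, blocks.length);
  Collections.shuffle(Arrays.asList(shuffled), r);
  for (CachedBlock block : shuffled) {
    assertTrue(list.remove(block));
  }
  assertFalse("expected the list to be empty afterwards", list.iterator().hasNext());
}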
Use of java.util.Random in project hadoop by apache: class TestBlockTokenWithDFS, method generateBytes.
public static byte[] generateBytes(int fileSize) {
  Random r = new Random();
  byte[] rawData = new byte[fileSize];
  r.nextBytes(rawData);
  return rawData;
}
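A brief usage note: because the Random here is unseeded, generateBytes produces different content on every call, which is fine for tests that only need arbitrary file data. A test that must reproduce the same bytes across runs would seed the generator instead, for example (illustrative only):

byte[] data = generateBytes(64 * 1024);   // arbitrary 64 KB payload, different every run
Random seeded = new Random(0xCAFEL);      // fixed seed (illustrative) for repeatable content
byte[] repeatable = new byte[64 * 1024];
seeded.nextBytes(repeatable);             // identical bytes on every run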
Use of java.util.Random in project hadoop by apache: class TestEditLog, method testFuzzSequences.
/**
* "Fuzz" test for the edit log.
*
* This tests that we can read random garbage from the edit log without
* crashing the JVM or throwing an unchecked exception.
*/
@Test
public void testFuzzSequences() throws IOException {
  final int MAX_GARBAGE_LENGTH = 512;
  final int MAX_INVALID_SEQ = 5000;
  // The seed to use for our random number generator. When given the same
  // seed, java.util.Random will always produce the same sequence of values.
  // This is important because it means that the test is deterministic and
  // repeatable on any machine.
  final int RANDOM_SEED = 123;
  Random r = new Random(RANDOM_SEED);
  for (int i = 0; i < MAX_INVALID_SEQ; i++) {
    byte[] garbage = new byte[r.nextInt(MAX_GARBAGE_LENGTH)];
    r.nextBytes(garbage);
    validateNoCrash(garbage);
  }
}
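validateNoCrash is not shown in the snippet. The contract described in the javadoc is that feeding random bytes to the edit log reader may raise a checked IOException but must not crash the JVM or throw an unchecked exception. A minimal sketch of that pattern, assuming JUnit's fail and using readGarbageAsEditLog as a hypothetical placeholder for the real edit-log reading code:

// Hypothetical sketch of the validateNoCrash pattern used above.
private void validateNoCrash(byte[] garbage) throws IOException {
  try {
    // readGarbageAsEditLog is a hypothetical stand-in for writing the bytes to a
    // temporary edit log file and reading them back through the edit log reader.
    readGarbageAsEditLog(garbage);
  } catch (IOException e) {
    // expected: random bytes are almost never a well-formed edit log
  } catch (RuntimeException e) {
    fail("unchecked exception while reading garbage edit log: " + e);
  }
}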
Use of java.util.Random in project hadoop by apache: class TestCodec, method testSplitableCodec.
private void testSplitableCodec(Class<? extends SplittableCompressionCodec> codecClass) throws IOException {
  final long DEFLBYTES = 2 * 1024 * 1024;
  final Configuration conf = new Configuration();
  final Random rand = new Random();
  final long seed = rand.nextLong();
  LOG.info("seed: " + seed);
  rand.setSeed(seed);
  SplittableCompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
  final FileSystem fs = FileSystem.getLocal(conf);
  final FileStatus infile = fs.getFileStatus(writeSplitTestFile(fs, rand, codec, DEFLBYTES));
  if (infile.getLen() > Integer.MAX_VALUE) {
    fail("Unexpected compression: " + DEFLBYTES + " -> " + infile.getLen());
  }
  final int flen = (int) infile.getLen();
  final Text line = new Text();
  final Decompressor dcmp = CodecPool.getDecompressor(codec);
  try {
    for (int pos = 0; pos < infile.getLen(); pos += rand.nextInt(flen / 8)) {
      // read from random positions, verifying that there exist two sequential
      // lines as written in writeSplitTestFile
      final SplitCompressionInputStream in = codec.createInputStream(
          fs.open(infile.getPath()), dcmp, pos, flen,
          SplittableCompressionCodec.READ_MODE.BYBLOCK);
      if (in.getAdjustedStart() >= flen) {
        break;
      }
      LOG.info("SAMPLE " + in.getAdjustedStart() + "," + in.getAdjustedEnd());
      final LineReader lreader = new LineReader(in);
      // ignore the first line; it is likely partial
      lreader.readLine(line);
      if (in.getPos() >= flen) {
        break;
      }
      lreader.readLine(line);
      final int seq1 = readLeadingInt(line);
      lreader.readLine(line);
      if (in.getPos() >= flen) {
        break;
      }
      final int seq2 = readLeadingInt(line);
      assertEquals("Mismatched lines", seq1 + 1, seq2);
    }
  } finally {
    CodecPool.returnDecompressor(dcmp);
  }
  // remove the test file on success
  fs.delete(infile.getPath().getParent(), true);
}
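For context, this helper is parameterized over the codec class; BZip2Codec is the stock SplittableCompressionCodec in Hadoop, so a driving test would look roughly like this (the test method name is illustrative):

@Test
public void testSplittableBZip2Codec() throws IOException {
  testSplitableCodec(BZip2Codec.class);
}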