Use of java.util.Random in project hadoop by apache: class TestGetBlocks, method testBlockKey.
@Test
public void testBlockKey() {
  Map<Block, Long> map = new HashMap<Block, Long>();
  final Random RAN = new Random();
  final long seed = RAN.nextLong();
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);

  long[] blkids = new long[10];
  for (int i = 0; i < blkids.length; i++) {
    blkids[i] = 1000L + RAN.nextInt(100000);
    map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
  }
  System.out.println("map=" + map.toString().replace(",", "\n "));

  for (int i = 0; i < blkids.length; i++) {
    Block b = new Block(blkids[i], 0, HdfsConstants.GRANDFATHER_GENERATION_STAMP);
    Long v = map.get(b);
    System.out.println(b + " => " + v);
    assertEquals(blkids[i], v.longValue());
  }
}
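The idiom above of drawing a seed, printing it, and re-seeding the generator is what makes a randomized test reproducible: a failing run can be replayed by reusing the logged seed. A minimal standalone sketch of the same pattern, with class and variable names that are illustrative rather than taken from Hadoop:

import java.util.Random;

public class SeededRandomExample {
  public static void main(String[] args) {
    // Pick an arbitrary seed, print it so a failing run can be reproduced,
    // then re-seed the generator with that value before using it.
    Random ran = new Random();
    long seed = ran.nextLong();
    System.out.println("seed=" + seed);
    ran.setSeed(seed);

    // Every value drawn after setSeed(seed) can be regenerated later by
    // constructing new Random(seed) with the logged seed.
    long blockId = 1000L + ran.nextInt(100000);
    System.out.println("blockId=" + blockId);
  }
}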
Use of java.util.Random in project hadoop by apache: class TestJournalNodeSync, method testRandomJournalMissingLogs.
// Test JournalNode Sync by randomly deleting edit logs from one or two of
// the journals.
@Test(timeout = 60000)
public void testRandomJournalMissingLogs() throws Exception {
  Random randomJournal = new Random();
  List<File> journalCurrentDirs = Lists.newArrayList();
  for (int i = 0; i < 3; i++) {
    journalCurrentDirs.add(
        new StorageDirectory(jCluster.getJournalDir(i, jid)).getCurrentDir());
  }

  int count = 0;
  long lastStartTxId;
  int journalIndex;
  List<File> missingLogs = Lists.newArrayList();
  while (count < 5) {
    lastStartTxId = generateEditLog();

    // Delete the last edit log segment from a randomly selected journal node
    journalIndex = randomJournal.nextInt(3);
    missingLogs.add(
        deleteEditLog(journalCurrentDirs.get(journalIndex), lastStartTxId));

    // Delete the last edit log segment from a second journal for some logs
    if (count % 2 == 0) {
      journalIndex = (journalIndex + 1) % 3;
      missingLogs.add(
          deleteEditLog(journalCurrentDirs.get(journalIndex), lastStartTxId));
    }
    count++;
  }

  GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
}
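The test relies on Random.nextInt(3) to pick one of the three journals uniformly, and on (journalIndex + 1) % 3 to pick a second journal that is guaranteed to differ from the first. A small sketch of that index-selection pattern in isolation (names are illustrative):

import java.util.Random;

public class RandomIndexExample {
  public static void main(String[] args) {
    final int numJournals = 3;
    Random random = new Random();

    // nextInt(n) returns a value in [0, n), so this picks one journal uniformly.
    int first = random.nextInt(numJournals);

    // Stepping by one modulo the count always yields a different index.
    int second = (first + 1) % numJournals;

    System.out.println("first=" + first + " second=" + second);
  }
}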
Use of java.util.Random in project hadoop by apache: class TestBlockListAsLongs, method testFuzz.
@Test
public void testFuzz() throws InterruptedException {
  Replica[] replicas = new Replica[100000];
  Random rand = new Random(0);
  for (int i = 0; i < replicas.length; i++) {
    Block b = new Block(rand.nextLong(), i, i << 4);
    // Note: nextInt(2) returns only 0 or 1, so case 2 is never selected and
    // every generated replica is either finalized or being written.
    switch (rand.nextInt(2)) {
      case 0:
        replicas[i] = new FinalizedReplica(b, null, null);
        break;
      case 1:
        replicas[i] = new ReplicaBeingWritten(b, null, null, null);
        break;
      case 2:
        replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
        break;
    }
  }
  checkReport(replicas);
}
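Because new Random(0) fixes the seed, the 100,000 generated replicas are identical on every run, so the fuzz test is deterministic and therefore debuggable. A hedged sketch of the same fixed-seed fuzzing shape, using placeholder item strings instead of Hadoop replica classes:

import java.util.Random;

public class FixedSeedFuzzExample {
  public static void main(String[] args) {
    // A fixed seed makes the "random" input sequence identical across runs.
    Random rand = new Random(0);
    String[] items = new String[10];
    for (int i = 0; i < items.length; i++) {
      // nextInt(3) covers all three branches; nextInt(2) would leave case 2 unreachable.
      switch (rand.nextInt(3)) {
        case 0: items[i] = "finalized-" + rand.nextLong(); break;
        case 1: items[i] = "being-written-" + rand.nextLong(); break;
        case 2: items[i] = "waiting-recovery-" + rand.nextLong(); break;
      }
      System.out.println(items[i]);
    }
  }
}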
Use of java.util.Random in project hadoop by apache: class DiskBalancerTestUtil, method newImbalancedCluster.
public static MiniDFSCluster newImbalancedCluster(
    final Configuration conf,
    final int numDatanodes,
    final long[] storageCapacities,
    final int defaultBlockSize,
    final int fileLen,
    final StartupOption dnOption)
    throws IOException, InterruptedException, TimeoutException {
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

  final String fileName = "/" + UUID.randomUUID().toString();
  final Path filePath = new Path(fileName);

  Preconditions.checkNotNull(storageCapacities);
  Preconditions.checkArgument(storageCapacities.length == 2,
      "need to specify capacities for two storages.");

  // Write a file and restart the cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .storageCapacities(storageCapacities)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.DISK })
      .storagesPerDatanode(2)
      .dnStartupOption(dnOption)
      .build();
  FsVolumeImpl source = null;
  FsVolumeImpl dest = null;

  cluster.waitActive();
  Random r = new Random();
  FileSystem fs = cluster.getFileSystem(0);
  TestBalancer.createFile(cluster, filePath, fileLen, (short) 1, 0);
  DFSTestUtil.waitReplication(fs, filePath, (short) 1);
  cluster.restartDataNodes();
  cluster.waitActive();

  // Get the data node and move all data to one disk.
  for (int i = 0; i < numDatanodes; i++) {
    DataNode dnNode = cluster.getDataNodes().get(i);
    try (FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences()) {
      source = (FsVolumeImpl) refs.get(0);
      dest = (FsVolumeImpl) refs.get(1);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
          source, dest);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
    }
  }
  cluster.restartDataNodes();
  cluster.waitActive();
  return cluster;
}
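Two details worth noting in the helper above: the test file path is made unique with UUID.randomUUID() rather than with java.util.Random, and the Random instance r is created but not referenced anywhere in the code shown. A minimal sketch of the unique-path idiom (the printed prefix is illustrative):

import java.util.UUID;

public class UniquePathExample {
  public static void main(String[] args) {
    // A random UUID gives each test run its own file name, so repeated or
    // concurrent runs against the same cluster do not collide.
    String fileName = "/" + UUID.randomUUID().toString();
    System.out.println("test file: " + fileName);
  }
}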
Use of java.util.Random in project hadoop by apache: class TestDiskBalancerRPC, method testMoveBlockAcrossVolume.
@Test
public void testMoveBlockAcrossVolume() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final int defaultBlockSize = 100;
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
  String fileName = "/tmp.txt";
  Path filePath = new Path(fileName);
  final int numDatanodes = 1;
  final int dnIndex = 0;
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDatanodes)
      .build();
  FsVolumeImpl source = null;
  FsVolumeImpl dest = null;
  try {
    cluster.waitActive();
    Random r = new Random();
    FileSystem fs = cluster.getFileSystem(dnIndex);
    DFSTestUtil.createFile(fs, filePath, 10 * 1024, (short) 1, r.nextLong());
    DataNode dnNode = cluster.getDataNodes().get(dnIndex);
    FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences();
    try {
      source = (FsVolumeImpl) refs.get(0);
      dest = (FsVolumeImpl) refs.get(1);
      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
          source, dest);
      assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
    } finally {
      refs.close();
    }
  } finally {
    cluster.shutdown();
  }
}
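Here the random long from r.nextLong() is passed to DFSTestUtil.createFile as the data seed, so each run writes a file with different, but internally reproducible, contents. A hedged sketch of the same idea using plain java.io, since the Hadoop test helper is not available outside the test tree (the file name and size are illustrative):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.Random;

public class RandomSeededFileExample {
  public static void main(String[] args) throws IOException {
    // Draw a random seed, then fill the file from a generator seeded with it.
    // Keeping the seed makes the exact byte sequence reproducible later.
    long seed = new Random().nextLong();
    System.out.println("file seed=" + seed);

    byte[] data = new byte[10 * 1024];
    new Random(seed).nextBytes(data);

    File out = File.createTempFile("random-data", ".bin");
    try (FileOutputStream fos = new FileOutputStream(out)) {
      fos.write(data);
    }
    System.out.println("wrote " + data.length + " bytes to " + out);
  }
}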