Use of java.util.Random in project flink by apache.
The class BlobRecoveryITCase, method testBlobServerRecovery:
public static void testBlobServerRecovery(final Configuration config) throws IOException {
    final String clusterId = config.getString(HighAvailabilityOptions.HA_CLUSTER_ID);
    String storagePath = config.getString(HighAvailabilityOptions.HA_STORAGE_PATH) + "/" + clusterId;
    Random rand = new Random();
    BlobServer[] server = new BlobServer[2];
    InetSocketAddress[] serverAddress = new InetSocketAddress[2];
    BlobClient client = null;
    try {
        for (int i = 0; i < server.length; i++) {
            server[i] = new BlobServer(config);
            serverAddress[i] = new InetSocketAddress("localhost", server[i].getPort());
        }
        client = new BlobClient(serverAddress[0], config);
        // Random data
        byte[] expected = new byte[1024];
        rand.nextBytes(expected);
        BlobKey[] keys = new BlobKey[2];
        // Put data
        // Request 1
        keys[0] = client.put(expected);
        // Request 2: only the sub-range expected[32 .. 32 + 256) is stored
        keys[1] = client.put(expected, 32, 256);
        JobID[] jobId = new JobID[] { new JobID(), new JobID() };
        String[] testKey = new String[] { "test-key-1", "test-key-2" };
        // Request 3
        client.put(jobId[0], testKey[0], expected);
        // Request 4
        client.put(jobId[1], testKey[1], expected, 32, 256);
        // Check that the storage directory exists
        final Path blobServerPath = new Path(storagePath, "blob");
        FileSystem fs = blobServerPath.getFileSystem();
        assertTrue("Unknown storage dir: " + blobServerPath, fs.exists(blobServerPath));
        // Close the client and connect to the other server
        client.close();
        client = new BlobClient(serverAddress[1], config);
        // Verify request 1
        try (InputStream is = client.get(keys[0])) {
            byte[] actual = new byte[expected.length];
            BlobUtils.readFully(is, actual, 0, expected.length, null);
            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }
        // Verify request 2: compare all 256 stored bytes, i.e. expected[32 .. 32 + 256)
        try (InputStream is = client.get(keys[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);
            for (int i = 32, j = 0; i < 32 + 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }
        // Verify request 3
        try (InputStream is = client.get(jobId[0], testKey[0])) {
            byte[] actual = new byte[expected.length];
            BlobUtils.readFully(is, actual, 0, expected.length, null);
            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }
        // Verify request 4: same sub-range comparison as request 2
        try (InputStream is = client.get(jobId[1], testKey[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);
            for (int i = 32, j = 0; i < 32 + 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }
        // Remove again
        client.delete(keys[0]);
        client.delete(keys[1]);
        client.delete(jobId[0], testKey[0]);
        client.delete(jobId[1], testKey[1]);
        // Verify everything is clean
        assertTrue("HA storage directory does not exist", fs.exists(new Path(storagePath)));
        if (fs.exists(blobServerPath)) {
            final org.apache.flink.core.fs.FileStatus[] recoveryFiles = fs.listStatus(blobServerPath);
            ArrayList<String> filenames = new ArrayList<String>(recoveryFiles.length);
            for (org.apache.flink.core.fs.FileStatus file : recoveryFiles) {
                filenames.add(file.toString());
            }
            fail("Unclean state backend: " + filenames);
        }
    } finally {
        for (BlobServer s : server) {
            if (s != null) {
                s.shutdown();
            }
        }
        if (client != null) {
            client.close();
        }
    }
}
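The sub-range checks above compare the stored bytes one index at a time. The same comparison can be written in one call with java.util.Arrays; a minimal, self-contained sketch of that idiom (the class name and stand-in buffer here are illustrative, not part of the Flink test):

import java.util.Arrays;
import java.util.Random;

public class RangeCheckSketch {
    public static void main(String[] args) {
        Random rand = new Random();
        byte[] expected = new byte[1024];
        rand.nextBytes(expected);
        // Stand-in for the bytes read back after put(expected, 32, 256)
        byte[] actual = Arrays.copyOfRange(expected, 32, 32 + 256);
        // One Arrays.equals call replaces the index loop in the test above
        if (!Arrays.equals(Arrays.copyOfRange(expected, 32, 32 + 256), actual)) {
            throw new AssertionError("partial blob content differs");
        }
        System.out.println("range check passed");
    }
}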
Use of java.util.Random in project flink by apache.
The class WebMonitorMessagesTest, method testStatusMessages:
@Test
public void testStatusMessages() {
    try {
        final Random rnd = new Random();
        GenericMessageTester.testMessageInstance(RequestJobsOverview.getInstance());
        GenericMessageTester.testMessageInstance(RequestJobsWithIDsOverview.getInstance());
        GenericMessageTester.testMessageInstance(RequestStatusOverview.getInstance());
        GenericMessageTester.testMessageInstance(RequestJobsOverview.getInstance());
        GenericMessageTester.testMessageInstance(GenericMessageTester.instantiateGeneric(RequestJobDetails.class, rnd));
        GenericMessageTester.testMessageInstance(GenericMessageTester.instantiateGeneric(StatusOverview.class, rnd));
        GenericMessageTester.testMessageInstance(GenericMessageTester.instantiateGeneric(JobsOverview.class, rnd));
        GenericMessageTester.testMessageInstance(new JobsWithIDsOverview(randomIds(rnd), randomIds(rnd), randomIds(rnd), randomIds(rnd)));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
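The randomIds helper is defined elsewhere in the test class and is not shown here. A plausible sketch of what such a helper could look like (the body below is an assumption, not the actual Flink code; JobsWithIDsOverview takes four lists of JobIDs):

// Hypothetical sketch: build a list of random JobIDs of random length.
private static List<JobID> randomIds(Random rnd) {
    int num = rnd.nextInt(16);
    List<JobID> ids = new ArrayList<>(num);
    for (int i = 0; i < num; i++) {
        ids.add(new JobID(rnd.nextLong(), rnd.nextLong()));
    }
    return ids;
}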
Use of java.util.Random in project hadoop by apache.
The class CryptoStreamsTestBase, method setUp:
@Before
public void setUp() throws IOException {
    // Generate data
    final int seed = new Random().nextInt();
    final DataOutputBuffer dataBuf = new DataOutputBuffer();
    final RandomDatum.Generator generator = new RandomDatum.Generator(seed);
    for (int i = 0; i < count; ++i) {
        generator.next();
        final RandomDatum key = generator.getKey();
        final RandomDatum value = generator.getValue();
        key.write(dataBuf);
        value.write(dataBuf);
    }
    LOG.info("Generated " + count + " records");
    data = dataBuf.getData();
    dataLen = dataBuf.getLength();
}
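Because setUp draws a fresh seed on every run, a failure can be hard to reproduce. A common hedge is to log the seed and allow it to be pinned externally; a sketch, assuming a made-up system property name test.random.seed (not a Hadoop convention):

// Sketch only: "test.random.seed" is an assumed property name.
// Integer.getInteger returns the property value if set, else the fallback.
final int seed = Integer.getInteger("test.random.seed", new Random().nextInt());
LOG.info("Using RandomDatum.Generator seed " + seed);
final RandomDatum.Generator generator = new RandomDatum.Generator(seed);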
Use of java.util.Random in project hadoop by apache.
The class TestDU, method createFile:
private void createFile(File newFile, int size) throws IOException {
    // Write random data so that filesystems with compression enabled (e.g., ZFS)
    // can't compress the file
    Random random = new Random();
    byte[] data = new byte[size];
    random.nextBytes(data);
    newFile.createNewFile();
    RandomAccessFile file = new RandomAccessFile(newFile, "rws");
    file.write(data);
    file.getFD().sync();
    file.close();
}
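On Java 7 and later the same method can use try-with-resources, which closes the file handle even if write or sync throws; a behavior-equivalent sketch (the "rws" mode already creates the file if absent, so the explicit createNewFile call can be dropped):

private void createFile(File newFile, int size) throws IOException {
    byte[] data = new byte[size];
    new Random().nextBytes(data);
    // "rws" creates the file if needed and writes content and metadata synchronously
    try (RandomAccessFile file = new RandomAccessFile(newFile, "rws")) {
        file.write(data);
        file.getFD().sync();
    }
}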
Use of java.util.Random in project flink by apache.
The class MutableHashTableTestBase, method testBuildAndRetrieve:
@Test
public void testBuildAndRetrieve() throws Exception {
    final int NUM_MEM_PAGES = 32 * NUM_PAIRS / PAGE_SIZE;
    AbstractMutableHashTable<IntPair> table = getHashTable(intPairSerializer, intPairComparator, getMemory(NUM_MEM_PAGES));
    final Random rnd = new Random(RANDOM_SEED);
    final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, rnd);
    table.open();
    for (int i = 0; i < NUM_PAIRS; i++) {
        table.insert(pairs[i]);
    }
    AbstractHashTableProber<IntPair, IntPair> prober = table.getProber(intPairComparator, pairComparator);
    IntPair target = new IntPair();
    for (int i = 0; i < NUM_PAIRS; i++) {
        assertNotNull(prober.getMatchFor(pairs[i], target));
        assertEquals(pairs[i].getValue(), target.getValue());
    }
    table.close();
    assertEquals("Memory lost", NUM_MEM_PAGES, table.getFreeMemory().size());
}
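getRandomizedIntPairs comes from the surrounding test base and is not shown above. Note that the test seeds Random with RANDOM_SEED, so the "random" pairs are identical on every run. One way such a helper could produce unique keys in random insertion order (a sketch; the real implementation may differ):

// Hypothetical sketch: unique keys 0..num-1 in shuffled order, random values.
private static IntPair[] getRandomizedIntPairs(int num, Random rnd) {
    IntPair[] pairs = new IntPair[num];
    for (int i = 0; i < num; i++) {
        pairs[i] = new IntPair(i, rnd.nextInt());
    }
    // Fisher-Yates shuffle so insertion order does not correlate with key order
    for (int i = num - 1; i > 0; i--) {
        int j = rnd.nextInt(i + 1);
        IntPair tmp = pairs[i];
        pairs[i] = pairs[j];
        pairs[j] = tmp;
    }
    return pairs;
}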