Search in sources :

Example 41 with Random

Use of java.util.Random in the Apache Flink project.

From the class BlobRecoveryITCase, method testBlobServerRecovery.

/**
 * Exercises BLOB server recovery in a high-availability setup: data put through
 * one BLOB server must be retrievable through a second server that shares the
 * same HA storage path, and deleting all blobs must leave the storage
 * directory clean.
 *
 * @param config configuration carrying the HA cluster id and HA storage path
 * @throws IOException if any BLOB server/client operation fails
 */
public static void testBlobServerRecovery(final Configuration config) throws IOException {
    final String clusterId = config.getString(HighAvailabilityOptions.HA_CLUSTER_ID);
    String storagePath = config.getString(HighAvailabilityOptions.HA_STORAGE_PATH) + "/" + clusterId;
    Random rand = new Random();
    BlobServer[] server = new BlobServer[2];
    InetSocketAddress[] serverAddress = new InetSocketAddress[2];
    BlobClient client = null;
    try {
        // Both servers are created from the same config, so they share the
        // HA storage path (recovery state) computed above.
        for (int i = 0; i < server.length; i++) {
            server[i] = new BlobServer(config);
            serverAddress[i] = new InetSocketAddress("localhost", server[i].getPort());
        }
        client = new BlobClient(serverAddress[0], config);
        // Random data
        byte[] expected = new byte[1024];
        rand.nextBytes(expected);
        BlobKey[] keys = new BlobKey[2];
        // Put data
        // Request 1: full buffer, content-addressable
        keys[0] = client.put(expected);
        // Request 2: sub-range [32, 32 + 256), content-addressable
        keys[1] = client.put(expected, 32, 256);
        JobID[] jobId = new JobID[] { new JobID(), new JobID() };
        String[] testKey = new String[] { "test-key-1", "test-key-2" };
        // Request 3: full buffer, addressed by (job id, name)
        client.put(jobId[0], testKey[0], expected);
        // Request 4: sub-range [32, 32 + 256), addressed by (job id, name)
        client.put(jobId[1], testKey[1], expected, 32, 256);
        // check that the storage directory exists
        final Path blobServerPath = new Path(storagePath, "blob");
        FileSystem fs = blobServerPath.getFileSystem();
        assertTrue("Unknown storage dir: " + blobServerPath, fs.exists(blobServerPath));
        // Close the client and connect to the other server; everything below
        // must be served from the shared HA storage.
        client.close();
        client = new BlobClient(serverAddress[1], config);
        // Verify request 1
        try (InputStream is = client.get(keys[0])) {
            byte[] actual = new byte[expected.length];
            BlobUtils.readFully(is, actual, 0, expected.length, null);
            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }
        // Verify request 2: the blob holds expected[32 .. 32 + 256), so all
        // 256 retrieved bytes must be compared. The loop must terminate on
        // j (the blob offset), not on i — bounding on i < 256 would silently
        // skip the last 32 bytes of the blob.
        try (InputStream is = client.get(keys[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);
            for (int i = 32, j = 0; j < 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }
        // Verify request 3
        try (InputStream is = client.get(jobId[0], testKey[0])) {
            byte[] actual = new byte[expected.length];
            BlobUtils.readFully(is, actual, 0, expected.length, null);
            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }
        // Verify request 4: same fix as request 2 — compare all 256 bytes.
        try (InputStream is = client.get(jobId[1], testKey[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);
            for (int i = 32, j = 0; j < 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }
        // Remove again
        client.delete(keys[0]);
        client.delete(keys[1]);
        client.delete(jobId[0], testKey[0]);
        client.delete(jobId[1], testKey[1]);
        // Verify everything is clean: the HA root must still exist, but the
        // blob sub-directory must be gone (or the test reports what was left).
        assertTrue("HA storage directory does not exist", fs.exists(new Path(storagePath)));
        if (fs.exists(blobServerPath)) {
            final org.apache.flink.core.fs.FileStatus[] recoveryFiles = fs.listStatus(blobServerPath);
            ArrayList<String> filenames = new ArrayList<String>(recoveryFiles.length);
            for (org.apache.flink.core.fs.FileStatus file : recoveryFiles) {
                filenames.add(file.toString());
            }
            fail("Unclean state backend: " + filenames);
        }
    } finally {
        // Best-effort cleanup of both servers and the last client instance.
        for (BlobServer s : server) {
            if (s != null) {
                s.shutdown();
            }
        }
        if (client != null) {
            client.close();
        }
    }
}
Also used : InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) Random(java.util.Random) FileSystem(org.apache.flink.core.fs.FileSystem) Path(org.apache.flink.core.fs.Path) InputStream(java.io.InputStream) JobID(org.apache.flink.api.common.JobID)

Example 42 with Random

Use of java.util.Random in the Apache Flink project.

From the class WebMonitorMessagesTest, method testStatusMessages.

@Test
public void testStatusMessages() {
    // Smoke-tests the web monitor status messages via GenericMessageTester:
    // each message type must survive the tester's round-trip checks.
    try {
        final Random random = new Random();
        // Singleton request messages.
        GenericMessageTester.testMessageInstance(RequestJobsOverview.getInstance());
        GenericMessageTester.testMessageInstance(RequestJobsWithIDsOverview.getInstance());
        GenericMessageTester.testMessageInstance(RequestStatusOverview.getInstance());
        GenericMessageTester.testMessageInstance(RequestJobsOverview.getInstance());
        // Messages instantiated with random field values.
        GenericMessageTester.testMessageInstance(GenericMessageTester.instantiateGeneric(RequestJobDetails.class, random));
        GenericMessageTester.testMessageInstance(GenericMessageTester.instantiateGeneric(StatusOverview.class, random));
        GenericMessageTester.testMessageInstance(GenericMessageTester.instantiateGeneric(JobsOverview.class, random));
        // JobsWithIDsOverview takes four random job-id lists directly.
        GenericMessageTester.testMessageInstance(new JobsWithIDsOverview(randomIds(random), randomIds(random), randomIds(random), randomIds(random)));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Also used : RequestStatusOverview(org.apache.flink.runtime.messages.webmonitor.RequestStatusOverview) StatusOverview(org.apache.flink.runtime.messages.webmonitor.StatusOverview) Random(java.util.Random) RequestJobsWithIDsOverview(org.apache.flink.runtime.messages.webmonitor.RequestJobsWithIDsOverview) JobsWithIDsOverview(org.apache.flink.runtime.messages.webmonitor.JobsWithIDsOverview) RequestJobDetails(org.apache.flink.runtime.messages.webmonitor.RequestJobDetails) RequestJobsOverview(org.apache.flink.runtime.messages.webmonitor.RequestJobsOverview) JobsOverview(org.apache.flink.runtime.messages.webmonitor.JobsOverview) Test(org.junit.Test)

Example 43 with Random

Use of java.util.Random in the Apache Hadoop project.

From the class CryptoStreamsTestBase, method setUp.

@Before
public void setUp() throws IOException {
    // Generates `count` random key/value records with a randomly seeded
    // generator and keeps the serialized bytes (and their length) in the
    // `data`/`dataLen` fields for the stream tests.
    final int seed = new Random().nextInt();
    final RandomDatum.Generator generator = new RandomDatum.Generator(seed);
    final DataOutputBuffer buffer = new DataOutputBuffer();
    int written = 0;
    while (written < count) {
        generator.next();
        // Serialize key first, then value, into the shared buffer.
        generator.getKey().write(buffer);
        generator.getValue().write(buffer);
        written++;
    }
    LOG.info("Generated " + count + " records");
    data = buffer.getData();
    dataLen = buffer.getLength();
}
Also used : Random(java.util.Random) DataOutputBuffer(org.apache.hadoop.io.DataOutputBuffer) RandomDatum(org.apache.hadoop.io.RandomDatum) Before(org.junit.Before)

Example 44 with Random

Use of java.util.Random in the Apache Hadoop project.

From the class TestDU, method createFile.

/**
 * Creates {@code newFile} and fills it with {@code size} bytes of random data.
 *
 * <p>Random (incompressible) bytes are used so that file systems with
 * transparent compression enabled (e.g., ZFS) cannot shrink the file, which
 * would skew the disk-usage figures under test.
 *
 * @param newFile the file to create and fill
 * @param size    number of random bytes to write
 * @throws IOException if the file cannot be created, written, or synced
 */
private void createFile(File newFile, int size) throws IOException {
    // write random data so that filesystems with compression enabled (e.g., ZFS)
    // can't compress the file
    byte[] data = new byte[size];
    new Random().nextBytes(data);
    newFile.createNewFile();
    // try-with-resources guarantees the file handle is released even when
    // write() or sync() throws (the original leaked it on exception).
    try (RandomAccessFile file = new RandomAccessFile(newFile, "rws")) {
        file.write(data);
        // Force the data to disk so subsequent size measurements are accurate.
        file.getFD().sync();
    }
}
Also used : Random(java.util.Random) RandomAccessFile(java.io.RandomAccessFile)

Example 45 with Random

Use of java.util.Random in the Apache Flink project.

From the class MutableHashTableTestBase, method testBuildAndRetrieve.

@Test
public void testBuildAndRetrieve() throws Exception {
    // Inserts a batch of seeded-random int pairs into the hash table, probes
    // for every one of them, and finally checks that closing the table
    // returns all memory pages.
    final int numMemPages = 32 * NUM_PAIRS / PAGE_SIZE;
    AbstractMutableHashTable<IntPair> table = getHashTable(intPairSerializer, intPairComparator, getMemory(numMemPages));
    final Random random = new Random(RANDOM_SEED);
    final IntPair[] pairs = getRandomizedIntPairs(NUM_PAIRS, random);
    table.open();
    // Build phase: insert every pair.
    for (int idx = 0; idx < NUM_PAIRS; idx++) {
        table.insert(pairs[idx]);
    }
    // Probe phase: every pair must be found with its stored value intact.
    AbstractHashTableProber<IntPair, IntPair> prober = table.getProber(intPairComparator, pairComparator);
    IntPair target = new IntPair();
    for (int idx = 0; idx < NUM_PAIRS; idx++) {
        assertNotNull(prober.getMatchFor(pairs[idx], target));
        assertEquals(pairs[idx].getValue(), target.getValue());
    }
    table.close();
    // Closing must hand back every page that was given to the table.
    assertEquals("Memory lost", numMemPages, table.getFreeMemory().size());
}
Also used : Random(java.util.Random) IntPair(org.apache.flink.runtime.operators.testutils.types.IntPair) Test(org.junit.Test)

Aggregations

Random (java.util.Random)4728 Test (org.junit.Test)1273 ArrayList (java.util.ArrayList)602 IOException (java.io.IOException)313 HashMap (java.util.HashMap)242 File (java.io.File)209 List (java.util.List)154 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)151 ByteArrayInputStream (java.io.ByteArrayInputStream)134 HashSet (java.util.HashSet)129 ByteBuffer (java.nio.ByteBuffer)123 Test (org.testng.annotations.Test)121 Path (org.apache.hadoop.fs.Path)116 Map (java.util.Map)106 QuickTest (com.hazelcast.test.annotation.QuickTest)99 ParallelTest (com.hazelcast.test.annotation.ParallelTest)94 CountDownLatch (java.util.concurrent.CountDownLatch)93 Configuration (org.apache.hadoop.conf.Configuration)88 ByteArrayOutputStream (java.io.ByteArrayOutputStream)79 Before (org.junit.Before)78