Search in sources:

Example 31 with InetSocketAddress

use of java.net.InetSocketAddress in project flink by apache.

the class BlobClientTest method testContentAddressableStream.

/**
 * Tests the PUT/GET operations for content-addressable streams.
 */
@Test
public void testContentAddressableStream() {
    BlobClient client = null;
    InputStream is = null;
    try {
        File testFile = File.createTempFile("testfile", ".dat");
        testFile.deleteOnExit();
        BlobKey origKey = prepareTestFile(testFile);
        InetSocketAddress serverAddress = new InetSocketAddress("localhost", BLOB_SERVER.getPort());
        client = new BlobClient(serverAddress, blobServiceConfig);
        // Store the data
        is = new FileInputStream(testFile);
        BlobKey receivedKey = client.put(is);
        assertEquals(origKey, receivedKey);
        is.close();
        is = null;
        // Retrieve the data
        is = client.get(receivedKey);
        validateGet(is, testFile);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (is != null) {
            try {
                is.close();
            } catch (Throwable t) {
                // ignore: best-effort close during cleanup
            }
        }
        if (client != null) {
            try {
                client.close();
            } catch (Throwable t) {
                // ignore: best-effort close during cleanup
            }
        }
    }
}
Also used : FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) InetSocketAddress(java.net.InetSocketAddress) File(java.io.File) IOException(java.io.IOException) EOFException(java.io.EOFException) Test(org.junit.Test)
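The same round trip can be written more compactly with try-with-resources, which closes the stream and the client even when an assertion fails. A minimal sketch, assuming BlobClient implements Closeable (it exposes close()) and reusing the BLOB_SERVER fixture, blobServiceConfig, prepareTestFile and validateGet helpers from the example above; the test method name is hypothetical.

@Test
public void testContentAddressableStreamCompact() throws Exception {
    File testFile = File.createTempFile("testfile", ".dat");
    testFile.deleteOnExit();
    BlobKey origKey = prepareTestFile(testFile);
    InetSocketAddress serverAddress = new InetSocketAddress("localhost", BLOB_SERVER.getPort());
    // both resources are closed automatically, in reverse order, even on failure
    try (BlobClient client = new BlobClient(serverAddress, blobServiceConfig);
            InputStream is = new FileInputStream(testFile)) {
        // PUT: the server derives the content-addressable key from the stream
        assertEquals(origKey, client.put(is));
        // GET: fetch the blob back by its key and compare it against the file
        try (InputStream get = client.get(origKey)) {
            validateGet(get, testFile);
        }
    }
}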

Example 32 with InetSocketAddress

use of java.net.InetSocketAddress in project flink by apache.

the class BlobRecoveryITCase method testBlobServerRecovery.

public static void testBlobServerRecovery(final Configuration config) throws IOException {
    final String clusterId = config.getString(HighAvailabilityOptions.HA_CLUSTER_ID);
    String storagePath = config.getString(HighAvailabilityOptions.HA_STORAGE_PATH) + "/" + clusterId;
    Random rand = new Random();
    BlobServer[] server = new BlobServer[2];
    InetSocketAddress[] serverAddress = new InetSocketAddress[2];
    BlobClient client = null;
    try {
        for (int i = 0; i < server.length; i++) {
            server[i] = new BlobServer(config);
            serverAddress[i] = new InetSocketAddress("localhost", server[i].getPort());
        }
        client = new BlobClient(serverAddress[0], config);
        // Random data
        byte[] expected = new byte[1024];
        rand.nextBytes(expected);
        BlobKey[] keys = new BlobKey[2];
        // Put data
        // Request 1
        keys[0] = client.put(expected);
        // Request 2
        keys[1] = client.put(expected, 32, 256);
        JobID[] jobId = new JobID[] { new JobID(), new JobID() };
        String[] testKey = new String[] { "test-key-1", "test-key-2" };
        // Request 3
        client.put(jobId[0], testKey[0], expected);
        // Request 4
        client.put(jobId[1], testKey[1], expected, 32, 256);
        // check that the storage directory exists
        final Path blobServerPath = new Path(storagePath, "blob");
        FileSystem fs = blobServerPath.getFileSystem();
        assertTrue("Unknown storage dir: " + blobServerPath, fs.exists(blobServerPath));
        // Close the client and connect to the other server
        client.close();
        client = new BlobClient(serverAddress[1], config);
        // Verify request 1
        try (InputStream is = client.get(keys[0])) {
            byte[] actual = new byte[expected.length];
            BlobUtils.readFully(is, actual, 0, expected.length, null);
            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }
        // Verify request 2
        try (InputStream is = client.get(keys[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);
            for (int i = 32, j = 0; i < 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }
        // Verify request 3
        try (InputStream is = client.get(jobId[0], testKey[0])) {
            byte[] actual = new byte[expected.length];
            BlobUtils.readFully(is, actual, 0, expected.length, null);
            for (int i = 0; i < expected.length; i++) {
                assertEquals(expected[i], actual[i]);
            }
        }
        // Verify request 4
        try (InputStream is = client.get(jobId[1], testKey[1])) {
            byte[] actual = new byte[256];
            BlobUtils.readFully(is, actual, 0, 256, null);
            for (int i = 32, j = 0; i < 256; i++, j++) {
                assertEquals(expected[i], actual[j]);
            }
        }
        // Remove again
        client.delete(keys[0]);
        client.delete(keys[1]);
        client.delete(jobId[0], testKey[0]);
        client.delete(jobId[1], testKey[1]);
        // Verify everything is clean
        assertTrue("HA storage directory does not exist", fs.exists(new Path(storagePath)));
        if (fs.exists(blobServerPath)) {
            final org.apache.flink.core.fs.FileStatus[] recoveryFiles = fs.listStatus(blobServerPath);
            ArrayList<String> filenames = new ArrayList<String>(recoveryFiles.length);
            for (org.apache.flink.core.fs.FileStatus file : recoveryFiles) {
                filenames.add(file.toString());
            }
            fail("Unclean state backend: " + filenames);
        }
    } finally {
        for (BlobServer s : server) {
            if (s != null) {
                s.shutdown();
            }
        }
        if (client != null) {
            client.close();
        }
    }
}
Also used : InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) Random(java.util.Random) FileSystem(org.apache.flink.core.fs.FileSystem) Path(org.apache.flink.core.fs.Path) InputStream(java.io.InputStream) JobID(org.apache.flink.api.common.JobID)
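The four "Verify request" blocks above differ only in how the blob is addressed and which slice of expected they compare against, so they can be factored into a single helper. A minimal sketch using only calls that appear in the example (client.get, BlobUtils.readFully); verifyContents is a hypothetical name.

// Hypothetical helper: read `length` bytes of a blob and compare them against
// expected[offset .. offset + length).
private static void verifyContents(InputStream blobStream, byte[] expected, int offset, int length) throws IOException {
    try (InputStream is = blobStream) {
        byte[] actual = new byte[length];
        BlobUtils.readFully(is, actual, 0, length, null);
        for (int i = 0; i < length; i++) {
            assertEquals(expected[offset + i], actual[i]);
        }
    }
}

Used in place of the verification loops, e.g. verifyContents(client.get(keys[0]), expected, 0, expected.length) for request 1, or verifyContents(client.get(keys[1]), expected, 32, 256 - 32) for request 2 (which, like the original loop, only checks the first 224 of the stored bytes).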

Example 33 with InetSocketAddress

use of java.net.InetSocketAddress in project flink by apache.

the class BlobCacheRetriesTest method testBlobFetchRetries.

/**
 * A test where the BlobCache must fetch from the BlobServer; the connection
 * fails twice before the GET operation finally succeeds.
 *
 * @param config
 *        configuration to use (the BlobCache will get some additional settings
 *        set compared to this one)
 */
private void testBlobFetchRetries(final Configuration config) {
    final byte[] data = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
    BlobServer server = null;
    BlobCache cache = null;
    try {
        server = new TestingFailingBlobServer(config, 2);
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        // upload some blob
        BlobClient blobClient = null;
        BlobKey key;
        try {
            blobClient = new BlobClient(serverAddress, config);
            key = blobClient.put(data);
        } finally {
            if (blobClient != null) {
                blobClient.close();
            }
        }
        // create a separate config for the cache with no access to
        // the (shared) storage path if available so that the cache
        // will always bother the BlobServer!
        final Configuration cacheConfig = new Configuration(config);
        cacheConfig.setString(HighAvailabilityOptions.HA_STORAGE_PATH, temporaryFolder.getRoot().getPath() + "/does-not-exist");
        cache = new BlobCache(serverAddress, cacheConfig);
        // trigger a download - it should fail the first two times, but retry, and succeed eventually
        URL url = cache.getURL(key);
        InputStream is = url.openStream();
        try {
            byte[] received = new byte[data.length];
            assertEquals(data.length, is.read(received));
            assertArrayEquals(data, received);
        } finally {
            is.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (cache != null) {
            cache.shutdown();
        }
        if (server != null) {
            server.shutdown();
        }
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) InetSocketAddress(java.net.InetSocketAddress) InputStream(java.io.InputStream) URL(java.net.URL) IOException(java.io.IOException)
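One fragile detail in the verification above: the single is.read(received) call assumes that one read() returns all data.length bytes, which InputStream does not guarantee in general (it happens to hold for this ten-byte blob). A defensive variant, sketched with the same imports, loops until the buffer is full:

URL url = cache.getURL(key);
try (InputStream is = url.openStream()) {
    byte[] received = new byte[data.length];
    int off = 0;
    // read() may return fewer bytes than requested, so keep reading until the buffer is full
    while (off < received.length) {
        int read = is.read(received, off, received.length - off);
        assertTrue("Premature end of stream", read > 0);
        off += read;
    }
    assertArrayEquals(data, received);
}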

Example 34 with InetSocketAddress

use of java.net.InetSocketAddress in project flink by apache.

the class BlobCacheSuccessTest method uploadFileGetTest.

private void uploadFileGetTest(final Configuration config, boolean cacheWorksWithoutServer, boolean cacheHasAccessToFs) {
    // First create two BLOBs and upload them to BLOB server
    final byte[] buf = new byte[128];
    final List<BlobKey> blobKeys = new ArrayList<BlobKey>(2);
    BlobServer blobServer = null;
    BlobCache blobCache = null;
    try {
        // Start the BLOB server
        blobServer = new BlobServer(config);
        final InetSocketAddress serverAddress = new InetSocketAddress("localhost", blobServer.getPort());
        // Upload BLOBs
        BlobClient blobClient = null;
        try {
            blobClient = new BlobClient(serverAddress, config);
            blobKeys.add(blobClient.put(buf));
            // Make sure the BLOB key changes
            buf[0] = 1;
            blobKeys.add(blobClient.put(buf));
        } finally {
            if (blobClient != null) {
                blobClient.close();
            }
        }
        if (cacheWorksWithoutServer) {
            // Now, shut down the BLOB server, the BLOBs must still be accessible through the cache.
            blobServer.shutdown();
            blobServer = null;
        }
        final Configuration cacheConfig;
        if (cacheHasAccessToFs) {
            cacheConfig = config;
        } else {
            // just in case parameters are still read from the server,
            // create a separate configuration object for the cache
            cacheConfig = new Configuration(config);
            cacheConfig.setString(HighAvailabilityOptions.HA_STORAGE_PATH, temporaryFolder.getRoot().getPath() + "/does-not-exist");
        }
        blobCache = new BlobCache(serverAddress, cacheConfig);
        for (BlobKey blobKey : blobKeys) {
            blobCache.getURL(blobKey);
        }
        if (blobServer != null) {
            // Now, shut down the BLOB server, the BLOBs must still be accessible through the cache.
            blobServer.shutdown();
            blobServer = null;
        }
        final URL[] urls = new URL[blobKeys.size()];
        for (int i = 0; i < blobKeys.size(); i++) {
            urls[i] = blobCache.getURL(blobKeys.get(i));
        }
        // Verify the result
        assertEquals(blobKeys.size(), urls.length);
        for (final URL url : urls) {
            assertNotNull(url);
            try {
                final File cachedFile = new File(url.toURI());
                assertTrue(cachedFile.exists());
                assertEquals(buf.length, cachedFile.length());
            } catch (URISyntaxException e) {
                fail(e.getMessage());
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (blobServer != null) {
            blobServer.shutdown();
        }
        if (blobCache != null) {
            blobCache.shutdown();
        }
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) URISyntaxException(java.net.URISyntaxException) URL(java.net.URL) File(java.io.File)
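For context, a hedged sketch of how the three parameters of uploadFileGetTest might be exercised by the actual test methods. The method names, the HA_MODE option and its "ZOOKEEPER" value are assumptions and not taken from the example; the premise is that the cache can only survive a server shutdown when it shares the HA storage path.

@Test
public void testBlobCacheStandalone() {
    // no HA storage: the cache must always go through the running BlobServer
    uploadFileGetTest(new Configuration(), false, false);
}

@Test
public void testBlobCacheWithHaStorage() {
    // shared HA storage path (assumed keys/values): the cache can read blobs
    // from the file system even after the BlobServer has been shut down
    Configuration config = new Configuration();
    config.setString(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER");
    config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, temporaryFolder.getRoot().getPath());
    uploadFileGetTest(config, true, true);
}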

Example 35 with InetSocketAddress

use of java.net.InetSocketAddress in project flink by apache.

the class BlobServerDeleteTest method testDeleteAlreadyDeletedByBlobKey.

@Test
public void testDeleteAlreadyDeletedByBlobKey() {
    BlobServer server = null;
    BlobClient client = null;
    try {
        Configuration config = new Configuration();
        server = new BlobServer(config);
        InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
        client = new BlobClient(serverAddress, config);
        byte[] data = new byte[2000000];
        rnd.nextBytes(data);
        // put content addressable (like libraries)
        BlobKey key = client.put(data);
        assertNotNull(key);
        File blobFile = server.getStorageLocation(key);
        assertTrue(blobFile.delete());
        // issue a DELETE request
        try {
            client.delete(key);
        } catch (IOException e) {
            fail("DELETE operation should not fail if file is already deleted");
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        cleanup(server, client);
    }
}
Also used : Configuration(org.apache.flink.configuration.Configuration) InetSocketAddress(java.net.InetSocketAddress) IOException(java.io.IOException) File(java.io.File) Test(org.junit.Test)
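The cleanup(server, client) call in the finally block is a helper of BlobServerDeleteTest that is not shown here; a plausible sketch (the actual helper may differ) simply releases both resources without masking the test result:

private static void cleanup(BlobServer server, BlobClient client) {
    if (client != null) {
        try {
            client.close();
        } catch (Throwable t) {
            // ignore: best-effort close during cleanup
        }
    }
    if (server != null) {
        server.shutdown();
    }
}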

Aggregations

InetSocketAddress (java.net.InetSocketAddress) 2586
Test (org.junit.Test) 595
IOException (java.io.IOException) 592
Socket (java.net.Socket) 345
InetAddress (java.net.InetAddress) 242
SocketAddress (java.net.SocketAddress) 176
ServerSocket (java.net.ServerSocket) 170
ArrayList (java.util.ArrayList) 168
Configuration (org.apache.hadoop.conf.Configuration) 140
ByteBuffer (java.nio.ByteBuffer) 129
UnknownHostException (java.net.UnknownHostException) 122
InputStream (java.io.InputStream) 102
OutputStream (java.io.OutputStream) 101
SocketChannel (java.nio.channels.SocketChannel) 101
SocketException (java.net.SocketException) 89
File (java.io.File) 88
HashMap (java.util.HashMap) 78
URI (java.net.URI) 72
Proxy (java.net.Proxy) 65
SocketTimeoutException (java.net.SocketTimeoutException) 65