Example usage of java.net.InetSocketAddress in the Apache Flink project: the testContentAddressableStream method of the BlobClientTest class.
/**
 * Tests the PUT/GET round trip for content-addressable streams: uploading the
 * prepared test file must yield its precomputed content key, and fetching via
 * that key must return the original file contents.
 */
@Test
public void testContentAddressableStream() {
	BlobClient blobClient = null;
	InputStream stream = null;
	try {
		final File contentFile = File.createTempFile("testfile", ".dat");
		contentFile.deleteOnExit();
		final BlobKey expectedKey = prepareTestFile(contentFile);

		final InetSocketAddress serverAddr = new InetSocketAddress("localhost", BLOB_SERVER.getPort());
		blobClient = new BlobClient(serverAddr, blobServiceConfig);

		// Upload the file contents; the returned key is derived from the content
		// and must therefore match the key computed by prepareTestFile().
		stream = new FileInputStream(contentFile);
		final BlobKey actualKey = blobClient.put(stream);
		assertEquals(expectedKey, actualKey);
		stream.close();
		stream = null;

		// Download through the received key and compare against the file.
		stream = blobClient.get(actualKey);
		validateGet(stream, contentFile);
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	} finally {
		if (stream != null) {
			try {
				stream.close();
			} catch (Throwable ignored) {
				// best-effort cleanup only
			}
		}
		if (blobClient != null) {
			try {
				blobClient.close();
			} catch (Throwable ignored) {
				// best-effort cleanup only
			}
		}
	}
}
Example usage of java.net.InetSocketAddress in the Apache Flink project: the testBlobServerRecovery method of the BlobRecoveryITCase class.
/**
 * Uploads blobs to a first BlobServer, then retrieves and deletes them through a
 * second server sharing the same HA storage path, verifying that blob state
 * survives a server failover and that deletion leaves the storage dir clean.
 *
 * @param config configuration to use; must define the HA cluster id and the HA storage path
 * @throws IOException if a blob or file-system operation fails
 */
public static void testBlobServerRecovery(final Configuration config) throws IOException {
	final String clusterId = config.getString(HighAvailabilityOptions.HA_CLUSTER_ID);
	String storagePath = config.getString(HighAvailabilityOptions.HA_STORAGE_PATH) + "/" + clusterId;
	Random rand = new Random();
	BlobServer[] server = new BlobServer[2];
	InetSocketAddress[] serverAddress = new InetSocketAddress[2];
	BlobClient client = null;
	try {
		// Both servers use the same config and hence the same HA storage path.
		for (int i = 0; i < server.length; i++) {
			server[i] = new BlobServer(config);
			serverAddress[i] = new InetSocketAddress("localhost", server[i].getPort());
		}
		client = new BlobClient(serverAddress[0], config);
		// Random data
		byte[] expected = new byte[1024];
		rand.nextBytes(expected);
		BlobKey[] keys = new BlobKey[2];
		// Put data
		// Request 1: full array, content-addressable
		keys[0] = client.put(expected);
		// Request 2: sub-range [32, 32 + 256), content-addressable
		keys[1] = client.put(expected, 32, 256);
		JobID[] jobId = new JobID[] { new JobID(), new JobID() };
		String[] testKey = new String[] { "test-key-1", "test-key-2" };
		// Request 3: full array, name-addressable under a job id
		client.put(jobId[0], testKey[0], expected);
		// Request 4: sub-range [32, 32 + 256), name-addressable under a job id
		client.put(jobId[1], testKey[1], expected, 32, 256);
		// check that the storage directory exists
		final Path blobServerPath = new Path(storagePath, "blob");
		FileSystem fs = blobServerPath.getFileSystem();
		assertTrue("Unknown storage dir: " + blobServerPath, fs.exists(blobServerPath));
		// Close the client and connect to the other server
		client.close();
		client = new BlobClient(serverAddress[1], config);
		// Verify request 1
		try (InputStream is = client.get(keys[0])) {
			byte[] actual = new byte[expected.length];
			BlobUtils.readFully(is, actual, 0, expected.length, null);
			for (int i = 0; i < expected.length; i++) {
				assertEquals(expected[i], actual[i]);
			}
		}
		// Verify request 2: the blob holds expected[32..287] (offset 32, length 256).
		// BUGFIX: the previous bound 'i < 256' compared only the first 224 of the
		// 256 stored bytes; compare the complete stored range instead.
		try (InputStream is = client.get(keys[1])) {
			byte[] actual = new byte[256];
			BlobUtils.readFully(is, actual, 0, 256, null);
			for (int i = 32, j = 0; i < 32 + 256; i++, j++) {
				assertEquals(expected[i], actual[j]);
			}
		}
		// Verify request 3
		try (InputStream is = client.get(jobId[0], testKey[0])) {
			byte[] actual = new byte[expected.length];
			BlobUtils.readFully(is, actual, 0, expected.length, null);
			for (int i = 0; i < expected.length; i++) {
				assertEquals(expected[i], actual[i]);
			}
		}
		// Verify request 4: same sub-range as request 2.
		// BUGFIX: same incomplete bound as above; compare all 256 stored bytes.
		try (InputStream is = client.get(jobId[1], testKey[1])) {
			byte[] actual = new byte[256];
			BlobUtils.readFully(is, actual, 0, 256, null);
			for (int i = 32, j = 0; i < 32 + 256; i++, j++) {
				assertEquals(expected[i], actual[j]);
			}
		}
		// Remove again
		client.delete(keys[0]);
		client.delete(keys[1]);
		client.delete(jobId[0], testKey[0]);
		client.delete(jobId[1], testKey[1]);
		// Verify everything is clean: the HA root must remain, but the blob
		// sub-directory must not contain any leftover recovery files.
		assertTrue("HA storage directory does not exist", fs.exists(new Path(storagePath)));
		if (fs.exists(blobServerPath)) {
			final org.apache.flink.core.fs.FileStatus[] recoveryFiles = fs.listStatus(blobServerPath);
			ArrayList<String> filenames = new ArrayList<String>(recoveryFiles.length);
			for (org.apache.flink.core.fs.FileStatus file : recoveryFiles) {
				filenames.add(file.toString());
			}
			fail("Unclean state backend: " + filenames);
		}
	} finally {
		for (BlobServer s : server) {
			if (s != null) {
				s.shutdown();
			}
		}
		if (client != null) {
			client.close();
		}
	}
}
Example usage of java.net.InetSocketAddress in the Apache Flink project: the testBlobFetchRetries method of the BlobCacheRetriesTest class.
/**
 * A test where the BlobCache must use the BlobServer and the connection
 * fails twice and then the get operation succeeds.
 *
 * @param config
 *        configuration to use (the BlobCache will get some additional settings
 *        set compared to this one)
 */
private void testBlobFetchRetries(final Configuration config) {
	final byte[] data = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
	BlobServer server = null;
	BlobCache cache = null;
	try {
		// Server whose first two connection attempts fail, forcing the cache to retry.
		server = new TestingFailingBlobServer(config, 2);
		final InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
		// upload some blob
		BlobClient blobClient = null;
		BlobKey key;
		try {
			blobClient = new BlobClient(serverAddress, config);
			key = blobClient.put(data);
		} finally {
			if (blobClient != null) {
				blobClient.close();
			}
		}
		// create a separate config for the cache with no access to
		// the (shared) storage path if available so that the cache
		// will always bother the BlobServer!
		final Configuration cacheConfig = new Configuration(config);
		cacheConfig.setString(HighAvailabilityOptions.HA_STORAGE_PATH, temporaryFolder.getRoot().getPath() + "/does-not-exist");
		cache = new BlobCache(serverAddress, cacheConfig);
		// trigger a download - it should fail the first two times, but retry, and succeed eventually
		URL url = cache.getURL(key);
		InputStream is = url.openStream();
		try {
			// BUGFIX: InputStream.read(byte[]) may legally return fewer bytes than
			// requested; read in a loop until the buffer is full or EOF is reached
			// instead of asserting on the result of a single read() call.
			byte[] received = new byte[data.length];
			int bytesRead = 0;
			while (bytesRead < received.length) {
				int n = is.read(received, bytesRead, received.length - bytesRead);
				if (n < 0) {
					break;
				}
				bytesRead += n;
			}
			assertEquals(data.length, bytesRead);
			assertArrayEquals(data, received);
		} finally {
			is.close();
		}
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	} finally {
		if (cache != null) {
			cache.shutdown();
		}
		if (server != null) {
			server.shutdown();
		}
	}
}
Example usage of java.net.InetSocketAddress in the Apache Flink project: the uploadFileGetTest method of the BlobCacheSuccessTest class.
/**
 * Uploads two BLOBs to a BlobServer and verifies that a BlobCache can serve
 * them, optionally after the server has been shut down and optionally without
 * file-system access (forcing genuine cache-to-server transfers).
 *
 * @param config base configuration for both server and cache
 * @param cacheWorksWithoutServer whether the server is shut down before the
 *        cache performs its first lookup
 * @param cacheHasAccessToFs whether the cache may read the (shared) HA
 *        storage path directly
 */
private void uploadFileGetTest(final Configuration config, boolean cacheWorksWithoutServer, boolean cacheHasAccessToFs) {
	// First create two BLOBs and upload them to BLOB server
	final byte[] buf = new byte[128];
	final List<BlobKey> blobKeys = new ArrayList<BlobKey>(2);
	BlobServer blobServer = null;
	BlobCache blobCache = null;
	try {
		// Start the BLOB server
		blobServer = new BlobServer(config);
		// BUGFIX: connect explicitly to "localhost" instead of the wildcard
		// address - connecting to 0.0.0.0 is platform-dependent, and this
		// matches the address used by the other blob-service tests.
		final InetSocketAddress serverAddress = new InetSocketAddress("localhost", blobServer.getPort());
		// Upload BLOBs
		BlobClient blobClient = null;
		try {
			blobClient = new BlobClient(serverAddress, config);
			blobKeys.add(blobClient.put(buf));
			// Make sure the BLOB key changes
			buf[0] = 1;
			blobKeys.add(blobClient.put(buf));
		} finally {
			if (blobClient != null) {
				blobClient.close();
			}
		}
		if (cacheWorksWithoutServer) {
			// Now, shut down the BLOB server, the BLOBs must still be accessible through the cache.
			blobServer.shutdown();
			blobServer = null;
		}
		final Configuration cacheConfig;
		if (cacheHasAccessToFs) {
			cacheConfig = config;
		} else {
			// just in case parameters are still read from the server,
			// create a separate configuration object for the cache
			cacheConfig = new Configuration(config);
			cacheConfig.setString(HighAvailabilityOptions.HA_STORAGE_PATH, temporaryFolder.getRoot().getPath() + "/does-not-exist");
		}
		blobCache = new BlobCache(serverAddress, cacheConfig);
		// Warm the cache so the BLOBs are available locally afterwards.
		for (BlobKey blobKey : blobKeys) {
			blobCache.getURL(blobKey);
		}
		if (blobServer != null) {
			// Now, shut down the BLOB server, the BLOBs must still be accessible through the cache.
			blobServer.shutdown();
			blobServer = null;
		}
		final URL[] urls = new URL[blobKeys.size()];
		for (int i = 0; i < blobKeys.size(); i++) {
			urls[i] = blobCache.getURL(blobKeys.get(i));
		}
		// Verify the result: each URL must point at an existing cached file of the right size.
		assertEquals(blobKeys.size(), urls.length);
		for (final URL url : urls) {
			assertNotNull(url);
			try {
				final File cachedFile = new File(url.toURI());
				assertTrue(cachedFile.exists());
				assertEquals(buf.length, cachedFile.length());
			} catch (URISyntaxException e) {
				fail(e.getMessage());
			}
		}
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	} finally {
		if (blobServer != null) {
			blobServer.shutdown();
		}
		if (blobCache != null) {
			blobCache.shutdown();
		}
	}
}
Example usage of java.net.InetSocketAddress in the Apache Flink project: the testDeleteAlreadyDeletedByBlobKey method of the BlobServerDeleteTest class.
/**
 * Deleting a content-addressable blob whose backing file was already removed
 * from the server's storage must not cause the DELETE request to fail.
 */
@Test
public void testDeleteAlreadyDeletedByBlobKey() {
	BlobServer blobServer = null;
	BlobClient blobClient = null;
	try {
		Configuration serverConfig = new Configuration();
		blobServer = new BlobServer(serverConfig);
		InetSocketAddress address = new InetSocketAddress("localhost", blobServer.getPort());
		blobClient = new BlobClient(address, serverConfig);

		byte[] payload = new byte[2000000];
		rnd.nextBytes(payload);

		// Store content-addressably (as libraries are) and confirm a key came back.
		BlobKey blobKey = blobClient.put(payload);
		assertNotNull(blobKey);

		// Remove the backing file behind the server's back.
		File storedFile = blobServer.getStorageLocation(blobKey);
		assertTrue(storedFile.delete());

		// A DELETE for the now-missing file must still succeed.
		try {
			blobClient.delete(blobKey);
		} catch (IOException e) {
			fail("DELETE operation should not fail if file is already deleted");
		}
	} catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	} finally {
		cleanup(blobServer, blobClient);
	}
}
End of the aggregated java.net.InetSocketAddress usage examples.