Use of java.net.InetSocketAddress in project hadoop by apache.
The class TestWebHDFS, method testWebHdfsGetBlockLocationsWithStorageType.
@Test
public void testWebHdfsGetBlockLocationsWithStorageType() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  final int OFFSET = 42;
  final int LENGTH = 512;
  final Path PATH = new Path("/foo");
  byte[] CONTENTS = new byte[1024];
  RANDOM.nextBytes(CONTENTS);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    try (OutputStream os = fs.create(PATH)) {
      os.write(CONTENTS);
    }
    BlockLocation[] locations = fs.getFileBlockLocations(PATH, OFFSET, LENGTH);
    for (BlockLocation location : locations) {
      StorageType[] storageTypes = location.getStorageTypes();
      Assert.assertTrue(storageTypes != null && storageTypes.length > 0
          && storageTypes[0] == StorageType.DISK);
    }
    // Query the WebHDFS REST API for the same block locations
    InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
    // Case 1: URL without length or offset parameters
    URL url1 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS");
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url1);
    String response1 = getResponse(url1, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response1);
    // Parse the BlockLocation array from the JSON output using an object mapper
    BlockLocation[] locationArray1 = toBlockLocationArray(response1);
    // Verify that the result of the REST call matches the file system API
    verifyEquals(locations, locationArray1);
    // Case 2: URL contains both length and offset parameters
    URL url2 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
        + "&length=" + LENGTH + "&offset=" + OFFSET);
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url2);
    String response2 = getResponse(url2, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response2);
    BlockLocation[] locationArray2 = toBlockLocationArray(response2);
    verifyEquals(locations, locationArray2);
    // Case 3: URL contains a length parameter but no offset parameter
    URL url3 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
        + "&length=" + LENGTH);
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url3);
    String response3 = getResponse(url3, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response3);
    BlockLocation[] locationArray3 = toBlockLocationArray(response3);
    verifyEquals(locations, locationArray3);
    // Case 4: URL contains an offset parameter but no length parameter
    URL url4 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
        + "&offset=" + OFFSET);
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url4);
    String response4 = getResponse(url4, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response4);
    BlockLocation[] locationArray4 = toBlockLocationArray(response4);
    verifyEquals(locations, locationArray4);
    // Case 5: URL specifies an offset beyond the end of the file
    URL url5 = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/foo?op=GETFILEBLOCKLOCATIONS"
        + "&offset=1200");
    LOG.info("Sending GETFILEBLOCKLOCATIONS request " + url5);
    String response5 = getResponse(url5, "GET");
    LOG.info("The output of GETFILEBLOCKLOCATIONS request " + response5);
    BlockLocation[] locationArray5 = toBlockLocationArray(response5);
    // Expect an empty BlockLocation array
    verifyEquals(new BlockLocation[] {}, locationArray5);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
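The test relies on two helpers, getResponse and toBlockLocationArray, that are not shown here. A minimal sketch of what they could look like, assuming a plain HttpURLConnection request and a Jackson ObjectMapper for parsing; the nested JSON shape ("BlockLocations" wrapping a "BlockLocation" array) matches the WebHDFS GETFILEBLOCKLOCATIONS response, but these bodies are illustrative, not the project's actual helpers, and whether BlockLocation deserializes directly depends on its bean shape:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.stream.Collectors;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.fs.BlockLocation;

// Issue an HTTP request and return the response body as a string.
private static String getResponse(URL url, String method) throws IOException {
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  try {
    conn.setRequestMethod(method);
    conn.setInstanceFollowRedirects(false);
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      return reader.lines().collect(Collectors.joining("\n"));
    }
  } finally {
    conn.disconnect();
  }
}

// Unwrap {"BlockLocations": {"BlockLocation": [...]}} into a BlockLocation[].
private static BlockLocation[] toBlockLocationArray(String json)
    throws IOException {
  ObjectMapper mapper = new ObjectMapper();
  JsonNode array = mapper.readTree(json)
      .path("BlockLocations").path("BlockLocation");
  return mapper.treeToValue(array, BlockLocation[].class);
}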
Use of java.net.InetSocketAddress in project hadoop by apache.
The class TestHttpsFileSystem, method setUp.
@BeforeClass
public static void setUp() throws Exception {
  conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
      HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHttpsFileSystem.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getClientSSLConfigFileName());
  conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
      KeyStoreTestUtil.getServerSSLConfigFileName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  OutputStream os = cluster.getFileSystem().create(new Path("/test"));
  os.write(23);
  os.close();
  InetSocketAddress addr = cluster.getNameNode().getHttpsAddress();
  nnAddr = NetUtils.getHostPortString(addr);
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
}
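With this setup in place, a test in the class can exercise the HTTPS-only endpoint. A minimal sketch, assuming the swebhdfs scheme constant WebHdfsConstants.SWEBHDFS_SCHEME and the static conf field initialized above; the test name is hypothetical:

@Test
public void testReadOverHttps() throws Exception {
  // Read back the single byte written in setUp over swebhdfs:// (HTTPS).
  FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
      WebHdfsConstants.SWEBHDFS_SCHEME);
  try (InputStream is = fs.open(new Path("/test"))) {
    Assert.assertEquals(23, is.read());
  }
}

Because setUp rewrites DFS_NAMENODE_HTTPS_ADDRESS_KEY with the actual bound address, the client resolves the NameNode's HTTPS port without hardcoding it.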
Use of java.net.InetSocketAddress in project hadoop by apache.
The class TestWebHDFS, method testWebHdfsNoRedirect.
/**
 * Test that when "&noredirect=true" is added to the operations CREATE,
 * APPEND, OPEN, and GETFILECHECKSUM, the response (which is usually a 307
 * temporary redirect) is a 200 with JSON that contains the redirected
 * location.
 */
@Test
public void testWebHdfsNoRedirect() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    LOG.info("Started cluster");
    InetSocketAddress addr = cluster.getNameNode().getHttpAddress();
    URL url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirectCreate"
        + "?op=CREATE"
        + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending create request " + url);
    checkResponseContainsLocation(url, "PUT");
    // Write a file that we can read
    final WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    final String PATH = "/testWebHdfsNoRedirect";
    byte[] CONTENTS = new byte[1024];
    RANDOM.nextBytes(CONTENTS);
    try (OutputStream os = fs.create(new Path(PATH))) {
      os.write(CONTENTS);
    }
    url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect"
        + "?op=OPEN"
        + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending open request " + url);
    checkResponseContainsLocation(url, "GET");
    url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect"
        + "?op=GETFILECHECKSUM"
        + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending getfilechecksum request " + url);
    checkResponseContainsLocation(url, "GET");
    url = new URL("http", addr.getHostString(), addr.getPort(),
        WebHdfsFileSystem.PATH_PREFIX + "/testWebHdfsNoRedirect"
        + "?op=APPEND"
        + Param.toSortedString("&", new NoRedirectParam(true)));
    LOG.info("Sending append request " + url);
    checkResponseContainsLocation(url, "POST");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
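The helper checkResponseContainsLocation is not shown above. A plausible sketch, assuming Jackson for the JSON check (the project itself may use a different JSON library); the intent is that with noredirect=true the server answers 200 and puts the would-be redirect target in a "Location" field of the body:

// Reuses the imports from the getResponse sketch above, plus
// com.fasterxml.jackson.databind.ObjectMapper and org.junit.Assert.
private void checkResponseContainsLocation(URL url, String method)
    throws IOException {
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  try {
    conn.setRequestMethod(method);
    conn.setInstanceFollowRedirects(false);
    // With noredirect=true the server should answer 200, not 307.
    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
    String body;
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      body = reader.lines().collect(Collectors.joining("\n"));
    }
    Assert.assertTrue("Expected a Location field in: " + body,
        new ObjectMapper().readTree(body).has("Location"));
  } finally {
    conn.disconnect();
  }
}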
Use of java.net.InetSocketAddress in project hadoop by apache.
The class TestWebHdfsTimeouts, method setUp.
@Before
public void setUp() throws Exception {
  Configuration conf = WebHdfsTestUtil.createConf();
  serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
  nnHttpAddress = new InetSocketAddress("localhost",
      serverSocket.getLocalPort());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "localhost:" + serverSocket.getLocalPort());
  if (timeoutSource == TimeoutSource.Configuration) {
    String v = Integer.toString(SHORT_SOCKET_TIMEOUT) + "ms";
    conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_SOCKET_CONNECT_TIMEOUT_KEY, v);
    conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_SOCKET_READ_TIMEOUT_KEY, v);
  }
  fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
      WebHdfsConstants.WEBHDFS_SCHEME);
  if (timeoutSource == TimeoutSource.ConnectionFactory) {
    fs.connectionFactory = connectionFactory;
  }
  clients = new ArrayList<SocketChannel>();
  serverThread = null;
}
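Because the ServerSocket above is bound but never services requests, any WebHDFS call through fs stalls and should trip the short timeouts configured here. A hedged sketch of the kind of test this setup enables; the test name and the exact exception message are illustrative:

// Assumes java.net.SocketTimeoutException and org.junit.Assert are imported.
@Test
public void testTimeoutFires() throws Exception {
  try {
    // The server never responds, so this call should time out quickly.
    fs.listFiles(new Path("/"), false);
    Assert.fail("expected a SocketTimeoutException");
  } catch (SocketTimeoutException e) {
    Assert.assertTrue(e.getMessage().contains("timed out"));
  }
}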
Use of java.net.InetSocketAddress in project hadoop by apache.
The class TestGetConf, method getAddressListFromTool.
/**
 * Using {@link GetConf} methods, get the list of the given {@code type} of
 * addresses.
 *
 * @param type TestType
 * @param conf configuration
 * @param checkPort if true, verify NNRPCADDRESSES whose expected value is
 *          hostname:rpc-port; if false, expect the hostname only
 * @param expected expected addresses
 */
private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
    boolean checkPort, List<ConfiguredNNAddress> expected) throws Exception {
  String out = getAddressListFromTool(type, conf, expected.size() != 0);
  List<String> values = new ArrayList<String>();
  // Tokenize the returned list of addresses into an array of strings
  StringTokenizer tokenizer = new StringTokenizer(out);
  while (tokenizer.hasMoreTokens()) {
    String s = tokenizer.nextToken().trim();
    values.add(s);
  }
  String[] actual = values.toArray(new String[values.size()]);
  // Convert the expected list to a String[] of hosts
  int i = 0;
  String[] expectedHosts = new String[expected.size()];
  for (ConfiguredNNAddress cnn : expected) {
    InetSocketAddress addr = cnn.getAddress();
    if (!checkPort) {
      expectedHosts[i++] = addr.getHostName();
    } else {
      expectedHosts[i++] = addr.getHostName() + ":" + addr.getPort();
    }
  }
  // Compare the two arrays
  assertTrue(Arrays.equals(expectedHosts, actual));
}
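For context, a hedged sketch of how a test might drive this helper: configure one NameNode RPC address, build the expected ConfiguredNNAddress list, and check that the tool prints host:port. The DFSUtil helper name may differ across Hadoop versions, and the address below is illustrative; the same information is available from the command line via `hdfs getconf -nnRpcAddresses`.

HdfsConfiguration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1.example.com:8020");
// Flatten the per-nameservice address map into a list of expected entries.
List<ConfiguredNNAddress> expected =
    DFSUtil.flattenAddressMap(DFSUtil.getNNServiceRpcAddresses(conf));
getAddressListFromTool(TestType.NNRPCADDRESSES, conf, true, expected);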