Use of org.opensearch.client.RestClient in project OpenSearch by opensearch-project.
The class Zen2RestApiIT, method testFailsOnUnknownNode.
public void testFailsOnUnknownNode() throws Exception {
    internalCluster().setBootstrapClusterManagerNodeIndex(2);
    internalCluster().startNodes(3);
    ensureStableCluster(3);
    RestClient restClient = getRestClient();
    try {
        // Excluding a node name that matches no cluster-manager-eligible node must be rejected with a 400.
        restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/invalid"));
        fail("Invalid node name should throw.");
    } catch (ResponseException e) {
        assertThat(e.getResponse().getStatusLine().getStatusCode(), is(400));
        assertThat(
            e.getMessage(),
            Matchers.containsString("add voting config exclusions request for [invalid] matched no cluster-manager-eligible nodes")
        );
    }
}
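The low-level RestClient surfaces non-2xx responses as a ResponseException, which still carries the full Response object. A minimal sketch of pulling the error body out for logging, assuming the test otherwise looks like the one above; the helper name readErrorBody is hypothetical, and EntityUtils comes from the Apache HttpCore library that the low-level client already depends on:

import java.io.IOException;
import org.apache.http.util.EntityUtils;
import org.opensearch.client.Response;
import org.opensearch.client.ResponseException;

final class RestClientErrorHelper {

    // Hypothetical helper: extract the error body from a failed low-level REST call.
    static String readErrorBody(ResponseException e) throws IOException {
        Response response = e.getResponse();
        // The entity may be absent for empty error responses, so guard before reading it.
        return response.getEntity() == null ? "" : EntityUtils.toString(response.getEntity());
    }
}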
Use of org.opensearch.client.RestClient in project OpenSearch by opensearch-project.
The class Zen2RestApiIT, method testClearVotingTombstonesWaitingForRemoval.
public void testClearVotingTombstonesWaitingForRemoval() throws Exception {
    internalCluster().setBootstrapClusterManagerNodeIndex(2);
    List<String> nodes = internalCluster().startNodes(3);
    ensureStableCluster(3);
    RestClient restClient = getRestClient();
    String nodeToWithdraw = nodes.get(randomIntBetween(0, 2));
    // Add a voting config exclusion for the chosen node; the API responds with an empty 200 body.
    Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodeToWithdraw));
    assertThat(response.getStatusLine().getStatusCode(), is(200));
    assertThat(response.getEntity().getContentLength(), is(0L));
    // Stop the excluded node, then clear the exclusions; by default DELETE waits for the node's removal.
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeToWithdraw));
    Response deleteResponse = restClient.performRequest(new Request("DELETE", "/_cluster/voting_config_exclusions"));
    assertThat(deleteResponse.getStatusLine().getStatusCode(), is(200));
    assertThat(deleteResponse.getEntity().getContentLength(), is(0L));
}
Use of org.opensearch.client.RestClient in project OpenSearch by opensearch-project.
The class Zen2RestApiIT, method testClearVotingTombstonesNotWaitingForRemoval.
public void testClearVotingTombstonesNotWaitingForRemoval() throws Exception {
    internalCluster().setBootstrapClusterManagerNodeIndex(2);
    List<String> nodes = internalCluster().startNodes(3);
    ensureStableCluster(3);
    RestClient restClient = getRestClient();
    Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodes.get(2)));
    assertThat(response.getStatusLine().getStatusCode(), is(200));
    assertThat(response.getEntity().getContentLength(), is(0L));
    // Clear the exclusions without waiting for the excluded node to actually leave the cluster.
    Response deleteResponse = restClient.performRequest(new Request("DELETE", "/_cluster/voting_config_exclusions/?wait_for_removal=false"));
    assertThat(deleteResponse.getStatusLine().getStatusCode(), is(200));
    assertThat(deleteResponse.getEntity().getContentLength(), is(0L));
}
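The wait_for_removal flag can also be attached through Request.addParameter instead of being concatenated into the endpoint string. A minimal sketch of the same DELETE call written that way, assuming the surrounding test class context (getRestClient() and the Hamcrest assertions) shown above:

// Same request as above, with the query parameter set through the Request API
// rather than appended to the endpoint by hand.
Request clearExclusions = new Request("DELETE", "/_cluster/voting_config_exclusions");
clearExclusions.addParameter("wait_for_removal", "false");
Response clearResponse = getRestClient().performRequest(clearExclusions);
assertThat(clearResponse.getStatusLine().getStatusCode(), is(200));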
Use of org.opensearch.client.RestClient in project OpenSearch by opensearch-project.
The class Zen2RestApiIT, method testRemoveTwoNodesAtOnce.
public void testRemoveTwoNodesAtOnce() throws Exception {
    internalCluster().setBootstrapClusterManagerNodeIndex(2);
    List<String> nodes = internalCluster().startNodes(3);
    ensureStableCluster(3);
    RestClient restClient = getRestClient();
    // Exclude two of the three cluster-manager-eligible nodes with a single comma-separated request.
    Response response = restClient.performRequest(new Request("POST", "/_cluster/voting_config_exclusions/" + nodes.get(2) + "," + nodes.get(0)));
    assertThat(response.getStatusLine().getStatusCode(), is(200));
    assertThat(response.getEntity().getContentLength(), is(0L));
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0)));
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(2)));
    // The remaining node can still form a one-node cluster because the stopped nodes were excluded first.
    ensureStableCluster(1);
}
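When more than two nodes need to be excluded, concatenating the names by hand gets clumsy. A minimal sketch of building the same endpoint from a list of names; the node names here are hypothetical placeholders for the values returned by internalCluster().startNodes(3) above:

// Build the exclusion endpoint from an arbitrary list of node names.
List<String> nodesToExclude = List.of("node-0", "node-2"); // hypothetical node names
String endpoint = "/_cluster/voting_config_exclusions/" + String.join(",", nodesToExclude);
Request excludeRequest = new Request("POST", endpoint);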
Use of org.opensearch.client.RestClient in project OpenSearch by opensearch-project.
The class HaHdfsFailoverTestSuiteIT, method testHAFailoverWithRepository.
public void testHAFailoverWithRepository() throws Exception {
    RestClient client = client();
    String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
    String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
    String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
    String ports = System.getProperty("test.hdfs-fixture.ports");
    String nn1Port = "10001";
    String nn2Port = "10002";
    // If the HDFS fixture wrote its port file, read the two namenode ports from it.
    if (ports.length() > 0) {
        final Path path = PathUtils.get(ports);
        final List<String> lines = AccessController.doPrivileged(
            (PrivilegedExceptionAction<List<String>>) () -> Files.readAllLines(path)
        );
        nn1Port = lines.get(0);
        nn2Port = lines.get(1);
    }
    boolean securityEnabled = hdfsKerberosPrincipal != null;
    // Client-side HDFS configuration for the HA nameservice with two namenodes.
    Configuration hdfsConfiguration = new Configuration();
    hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
    hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:" + nn1Port);
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:" + nn2Port);
    hdfsConfiguration.set(
        "dfs.client.failover.proxy.provider.ha-hdfs",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
    );
    AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
        if (securityEnabled) {
            // ensure that keytab exists
            Path kt = PathUtils.get(kerberosKeytabLocation);
            if (Files.exists(kt) == false) {
                throw new IllegalStateException("Could not locate keytab at " + kerberosKeytabLocation);
            }
            if (Files.isReadable(kt) == false) {
                throw new IllegalStateException("Could not read keytab at " + kerberosKeytabLocation);
            }
            logger.info("Keytab Length: " + Files.readAllBytes(kt).length);
            // set principal names
            hdfsConfiguration.set("dfs.namenode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.datanode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.data.transfer.protection", "authentication");
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.loginUserFromKeytab(hdfsKerberosPrincipal, kerberosKeytabLocation);
        } else {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE, hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.getCurrentUser();
        }
        return null;
    });
    // Create repository
    {
        Request request = new Request("PUT", "/_snapshot/hdfs_ha_repo_read");
        request.setJsonEntity(
            "{"
                + "\"type\":\"hdfs\","
                + "\"settings\":{"
                + "\"uri\": \"hdfs://ha-hdfs/\",\n"
                + "\"path\": \"/user/opensearch/existing/readonly-repository\","
                + "\"readonly\": \"true\","
                + securityCredentials(securityEnabled, esKerberosPrincipal)
                + "\"conf.dfs.nameservices\": \"ha-hdfs\","
                + "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\","
                + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:" + nn1Port + "\","
                + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:" + nn2Port + "\","
                + "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": "
                + "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\""
                + "}"
                + "}"
        );
        Response response = client.performRequest(request);
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }
    // Get repository
    {
        Response response = client.performRequest(new Request("GET", "/_snapshot/hdfs_ha_repo_read/_all"));
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }
    // Failover the namenode to the second.
    failoverHDFS("nn1", "nn2", hdfsConfiguration);
    // Get repository again to confirm the repository is still reachable after the namenode failover.
    {
        Response response = client.performRequest(new Request("GET", "/_snapshot/hdfs_ha_repo_read/_all"));
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }
}
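The test only asserts on status codes. If one wants to inspect the repository definition returned by the GET call, the response entity can be read in full. A minimal sketch, assuming the same client, logger, and repository name as above; EntityUtils comes from the Apache HttpCore dependency of the low-level client:

// Read the repository listing returned by the GET call as a JSON string for inspection.
Response getResponse = client.performRequest(new Request("GET", "/_snapshot/hdfs_ha_repo_read/_all"));
String repositoriesJson = EntityUtils.toString(getResponse.getEntity());
logger.info("Repositories after failover: " + repositoriesJson);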