Use of com.google.bigtable.admin.v2.Cluster in project java-bigtable by googleapis.
In the class BaseBigtableTableAdminClientTest, the method listBackupsTest:
@Test
public void listBackupsTest() throws Exception {
  Backup responsesElement = Backup.newBuilder().build();
  ListBackupsResponse expectedResponse =
      ListBackupsResponse.newBuilder()
          .setNextPageToken("")
          .addAllBackups(Arrays.asList(responsesElement))
          .build();
  mockBigtableTableAdmin.addResponse(expectedResponse);

  ClusterName parent = ClusterName.of("[PROJECT]", "[INSTANCE]", "[CLUSTER]");

  ListBackupsPagedResponse pagedListResponse = client.listBackups(parent);

  List<Backup> resources = Lists.newArrayList(pagedListResponse.iterateAll());

  Assert.assertEquals(1, resources.size());
  Assert.assertEquals(expectedResponse.getBackupsList().get(0), resources.get(0));

  List<AbstractMessage> actualRequests = mockBigtableTableAdmin.getRequests();
  Assert.assertEquals(1, actualRequests.size());
  ListBackupsRequest actualRequest = ((ListBackupsRequest) actualRequests.get(0));

  Assert.assertEquals(parent.toString(), actualRequest.getParent());
  Assert.assertTrue(
      channelProvider.isHeaderSent(
          ApiClientHeaderProvider.getDefaultApiClientHeaderKey(),
          GaxGrpcProperties.getDefaultApiClientHeaderPattern()));
}
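The test exercises the generated client's pagination: listBackups(parent) returns a ListBackupsPagedResponse whose iterateAll() walks every page. A minimal sketch of calling the same method in application code, assuming default application credentials (the project, instance, and cluster IDs below are placeholders), might look like:

import com.google.bigtable.admin.v2.Backup;
import com.google.bigtable.admin.v2.ClusterName;
import com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient;
import java.io.IOException;

// Sketch only: lists every backup on one cluster using the generated admin client.
static void printBackups() throws IOException {
  try (BaseBigtableTableAdminClient client = BaseBigtableTableAdminClient.create()) {
    ClusterName parent = ClusterName.of("my-project", "my-instance", "my-cluster");
    // iterateAll() fetches additional pages of results on demand.
    for (Backup backup : client.listBackups(parent).iterateAll()) {
      System.out.println(backup.getName());
    }
  }
}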
Use of com.google.bigtable.admin.v2.Cluster in project java-bigtable by googleapis.
In the class ClusterAutoscalingConfigTest, the method testToProto:
@Test
public void testToProto() {
  ClusterAutoscalingConfig request =
      ClusterAutoscalingConfig.of(INSTANCE_ID, CLUSTER_ID)
          .setMaxNodes(10)
          .setMinNodes(3)
          .setCpuUtilizationTargetPercent(30);
  PartialUpdateClusterRequest partialUpdateClusterRequestProto = request.toProto(PROJECT_ID);

  AutoscalingTargets autoscalingTargets =
      AutoscalingTargets.newBuilder().setCpuUtilizationPercent(30).build();
  AutoscalingLimits autoscalingLimits =
      AutoscalingLimits.newBuilder().setMinServeNodes(3).setMaxServeNodes(10).build();
  Cluster.ClusterAutoscalingConfig clusterAutoscalingConfig =
      Cluster.ClusterAutoscalingConfig.newBuilder()
          .setAutoscalingTargets(autoscalingTargets)
          .setAutoscalingLimits(autoscalingLimits)
          .build();
  Cluster.ClusterConfig clusterConfig =
      Cluster.ClusterConfig.newBuilder()
          .setClusterAutoscalingConfig(clusterAutoscalingConfig)
          .build();
  Cluster cluster =
      Cluster.newBuilder()
          .setName(NameUtil.formatClusterName(PROJECT_ID, INSTANCE_ID, CLUSTER_ID))
          .setClusterConfig(clusterConfig)
          .build();

  PartialUpdateClusterRequest requestProto =
      PartialUpdateClusterRequest.newBuilder()
          .setUpdateMask(
              FieldMask.newBuilder()
                  .addPaths("cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes")
                  .addPaths("cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes")
                  .addPaths("cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent"))
          .setCluster(cluster)
          .build();

  assertThat(partialUpdateClusterRequestProto).isEqualTo(requestProto);
}
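The test verifies that ClusterAutoscalingConfig.toProto builds a PartialUpdateClusterRequest whose update mask covers min/max serve nodes and the CPU target. In application code the same model class is normally handed to the hand-written BigtableInstanceAdminClient rather than converted to a proto by hand; a minimal sketch under that assumption (placeholder project, instance, and cluster IDs) might look like:

import com.google.cloud.bigtable.admin.v2.BigtableInstanceAdminClient;
import com.google.cloud.bigtable.admin.v2.models.ClusterAutoscalingConfig;
import java.io.IOException;

// Sketch only: enables autoscaling on an existing cluster.
static void enableAutoscaling() throws IOException {
  try (BigtableInstanceAdminClient client = BigtableInstanceAdminClient.create("my-project")) {
    ClusterAutoscalingConfig config =
        ClusterAutoscalingConfig.of("my-instance", "my-cluster")
            .setMinNodes(3)
            .setMaxNodes(10)
            .setCpuUtilizationTargetPercent(30);
    // The client is expected to build a PartialUpdateClusterRequest like the one
    // asserted in the test above and send it to the Bigtable admin API.
    client.updateClusterAutoscalingConfig(config);
  }
}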
Use of the Cluster proto (here io.envoyproxy.envoy.config.cluster.v3.Cluster) in project grpc-java by grpc.
In the class ClientXdsClient, the method parseNonAggregateCluster:
private static StructOrError<CdsUpdate.Builder> parseNonAggregateCluster(
    Cluster cluster,
    Set<String> edsResources,
    Set<String> certProviderInstances,
    ServerInfo serverInfo) {
  String clusterName = cluster.getName();
  ServerInfo lrsServerInfo = null;
  Long maxConcurrentRequests = null;
  UpstreamTlsContext upstreamTlsContext = null;
  if (cluster.hasLrsServer()) {
    if (!cluster.getLrsServer().hasSelf()) {
      return StructOrError.fromError(
          "Cluster " + clusterName + ": only support LRS for the same management server");
    }
    lrsServerInfo = serverInfo;
  }
  if (cluster.hasCircuitBreakers()) {
    List<Thresholds> thresholds = cluster.getCircuitBreakers().getThresholdsList();
    for (Thresholds threshold : thresholds) {
      if (threshold.getPriority() != RoutingPriority.DEFAULT) {
        continue;
      }
      if (threshold.hasMaxRequests()) {
        maxConcurrentRequests = (long) threshold.getMaxRequests().getValue();
      }
    }
  }
  if (cluster.getTransportSocketMatchesCount() > 0) {
    return StructOrError.fromError(
        "Cluster " + clusterName + ": transport-socket-matches not supported.");
  }
  if (cluster.hasTransportSocket()) {
    if (!TRANSPORT_SOCKET_NAME_TLS.equals(cluster.getTransportSocket().getName())) {
      return StructOrError.fromError(
          "transport-socket with name " + cluster.getTransportSocket().getName() + " not supported.");
    }
    try {
      upstreamTlsContext =
          UpstreamTlsContext.fromEnvoyProtoUpstreamTlsContext(
              validateUpstreamTlsContext(
                  unpackCompatibleType(
                      cluster.getTransportSocket().getTypedConfig(),
                      io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.class,
                      TYPE_URL_UPSTREAM_TLS_CONTEXT,
                      TYPE_URL_UPSTREAM_TLS_CONTEXT_V2),
                  certProviderInstances));
    } catch (InvalidProtocolBufferException | ResourceInvalidException e) {
      return StructOrError.fromError(
          "Cluster " + clusterName + ": malformed UpstreamTlsContext: " + e);
    }
  }
  DiscoveryType type = cluster.getType();
  if (type == DiscoveryType.EDS) {
    String edsServiceName = null;
    io.envoyproxy.envoy.config.cluster.v3.Cluster.EdsClusterConfig edsClusterConfig =
        cluster.getEdsClusterConfig();
    if (!edsClusterConfig.getEdsConfig().hasAds() && !edsClusterConfig.getEdsConfig().hasSelf()) {
      return StructOrError.fromError(
          "Cluster " + clusterName + ": field eds_cluster_config must be set to indicate to use"
              + " EDS over ADS or self ConfigSource");
    }
    // If the service_name field is set, that value will be used for the EDS request.
    if (!edsClusterConfig.getServiceName().isEmpty()) {
      edsServiceName = edsClusterConfig.getServiceName();
      edsResources.add(edsServiceName);
    } else {
      edsResources.add(clusterName);
    }
    return StructOrError.fromStruct(
        CdsUpdate.forEds(
            clusterName, edsServiceName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext));
  } else if (type.equals(DiscoveryType.LOGICAL_DNS)) {
    if (!cluster.hasLoadAssignment()) {
      return StructOrError.fromError(
          "Cluster " + clusterName + ": LOGICAL_DNS clusters must have a single host");
    }
    ClusterLoadAssignment assignment = cluster.getLoadAssignment();
    if (assignment.getEndpointsCount() != 1
        || assignment.getEndpoints(0).getLbEndpointsCount() != 1) {
      return StructOrError.fromError(
          "Cluster " + clusterName + ": LOGICAL_DNS clusters must have a single "
              + "locality_lb_endpoint and a single lb_endpoint");
    }
    io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint lbEndpoint =
        assignment.getEndpoints(0).getLbEndpoints(0);
    if (!lbEndpoint.hasEndpoint()
        || !lbEndpoint.getEndpoint().hasAddress()
        || !lbEndpoint.getEndpoint().getAddress().hasSocketAddress()) {
      return StructOrError.fromError(
          "Cluster " + clusterName
              + ": LOGICAL_DNS clusters must have an endpoint with address and socket_address");
    }
    SocketAddress socketAddress = lbEndpoint.getEndpoint().getAddress().getSocketAddress();
    if (!socketAddress.getResolverName().isEmpty()) {
      return StructOrError.fromError(
          "Cluster " + clusterName + ": LOGICAL DNS clusters must NOT have a custom resolver name set");
    }
    if (socketAddress.getPortSpecifierCase() != PortSpecifierCase.PORT_VALUE) {
      return StructOrError.fromError(
          "Cluster " + clusterName + ": LOGICAL DNS clusters socket_address must have port_value");
    }
    String dnsHostName =
        String.format("%s:%d", socketAddress.getAddress(), socketAddress.getPortValue());
    return StructOrError.fromStruct(
        CdsUpdate.forLogicalDns(
            clusterName, dnsHostName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext));
  }
  return StructOrError.fromError(
      "Cluster " + clusterName + ": unsupported built-in discovery type: " + type);
}
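The LOGICAL_DNS branch above accepts only a very specific shape: exactly one locality_lb_endpoint containing exactly one lb_endpoint, a socket_address with a port_value and no custom resolver name, from which the parser derives the host:port dnsHostName. As an illustration only (the cluster name and host below are made up, not taken from the grpc-java sources), a proto that would satisfy those checks can be assembled from the Envoy core and endpoint protos like this:

import io.envoyproxy.envoy.config.cluster.v3.Cluster;
import io.envoyproxy.envoy.config.core.v3.Address;
import io.envoyproxy.envoy.config.core.v3.SocketAddress;
import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment;
import io.envoyproxy.envoy.config.endpoint.v3.Endpoint;
import io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint;
import io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints;

// Sketch only: a LOGICAL_DNS cluster with the single endpoint the parser requires;
// parseNonAggregateCluster would derive dnsHostName "backend.example.com:443" from it.
static Cluster logicalDnsClusterExample() {
  return Cluster.newBuilder()
      .setName("dns-cluster.example.com")
      .setType(Cluster.DiscoveryType.LOGICAL_DNS)
      .setLoadAssignment(
          ClusterLoadAssignment.newBuilder()
              .addEndpoints(
                  LocalityLbEndpoints.newBuilder()
                      .addLbEndpoints(
                          LbEndpoint.newBuilder()
                              .setEndpoint(
                                  Endpoint.newBuilder()
                                      .setAddress(
                                          Address.newBuilder()
                                              .setSocketAddress(
                                                  SocketAddress.newBuilder()
                                                      .setAddress("backend.example.com")
                                                      .setPortValue(443)))))))
      .build();
}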
Use of the Cluster proto (here io.envoyproxy.envoy.config.cluster.v3.Cluster) in project grpc-java by grpc.
In the class ClientXdsClientDataTest, the method parseCluster_ringHashLbPolicy_defaultLbConfig:
@Test
public void parseCluster_ringHashLbPolicy_defaultLbConfig() throws ResourceInvalidException {
  Cluster cluster =
      Cluster.newBuilder()
          .setName("cluster-foo.googleapis.com")
          .setType(DiscoveryType.EDS)
          .setEdsClusterConfig(
              EdsClusterConfig.newBuilder()
                  .setEdsConfig(ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance()))
                  .setServiceName("service-foo.googleapis.com"))
          .setLbPolicy(LbPolicy.RING_HASH)
          .build();

  CdsUpdate update =
      ClientXdsClient.processCluster(
          cluster, new HashSet<String>(), null, LRS_SERVER_INFO, LoadBalancerRegistry.getDefaultRegistry());
  LbConfig lbConfig = ServiceConfigUtil.unwrapLoadBalancingConfig(update.lbPolicyConfig());
  assertThat(lbConfig.getPolicyName()).isEqualTo("ring_hash_experimental");
}
Use of the Cluster proto (here io.envoyproxy.envoy.config.cluster.v3.Cluster) in project grpc-java by grpc.
In the class ClientXdsClientDataTest, the method parseCluster_transportSocketMatches_exception:
@Test
public void parseCluster_transportSocketMatches_exception() throws ResourceInvalidException {
  Cluster cluster =
      Cluster.newBuilder()
          .setName("cluster-foo.googleapis.com")
          .setType(DiscoveryType.EDS)
          .setEdsClusterConfig(
              EdsClusterConfig.newBuilder()
                  .setEdsConfig(ConfigSource.newBuilder().setAds(AggregatedConfigSource.getDefaultInstance()))
                  .setServiceName("service-foo.googleapis.com"))
          .setLbPolicy(LbPolicy.ROUND_ROBIN)
          .addTransportSocketMatches(Cluster.TransportSocketMatch.newBuilder().setName("match1").build())
          .build();

  thrown.expect(ResourceInvalidException.class);
  thrown.expectMessage("Cluster cluster-foo.googleapis.com: transport-socket-matches not supported.");
  ClientXdsClient.processCluster(
      cluster, new HashSet<String>(), null, LRS_SERVER_INFO, LoadBalancerRegistry.getDefaultRegistry());
}