Use of com.sequenceiq.cloudbreak.grpc.ManagedChannelWrapper in project cloudbreak by hortonworks.
The class GrpcClusterDnsClient, method createOrUpdateDnsEntryWithCloudDns:
public CreateDnsEntryResponse createOrUpdateDnsEntryWithCloudDns(String accountId, String endpoint, String environment,
        String cloudDns, String hostedZoneId, Optional<String> requestId) {
    try (ManagedChannelWrapper channelWrapper = makeWrapper()) {
        ClusterDnsClient client = makeClient(channelWrapper.getChannel(),
                regionAwareInternalCrnGeneratorFactory.iam().getInternalCrnForServiceAsString());
        LOGGER.info("Create a dns entry with account id: {} and requestId: {} for cloud DNS: {}", accountId, requestId, cloudDns);
        CreateDnsEntryResponse response = client.createDnsEntryWithCloudDns(
                requestId.orElse(UUID.randomUUID().toString()), accountId, endpoint, environment, cloudDns, hostedZoneId);
        LOGGER.info("Dns entry creation finished for cloud DNS {}", cloudDns);
        return response;
    }
}
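The try-with-resources pattern above relies on ManagedChannelWrapper being AutoCloseable, exposing the underlying io.grpc.ManagedChannel via getChannel(), and shutting the channel down on close. The actual cloudbreak class and the makeWrapper() factory are not shown on this page; the following is a minimal sketch under those assumptions, with host, port, and the shutdown timeout as placeholder values.

import java.util.concurrent.TimeUnit;

import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

// Minimal sketch of a ManagedChannelWrapper-like class; not the cloudbreak implementation.
public class ManagedChannelWrapperSketch implements AutoCloseable {

    private final ManagedChannel channel;

    public ManagedChannelWrapperSketch(ManagedChannel channel) {
        this.channel = channel;
    }

    public ManagedChannel getChannel() {
        return channel;
    }

    @Override
    public void close() {
        // Ask for a graceful shutdown first, then force termination if it takes too long.
        channel.shutdown();
        try {
            if (!channel.awaitTermination(5, TimeUnit.SECONDS)) {
                channel.shutdownNow();
            }
        } catch (InterruptedException e) {
            channel.shutdownNow();
            Thread.currentThread().interrupt();
        }
    }

    // Hypothetical makeWrapper() factory; the endpoint host and port are assumptions.
    public static ManagedChannelWrapperSketch makeWrapper() {
        ManagedChannel channel = ManagedChannelBuilder
                .forAddress("clusterdns.example.internal", 8982)
                .usePlaintext()
                .build();
        return new ManagedChannelWrapperSketch(channel);
    }
}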
Use of com.sequenceiq.cloudbreak.grpc.ManagedChannelWrapper in project cloudbreak by hortonworks.
The class DatalakeDrClient, method getBackupById:
public DatalakeBackupInfo getBackupById(String datalakeName, String backupId, String actorCrn) {
    DatalakeBackupInfo datalakeBackupInfo = null;
    if (!datalakeDrConfig.isConfigured()) {
        return null;
    }
    checkNotNull(datalakeName);
    checkNotNull(backupId);
    checkNotNull(actorCrn, "actorCrn should not be null.");
    try (ManagedChannelWrapper channelWrapper = makeWrapper()) {
        ListDatalakeBackupRequest.Builder builder = ListDatalakeBackupRequest.newBuilder()
                .setDatalakeName(datalakeName);
        ListDatalakeBackupResponse response = newStub(channelWrapper.getChannel(), UUID.randomUUID().toString(), actorCrn)
                .listDatalakeBackups(builder.build());
        if (response != null) {
            datalakeBackupInfo = response.getDatalakeInfoList().stream()
                    .filter(backup -> backupId.equals(backup.getBackupId()))
                    .findFirst()
                    .orElse(null);
        }
        return datalakeBackupInfo;
    }
}
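Because getBackupById returns null both when datalake DR is not configured and when no backup matches the given id, a caller may want to wrap the result in an Optional. A short usage sketch, where datalakeDrClient and LOGGER are assumed to be in scope and the name, id, and CRN values are placeholders:

// Hypothetical caller of getBackupById; all literal values are illustrative.
Optional<DatalakeBackupInfo> backupInfo = Optional.ofNullable(
        datalakeDrClient.getBackupById("my-datalake", "backup-id-123", "crn:altus:iam:us-west-1:acct:user:example"));
backupInfo.ifPresentOrElse(
        info -> LOGGER.info("Found backup {}", info.getBackupId()),
        () -> LOGGER.warn("No backup found for the given id"));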
Use of com.sequenceiq.cloudbreak.grpc.ManagedChannelWrapper in project cloudbreak by hortonworks.
The class DatalakeDrClient, method getBackupStatusByBackupId:
public DatalakeBackupStatusResponse getBackupStatusByBackupId(String datalakeName, String backupId, String backupName, String actorCrn) {
    if (!datalakeDrConfig.isConfigured()) {
        return missingConnectorResponseOnBackup();
    }
    checkNotNull(datalakeName);
    checkNotNull(actorCrn, "actorCrn should not be null.");
    checkNotNull(backupId);
    try (ManagedChannelWrapper channelWrapper = makeWrapper()) {
        BackupDatalakeStatusRequest.Builder builder = BackupDatalakeStatusRequest.newBuilder()
                .setDatalakeName(datalakeName)
                .setBackupId(backupId);
        if (!Strings.isNullOrEmpty(backupName)) {
            builder.setBackupName(backupName);
        }
        return statusConverter.convert(
                newStub(channelWrapper.getChannel(), UUID.randomUUID().toString(), actorCrn)
                        .backupDatalakeStatus(builder.build()));
    }
}
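Both DatalakeDrClient methods create a fresh blocking stub through newStub(channel, requestId, actorCrn). That helper is not shown here; a common way to build such a stub in grpc-java is to attach the request id and actor CRN as call metadata, roughly as sketched below. The DatalakeDrGrpc class name and the header names are assumptions for illustration, not the cloudbreak implementation.

import io.grpc.ManagedChannel;
import io.grpc.Metadata;
import io.grpc.stub.MetadataUtils;

// Sketch only: "DatalakeDrGrpc" stands in for the generated gRPC service class and the
// header names are illustrative; this is not cloudbreak's newStub implementation.
public final class DatalakeDrStubFactorySketch {

    private static final Metadata.Key<String> REQUEST_ID_HEADER =
            Metadata.Key.of("x-request-id", Metadata.ASCII_STRING_MARSHALLER);

    private static final Metadata.Key<String> ACTOR_CRN_HEADER =
            Metadata.Key.of("x-actor-crn", Metadata.ASCII_STRING_MARSHALLER);

    static DatalakeDrGrpc.DatalakeDrBlockingStub newStub(ManagedChannel channel, String requestId, String actorCrn) {
        Metadata headers = new Metadata();
        headers.put(REQUEST_ID_HEADER, requestId);
        headers.put(ACTOR_CRN_HEADER, actorCrn);
        // Every call made through the returned stub carries the request id and actor CRN as metadata.
        return MetadataUtils.attachHeaders(DatalakeDrGrpc.newBlockingStub(channel), headers);
    }
}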
Use of com.sequenceiq.cloudbreak.grpc.ManagedChannelWrapper in project cloudbreak by hortonworks.
The class SigmaDatabusClient, method putRecord:
/**
 * Upload data into databus. If the payload is larger than 1 MB, the data will be uploaded to Cloudera S3.
 *
 * @param request databus record payload input
 * @throws DatabusRecordProcessingException error during databus record processing
 */
public void putRecord(DatabusRequest request) throws DatabusRecordProcessingException {
    ManagedChannelWrapper channelWrapper = getMessageWrapper();
    DbusProto.PutRecordRequest recordRequest = convert(request, databusStreamConfiguration);
    String requestId = MDCBuilder.getOrGenerateRequestId();
    LOGGER.debug("Creating databus request with request id: {}", requestId);
    buildMdcContext(request, requestId);
    DbusProto.PutRecordResponse recordResponse = newStub(channelWrapper.getChannel(), requestId,
            regionAwareInternalCrnGeneratorFactory.iam().getInternalCrnForServiceAsString()).putRecord(recordRequest);
    DbusProto.Record.Reply.Status status = recordResponse.getRecord().getStatus();
    LOGGER.debug("Returned dbus record status is {}", status);
    if (DbusProto.Record.Reply.Status.SENT.equals(status)) {
        // The record was accepted directly by Databus.
        String recordId = recordResponse.getRecord().getRecordId();
        LOGGER.debug("Dbus record successfully processed with record id: {}", recordId);
    } else if (DbusProto.Record.Reply.Status.PENDING.equals(status)) {
        // The payload was too large to send inline; it has to be uploaded to the returned URL.
        String recordId = recordResponse.getRecord().getRecordId();
        String s3BucketUrl = recordResponse.getRecord().getUploadUrl();
        LOGGER.debug("Dbus record can be uploaded to s3 [record id: {}], [s3 url: {}]", recordId, s3BucketUrl);
        uploadRecordToS3(s3BucketUrl, request, recordId);
    } else {
        throw new DatabusRecordProcessingException("Cannot process record to Sigma Databus.");
    }
}
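When the status is PENDING, the payload still has to be uploaded separately to the URL returned by Databus. The uploadRecordToS3 implementation is not shown on this page; assuming the URL is a pre-signed S3 URL that accepts an HTTP PUT, a minimal sketch with the JDK HTTP client could look like the following. Note that the real method receives the DatabusRequest itself; here a raw byte[] payload stands in for the extracted body.

import java.io.IOException;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch of uploading a record body to a pre-signed S3 URL; not the cloudbreak implementation.
private void uploadRecordToS3(String s3BucketUrl, byte[] payload, String recordId) throws DatabusRecordProcessingException {
    HttpRequest putRequest = HttpRequest.newBuilder(URI.create(s3BucketUrl))
            .PUT(HttpRequest.BodyPublishers.ofByteArray(payload))
            .build();
    try {
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(putRequest, HttpResponse.BodyHandlers.ofString());
        if (response.statusCode() >= 300) {
            throw new DatabusRecordProcessingException(
                    String.format("Upload of record %s failed with HTTP status %d", recordId, response.statusCode()));
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DatabusRecordProcessingException("Upload of record " + recordId + " was interrupted.");
    } catch (IOException e) {
        throw new DatabusRecordProcessingException("Upload of record " + recordId + " failed: " + e.getMessage());
    }
}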
Use of com.sequenceiq.cloudbreak.grpc.ManagedChannelWrapper in project cloudbreak by hortonworks.
The class GrpcCcmV2Client, method unRegisterAgent:
public UnregisterAgentResponse unRegisterAgent(String requestId, String agentCrn, String actorCrn) {
    try (ManagedChannelWrapper channelWrapper = makeWrapper()) {
        ClusterConnectivityManagementV2BlockingStub client = makeClient(channelWrapper.getChannel(), requestId, actorCrn);
        UnregisterAgentRequest unregisterAgentRequest = UnregisterAgentRequest.newBuilder()
                .setAgentCrn(agentCrn)
                .build();
        LOGGER.debug("Calling unRegisterAgent with params agentCrn: '{}'", agentCrn);
        return client.unregisterAgent(unregisterAgentRequest);
    }
}
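Blocking stub calls such as unregisterAgent throw io.grpc.StatusRuntimeException on failure, so a caller will typically catch it and translate it into a domain-level error or log entry. A short usage sketch, where grpcCcmV2Client and LOGGER are assumed to be in scope and the CRN values are placeholders:

import io.grpc.StatusRuntimeException;

// Hypothetical caller of unRegisterAgent; the CRN values are illustrative only.
try {
    UnregisterAgentResponse response = grpcCcmV2Client.unRegisterAgent(
            UUID.randomUUID().toString(),
            "crn:cdp:ccmv2:us-west-1:acct:agent:example",
            "crn:cdp:iam:us-west-1:acct:user:example");
    LOGGER.info("Agent unregistered: {}", response);
} catch (StatusRuntimeException e) {
    // Surface the gRPC status so the failure cause (e.g. NOT_FOUND, UNAVAILABLE) is visible.
    LOGGER.warn("unRegisterAgent failed with gRPC status {}", e.getStatus(), e);
}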