Use of com.google.firestore.v1.Value in project xtext-core by eclipse.
The class ActionTestLanguage2SemanticSequencer, method sequence. (Note that the Value cast in this snippet is the test language's own Value EClass, which merely shares a simple name with the Firestore type.)
@Override
public void sequence(ISerializationContext context, EObject semanticObject) {
  EPackage epackage = semanticObject.eClass().getEPackage();
  ParserRule rule = context.getParserRule();
  Action action = context.getAssignedAction();
  Set<Parameter> parameters = context.getEnabledBooleanParameters();
  if (epackage == ActionLang2Package.eINSTANCE)
    switch (semanticObject.eClass().getClassifierID()) {
      case ActionLang2Package.ORING:
        sequence_ORing(context, (ORing) semanticObject);
        return;
      case ActionLang2Package.VALUE:
        sequence_Value(context, (Value) semanticObject);
        return;
    }
  if (errorAcceptor != null)
    errorAcceptor.accept(
        diagnosticProvider.createInvalidContextOrTypeDiagnostic(semanticObject, context));
}
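Since this page indexes com.google.firestore.v1.Value, a minimal, self-contained sketch of that proto's builder API may help orient the reader; the field values below are purely illustrative and not taken from any of the projects shown.

import com.google.firestore.v1.ArrayValue;
import com.google.firestore.v1.Value;

public class ValueSketch {
  public static void main(String[] args) {
    // The generated proto exposes one builder method per value type.
    Value name = Value.newBuilder().setStringValue("Ada").build();
    Value age = Value.newBuilder().setIntegerValue(36).build();
    // Nested values are wrapped in ArrayValue (or MapValue) messages.
    Value tags =
        Value.newBuilder()
            .setArrayValue(ArrayValue.newBuilder().addValues(name).addValues(age))
            .build();
    System.out.println(tags); // prints the proto in text format
  }
}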
Use of com.google.firestore.v1.Value in project java-docs-samples by GoogleCloudPlatform.
The class RiskAnalysis, method numericalStatsAnalysis. (The Value iterated over in this snippet is the DLP API's com.google.privacy.dlp.v2.Value, another simple-name twin of the Firestore type.)
// [START dlp_numerical_stats]
/**
 * Calculate numerical statistics for a column in a BigQuery table using the DLP API.
 *
 * @param projectId The Google Cloud Platform project ID to run the API call under.
 * @param datasetId The BigQuery dataset to analyze.
 * @param tableId The BigQuery table to analyze.
 * @param columnName The name of the column to analyze, which must contain only numerical data.
 * @param topicId The name of the Pub/Sub topic to notify once the job completes.
 * @param subscriptionId The name of the Pub/Sub subscription to use when listening for job
 *     completion status.
 */
private static void numericalStatsAnalysis(
    String projectId,
    String datasetId,
    String tableId,
    String columnName,
    String topicId,
    String subscriptionId)
    throws Exception {
  // Instantiates a client
  try (DlpServiceClient dlpServiceClient = DlpServiceClient.create()) {
    BigQueryTable bigQueryTable =
        BigQueryTable.newBuilder()
            .setTableId(tableId)
            .setDatasetId(datasetId)
            .setProjectId(projectId)
            .build();
    FieldId fieldId = FieldId.newBuilder().setName(columnName).build();
    NumericalStatsConfig numericalStatsConfig =
        NumericalStatsConfig.newBuilder().setField(fieldId).build();
    PrivacyMetric privacyMetric =
        PrivacyMetric.newBuilder().setNumericalStatsConfig(numericalStatsConfig).build();
    String topicName = String.format("projects/%s/topics/%s", projectId, topicId);
    PublishToPubSub publishToPubSub = PublishToPubSub.newBuilder().setTopic(topicName).build();
    // Create action to publish job status notifications over Google Cloud Pub/Sub
    Action action = Action.newBuilder().setPubSub(publishToPubSub).build();
    RiskAnalysisJobConfig riskAnalysisJobConfig =
        RiskAnalysisJobConfig.newBuilder()
            .setSourceTable(bigQueryTable)
            .setPrivacyMetric(privacyMetric)
            .addActions(action)
            .build();
    CreateDlpJobRequest createDlpJobRequest =
        CreateDlpJobRequest.newBuilder()
            .setParent(ProjectName.of(projectId).toString())
            .setRiskJob(riskAnalysisJobConfig)
            .build();
    DlpJob dlpJob = dlpServiceClient.createDlpJob(createDlpJobRequest);
    String dlpJobName = dlpJob.getName();
    final SettableApiFuture<Boolean> done = SettableApiFuture.create();
    // Set up a Pub/Sub subscriber to listen on the job completion status
    Subscriber subscriber =
        Subscriber.newBuilder(
                ProjectSubscriptionName.newBuilder()
                    .setProject(projectId)
                    .setSubscription(subscriptionId)
                    .build(),
                (pubsubMessage, ackReplyConsumer) -> {
                  if (pubsubMessage.getAttributesCount() > 0
                      && pubsubMessage.getAttributesMap().get("DlpJobName").equals(dlpJobName)) {
                    // notify job completion
                    done.set(true);
                    ackReplyConsumer.ack();
                  }
                })
            .build();
    subscriber.startAsync();
    // For long jobs, consider using a truly asynchronous execution model such as Cloud Functions
    try {
      done.get(1, TimeUnit.MINUTES);
      // Wait for the job to become available
      Thread.sleep(500);
    } catch (TimeoutException e) {
      System.out.println("Unable to verify job completion.");
    }
    // Retrieve completed job status
    DlpJob completedJob =
        dlpServiceClient.getDlpJob(GetDlpJobRequest.newBuilder().setName(dlpJobName).build());
    System.out.println("Job status: " + completedJob.getState());
    AnalyzeDataSourceRiskDetails riskDetails = completedJob.getRiskDetails();
    AnalyzeDataSourceRiskDetails.NumericalStatsResult result =
        riskDetails.getNumericalStatsResult();
    System.out.printf(
        "Value range : [%.3f, %.3f]\n",
        result.getMinValue().getFloatValue(), result.getMaxValue().getFloatValue());
    int percent = 1;
    Double lastValue = null;
    for (Value quantileValue : result.getQuantileValuesList()) {
      Double currentValue = quantileValue.getFloatValue();
      if (lastValue == null || !lastValue.equals(currentValue)) {
        System.out.printf("Value at %d %% quantile : %.3f\n", percent, currentValue);
      }
      lastValue = currentValue;
      percent++; // advance to the next quantile bucket
    }
  } catch (Exception e) {
    System.out.println("Error in numericalStatsAnalysis: " + e.getMessage());
  }
}
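A hypothetical invocation of the sample from within the same RiskAnalysis class (every argument below is a placeholder, not a value from the sample); the Pub/Sub topic and subscription are assumed to already exist and be bound together:

// Placeholder resource names; substitute real ones before running.
numericalStatsAnalysis(
    "my-project",     // GCP project ID
    "my_dataset",     // BigQuery dataset containing the table
    "my_table",       // BigQuery table to profile
    "age",            // numerical column to analyze
    "dlp-job-topic",  // Pub/Sub topic the DLP job notifies on completion
    "dlp-job-sub");   // Pub/Sub subscription used to listen for that notification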
Use of com.google.firestore.v1.Value in project beam by apache.
The class FirestoreV1FnBatchWriteWithDeadLetterQueueTest, method enqueueingWritesValidateBytesSize.
@Override
@Test
public void enqueueingWritesValidateBytesSize() throws Exception {
  int maxBytes = 50;
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxBytes(maxBytes).build();
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  when(ff.getRpcQos(any()))
      .thenReturn(FirestoreStatefulComponentFactory.INSTANCE.getRpcQos(options));

  byte[] bytes = new byte[maxBytes + 1];
  SecureRandom.getInstanceStrong().nextBytes(bytes);
  byte[] base64Bytes = Base64.getEncoder().encode(bytes);
  String base64String = Base64.getEncoder().encodeToString(bytes);
  Value largeValue =
      Value.newBuilder().setStringValueBytes(ByteString.copyFrom(base64Bytes)).build();

  // apply a doc transform that is too large
  Write write1 =
      Write.newBuilder()
          .setTransform(
              DocumentTransform.newBuilder()
                  .setDocument(String.format("doc-%03d", 2))
                  .addFieldTransforms(
                      FieldTransform.newBuilder()
                          .setAppendMissingElements(
                              ArrayValue.newBuilder().addValues(largeValue))))
          .build();
  // delete a doc that is too large
  Write write2 =
      Write.newBuilder().setDelete(String.format("doc-%03d_%s", 3, base64String)).build();
  // update a doc that is too large
  Write write3 =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(String.format("doc-%03d", 4))
                  .putAllFields(ImmutableMap.of("foo", largeValue)))
          .build();

  BatchWriteFnWithDeadLetterQueue fn =
      getFn(clock, ff, options, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  fn.populateDisplayData(displayDataBuilder);
  fn.setup();
  fn.startBundle(startBundleContext);

  ArgumentCaptor<WriteFailure> write1FailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(write1FailureCapture.capture(), any());
  when(processContext.element()).thenReturn(write1);
  fn.processElement(processContext, window);
  WriteFailure failure = write1FailureCapture.getValue();
  assertNotNull(failure);
  String message = failure.getStatus().getMessage();
  assertTrue(message.contains("TRANSFORM"));
  assertTrue(message.contains("larger than configured max allowed bytes per batch"));

  ArgumentCaptor<WriteFailure> write2FailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(write2FailureCapture.capture(), any());
  when(processContext.element()).thenReturn(write2);
  fn.processElement(processContext, window);
  WriteFailure failure2 = write2FailureCapture.getValue();
  assertNotNull(failure2);
  String message2 = failure2.getStatus().getMessage();
  assertTrue(message2.contains("DELETE"));
  assertTrue(message2.contains("larger than configured max allowed bytes per batch"));

  ArgumentCaptor<WriteFailure> write3FailureCapture = ArgumentCaptor.forClass(WriteFailure.class);
  doNothing().when(processContext).outputWithTimestamp(write3FailureCapture.capture(), any());
  when(processContext.element()).thenReturn(write3);
  fn.processElement(processContext, window);
  WriteFailure failure3 = write3FailureCapture.getValue();
  assertNotNull(failure3);
  String message3 = failure3.getStatus().getMessage();
  assertTrue(message3.contains("UPDATE"));
  assertTrue(message3.contains("larger than configured max allowed bytes per batch"));

  assertEquals(0, fn.writes.size());
}
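Each failure above relies on the corresponding Write proto serializing to more than the withBatchMaxBytes(50) limit configured at the top of the test. A minimal sketch of that precondition, using the getSerializedSize() accessor that every generated protobuf message provides (the helper name is hypothetical, and Beam's internal byte accounting may differ in detail):

import com.google.firestore.v1.Write;

public final class BatchSizeCheck {
  // Hypothetical helper: true when a single write can never fit in a batch.
  static boolean exceedsBatchLimit(Write write, int maxBytesPerBatch) {
    return write.getSerializedSize() > maxBytesPerBatch;
  }
}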
Use of com.google.firestore.v1.Value in project beam by apache.
The class FirestoreV1FnBatchWriteWithSummaryTest, method enqueueingWritesValidateBytesSize.
@Override
@Test
public void enqueueingWritesValidateBytesSize() throws Exception {
  int maxBytes = 50;
  RpcQosOptions options = rpcQosOptions.toBuilder().withBatchMaxBytes(maxBytes).build();
  when(ff.getFirestoreStub(any())).thenReturn(stub);
  when(ff.getRpcQos(any()))
      .thenReturn(FirestoreStatefulComponentFactory.INSTANCE.getRpcQos(options));

  byte[] bytes = new byte[maxBytes + 1];
  SecureRandom.getInstanceStrong().nextBytes(bytes);
  byte[] base64Bytes = Base64.getEncoder().encode(bytes);
  String base64String = Base64.getEncoder().encodeToString(bytes);
  Value largeValue =
      Value.newBuilder().setStringValueBytes(ByteString.copyFrom(base64Bytes)).build();

  // apply a doc transform that is too large
  Write write1 =
      Write.newBuilder()
          .setTransform(
              DocumentTransform.newBuilder()
                  .setDocument(String.format("doc-%03d", 2))
                  .addFieldTransforms(
                      FieldTransform.newBuilder()
                          .setAppendMissingElements(
                              ArrayValue.newBuilder().addValues(largeValue))))
          .build();
  // delete a doc that is too large
  Write write2 =
      Write.newBuilder().setDelete(String.format("doc-%03d_%s", 3, base64String)).build();
  // update a doc that is too large
  Write write3 =
      Write.newBuilder()
          .setUpdate(
              Document.newBuilder()
                  .setName(String.format("doc-%03d", 4))
                  .putAllFields(ImmutableMap.of("foo", largeValue)))
          .build();

  BatchWriteFnWithSummary fn =
      getFn(clock, ff, options, metricsFixture.counterFactory, metricsFixture.distributionFactory);
  fn.populateDisplayData(displayDataBuilder);
  fn.setup();
  fn.startBundle(startBundleContext);

  try {
    when(processContext.element()).thenReturn(write1);
    fn.processElement(processContext, window);
    fail("expected validation error");
  } catch (FailedWritesException e) {
    WriteFailure failure = e.getWriteFailures().get(0);
    assertNotNull(failure);
    String message = failure.getStatus().getMessage();
    assertTrue(message.contains("TRANSFORM"));
    assertTrue(message.contains("larger than configured max allowed bytes per batch"));
  }
  try {
    when(processContext.element()).thenReturn(write2);
    fn.processElement(processContext, window);
    fail("expected validation error");
  } catch (FailedWritesException e) {
    WriteFailure failure = e.getWriteFailures().get(0);
    assertNotNull(failure);
    String message = failure.getStatus().getMessage();
    assertTrue(message.contains("DELETE"));
    assertTrue(message.contains("larger than configured max allowed bytes per batch"));
  }
  try {
    when(processContext.element()).thenReturn(write3);
    fn.processElement(processContext, window);
    fail("expected validation error");
  } catch (FailedWritesException e) {
    WriteFailure failure = e.getWriteFailures().get(0);
    assertNotNull(failure);
    String message = failure.getStatus().getMessage();
    assertTrue(message.contains("UPDATE"));
    assertTrue(message.contains("larger than configured max allowed bytes per batch"));
  }
  assertEquals(0, fn.writes.size());
}
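The two Beam tests exercise identical oversized writes but differ in how failures surface: the dead-letter-queue variant emits each WriteFailure downstream through outputWithTimestamp, while the summary variant throws FailedWritesException. A minimal sketch of handling the summary variant's exception, using only the accessors the test itself relies on:

try {
  fn.processElement(processContext, window);
} catch (FailedWritesException e) {
  // Each WriteFailure carries a Status whose message names the offending
  // write kind (TRANSFORM, DELETE, UPDATE) and the size violation.
  for (WriteFailure failure : e.getWriteFailures()) {
    System.err.println(failure.getStatus().getMessage());
  }
}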