Example use of com.google.cloud.bigquery.storage.v1.ProtoSchema in the project spark-bigquery-connector by GoogleCloudDataproc.
Taken from the method testBigQueryToProtoSchema of the class ProtobufUtilsTest.
@Test
public void testBigQueryToProtoSchema() throws Exception {
  // Convert the BigQuery schema under test into a ProtoSchema.
  ProtoSchema actual = toProtoSchema(BIG_BIGQUERY_SCHEMA);

  // Hand-build the message descriptor we expect the conversion to produce.
  // Field numbers are assigned in declaration order; names are overridden
  // where the BigQuery type maps onto a differently-named proto field.
  DescriptorProtos.DescriptorProto expectedMessage =
      DescriptorProtos.DescriptorProto.newBuilder()
          .addField(PROTO_INTEGER_FIELD.clone().setNumber(1))
          .addField(PROTO_STRING_FIELD.clone().setNumber(2))
          .addField(PROTO_ARRAY_FIELD.clone().setNumber(3))
          .addNestedType(NESTED_STRUCT_DESCRIPTOR.clone())
          .addField(PROTO_STRUCT_FIELD.clone().setNumber(4))
          .addField(PROTO_DOUBLE_FIELD.clone().setName("Float").setNumber(5))
          .addField(PROTO_BOOLEAN_FIELD.clone().setNumber(6))
          .addField(PROTO_BYTES_FIELD.clone().setNumber(7))
          .addField(PROTO_DATE_FIELD.clone().setNumber(8))
          .addField(PROTO_STRING_FIELD.clone().setName("Numeric").setNumber(9))
          .addField(PROTO_STRING_FIELD.clone().setName("BigNumeric").setNumber(10))
          .addField(PROTO_INTEGER_FIELD.clone().setName("TimeStamp").setNumber(11))
          .setName("Schema")
          .build();

  // Wrap the message in a file proto so a real FileDescriptor can be built,
  // then run it through the same converter used for the expected side.
  DescriptorProtos.FileDescriptorProto fileProto =
      DescriptorProtos.FileDescriptorProto.newBuilder().addMessageType(expectedMessage).build();
  ProtoSchema expected =
      ProtoSchemaConverter.convert(
          Descriptors.FileDescriptor.buildFrom(fileProto, new Descriptors.FileDescriptor[] {})
              .getMessageTypes()
              .get(0));

  // Compare field-by-field so a mismatch pinpoints the offending field.
  int fieldCount = expected.getProtoDescriptor().getFieldList().size();
  for (int idx = 0; idx < fieldCount; idx++) {
    assertThat(actual.getProtoDescriptor().getField(idx))
        .isEqualTo(expected.getProtoDescriptor().getField(idx));
  }
}
Example use of com.google.cloud.bigquery.storage.v1.ProtoSchema in the project java-bigquerystorage by googleapis.
Taken from the method writeToStream of the class ParallelWriteCommittedStream.
/**
 * Appends rows to the given write stream until {@code deadlineMillis} passes or an append
 * error is observed. Counters shared with the append callback are guarded by {@code this}.
 *
 * @param client BigQuery Write client (not used directly here; the StreamWriter owns the
 *     connection — NOTE(review): presumably kept in the signature for symmetry with callers)
 * @param writeStream the committed stream to append into
 * @param deadlineMillis wall-clock time (epoch millis) at which to stop appending
 * @throws Throwable rethrows the first error reported by the append callback
 */
private void writeToStream(BigQueryWriteClient client, WriteStream writeStream, long deadlineMillis)
    throws Throwable {
  LOG.info("Start writing to new stream:" + writeStream.getName());
  // Reset all per-stream counters under the same lock the callback uses.
  synchronized (this) {
    inflightCount = 0;
    successCount = 0;
    failureCount = 0;
    error = null;
    lastMetricsTimeMillis = System.currentTimeMillis();
    lastMetricsSuccessCount = 0;
    lastMetricsFailureCount = 0;
  }
  // Derive the row proto descriptor from the stream's table schema.
  Descriptor rowDescriptor =
      BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(
          writeStream.getTableSchema());
  ProtoSchema schema = ProtoSchemaConverter.convert(rowDescriptor);
  try (StreamWriter streamWriter =
      StreamWriter.newBuilder(writeStream.getName())
          .setWriterSchema(schema)
          .setTraceId("SAMPLE:parallel_append")
          .build()) {
    while (System.currentTimeMillis() < deadlineMillis) {
      synchronized (this) {
        if (error != null) {
          // Stop writing once we get an error.
          throw error;
        }
      }
      // -1 offset: let the server assign offsets (no exactly-once offset tracking here).
      ApiFuture<AppendRowsResponse> appendFuture =
          streamWriter.append(createAppendRows(rowDescriptor), -1);
      synchronized (this) {
        inflightCount++;
      }
      ApiFutures.addCallback(
          appendFuture, new AppendCompleteCallback(this), MoreExecutors.directExecutor());
    }
  }
}
Aggregations