Use of com.google.cloud.spanner.Dialect in project beam by apache.
The class SpannerIOWriteTest, method singlePgMutationPipeline.
@Test
public void singlePgMutationPipeline() throws Exception {
Mutation mutation = m(2L);
PCollection<Mutation> mutations = pipeline.apply(Create.of(mutation));
PCollectionView<Dialect> pgDialectView = pipeline.apply("Create PG dialect", Create.of(Dialect.POSTGRESQL)).apply(View.asSingleton());
mutations.apply(
    SpannerIO.write()
        .withProjectId("test-project")
        .withInstanceId("test-instance")
        .withDatabaseId("test-database")
        .withServiceFactory(serviceFactory)
        .withDialectView(pgDialectView));
pipeline.run();
verifyBatches(batch(m(2L)));
}
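The test relies on the m(...) and batch(...) helpers defined elsewhere in SpannerIOWriteTest. A minimal sketch of what such helpers might look like, assuming a table named "test" with a single INT64 key column (the table name, column name, and batch representation are illustrative assumptions, not the test's actual definitions):
// Hypothetical helpers in the spirit of SpannerIOWriteTest; table and column names are assumed.
private static Mutation m(Long key) {
    // One insert-or-update mutation keyed by a single INT64 column.
    return Mutation.newInsertOrUpdateBuilder("test").set("key").to(key).build();
}

private static Iterable<Mutation> batch(Mutation... mutations) {
    // verifyBatches(...) compares against the groups of mutations SpannerIO is expected to commit.
    return Arrays.asList(mutations);
}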
Use of com.google.cloud.spanner.Dialect in project DataflowTemplates by GoogleCloudPlatform.
The class ExportTransform, method expand.
/**
* Read the Cloud Spanner schema and all the rows in all the tables of the database. Create and
* write the exported Avro files to GCS.
*/
@Override
public WriteFilesResult<String> expand(PBegin begin) {
Pipeline p = begin.getPipeline();
/*
* Allow users to specify read timestamp.
* CreateTransaction and CreateTransactionFn classes in SpannerIO
* only take a timestamp object for exact staleness which works when
* parameters are provided during template compile time. They do not work with
* a Timestamp valueProvider which can take parameters at runtime. Hence a new
* ParDo class CreateTransactionFnWithTimestamp had to be created for this
* purpose.
*/
PCollectionView<Transaction> tx =
    p.apply("CreateTransaction", Create.of(1))
        .apply("Create transaction",
            ParDo.of(new CreateTransactionFnWithTimestamp(spannerConfig, snapshotTime)))
        .apply("Tx As PCollectionView", View.asSingleton());
PCollectionView<Dialect> dialectView =
    p.apply("Read Dialect", new ReadDialect(spannerConfig))
        .apply("Dialect As PCollectionView", View.asSingleton());
PCollection<Ddl> ddl = p.apply("Read Information Schema", new ReadInformationSchema(spannerConfig, tx, dialectView));
PCollection<Ddl> exportState = ddl.apply("Check export conditions", ParDo.of(new DoFn<Ddl, Ddl>() {
@ProcessElement
public void processElement(ProcessContext c) throws Exception {
Ddl ddl = c.element();
List<String> tablesList = Collections.emptyList();
// If the user sets --shouldExportRelatedTables without providing a list of export tables,
// throw an exception.
if (tableNames.get().trim().isEmpty() && exportRelatedTables.get()) {
    throw new Exception(
        "Invalid usage of --tableNames and --shouldExportRelatedTables. Set"
            + " --shouldExportRelatedTables=true only if --tableNames is given"
            + " selected tables for export.");
}
// If the user provides a comma-separated list of strings, parse it into a List
if (!tableNames.get().trim().isEmpty()) {
tablesList = Arrays.asList(tableNames.get().split(",\\s*"));
}
// If the user provided any invalid table names, throw an exception.
List<String> allSpannerTables = ddl.allTables().stream().map(t -> t.name()).collect(Collectors.toList());
List<String> invalidTables = tablesList.stream().distinct().filter(t -> !allSpannerTables.contains(t)).collect(Collectors.toList());
if (invalidTables.size() != 0) {
throw new Exception("INVALID_ARGUMENT: Table(s) not found: " + String.join(", ", invalidTables) + ".");
}
List<String> filteredTables = getFilteredTables(ddl, tablesList).stream().map(t -> t.name()).collect(Collectors.toList());
// Save any missing necessary export table names; save a copy of the original
// table list to bypass 'final or effectively final' condition of the lambda
// expression below.
List<String> usersTables = tablesList.stream().collect(Collectors.toList());
List<String> missingTables = filteredTables.stream().distinct().filter(t -> !usersTables.contains(t)).collect(Collectors.toList());
Collections.sort(missingTables);
// If the provided table list omits required parent and/or foreign-key tables and
// --shouldExportRelatedTables is not set, throw an exception.
if (tablesList.size() != 0 && !(tablesList.equals(filteredTables)) && !exportRelatedTables.get()) {
    throw new Exception(
        "Attempted to export table(s) requiring parent and/or foreign keys tables"
            + " without setting the shouldExportRelatedTables parameter. Set"
            + " --shouldExportRelatedTables=true to export all necessary"
            + " tables, or add " + String.join(", ", missingTables) + " to --tableNames.");
}
c.output(ddl);
}
}));
PCollection<ReadOperation> tables = ddl.apply("Build table read operations", new BuildReadFromTableOperations(tableNames));
PCollection<KV<String, Void>> allTableAndViewNames = ddl.apply("List all table and view names", ParDo.of(new DoFn<Ddl, KV<String, Void>>() {
@ProcessElement
public void processElement(ProcessContext c) {
Ddl ddl = c.element();
for (Table t : ddl.allTables()) {
c.output(KV.of(t.name(), null));
}
// we need to add the names of all views separately here.
for (com.google.cloud.teleport.spanner.ddl.View v : ddl.views()) {
c.output(KV.of(v.name(), null));
}
}
}));
PCollection<String> allChangeStreamNames = ddl.apply("List all change stream names", ParDo.of(new DoFn<Ddl, String>() {
@ProcessElement
public void processElement(ProcessContext c) {
Ddl ddl = c.element();
for (ChangeStream changeStream : ddl.changeStreams()) {
c.output(changeStream.name());
}
}
}));
// Generate a unique output directory name.
final PCollectionView<String> outputDirectoryName = p.apply(Create.of(1)).apply("Create Avro output folder", ParDo.of(new DoFn<Integer, String>() {
@ProcessElement
public void processElement(ProcessContext c) {
String instanceId = spannerConfig.getInstanceId().get();
String dbId = spannerConfig.getDatabaseId().get();
// For direct runner or tests we need a deterministic jobId.
String testJobId = ExportTransform.this.testJobId.get();
if (!Strings.isNullOrEmpty(testJobId)) {
c.output(testJobId);
return;
}
try {
DataflowWorkerHarnessOptions workerHarnessOptions = c.getPipelineOptions().as(DataflowWorkerHarnessOptions.class);
String jobId = workerHarnessOptions.getJobId();
c.output(instanceId + "-" + dbId + "-" + jobId);
} catch (Exception e) {
throw new IllegalStateException("Please specify --testJobId to run with non-dataflow runner");
}
}
})).apply(View.asSingleton());
final PCollectionView<Map<String, SerializableSchemaSupplier>> avroSchemas =
    ddl.apply("Build Avro schemas from DDL",
        ParDo.of(new DoFn<Ddl, KV<String, SerializableSchemaSupplier>>() {
@ProcessElement
public void processElement(ProcessContext c) {
Collection<Schema> avroSchemas = new DdlToAvroSchemaConverter("spannerexport", "1.0.0", shouldExportTimestampAsLogicalType.get()).convert(c.element());
for (Schema schema : avroSchemas) {
c.output(KV.of(schema.getName(), new SerializableSchemaSupplier(schema)));
}
}
})).apply("As view", View.asMap());
PCollection<Struct> rows = tables.apply("Read all rows from Spanner", SpannerIO.readAll().withTransaction(tx).withSpannerConfig(spannerConfig));
ValueProvider<ResourceId> resource =
    ValueProvider.NestedValueProvider.of(
        outputDir,
        (SerializableFunction<String, ResourceId>) s -> FileSystems.matchNewResource(s, true));
ValueProvider<ResourceId> tempResource =
    ValueProvider.NestedValueProvider.of(
        eitherOrValueProvider(avroTempDirectory, outputDir),
        (SerializableFunction<String, ResourceId>) s -> FileSystems.matchNewResource(s, true));
WriteFilesResult<String> fileWriteResults =
    rows.apply("Store Avro files",
        AvroIO.<Struct>writeCustomTypeToGenericRecords()
            .to(new SchemaBasedDynamicDestinations(avroSchemas, outputDirectoryName, dialectView, resource))
            .withTempDirectory(tempResource));
// Generate the manifest file.
PCollection<KV<String, Iterable<String>>> tableFiles = fileWriteResults.getPerDestinationOutputFilenames().apply(GroupByKey.create());
final TupleTag<Void> allTables = new TupleTag<>();
final TupleTag<Iterable<String>> nonEmptyTables = new TupleTag<>();
PCollection<KV<String, CoGbkResult>> groupedTables =
    KeyedPCollectionTuple.of(allTables, allTableAndViewNames)
        .and(nonEmptyTables, tableFiles)
        .apply("Group with all tables", CoGroupByKey.create());
// The following is to export empty tables and views from the database. Empty tables and views
// are handled together because we do not export any rows for views, only their metadata,
// including the queries defining them.
PCollection<KV<String, Iterable<String>>> emptyTablesAndViews =
    groupedTables.apply("Export empty tables and views",
        ParDo.of(new DoFn<KV<String, CoGbkResult>, KV<String, Iterable<String>>>() {
@ProcessElement
public void processElement(ProcessContext c) {
KV<String, CoGbkResult> kv = c.element();
String table = kv.getKey();
CoGbkResult coGbkResult = kv.getValue();
Iterable<String> only = coGbkResult.getOnly(nonEmptyTables, null);
if (only == null) {
LOG.info("Exporting empty table or view: " + table);
// This file will contain the schema definition: column definitions for empty
// tables or defining queries for views.
c.output(KV.of(table, Collections.singleton(table + ".avro-00000-of-00001")));
}
}
}));
PCollection<KV<String, Iterable<String>>> changeStreams =
    allChangeStreamNames.apply("Export change streams",
        ParDo.of(new DoFn<String, KV<String, Iterable<String>>>() {
@ProcessElement
public void processElement(ProcessContext c) {
String changeStreamName = c.element();
LOG.info("Exporting change stream: " + changeStreamName);
// This file will contain the schema definition for the change stream.
c.output(KV.of(changeStreamName, Collections.singleton(changeStreamName + ".avro-00000-of-00001")));
}
}));
// Empty tables, views and change streams are handled together, because we export them as empty
// Avro files that only contain the Avro schemas.
PCollection<KV<String, Iterable<String>>> emptySchemaFiles =
    PCollectionList.of(emptyTablesAndViews)
        .and(changeStreams)
        .apply("Combine all empty schema files", Flatten.pCollections());
emptySchemaFiles =
    emptySchemaFiles.apply("Save empty schema files",
        ParDo.of(new DoFn<KV<String, Iterable<String>>, KV<String, Iterable<String>>>() {
@ProcessElement
public void processElement(ProcessContext c) {
Map<String, SerializableSchemaSupplier> schemaMap = c.sideInput(avroSchemas);
KV<String, Iterable<String>> kv = c.element();
String objectName = kv.getKey();
String fileName = kv.getValue().iterator().next();
Schema schema = schemaMap.get(objectName).get();
DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
Path fullPath = createOutputPath(outputDir.get(), c.sideInput(outputDirectoryName), fileName);
try (DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
dataFileWriter.create(schema, createOutputStream(fullPath, c));
} catch (IOException e) {
throw new RuntimeException(e);
}
c.output(KV.of(objectName, Collections.singleton(fullPath.toString())));
}
/**
* Resolves the complete path name for Avro files for both GCS and local FS
* (for testing).
*
* @param outputDirectoryPath Initial directory path for the file.
* @param outputDirectoryName Terminal directory for the file.
* @param fileName Name of the Avro file
* @return The full {@link Path} of the output Avro file.
*/
private Path createOutputPath(String outputDirectoryPath, String outputDirectoryName, String fileName) {
if (GcsPath.GCS_URI.matcher(outputDirectoryPath).matches()) {
// Avro file path in GCS.
return GcsPath.fromUri(outputDirectoryPath).resolve(outputDirectoryName).resolve(fileName);
} else {
// Avro file path in local filesystem
return Paths.get(outputDirectoryPath, outputDirectoryName, fileName);
}
}
/**
* Creates the {@link OutputStream} for the output file either on GCS or on
* local FS (for testing).
*
* @param outputPath The full path of the output file.
* @param c The {@link org.apache.beam.sdk.transforms.DoFn.ProcessContext}
* @return An {@link OutputStream} for the opened output file.
* @throws IOException if the output file cannot be opened.
*/
private OutputStream createOutputStream(Path outputPath, ProcessContext c) throws IOException {
if (GcsPath.GCS_URI.matcher(outputPath.toString()).matches()) {
// Writing the Avro file to GCS.
org.apache.beam.sdk.extensions.gcp.util.GcsUtil gcsUtil = c.getPipelineOptions().as(GcsOptions.class).getGcsUtil();
String gcsType = "application/octet-stream";
WritableByteChannel gcsChannel = gcsUtil.create((GcsPath) outputPath, gcsType);
return Channels.newOutputStream(gcsChannel);
} else {
// Avro file is created on local filesystem (for testing).
Files.createDirectories(outputPath.getParent());
return Files.newOutputStream(outputPath);
}
}
}).withSideInputs(avroSchemas, outputDirectoryName));
PCollection<KV<String, Iterable<String>>> allFiles = PCollectionList.of(tableFiles).and(emptySchemaFiles).apply("Combine all files", Flatten.pCollections());
PCollection<KV<String, String>> tableManifests = allFiles.apply("Build table manifests", ParDo.of(new BuildTableManifests()));
Contextful.Fn<String, FileIO.Write.FileNaming> tableManifestNaming =
    (element, c) ->
        (window, pane, numShards, shardIndex, compression) ->
            GcsUtil.joinPath(
                outputDir.get(), c.sideInput(outputDirectoryName), tableManifestFileName(element));
tableManifests.apply(
    "Store table manifests",
    FileIO.<String, KV<String, String>>writeDynamic()
        .by(KV::getKey)
        .withDestinationCoder(StringUtf8Coder.of())
        .withNaming(Contextful.of(tableManifestNaming, Requirements.requiresSideInputs(outputDirectoryName)))
        .via(Contextful.fn(KV::getValue), TextIO.sink())
        .withTempDirectory(eitherOrValueProvider(avroTempDirectory, outputDir)));
PCollection<List<Export.Table>> metadataTables = tableManifests.apply("Combine table metadata", Combine.globally(new CombineTableMetadata()));
PCollectionView<Ddl> ddlView = ddl.apply("Cloud Spanner DDL as view", View.asSingleton());
PCollection<String> metadataContent =
    metadataTables.apply(
        "Create database manifest",
        ParDo.of(new CreateDatabaseManifest(ddlView, dialectView)).withSideInputs(ddlView, dialectView));
Contextful.Fn<String, FileIO.Write.FileNaming> manifestNaming =
    (element, c) ->
        (window, pane, numShards, shardIndex, compression) ->
            GcsUtil.joinPath(outputDir.get(), c.sideInput(outputDirectoryName), "spanner-export.json");
metadataContent.apply(
    "Store the database manifest",
    FileIO.<String, String>writeDynamic()
        .by(SerializableFunctions.constant(""))
        .withDestinationCoder(StringUtf8Coder.of())
        .via(TextIO.sink())
        .withNaming(Contextful.of(manifestNaming, Requirements.requiresSideInputs(outputDirectoryName)))
        .withTempDirectory(eitherOrValueProvider(avroTempDirectory, outputDir)));
return fileWriteResults;
}
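The dialect side input above comes from the templates' ReadDialect composite transform, which is not shown here. As a rough sketch of the underlying idea only (the real ReadDialect may be implemented differently), a DoFn can look the dialect up through Beam's SpannerAccessor and the Spanner client's DatabaseClient#getDialect():
// Sketch, not the actual DataflowTemplates implementation.
static class ReadDialectFn extends DoFn<Void, Dialect> {
    private final SpannerConfig spannerConfig;
    private transient SpannerAccessor spannerAccessor;

    ReadDialectFn(SpannerConfig spannerConfig) {
        this.spannerConfig = spannerConfig;
    }

    @Setup
    public void setup() {
        // SpannerAccessor caches clients per SpannerConfig.
        spannerAccessor = SpannerAccessor.getOrCreate(spannerConfig);
    }

    @Teardown
    public void teardown() {
        spannerAccessor.close();
    }

    @ProcessElement
    public void processElement(ProcessContext c) {
        // Returns GOOGLE_STANDARD_SQL or POSTGRESQL for the configured database.
        c.output(spannerAccessor.getDatabaseClient().getDialect());
    }
}
Wrapping such a DoFn in a PTransform and applying View.asSingleton(), as done above, yields the dialectView that ReadInformationSchema and SchemaBasedDynamicDestinations consume.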
Use of com.google.cloud.spanner.Dialect in project DataflowTemplates by GoogleCloudPlatform.
The class TextImportTransform, method expand.
@Override
public PDone expand(PBegin begin) {
PCollectionView<Transaction> tx = begin.apply(SpannerIO.createTransaction().withSpannerConfig(spannerConfig));
PCollectionView<Dialect> dialectView = begin.apply("Read Dialect", new ReadDialect(spannerConfig)).apply("Dialect As PCollectionView", View.asSingleton());
PCollection<Ddl> ddl = begin.apply("Read Information Schema", new ReadInformationSchema(spannerConfig, tx, dialectView));
PCollectionView<Ddl> ddlView = ddl.apply("Cloud Spanner DDL as view", View.asSingleton());
PCollection<ImportManifest> manifest = begin.apply("Read manifest file", new ReadImportManifest(importManifest, dialectView));
PCollection<KV<String, String>> allFiles = manifest.apply("Resolve data files", new ResolveDataFiles(importManifest, ddlView));
PCollection<Map<String, List<TableManifest.Column>>> tableColumns = manifest.apply("Read table columns from manifest", new ReadTableColumns());
PCollectionView<Map<String, List<TableManifest.Column>>> tableColumnsView = tableColumns.apply("tableColumns as View", View.asSingleton());
PCollection<KV<String, List<String>>> tableFiles = allFiles.apply(Combine.perKey(AsList.fn()));
// TODO: add a step to check that schema in the manifest match db schema.
PCollection<HashMultimap<Integer, String>> levelMap = ddl.apply("Group tables by depth", ParDo.of(new DoFn<Ddl, HashMultimap<Integer, String>>() {
@ProcessElement
public void processElement(ProcessContext c) {
Ddl ddl = c.element();
c.output(ddl.perLevelView());
}
}));
PCollectionView<HashMultimap<Integer, String>> levelsView = levelMap.apply("Level map as view", View.asSingleton());
PCollection<HashMultimap<String, String>> tablesToFilesMap =
    tableFiles
        .apply("Combine table files", Combine.globally(AsList.fn()))
        .apply("As HashMultimap",
            ParDo.of(new DoFn<List<KV<String, List<String>>>, HashMultimap<String, String>>() {
@ProcessElement
public void processElement(ProcessContext c) {
HashMultimap<String, String> result = HashMultimap.create();
for (KV<String, List<String>> kv : c.element()) {
result.putAll(kv.getKey().toLowerCase(), kv.getValue());
}
c.output(result);
}
}));
PCollection<?> previousComputation = ddl;
for (int i = 0; i < MAX_DEPTH; i++) {
final int depth = i;
PCollection<KV<String, String>> levelFileToTables = tablesToFilesMap.apply("Store depth " + depth, ParDo.of(new DoFn<HashMultimap<String, String>, KV<String, String>>() {
@ProcessElement
public void processElement(ProcessContext c) {
HashMultimap<String, String> allFiles = c.element();
HashMultimap<Integer, String> levels = c.sideInput(levelsView);
Set<String> tables = levels.get(depth);
for (String table : tables) {
for (String file : allFiles.get(table)) {
c.output(KV.of(file, table));
}
}
}
}).withSideInputs(levelsView));
PCollection<Mutation> mutations =
    levelFileToTables
        .apply("Reshuffle text files " + depth, Reshuffle.viaRandomKey())
        .apply("Text files as mutations. Depth: " + depth,
            new TextTableFilesAsMutations(ddlView, tableColumnsView));
SpannerWriteResult result =
    mutations
        .apply("Wait for previous depth " + depth, Wait.on(previousComputation))
        .apply("Write mutations " + depth,
            SpannerIO.write()
                .withSpannerConfig(spannerConfig)
                .withCommitDeadline(Duration.standardMinutes(1))
                .withMaxCumulativeBackoff(Duration.standardHours(2))
                .withMaxNumMutations(10000)
                .withGroupingFactor(100)
                .withDialectView(dialectView));
previousComputation = result.getOutput();
}
return PDone.in(begin.getPipeline());
}
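The per-depth loop above leans on Wait.on(previousComputation) so that mutations for depth N are committed only after depth N-1 has finished, preserving parent-before-child ordering for interleaved tables and foreign keys. A minimal, self-contained illustration of that pattern (the method and step names are hypothetical):
// Illustration of the Wait.on(...) ordering used in the loop above; not part of TextImportTransform.
static void writeInOrder(
    PCollection<Mutation> parents, PCollection<Mutation> children, SpannerConfig spannerConfig) {
    SpannerWriteResult parentResult =
        parents.apply("Write parents", SpannerIO.write().withSpannerConfig(spannerConfig));
    // Children only start writing once every parent mutation has been committed.
    children
        .apply("Wait for parents", Wait.on(parentResult.getOutput()))
        .apply("Write children", SpannerIO.write().withSpannerConfig(spannerConfig));
}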
Use of com.google.cloud.spanner.Dialect in project java-spanner-jdbc by googleapis.
The class JdbcStatementTest, method createStatement.
@SuppressWarnings("unchecked")
private JdbcStatement createStatement() throws SQLException {
Connection spanner = mock(Connection.class);
when(spanner.getDialect()).thenReturn(dialect);
com.google.cloud.spanner.ResultSet resultSet = mock(com.google.cloud.spanner.ResultSet.class);
when(resultSet.next()).thenReturn(true, false);
when(resultSet.getColumnType(0)).thenReturn(Type.int64());
when(resultSet.getLong(0)).thenReturn(1L);
StatementResult selectResult = mock(StatementResult.class);
when(selectResult.getResultType()).thenReturn(ResultType.RESULT_SET);
when(selectResult.getResultSet()).thenReturn(resultSet);
when(spanner.execute(com.google.cloud.spanner.Statement.of(SELECT))).thenReturn(selectResult);
StatementResult updateResult = mock(StatementResult.class);
when(updateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT);
when(updateResult.getUpdateCount()).thenReturn(1L);
when(spanner.execute(com.google.cloud.spanner.Statement.of(UPDATE))).thenReturn(updateResult);
StatementResult largeUpdateResult = mock(StatementResult.class);
when(largeUpdateResult.getResultType()).thenReturn(ResultType.UPDATE_COUNT);
when(largeUpdateResult.getUpdateCount()).thenReturn(Integer.MAX_VALUE + 1L);
when(spanner.execute(com.google.cloud.spanner.Statement.of(LARGE_UPDATE))).thenReturn(largeUpdateResult);
StatementResult ddlResult = mock(StatementResult.class);
when(ddlResult.getResultType()).thenReturn(ResultType.NO_RESULT);
when(spanner.execute(com.google.cloud.spanner.Statement.of(DDL))).thenReturn(ddlResult);
when(spanner.executeQuery(com.google.cloud.spanner.Statement.of(SELECT))).thenReturn(resultSet);
when(spanner.executeQuery(com.google.cloud.spanner.Statement.of(UPDATE))).thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "not a query"));
when(spanner.executeQuery(com.google.cloud.spanner.Statement.of(DDL))).thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "not a query"));
when(spanner.executeUpdate(com.google.cloud.spanner.Statement.of(UPDATE))).thenReturn(1L);
when(spanner.executeUpdate(com.google.cloud.spanner.Statement.of(SELECT))).thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "not an update"));
when(spanner.executeUpdate(com.google.cloud.spanner.Statement.of(DDL))).thenThrow(SpannerExceptionFactory.newSpannerException(ErrorCode.INVALID_ARGUMENT, "not an update"));
when(spanner.executeBatchUpdate(anyList())).thenAnswer((Answer<long[]>) invocation -> {
List<com.google.cloud.spanner.Statement> statements = (List<com.google.cloud.spanner.Statement>) invocation.getArguments()[0];
if (statements.isEmpty() || AbstractStatementParser.getInstance(dialect).isDdlStatement(statements.get(0).getSql())) {
return new long[0];
}
long[] res = new long[((List<com.google.cloud.spanner.Statement>) invocation.getArguments()[0]).size()];
Arrays.fill(res, 1L);
return res;
});
JdbcConnection connection = mock(JdbcConnection.class);
when(connection.getDialect()).thenReturn(dialect);
when(connection.getParser()).thenReturn(AbstractStatementParser.getInstance(dialect));
when(connection.getSpannerConnection()).thenReturn(spanner);
return new JdbcStatement(connection);
}
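The dialect field used by createStatement() is supplied by parameterizing the test class over both dialects. A sketch of how that is typically wired with JUnit 4's Parameterized runner (the annotations in the real JdbcStatementTest may differ slightly):
// Assumed parameterization: run every test once per Dialect value.
@Parameters(name = "dialect = {0}")
public static Object[] data() {
    return Dialect.values();
}

@Parameter
public Dialect dialect;
With this in place, AbstractStatementParser.getInstance(dialect) and the mocked getDialect() calls above exercise both the GOOGLE_STANDARD_SQL and POSTGRESQL code paths.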
Use of com.google.cloud.spanner.Dialect in project beam by apache.
The class ReadSpannerSchemaTest, method pgSimple.
@Test
public void pgSimple() throws Exception {
// Simplest schema: a table with bigint key
ReadOnlyTransaction tx = mock(ReadOnlyTransaction.class);
when(serviceFactory.mockDatabaseClient().readOnlyTransaction()).thenReturn(tx);
preparePkMetadata(tx, Arrays.asList(pkMetadata("test", "key", "ASC")));
prepareColumnMetadata(tx, Arrays.asList(columnMetadata("test", "key", "bigint")));
SpannerConfig config =
    SpannerConfig.create()
        .withProjectId("test-project")
        .withInstanceId("test-instance")
        .withDatabaseId("test-database")
        .withServiceFactory(serviceFactory);
PCollectionView<Dialect> dialectView = pipeline.apply(Create.of(Dialect.POSTGRESQL)).apply(View.asSingleton());
pipeline.run();
DoFnTester<Void, SpannerSchema> tester = DoFnTester.of(new ReadSpannerSchema(config, dialectView));
tester.setSideInput(dialectView, GlobalWindow.INSTANCE, Dialect.POSTGRESQL);
List<SpannerSchema> schemas = tester.processBundle(Arrays.asList((Void) null));
assertEquals(1, schemas.size());
SpannerSchema schema = schemas.get(0);
assertEquals(1, schema.getTables().size());
SpannerSchema.Column column = SpannerSchema.Column.create("key", Type.int64());
SpannerSchema.KeyPart keyPart = SpannerSchema.KeyPart.create("key", false);
assertThat(schema.getColumns("test"), contains(column));
assertThat(schema.getKeyParts("test"), contains(keyPart));
}
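The pkMetadata(...) and columnMetadata(...) helpers stub the rows that ReadSpannerSchema expects back from the information-schema queries. A hedged sketch of what such helpers might build; the exact field names (and the cells_mutated column) are assumptions, and the real helpers live in ReadSpannerSchemaTest:
// Hypothetical metadata row builders mirroring the helpers used above.
private static Struct pkMetadata(String tableName, String columnName, String ordering) {
    return Struct.newBuilder()
        .set("table_name").to(tableName)
        .set("column_name").to(columnName)
        .set("column_ordering").to(ordering)
        .build();
}

private static Struct columnMetadata(String tableName, String columnName, String type) {
    return Struct.newBuilder()
        .set("table_name").to(tableName)
        .set("column_name").to(columnName)
        .set("spanner_type").to(type)
        .set("cells_mutated").to(3L)
        .build();
}
preparePkMetadata and prepareColumnMetadata then feed these Structs through the mocked ReadOnlyTransaction so the ReadSpannerSchema DoFn sees them as query results.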