
Example 1 with Ddl

Use of com.google.cloud.teleport.spanner.ddl.Ddl in the DataflowTemplates project by GoogleCloudPlatform.

The class CopyDbTest defines the method readDdl.

/* Returns the Ddl representing the Spanner database with the given database name. */
private Ddl readDdl(String db) {
    DatabaseClient dbClient = spannerServer.getDbClient(db);
    Ddl ddl;
    try (ReadOnlyTransaction ctx = dbClient.readOnlyTransaction()) {
        ddl = new InformationSchemaScanner(ctx).scan();
    }
    return ddl;
}
Also used : InformationSchemaScanner(com.google.cloud.teleport.spanner.ddl.InformationSchemaScanner) DatabaseClient(com.google.cloud.spanner.DatabaseClient) ReadOnlyTransaction(com.google.cloud.spanner.ReadOnlyTransaction) Ddl(com.google.cloud.teleport.spanner.ddl.Ddl)
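
For context, here is a minimal sketch (not part of CopyDbTest) of how a test might consume the Ddl returned by readDdl. The database name "my-test-db" and the table name "Users" are hypothetical, and the Hamcrest notNullValue matcher is assumed to be available alongside the assertThat style used elsewhere on this page.

private void verifySchema() {
    // Hypothetical usage only: scan the schema of an assumed database and inspect it.
    Ddl ddl = readDdl("my-test-db");
    for (Table t : ddl.allTables()) {
        System.out.println("Found table: " + t.name());
    }
    // Look up a single table by name, as TextRowToMutation does with ddl.table(tableName).
    Table users = ddl.table("Users");
    assertThat(users, notNullValue());
}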

Example 2 with Ddl

Use of com.google.cloud.teleport.spanner.ddl.Ddl in the DataflowTemplates project by GoogleCloudPlatform.

The class ImportFromAvroTest defines the method runTest.

private void runTest(Schema avroSchema, String spannerSchema, Iterable<GenericRecord> records) throws Exception {
    // Create the Avro file to be imported.
    String fileName = "avroFile.avro";
    ExportProtos.Export exportProto = ExportProtos.Export.newBuilder().addTables(ExportProtos.Export.Table.newBuilder().setName("AvroTable").addDataFiles(fileName).build()).addDatabaseOptions(ExportProtos.Export.DatabaseOption.newBuilder().setOptionName("version_retention_period").setOptionValue("\"4d\"").build()).build();
    JsonFormat.printer().print(exportProto);
    File manifestFile = tmpDir.newFile("spanner-export.json");
    String manifestFileLocation = manifestFile.getParent();
    Files.write(manifestFile.toPath(), JsonFormat.printer().print(exportProto).getBytes(StandardCharsets.UTF_8));
    File avroFile = tmpDir.newFile(fileName);
    try (DataFileWriter<GenericRecord> fileWriter = new DataFileWriter<>(new GenericDatumWriter<>(avroSchema))) {
        fileWriter.create(avroSchema, avroFile);
        for (GenericRecord r : records) {
            fileWriter.append(r);
        }
        fileWriter.flush();
    }
    // Create the target database.
    spannerServer.createDatabase(dbName, Collections.singleton(spannerSchema));
    // Run the import pipeline.
    importPipeline.apply("Import", new ImportTransform(spannerServer.getSpannerConfig(dbName), ValueProvider.StaticValueProvider.of(manifestFileLocation), ValueProvider.StaticValueProvider.of(true), ValueProvider.StaticValueProvider.of(true), ValueProvider.StaticValueProvider.of(true), ValueProvider.StaticValueProvider.of(30)));
    PipelineResult importResult = importPipeline.run();
    importResult.waitUntilFinish();
    Ddl ddl;
    try (ReadOnlyTransaction ctx = spannerServer.getDbClient(dbName).readOnlyTransaction()) {
        ddl = new InformationSchemaScanner(ctx).scan();
    }
    assertThat(ddl.databaseOptions().size(), is(1));
    ExportProtos.Export.DatabaseOption dbOption = ddl.databaseOptions().get(0);
    assertThat(dbOption.getOptionName(), is("version_retention_period"));
    assertThat(dbOption.getOptionValue(), is("4d"));
}
Also used : DataFileWriter(org.apache.avro.file.DataFileWriter) PipelineResult(org.apache.beam.sdk.PipelineResult) Ddl(com.google.cloud.teleport.spanner.ddl.Ddl) InformationSchemaScanner(com.google.cloud.teleport.spanner.ddl.InformationSchemaScanner) ReadOnlyTransaction(com.google.cloud.spanner.ReadOnlyTransaction) GenericRecord(org.apache.avro.generic.GenericRecord) File(java.io.File)
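
As a usage illustration, the following sketch (not part of ImportFromAvroTest) shows how a test could build the three arguments runTest expects: an Avro schema, the matching Spanner DDL, and the records to import. The table and column names are assumptions chosen for the example.

private void runSimpleImport() throws Exception {
    // Hypothetical Avro schema for a table named "AvroTable" with an INT64 key and a nullable string column.
    Schema avroSchema = SchemaBuilder.record("AvroTable").fields().requiredLong("id").optionalString("name").endRecord();
    // Spanner DDL intended to match the Avro schema above (illustrative only).
    String spannerSchema = "CREATE TABLE `AvroTable` (`id` INT64 NOT NULL, `name` STRING(MAX)) PRIMARY KEY (`id`)";
    // A single record to import.
    GenericRecord record = new GenericRecordBuilder(avroSchema).set("id", 1L).set("name", "alice").build();
    runTest(avroSchema, spannerSchema, Collections.singleton(record));
}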

Example 3 with Ddl

Use of com.google.cloud.teleport.spanner.ddl.Ddl in the DataflowTemplates project by GoogleCloudPlatform.

The class ExportTransform defines the method expand.

/**
 * Read the Cloud Spanner schema and all the rows in all the tables of the database. Create and
 * write the exported Avro files to GCS.
 */
@Override
public WriteFilesResult<String> expand(PBegin begin) {
    Pipeline p = begin.getPipeline();
    /*
     * Allow users to specify read timestamp.
     * CreateTransaction and CreateTransactionFn classes in SpannerIO
     * only take a timestamp object for exact staleness which works when
     * parameters are provided during template compile time. They do not work with
     * a Timestamp valueProvider which can take parameters at runtime. Hence a new
     * ParDo class CreateTransactionFnWithTimestamp had to be created for this
     * purpose.
     */
    PCollectionView<Transaction> tx = p.apply("CreateTransaction", Create.of(1)).apply("Create transaction", ParDo.of(new CreateTransactionFnWithTimestamp(spannerConfig, snapshotTime))).apply("Tx As PCollectionView", View.asSingleton());
    PCollectionView<Dialect> dialectView = p.apply("Read Dialect", new ReadDialect(spannerConfig)).apply("Dialect As PCollectionView", View.asSingleton());
    PCollection<Ddl> ddl = p.apply("Read Information Schema", new ReadInformationSchema(spannerConfig, tx, dialectView));
    PCollection<Ddl> exportState = ddl.apply("Check export conditions", ParDo.of(new DoFn<Ddl, Ddl>() {

        @ProcessElement
        public void processElement(ProcessContext c) throws Exception {
            Ddl ddl = c.element();
            List<String> tablesList = Collections.emptyList();
            // If --shouldExportRelatedTables is set but --tableNames is empty (no tables were
            // selected for export), throw an exception.
            if (tableNames.get().trim().isEmpty() && exportRelatedTables.get()) {
                throw new Exception("Invalid usage of --tableNames and --shouldExportRelatedTables. Set" + " --shouldExportRelatedTables=true only if --tableNames is given" + " selected tables for export.");
            }
            // If the user provides a comma-separated list of strings, parse it into a List
            if (!tableNames.get().trim().isEmpty()) {
                tablesList = Arrays.asList(tableNames.get().split(",\\s*"));
            }
            // If the user provided any invalid table names, throw an exception.
            List<String> allSpannerTables = ddl.allTables().stream().map(t -> t.name()).collect(Collectors.toList());
            List<String> invalidTables = tablesList.stream().distinct().filter(t -> !allSpannerTables.contains(t)).collect(Collectors.toList());
            if (invalidTables.size() != 0) {
                throw new Exception("INVALID_ARGUMENT: Table(s) not found: " + String.join(", ", invalidTables) + ".");
            }
            List<String> filteredTables = getFilteredTables(ddl, tablesList).stream().map(t -> t.name()).collect(Collectors.toList());
            // Save any missing necessary export table names; save a copy of the original
            // table list to bypass 'final or effectively final' condition of the lambda
            // expression below.
            List<String> usersTables = tablesList.stream().collect(Collectors.toList());
            List<String> missingTables = filteredTables.stream().distinct().filter(t -> !usersTables.contains(t)).collect(Collectors.toList());
            Collections.sort(missingTables);
            // If the requested tables are missing required parent and/or foreign-key tables and
            // --shouldExportRelatedTables is not set, throw an exception.
            if (tablesList.size() != 0 && !(tablesList.equals(filteredTables)) && !exportRelatedTables.get()) {
                throw new Exception("Attempted to export table(s) requiring parent and/or foreign keys tables" + " without setting the shouldExportRelatedTables parameter. Set" + " --shouldExportRelatedTables=true to export all necessary" + " tables, or add " + String.join(", ", missingTables) + " to --tableNames.");
            }
            c.output(ddl);
        }
    }));
    PCollection<ReadOperation> tables = ddl.apply("Build table read operations", new BuildReadFromTableOperations(tableNames));
    PCollection<KV<String, Void>> allTableAndViewNames = ddl.apply("List all table and view names", ParDo.of(new DoFn<Ddl, KV<String, Void>>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            Ddl ddl = c.element();
            for (Table t : ddl.allTables()) {
                c.output(KV.of(t.name(), null));
            }
            // Views are not included in allTables(), so we add their names separately here.
            for (com.google.cloud.teleport.spanner.ddl.View v : ddl.views()) {
                c.output(KV.of(v.name(), null));
            }
        }
    }));
    PCollection<String> allChangeStreamNames = ddl.apply("List all change stream names", ParDo.of(new DoFn<Ddl, String>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            Ddl ddl = c.element();
            for (ChangeStream changeStream : ddl.changeStreams()) {
                c.output(changeStream.name());
            }
        }
    }));
    // Generate a unique output directory name.
    final PCollectionView<String> outputDirectoryName = p.apply(Create.of(1)).apply("Create Avro output folder", ParDo.of(new DoFn<Integer, String>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            String instanceId = spannerConfig.getInstanceId().get();
            String dbId = spannerConfig.getDatabaseId().get();
            // For direct runner or tests we need a deterministic jobId.
            String testJobId = ExportTransform.this.testJobId.get();
            if (!Strings.isNullOrEmpty(testJobId)) {
                c.output(testJobId);
                return;
            }
            try {
                DataflowWorkerHarnessOptions workerHarnessOptions = c.getPipelineOptions().as(DataflowWorkerHarnessOptions.class);
                String jobId = workerHarnessOptions.getJobId();
                c.output(instanceId + "-" + dbId + "-" + jobId);
            } catch (Exception e) {
                throw new IllegalStateException("Please specify --testJobId to run with non-dataflow runner");
            }
        }
    })).apply(View.asSingleton());
    final PCollectionView<Map<String, SerializableSchemaSupplier>> avroSchemas = ddl.apply("Build Avro schemas from DDL", ParDo.of(new DoFn<Ddl, KV<String, SerializableSchemaSupplier>>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            Collection<Schema> avroSchemas = new DdlToAvroSchemaConverter("spannerexport", "1.0.0", shouldExportTimestampAsLogicalType.get()).convert(c.element());
            for (Schema schema : avroSchemas) {
                c.output(KV.of(schema.getName(), new SerializableSchemaSupplier(schema)));
            }
        }
    })).apply("As view", View.asMap());
    PCollection<Struct> rows = tables.apply("Read all rows from Spanner", SpannerIO.readAll().withTransaction(tx).withSpannerConfig(spannerConfig));
    ValueProvider<ResourceId> resource = ValueProvider.NestedValueProvider.of(outputDir, (SerializableFunction<String, ResourceId>) s -> FileSystems.matchNewResource(s, true));
    ValueProvider<ResourceId> tempResource = ValueProvider.NestedValueProvider.of(eitherOrValueProvider(avroTempDirectory, outputDir), (SerializableFunction<String, ResourceId>) s -> FileSystems.matchNewResource(s, true));
    WriteFilesResult<String> fileWriteResults = rows.apply("Store Avro files", AvroIO.<Struct>writeCustomTypeToGenericRecords().to(new SchemaBasedDynamicDestinations(avroSchemas, outputDirectoryName, dialectView, resource)).withTempDirectory(tempResource));
    // Generate the manifest file.
    PCollection<KV<String, Iterable<String>>> tableFiles = fileWriteResults.getPerDestinationOutputFilenames().apply(GroupByKey.create());
    final TupleTag<Void> allTables = new TupleTag<>();
    final TupleTag<Iterable<String>> nonEmptyTables = new TupleTag<>();
    PCollection<KV<String, CoGbkResult>> groupedTables = KeyedPCollectionTuple.of(allTables, allTableAndViewNames).and(nonEmptyTables, tableFiles).apply("Group with all tables", CoGroupByKey.create());
    // The following is to export empty tables and views from the database.  Empty tables and views
    // are handled together because we do not export any rows for views, only their metadata,
    // including the queries defining them.
    PCollection<KV<String, Iterable<String>>> emptyTablesAndViews = groupedTables.apply("Export empty tables and views", ParDo.of(new DoFn<KV<String, CoGbkResult>, KV<String, Iterable<String>>>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            KV<String, CoGbkResult> kv = c.element();
            String table = kv.getKey();
            CoGbkResult coGbkResult = kv.getValue();
            Iterable<String> only = coGbkResult.getOnly(nonEmptyTables, null);
            if (only == null) {
                LOG.info("Exporting empty table or view: " + table);
                // This file will contain the schema definition: column definitions for empty
                // tables or defining queries for views.
                c.output(KV.of(table, Collections.singleton(table + ".avro-00000-of-00001")));
            }
        }
    }));
    PCollection<KV<String, Iterable<String>>> changeStreams = allChangeStreamNames.apply("Export change streams", ParDo.of(new DoFn<String, KV<String, Iterable<String>>>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            String changeStreamName = c.element();
            LOG.info("Exporting change stream: " + changeStreamName);
            // This file will contain the schema definition for the change stream.
            c.output(KV.of(changeStreamName, Collections.singleton(changeStreamName + ".avro-00000-of-00001")));
        }
    }));
    // Empty tables, views and change streams are handled together, because we export them as empty
    // Avro files that only contain the Avro schemas.
    PCollection<KV<String, Iterable<String>>> emptySchemaFiles = PCollectionList.of(emptyTablesAndViews).and(changeStreams).apply("Combine all empty schema files", Flatten.pCollections());
    emptySchemaFiles = emptySchemaFiles.apply("Save empty schema files", ParDo.of(new DoFn<KV<String, Iterable<String>>, KV<String, Iterable<String>>>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            Map<String, SerializableSchemaSupplier> schemaMap = c.sideInput(avroSchemas);
            KV<String, Iterable<String>> kv = c.element();
            String objectName = kv.getKey();
            String fileName = kv.getValue().iterator().next();
            Schema schema = schemaMap.get(objectName).get();
            DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<>(schema);
            Path fullPath = createOutputPath(outputDir.get(), c.sideInput(outputDirectoryName), fileName);
            try (DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<>(datumWriter)) {
                dataFileWriter.create(schema, createOutputStream(fullPath, c));
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            c.output(KV.of(objectName, Collections.singleton(fullPath.toString())));
        }

        /**
         * Resolves the complete path name for Avro files for both GCS and local FS
         * (for testing).
         *
         * @param outputDirectoryPath Initial directory path for the file.
         * @param outputDirectoryName Terminal directory for the file.
         * @param fileName Name of the Avro file
         * @return The full {@link Path} of the output Avro file.
         */
        private Path createOutputPath(String outputDirectoryPath, String outputDirectoryName, String fileName) {
            if (GcsPath.GCS_URI.matcher(outputDirectoryPath).matches()) {
                // Avro file path in GCS.
                return GcsPath.fromUri(outputDirectoryPath).resolve(outputDirectoryName).resolve(fileName);
            } else {
                // Avro file path in local filesystem
                return Paths.get(outputDirectoryPath, outputDirectoryName, fileName);
            }
        }

        /**
         * Creates the {@link OutputStream} for the output file either on GCS or on
         * local FS (for testing).
         *
         * @param outputPath The full path of the output file.
         * @param c The {@link org.apache.beam.sdk.transforms.DoFn.ProcessContext}
         * @return An {@link OutputStream} for the opened output file.
         * @throws IOException if the output file cannot be opened.
         */
        private OutputStream createOutputStream(Path outputPath, ProcessContext c) throws IOException {
            if (GcsPath.GCS_URI.matcher(outputPath.toString()).matches()) {
                // Writing the Avro file to GCS.
                org.apache.beam.sdk.extensions.gcp.util.GcsUtil gcsUtil = c.getPipelineOptions().as(GcsOptions.class).getGcsUtil();
                String gcsType = "application/octet-stream";
                WritableByteChannel gcsChannel = gcsUtil.create((GcsPath) outputPath, gcsType);
                return Channels.newOutputStream(gcsChannel);
            } else {
                // Avro file is created on local filesystem (for testing).
                Files.createDirectories(outputPath.getParent());
                return Files.newOutputStream(outputPath);
            }
        }
    }).withSideInputs(avroSchemas, outputDirectoryName));
    PCollection<KV<String, Iterable<String>>> allFiles = PCollectionList.of(tableFiles).and(emptySchemaFiles).apply("Combine all files", Flatten.pCollections());
    PCollection<KV<String, String>> tableManifests = allFiles.apply("Build table manifests", ParDo.of(new BuildTableManifests()));
    Contextful.Fn<String, FileIO.Write.FileNaming> tableManifestNaming = (element, c) -> (window, pane, numShards, shardIndex, compression) -> GcsUtil.joinPath(outputDir.get(), c.sideInput(outputDirectoryName), tableManifestFileName(element));
    tableManifests.apply("Store table manifests", FileIO.<String, KV<String, String>>writeDynamic().by(KV::getKey).withDestinationCoder(StringUtf8Coder.of()).withNaming(Contextful.of(tableManifestNaming, Requirements.requiresSideInputs(outputDirectoryName))).via(Contextful.fn(KV::getValue), TextIO.sink()).withTempDirectory(eitherOrValueProvider(avroTempDirectory, outputDir)));
    PCollection<List<Export.Table>> metadataTables = tableManifests.apply("Combine table metadata", Combine.globally(new CombineTableMetadata()));
    PCollectionView<Ddl> ddlView = ddl.apply("Cloud Spanner DDL as view", View.asSingleton());
    PCollection<String> metadataContent = metadataTables.apply("Create database manifest", ParDo.of(new CreateDatabaseManifest(ddlView, dialectView)).withSideInputs(ddlView, dialectView));
    Contextful.Fn<String, FileIO.Write.FileNaming> manifestNaming = (element, c) -> (window, pane, numShards, shardIndex, compression) -> GcsUtil.joinPath(outputDir.get(), c.sideInput(outputDirectoryName), "spanner-export.json");
    metadataContent.apply("Store the database manifest", FileIO.<String, String>writeDynamic().by(SerializableFunctions.constant("")).withDestinationCoder(StringUtf8Coder.of()).via(TextIO.sink()).withNaming(Contextful.of(manifestNaming, Requirements.requiresSideInputs(outputDirectoryName))).withTempDirectory(eitherOrValueProvider(avroTempDirectory, outputDir)));
    return fileWriteResults;
}
Also used : CombineFn(org.apache.beam.sdk.transforms.Combine.CombineFn) Arrays(java.util.Arrays) AvroIO(org.apache.beam.sdk.io.AvroIO) FileIO(org.apache.beam.sdk.io.FileIO) Table(com.google.cloud.teleport.spanner.ddl.Table) PBegin(org.apache.beam.sdk.values.PBegin) WriteFilesResult(org.apache.beam.sdk.io.WriteFilesResult) Dialect(com.google.cloud.spanner.Dialect) LoggerFactory(org.slf4j.LoggerFactory) SerializableFunction(org.apache.beam.sdk.transforms.SerializableFunction) ValueProviderUtils.eitherOrValueProvider(com.google.cloud.teleport.util.ValueProviderUtils.eitherOrValueProvider) ReadOperation(org.apache.beam.sdk.io.gcp.spanner.ReadOperation) DataflowWorkerHarnessOptions(org.apache.beam.runners.dataflow.options.DataflowWorkerHarnessOptions) PCollectionList(org.apache.beam.sdk.values.PCollectionList) Create(org.apache.beam.sdk.transforms.Create) Map(java.util.Map) KeyedPCollectionTuple(org.apache.beam.sdk.transforms.join.KeyedPCollectionTuple) TableManifest(com.google.cloud.teleport.spanner.ExportProtos.TableManifest) Path(java.nio.file.Path) ValueProvider(org.apache.beam.sdk.options.ValueProvider) Flatten(org.apache.beam.sdk.transforms.Flatten) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) Schema(org.apache.avro.Schema) DatumWriter(org.apache.avro.io.DatumWriter) Collection(java.util.Collection) DataFileWriter(org.apache.avro.file.DataFileWriter) Collectors(java.util.stream.Collectors) Serializable(java.io.Serializable) SpannerConfig(org.apache.beam.sdk.io.gcp.spanner.SpannerConfig) Export(com.google.cloud.teleport.spanner.ExportProtos.Export) Objects(java.util.Objects) List(java.util.List) JsonFormat(com.google.protobuf.util.JsonFormat) ParDo(org.apache.beam.sdk.transforms.ParDo) Struct(com.google.cloud.spanner.Struct) ResolveOptions(org.apache.beam.sdk.io.fs.ResolveOptions) Requirements(org.apache.beam.sdk.transforms.Requirements) DynamicAvroDestinations(org.apache.beam.sdk.io.DynamicAvroDestinations) Iterables(com.google.common.collect.Iterables) KV(org.apache.beam.sdk.values.KV) DefaultFilenamePolicy(org.apache.beam.sdk.io.DefaultFilenamePolicy) Combine(org.apache.beam.sdk.transforms.Combine) Supplier(com.google.common.base.Supplier) View(org.apache.beam.sdk.transforms.View) SerializableFunctions(org.apache.beam.sdk.transforms.SerializableFunctions) ArrayList(java.util.ArrayList) StringUtf8Coder(org.apache.beam.sdk.coders.StringUtf8Coder) PTransform(org.apache.beam.sdk.transforms.PTransform) CoGbkResult(org.apache.beam.sdk.transforms.join.CoGbkResult) FileBasedSink(org.apache.beam.sdk.io.FileBasedSink) Contextful(org.apache.beam.sdk.transforms.Contextful) Strings(com.google.common.base.Strings) Transaction(org.apache.beam.sdk.io.gcp.spanner.Transaction) TupleTag(org.apache.beam.sdk.values.TupleTag) Pipeline(org.apache.beam.sdk.Pipeline) Ddl(com.google.cloud.teleport.spanner.ddl.Ddl) CreateTransactionFnWithTimestamp(com.google.cloud.teleport.templates.common.SpannerConverters.CreateTransactionFnWithTimestamp) SpannerTableFilter.getFilteredTables(com.google.cloud.teleport.spanner.SpannerTableFilter.getFilteredTables) OutputStream(java.io.OutputStream) GcsPath(org.apache.beam.sdk.extensions.gcp.util.gcsfs.GcsPath) DoFn(org.apache.beam.sdk.transforms.DoFn) GenericRecord(org.apache.avro.generic.GenericRecord) GroupByKey(org.apache.beam.sdk.transforms.GroupByKey) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) ResourceId(org.apache.beam.sdk.io.fs.ResourceId) 
Files(java.nio.file.Files) Channels(java.nio.channels.Channels) ProtoDialect(com.google.cloud.teleport.spanner.ExportProtos.ProtoDialect) IOException(java.io.IOException) TimestampBound(com.google.cloud.spanner.TimestampBound) PCollection(org.apache.beam.sdk.values.PCollection) SpannerIO(org.apache.beam.sdk.io.gcp.spanner.SpannerIO) SchemaBuilder(org.apache.avro.SchemaBuilder) CoGroupByKey(org.apache.beam.sdk.transforms.join.CoGroupByKey) ChangeStream(com.google.cloud.teleport.spanner.ddl.ChangeStream) Paths(java.nio.file.Paths) GcsOptions(org.apache.beam.sdk.extensions.gcp.options.GcsOptions) PCollectionView(org.apache.beam.sdk.values.PCollectionView) WritableByteChannel(java.nio.channels.WritableByteChannel) VisibleForTesting(com.google.common.annotations.VisibleForTesting) FileSystems(org.apache.beam.sdk.io.FileSystems) Collections(java.util.Collections) TextIO(org.apache.beam.sdk.io.TextIO)
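
The "Export empty tables and views" step above amounts to a left join: every table and view name is co-grouped with the names that actually produced Avro files, and any name with no files is treated as empty. The following standalone sketch (in-memory inputs, DirectRunner assumed on the classpath, hypothetical table names) shows that CoGroupByKey pattern in isolation.

static void findEmptyTables() {
    Pipeline p = Pipeline.create();
    // All table names, keyed with a Void value, as in allTableAndViewNames.
    PCollection<KV<String, Void>> allNames = p.apply("All tables", Create.of(KV.of("Users", (Void) null), KV.of("Empty", (Void) null)).withCoder(KvCoder.of(StringUtf8Coder.of(), VoidCoder.of())));
    // Only the tables that produced output files, as in tableFiles.
    PCollection<KV<String, Iterable<String>>> filesPerTable = p.apply("Non-empty tables", Create.of(KV.of("Users", (Iterable<String>) Arrays.asList("Users.avro-00000-of-00001"))).withCoder(KvCoder.of(StringUtf8Coder.of(), IterableCoder.of(StringUtf8Coder.of()))));
    final TupleTag<Void> allTag = new TupleTag<>();
    final TupleTag<Iterable<String>> filesTag = new TupleTag<>();
    KeyedPCollectionTuple.of(allTag, allNames).and(filesTag, filesPerTable).apply("Group with all tables", CoGroupByKey.create()).apply("Find empty tables", ParDo.of(new DoFn<KV<String, CoGbkResult>, String>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            // A name with no entry on the "files" side produced no rows, so it is empty.
            if (c.element().getValue().getOnly(filesTag, null) == null) {
                c.output(c.element().getKey());
            }
        }
    }));
    p.run().waitUntilFinish();
}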

Example 4 with Ddl

Use of com.google.cloud.teleport.spanner.ddl.Ddl in the DataflowTemplates project by GoogleCloudPlatform.

The class TextImportTransform defines the method expand.

@Override
public PDone expand(PBegin begin) {
    PCollectionView<Transaction> tx = begin.apply(SpannerIO.createTransaction().withSpannerConfig(spannerConfig));
    PCollectionView<Dialect> dialectView = begin.apply("Read Dialect", new ReadDialect(spannerConfig)).apply("Dialect As PCollectionView", View.asSingleton());
    PCollection<Ddl> ddl = begin.apply("Read Information Schema", new ReadInformationSchema(spannerConfig, tx, dialectView));
    PCollectionView<Ddl> ddlView = ddl.apply("Cloud Spanner DDL as view", View.asSingleton());
    PCollection<ImportManifest> manifest = begin.apply("Read manifest file", new ReadImportManifest(importManifest, dialectView));
    PCollection<KV<String, String>> allFiles = manifest.apply("Resolve data files", new ResolveDataFiles(importManifest, ddlView));
    PCollection<Map<String, List<TableManifest.Column>>> tableColumns = manifest.apply("Read table columns from manifest", new ReadTableColumns());
    PCollectionView<Map<String, List<TableManifest.Column>>> tableColumnsView = tableColumns.apply("tableColumns as View", View.asSingleton());
    PCollection<KV<String, List<String>>> tableFiles = allFiles.apply(Combine.perKey(AsList.fn()));
    // TODO: add a step to check that schema in the manifest match db schema.
    PCollection<HashMultimap<Integer, String>> levelMap = ddl.apply("Group tables by depth", ParDo.of(new DoFn<Ddl, HashMultimap<Integer, String>>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            Ddl ddl = c.element();
            c.output(ddl.perLevelView());
        }
    }));
    PCollectionView<HashMultimap<Integer, String>> levelsView = levelMap.apply("Level map as view", View.asSingleton());
    PCollection<HashMultimap<String, String>> tablesToFilesMap = tableFiles.apply("Combine table files", Combine.globally(AsList.fn())).apply("As HashMultimap", ParDo.of(new DoFn<List<KV<String, List<String>>>, HashMultimap<String, String>>() {

        @ProcessElement
        public void processElement(ProcessContext c) {
            HashMultimap<String, String> result = HashMultimap.create();
            for (KV<String, List<String>> kv : c.element()) {
                result.putAll(kv.getKey().toLowerCase(), kv.getValue());
            }
            c.output(result);
        }
    }));
    PCollection<?> previousComputation = ddl;
    for (int i = 0; i < MAX_DEPTH; i++) {
        final int depth = i;
        PCollection<KV<String, String>> levelFileToTables = tablesToFilesMap.apply("Store depth " + depth, ParDo.of(new DoFn<HashMultimap<String, String>, KV<String, String>>() {

            @ProcessElement
            public void processElement(ProcessContext c) {
                HashMultimap<String, String> allFiles = c.element();
                HashMultimap<Integer, String> levels = c.sideInput(levelsView);
                Set<String> tables = levels.get(depth);
                for (String table : tables) {
                    for (String file : allFiles.get(table)) {
                        c.output(KV.of(file, table));
                    }
                }
            }
        }).withSideInputs(levelsView));
        PCollection<Mutation> mutations = levelFileToTables.apply("Reshuffle text files " + depth, Reshuffle.viaRandomKey()).apply("Text files as mutations. Depth: " + depth, new TextTableFilesAsMutations(ddlView, tableColumnsView));
        SpannerWriteResult result = mutations.apply("Wait for previous depth " + depth, Wait.on(previousComputation)).apply("Write mutations " + depth, SpannerIO.write().withSpannerConfig(spannerConfig).withCommitDeadline(Duration.standardMinutes(1)).withMaxCumulativeBackoff(Duration.standardHours(2)).withMaxNumMutations(10000).withGroupingFactor(100).withDialectView(dialectView));
        previousComputation = result.getOutput();
    }
    return PDone.in(begin.getPipeline());
}
Also used : Set(java.util.Set) Ddl(com.google.cloud.teleport.spanner.ddl.Ddl) ImportManifest(com.google.cloud.teleport.spanner.TextImportProtos.ImportManifest) Column(com.google.cloud.teleport.spanner.ddl.Column) Dialect(com.google.cloud.spanner.Dialect) ProtoDialect(com.google.cloud.teleport.spanner.ExportProtos.ProtoDialect) List(java.util.List) KV(org.apache.beam.sdk.values.KV) HashMultimap(com.google.common.collect.HashMultimap) DoFn(org.apache.beam.sdk.transforms.DoFn) Transaction(org.apache.beam.sdk.io.gcp.spanner.Transaction) SpannerWriteResult(org.apache.beam.sdk.io.gcp.spanner.SpannerWriteResult) Mutation(com.google.cloud.spanner.Mutation) Map(java.util.Map) HashMap(java.util.HashMap)
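
The depth loop above writes parent tables before their interleaved children. As a quick illustration of the depth-to-table multimap that "Group tables by depth" produces via ddl.perLevelView(), here is a minimal sketch with hypothetical table names (Singers, Albums, Songs); only the Guava HashMultimap behavior is shown.

static void printImportOrder() {
    // Level 0 tables have no parent; level 1 tables are interleaved in a level 0 table, and so on.
    HashMultimap<Integer, String> levels = HashMultimap.create();
    levels.put(0, "Singers");
    // Assumed to be interleaved in Singers.
    levels.put(1, "Albums");
    // Assumed to be interleaved in Albums.
    levels.put(2, "Songs");
    for (int depth = 0; depth < 3; depth++) {
        for (String table : levels.get(depth)) {
            System.out.println("Import depth " + depth + ": " + table);
        }
    }
}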

Example 5 with Ddl

Use of com.google.cloud.teleport.spanner.ddl.Ddl in the DataflowTemplates project by GoogleCloudPlatform.

The class TextRowToMutation defines the method processElement.

@ProcessElement
public void processElement(ProcessContext c) throws IOException {
    /**
     * The input string is a single line, but Apache Commons CSVParser can process multiple
     * lines, so we only take the first record in the result list.
     */
    KV<String, String> kv = c.element();
    String tableName = kv.getKey();
    Ddl ddl = c.sideInput(ddlView);
    Map<String, List<TableManifest.Column>> tableColumnsMap = c.sideInput(tableColumnsView);
    Table table = ddl.table(tableName);
    Reader in = new StringReader(kv.getValue());
    CSVFormat csvFormat = CSVFormat.newFormat(columnDelimiter.get()).withQuote(fieldQualifier.get()).withIgnoreEmptyLines(true).withTrailingDelimiter(trailingDelimiter.get()).withEscape(escape.get()).withNullString(nullString.get());
    CSVParser parser = new CSVParser(in, csvFormat);
    List<CSVRecord> list = parser.getRecords();
    if (list.isEmpty()) {
        return;
    }
    if (list.size() > 1) {
        throw new RuntimeException("Unable to parse this row: " + c.element());
    }
    CSVRecord row = list.get(0);
    writeBuilder = Mutation.newInsertOrUpdateBuilder(table.name());
    try {
        c.output(parseRow(writeBuilder, row, table, tableColumnsMap.get(tableName)));
    } catch (IllegalArgumentException e) {
        throw new RuntimeException(String.format("Error to parseRow. row: %s, table: %s", row, table), e);
    }
}
Also used : Table(com.google.cloud.teleport.spanner.ddl.Table) TableManifest(com.google.cloud.teleport.spanner.TextImportProtos.ImportManifest.TableManifest) Reader(java.io.Reader) StringReader(java.io.StringReader) Ddl(com.google.cloud.teleport.spanner.ddl.Ddl) CSVParser(org.apache.commons.csv.CSVParser) List(java.util.List) CSVFormat(org.apache.commons.csv.CSVFormat) CSVRecord(org.apache.commons.csv.CSVRecord)
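
To make the flow concrete, the sketch below (not part of TextRowToMutation) parses one CSV line with Commons CSV and turns it into an insert-or-update Mutation by hand. The table name "Users" and its two columns are assumptions, and the delimiter and quote characters are hard-coded rather than taken from ValueProviders.

static Mutation csvLineToMutation() throws IOException {
    // Hypothetical single-line input for an assumed table Users(id INT64, name STRING(MAX)).
    String line = "1,alice";
    CSVFormat csvFormat = CSVFormat.newFormat(',').withQuote('"').withIgnoreEmptyLines(true);
    try (CSVParser parser = new CSVParser(new StringReader(line), csvFormat)) {
        // One input line yields exactly one record here.
        CSVRecord row = parser.getRecords().get(0);
        return Mutation.newInsertOrUpdateBuilder("Users").set("id").to(Long.parseLong(row.get(0))).set("name").to(row.get(1)).build();
    }
}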

Aggregations

Ddl (com.google.cloud.teleport.spanner.ddl.Ddl)109 Test (org.junit.Test)91 Schema (org.apache.avro.Schema)34 GenericRecord (org.apache.avro.generic.GenericRecord)19 List (java.util.List)18 Struct (com.google.cloud.spanner.Struct)14 Collectors (java.util.stream.Collectors)14 KV (org.apache.beam.sdk.values.KV)14 SpannerTableFilter.getFilteredTables (com.google.cloud.teleport.spanner.SpannerTableFilter.getFilteredTables)12 Type (com.google.cloud.teleport.spanner.common.Type)12 Path (java.nio.file.Path)12 Collections (java.util.Collections)12 ImmutableList (com.google.common.collect.ImmutableList)11 IOException (java.io.IOException)11 Assert.assertEquals (org.junit.Assert.assertEquals)11 ReadImportManifest (com.google.cloud.teleport.spanner.TextImportTransform.ReadImportManifest)10 ResolveDataFiles (com.google.cloud.teleport.spanner.TextImportTransform.ResolveDataFiles)10 BufferedWriter (java.io.BufferedWriter)10 Charset (java.nio.charset.Charset)10 RunWith (org.junit.runner.RunWith)9