Use of com.google.cloud.teleport.v2.clients.DataplexClient in project DataflowTemplates by GoogleCloudPlatform.
Class DataplexBigQueryToGcs, method setUpPipeline.
private static Pipeline setUpPipeline(
    DataplexBigQueryToGcsOptions options, DataplexClient dataplex,
    BigQuery bqClient, BigQueryStorageClient bqsClient)
    throws IOException, ExecutionException, InterruptedException {
  int maxParallelBigQueryRequests = options.getMaxParallelBigQueryMetadataRequests();
  checkArgument(
      maxParallelBigQueryRequests >= 1,
      "maxParallelBigQueryMetadataRequests must be >= 1, but was: %s",
      maxParallelBigQueryRequests);

  String gcsResource =
      resolveAsset(
          dataplex,
          options.getDestinationStorageBucketAssetName(),
          DataplexAssetResourceSpec.STORAGE_BUCKET);
  String targetRootPath = "gs://" + gcsResource;

  String bqResource = options.getSourceBigQueryDataset();
  // If param contains "/lakes/", assume it's a Dataplex resource and resolve it into a BQ ID first:
  if (bqResource.toLowerCase().contains("/lakes/")) {
    bqResource = resolveAsset(dataplex, bqResource, DataplexAssetResourceSpec.BIGQUERY_DATASET);
  }
  DatasetId datasetId = BigQueryUtils.parseDatasetUrn(bqResource);

  BigQueryMetadataLoader metadataLoader =
      new BigQueryMetadataLoader(bqClient, bqsClient, maxParallelBigQueryRequests);
  return buildPipeline(options, metadataLoader, targetRootPath, datasetId);
}
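The resolveAsset helper called twice above is defined elsewhere in the template. A minimal sketch of what such a helper could look like, assuming DataplexClient.getAsset returns a GoogleCloudDataplexV1Asset whose resource spec carries the type and the underlying resource name (the actual implementation in DataflowTemplates may validate and log differently):

private static String resolveAsset(
    DataplexClient dataplex, String assetName, DataplexAssetResourceSpec expectedType)
    throws IOException {
  // Hypothetical sketch, not the template's actual code.
  GoogleCloudDataplexV1Asset asset = dataplex.getAsset(assetName);
  checkNotNull(asset, "Asset %s not found.", assetName);
  checkNotNull(asset.getResourceSpec(), "Asset %s has no resource spec.", assetName);
  String actualType = asset.getResourceSpec().getType();
  checkArgument(
      expectedType.name().equals(actualType),
      "Asset %s is of type %s, expected %s.", assetName, actualType, expectedType.name());
  // For STORAGE_BUCKET assets the resource spec name is the bucket name; for BIGQUERY_DATASET
  // assets it is the dataset resource, which parseDatasetUrn above turns into a DatasetId.
  return asset.getResourceSpec().getName();
}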
Use of com.google.cloud.teleport.v2.clients.DataplexClient in project DataflowTemplates by GoogleCloudPlatform.
Class DataplexFileFormatConversion, method run.
/**
* Runs the pipeline to completion with the specified options.
*
* @return The pipeline result.
*/
public static PipelineResult run(
    Pipeline pipeline, FileFormatConversionOptions options, DataplexClient dataplex,
    OutputPathProvider outputPathProvider) throws IOException {
  boolean isInputAsset = ASSET_PATTERN.matcher(options.getInputAssetOrEntitiesList()).matches();
  if (!isInputAsset && !ENTITIES_PATTERN.matcher(options.getInputAssetOrEntitiesList()).matches()) {
    throw new IllegalArgumentException("Either input asset or input entities list must be provided");
  }

  GoogleCloudDataplexV1Asset outputAsset = dataplex.getAsset(options.getOutputAsset());
  if (outputAsset == null
      || outputAsset.getResourceSpec() == null
      || !DataplexAssetResourceSpec.STORAGE_BUCKET.name()
          .equals(outputAsset.getResourceSpec().getType())
      || outputAsset.getResourceSpec().getName() == null) {
    throw new IllegalArgumentException(
        "Output asset must be an existing asset with resource spec name being a GCS bucket and"
            + " resource spec type of " + DataplexAssetResourceSpec.STORAGE_BUCKET.name());
  }
  String outputBucket = outputAsset.getResourceSpec().getName();

  Predicate<String> inputFilesFilter;
  switch (options.getWriteDisposition()) {
    case OVERWRITE:
      inputFilesFilter = inputFilePath -> true;
      break;
    case FAIL:
      Set<String> outputFilePaths = getAllOutputFilePaths(outputBucket);
      inputFilesFilter =
          inputFilePath -> {
            if (outputFilePaths.contains(
                inputFilePathToOutputFilePath(
                    outputPathProvider, inputFilePath, outputBucket, options.getOutputFileFormat()))) {
              throw new WriteDispositionException(
                  String.format(
                      "The file %s already exists in the output asset bucket: %s",
                      inputFilePath, outputBucket));
            }
            return true;
          };
      break;
    case SKIP:
      // A separate local here keeps each captured set effectively final for its lambda.
      Set<String> existingOutputFilePaths = getAllOutputFilePaths(outputBucket);
      inputFilesFilter =
          inputFilePath ->
              !existingOutputFilePaths.contains(
                  inputFilePathToOutputFilePath(
                      outputPathProvider, inputFilePath, outputBucket, options.getOutputFileFormat()));
      break;
    default:
      throw new UnsupportedOperationException(
          "Unsupported existing file behaviour: " + options.getWriteDisposition());
  }

  ImmutableList<GoogleCloudDataplexV1Entity> entities =
      isInputAsset
          ? dataplex.getCloudStorageEntities(options.getInputAssetOrEntitiesList())
          : dataplex.getEntities(
              Splitter.on(',').trimResults().splitToList(options.getInputAssetOrEntitiesList()));

  boolean convertingFiles = false;
  for (GoogleCloudDataplexV1Entity entity : entities) {
    ImmutableList<GoogleCloudDataplexV1Partition> partitions = dataplex.getPartitions(entity.getName());
    if (partitions.isEmpty()) {
      String outputPath = outputPathProvider.outputPathFrom(entity.getDataPath(), outputBucket);
      Iterator<String> inputFilePaths =
          getFilesFromFilePattern(entityToFileSpec(entity)).filter(inputFilesFilter).iterator();
      convertingFiles = inputFilePaths.hasNext();
      inputFilePaths.forEachRemaining(
          inputFilePath ->
              pipeline.apply(
                  "Convert " + shortenDataplexName(entity.getName()),
                  new ConvertFiles(entity, inputFilePath, options, outputPath)));
    } else {
      for (GoogleCloudDataplexV1Partition partition : partitions) {
        String outputPath = outputPathProvider.outputPathFrom(partition.getLocation(), outputBucket);
        Iterator<String> inputFilePaths =
            getFilesFromFilePattern(partitionToFileSpec(partition)).filter(inputFilesFilter).iterator();
        convertingFiles = inputFilePaths.hasNext();
        inputFilePaths.forEachRemaining(
            inputFilePath ->
                pipeline.apply(
                    "Convert " + shortenDataplexName(partition.getName()),
                    new ConvertFiles(entity, inputFilePath, options, outputPath)));
      }
    }
  }

  if (!convertingFiles) {
    pipeline.apply("Nothing to convert", new NoopTransform());
  }
  return pipeline.run();
}
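For orientation, here is a hedged sketch of how run could be wired up by a caller, assuming OutputPathProvider is the functional interface whose outputPathFrom(dataPath, outputBucket) method is used above, and borrowing the DefaultDataplexClient factory seen in the DataplexJdbcIngestion snippet below; getGcpCredential on these options and the path-rewriting lambda are illustrative assumptions:

public static void main(String[] args) throws IOException {
  FileFormatConversionOptions options =
      PipelineOptionsFactory.fromArgs(args).withValidation().as(FileFormatConversionOptions.class);
  Pipeline pipeline = Pipeline.create(options);
  // Assumption: the options expose a credential getter like the JDBC ingestion template's.
  DataplexClient dataplex = DefaultDataplexClient.withDefaultClient(options.getGcpCredential());
  // Re-root every input data path under the resolved output bucket, keeping the relative layout.
  OutputPathProvider outputPathProvider =
      (dataPath, outputBucket) -> dataPath.replaceFirst("^gs://[^/]+", "gs://" + outputBucket);
  DataplexFileFormatConversion.run(pipeline, options, dataplex, outputPathProvider);
}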
Use of com.google.cloud.teleport.v2.clients.DataplexClient in project DataflowTemplates by GoogleCloudPlatform.
Class DataplexJdbcIngestion, method main.
/**
* Main entry point for pipeline execution.
*
* @param args Command line arguments to the pipeline.
*/
public static void main(String[] args) throws IOException {
  DataplexJdbcIngestionOptions options =
      PipelineOptionsFactory.fromArgs(args).withValidation().as(DataplexJdbcIngestionOptions.class);
  Pipeline pipeline = Pipeline.create(options);

  DataplexClient dataplexClient = DefaultDataplexClient.withDefaultClient(options.getGcpCredential());
  String assetName = options.getOutputAsset();
  GoogleCloudDataplexV1Asset asset = resolveAsset(assetName, dataplexClient);
  DynamicDataSourceConfiguration dataSourceConfig = configDataSource(options);

  String assetType = asset.getResourceSpec().getType();
  if (DataplexAssetResourceSpec.BIGQUERY_DATASET.name().equals(assetType)) {
    buildBigQueryPipeline(pipeline, options, dataSourceConfig);
  } else if (DataplexAssetResourceSpec.STORAGE_BUCKET.name().equals(assetType)) {
    String targetRootPath =
        "gs://" + asset.getResourceSpec().getName() + "/" + options.getOutputTable();
    buildGcsPipeline(pipeline, options, dataSourceConfig, targetRootPath);
  } else {
    throw new IllegalArgumentException(
        String.format(
            "Asset %s is of type %s. Only %s and %s are supported.",
            assetName,
            assetType,
            DataplexAssetResourceSpec.BIGQUERY_DATASET.name(),
            DataplexAssetResourceSpec.STORAGE_BUCKET.name()));
  }
  pipeline.run();
}
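Note that resolveAsset here takes (assetName, dataplexClient) and returns the asset itself, unlike the name-returning helper in the first snippet. A hedged sketch under that assumption (validation details are guesses, not the template's actual code):

private static GoogleCloudDataplexV1Asset resolveAsset(String assetName, DataplexClient dataplex)
    throws IOException {
  // Hypothetical sketch: fetch the output asset and make sure the resource spec fields the
  // caller reads (resourceSpec.type and resourceSpec.name) are present.
  GoogleCloudDataplexV1Asset asset = dataplex.getAsset(assetName);
  if (asset == null || asset.getResourceSpec() == null || asset.getResourceSpec().getName() == null) {
    throw new IllegalArgumentException("Asset " + assetName + " not found or has no resource spec.");
  }
  return asset;
}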
Use of com.google.cloud.teleport.v2.clients.DataplexClient in project DataflowTemplates by GoogleCloudPlatform.
Class DataplexFileFormatConversionTest, method testAssetWithEntityParquetToAvroE2E.
/**
* Tests Parquet to Avro conversion for an asset with entity.
*/
@Test
@Category(NeedsRunner.class)
public void testAssetWithEntityParquetToAvroE2E() throws IOException {
  DataplexClient dataplex = mock(DataplexClient.class);
  when(dataplex.getEntities(ImmutableList.of(entity4.getName()))).thenReturn(ImmutableList.of(entity4));
  when(dataplex.getPartitions(entity4.getName())).thenReturn(ImmutableList.of());
  when(dataplex.getAsset(outputAsset.getName())).thenReturn(outputAsset);

  FileFormatConversionOptions options =
      PipelineOptionsFactory.create().as(FileFormatConversionOptions.class);
  options.setInputAssetOrEntitiesList(entity4.getName());
  options.setOutputFileFormat(FileFormatOptions.AVRO);
  options.setOutputAsset(outputAsset.getName());

  DataplexFileFormatConversion.run(
      mainPipeline, options, dataplex, DataplexFileFormatConversionTest::outputPathProvider);

  PCollection<GenericRecord> readAvroFile =
      readPipeline.apply(
          "ReadAvroFile",
          AvroConverters.ReadAvroFile.newBuilder()
              .withInputFileSpec(temporaryFolder.getRoot().getAbsolutePath() + "/**/*.avro")
              .withSerializedSchema(EXPECT_SERIALIZED_AVRO_SCHEMA)
              .build());
  PAssert.that(readAvroFile).containsInAnyOrder(EXPECTED_GENERIC_RECORDS);
  readPipeline.run();
}
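The outputPathProvider method reference passed to run is part of the test class but not shown here. A plausible sketch, assuming the helper re-roots each data path under the JUnit temporary folder so the read pipeline above can find the converted files, and that temporaryFolder is accessible statically (e.g. a @ClassRule); the real test helper may differ:

private static String outputPathProvider(String inputDataPath, String outputBucket) {
  // Hypothetical: drop the original gs://bucket prefix and re-root the rest under the temp folder.
  String relativePath = inputDataPath.replaceFirst("^gs://[^/]+/?", "");
  return temporaryFolder.getRoot().getAbsolutePath() + "/" + relativePath;
}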
Use of com.google.cloud.teleport.v2.clients.DataplexClient in project DataflowTemplates by GoogleCloudPlatform.
Class DataplexFileFormatConversionTest, method testAssetWithEntityAvroToParquetE2E.
/**
* Tests Avro to Parquet conversion for an asset with entity.
*/
@Test
@Category(NeedsRunner.class)
public void testAssetWithEntityAvroToParquetE2E() throws IOException {
  DataplexClient dataplex = mock(DataplexClient.class);
  when(dataplex.getEntities(ImmutableList.of(entity3.getName()))).thenReturn(ImmutableList.of(entity3));
  when(dataplex.getPartitions(entity3.getName())).thenReturn(ImmutableList.of());
  when(dataplex.getAsset(outputAsset.getName())).thenReturn(outputAsset);

  FileFormatConversionOptions options =
      PipelineOptionsFactory.create().as(FileFormatConversionOptions.class);
  options.setInputAssetOrEntitiesList(entity3.getName());
  options.setOutputFileFormat(FileFormatOptions.PARQUET);
  options.setOutputAsset(outputAsset.getName());

  DataplexFileFormatConversion.run(
      mainPipeline, options, dataplex, DataplexFileFormatConversionTest::outputPathProvider);

  PCollection<GenericRecord> readParquetFile =
      readPipeline.apply(
          "ReadParquetFile",
          ParquetConverters.ReadParquetFile.newBuilder()
              .withInputFileSpec(temporaryFolder.getRoot().getAbsolutePath() + "/**/*.parquet")
              .withSerializedSchema(EXPECT_SERIALIZED_AVRO_SCHEMA)
              .build());
  PAssert.that(readParquetFile).containsInAnyOrder(EXPECTED_GENERIC_RECORDS);
  readPipeline.run();
}
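Fixtures such as entity3, entity4 and outputAsset are created elsewhere in the test class. A hedged sketch of how such fixtures might be built with the generated Dataplex model classes; the names, paths and the GoogleCloudDataplexV1AssetResourceSpec setters used are illustrative assumptions, not the test's actual setup:

// Illustrative fixture setup only; the real test class wires these differently.
private static final GoogleCloudDataplexV1Asset outputAsset =
    new GoogleCloudDataplexV1Asset()
        .setName("projects/p/locations/us-central1/lakes/l/zones/z/assets/out-asset")
        .setResourceSpec(
            new GoogleCloudDataplexV1AssetResourceSpec()
                .setType(DataplexAssetResourceSpec.STORAGE_BUCKET.name())
                .setName("output-bucket"));

private static final GoogleCloudDataplexV1Entity entity3 =
    new GoogleCloudDataplexV1Entity()
        .setName("projects/p/locations/us-central1/lakes/l/zones/z/entities/entity3")
        .setDataPath("gs://input-bucket/entity3");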