Search in sources :

Example 1 with Entities

use of com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities in project DataflowTemplates by GoogleCloudPlatform.

Usage in the class DataplexFileFormatConversion, method run.

/**
 * Runs the pipeline to completion with the specified options.
 *
 * <p>Validates the input (an asset name or a comma-separated entity list) and the output asset
 * (which must be an existing asset of type STORAGE_BUCKET), then schedules one ConvertFiles
 * transform per input file that passes the write-disposition filter. If no file needs
 * converting, a no-op transform is applied so the pipeline still produces a result.
 *
 * @param pipeline the pipeline to attach transforms to and run
 * @param options conversion options: input asset/entities, output asset, file format, disposition
 * @param dataplex client used to look up the output asset, input entities and their partitions
 * @param outputPathProvider maps an input data path to an output path under the target bucket
 * @return The pipeline result.
 * @throws IOException if a Dataplex metadata lookup fails
 */
public static PipelineResult run(Pipeline pipeline, FileFormatConversionOptions options, DataplexClient dataplex, OutputPathProvider outputPathProvider) throws IOException {
    boolean isInputAsset = ASSET_PATTERN.matcher(options.getInputAssetOrEntitiesList()).matches();
    if (!isInputAsset && !ENTITIES_PATTERN.matcher(options.getInputAssetOrEntitiesList()).matches()) {
        throw new IllegalArgumentException("Either input asset or input entities list must be provided");
    }
    GoogleCloudDataplexV1Asset outputAsset = dataplex.getAsset(options.getOutputAsset());
    if (outputAsset == null || outputAsset.getResourceSpec() == null || !DataplexAssetResourceSpec.STORAGE_BUCKET.name().equals(outputAsset.getResourceSpec().getType()) || outputAsset.getResourceSpec().getName() == null) {
        throw new IllegalArgumentException("Output asset must be an existing asset with resource spec name being a GCS bucket and" + " resource spec type of " + DataplexAssetResourceSpec.STORAGE_BUCKET.name());
    }
    String outputBucket = outputAsset.getResourceSpec().getName();
    // Decides, per input file path, whether the file should be converted.
    Predicate<String> inputFilesFilter;
    switch (options.getWriteDisposition()) {
        case OVERWRITE:
            inputFilesFilter = inputFilePath -> true;
            break;
        case FAIL:
            {
                // Braced block so outputFilePaths is scoped to this case and effectively
                // final for capture by the lambda (a shared mutable local declared across
                // cases would not be capturable).
                final Set<String> outputFilePaths = getAllOutputFilePaths(outputBucket);
                inputFilesFilter = inputFilePath -> {
                    if (outputFilePaths.contains(inputFilePathToOutputFilePath(outputPathProvider, inputFilePath, outputBucket, options.getOutputFileFormat()))) {
                        throw new WriteDispositionException(String.format("The file %s already exists in the output asset bucket: %s", inputFilePath, outputBucket));
                    }
                    return true;
                };
            }
            break;
        case SKIP:
            {
                final Set<String> outputFilePaths = getAllOutputFilePaths(outputBucket);
                inputFilesFilter = inputFilePath -> !outputFilePaths.contains(inputFilePathToOutputFilePath(outputPathProvider, inputFilePath, outputBucket, options.getOutputFileFormat()));
            }
            break;
        default:
            throw new UnsupportedOperationException("Unsupported existing file behaviour: " + options.getWriteDisposition());
    }
    ImmutableList<GoogleCloudDataplexV1Entity> entities = isInputAsset ? dataplex.getCloudStorageEntities(options.getInputAssetOrEntitiesList()) : dataplex.getEntities(Splitter.on(',').trimResults().splitToList(options.getInputAssetOrEntitiesList()));
    boolean convertingFiles = false;
    for (GoogleCloudDataplexV1Entity entity : entities) {
        ImmutableList<GoogleCloudDataplexV1Partition> partitions = dataplex.getPartitions(entity.getName());
        if (partitions.isEmpty()) {
            String outputPath = outputPathProvider.outputPathFrom(entity.getDataPath(), outputBucket);
            Iterator<String> inputFilePaths = getFilesFromFilePattern(entityToFileSpec(entity)).filter(inputFilesFilter).iterator();
            // Bug fix: accumulate with |= — plain assignment let a later entity with no
            // matching files clobber the flag set by an earlier non-empty one, wrongly
            // triggering the "Nothing to convert" branch alongside real converts.
            convertingFiles |= inputFilePaths.hasNext();
            inputFilePaths.forEachRemaining(inputFilePath -> pipeline.apply("Convert " + shortenDataplexName(entity.getName()), new ConvertFiles(entity, inputFilePath, options, outputPath)));
        } else {
            for (GoogleCloudDataplexV1Partition partition : partitions) {
                String outputPath = outputPathProvider.outputPathFrom(partition.getLocation(), outputBucket);
                Iterator<String> inputFilePaths = getFilesFromFilePattern(partitionToFileSpec(partition)).filter(inputFilesFilter).iterator();
                // Same |= fix as above, applied to the partitioned branch.
                convertingFiles |= inputFilePaths.hasNext();
                inputFilePaths.forEachRemaining(inputFilePath -> pipeline.apply("Convert " + shortenDataplexName(partition.getName()), new ConvertFiles(entity, inputFilePath, options, outputPath)));
            }
        }
    }
    if (!convertingFiles) {
        // Keeps the pipeline non-empty so run() still returns a usable result.
        pipeline.apply("Nothing to convert", new NoopTransform());
    }
    return pipeline.run();
}
Also used : NoopTransform(com.google.cloud.teleport.v2.transforms.NoopTransform) GoogleCloudDataplexV1Partition(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Partition) WriteDispositionException(com.google.cloud.teleport.v2.utils.WriteDisposition.WriteDispositionException) GoogleCloudDataplexV1Asset(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Asset) GoogleCloudDataplexV1Entity(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Entity)

Example 2 with Entities

use of com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities in project DataflowTemplates by GoogleCloudPlatform.

Usage in the class DefaultDataplexClient, method getEntities.

@Override
public ImmutableList<GoogleCloudDataplexV1Entity> getEntities(List<String> entityNames) throws IOException {
    // Metadata "entities" resource nested under projects/locations/lakes/zones.
    Entities entitiesApi = client.projects().locations().lakes().zones().entities();
    ImmutableList.Builder<GoogleCloudDataplexV1Entity> fetched = ImmutableList.builder();
    for (String name : entityNames) {
        // Request the FULL view so the returned entity carries its complete metadata.
        GoogleCloudDataplexV1Entity entity = entitiesApi.get(name).setView(GetEntityRequestEntityView.FULL.name()).execute();
        fetched.add(entity);
    }
    return fetched.build();
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) GoogleCloudDataplexV1Entity(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Entity) Entities(com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities)

Example 3 with Entities

use of com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities in project DataflowTemplates by GoogleCloudPlatform.

Usage in the class DefaultDataplexClient, method getEntitiesUnderAssetStream.

/**
 * Gets a stream of all entities under {@code assetName}.
 */
private Stream<GoogleCloudDataplexV1Entity> getEntitiesUnderAssetStream(String assetName) throws IOException {
    Entities entitiesApi = client.projects().locations().lakes().zones().entities();
    String zone = getZoneFromAsset(assetName);
    Stream<GoogleCloudDataplexV1Entity> combined = Stream.empty();
    String pageToken = null;
    // The list response is paginated (default page size 10); keep fetching while a
    // next-page token is present, concatenating each page's matching entities.
    do {
        GoogleCloudDataplexV1ListEntitiesResponse page =
            pageToken == null
                ? entitiesApi.list(zone).execute()
                : entitiesApi.list(zone).setPageToken(pageToken).execute();
        combined = Stream.concat(combined, getEntitiesUnderAssetForPage(page, assetName));
        pageToken = page.getNextPageToken();
    } while (pageToken != null);
    return combined;
}
Also used : GoogleCloudDataplexV1ListEntitiesResponse(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1ListEntitiesResponse) GoogleCloudDataplexV1Entity(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Entity) Entities(com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities)

Example 4 with Entities

use of com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities in project DataflowTemplates by GoogleCloudPlatform.

Usage in the class DefaultDataplexClient, method createPartitions.

/**
 * Handles creation of partitions. Each partition is logged after being created.
 */
private void createPartitions(ImmutableMap<String, ImmutableList<PartitionMetadata>> entityNameToPartition) throws IOException {
    for (Map.Entry<String, ImmutableList<PartitionMetadata>> entry : entityNameToPartition.entrySet()) {
        String entityName = entry.getKey();
        // Convert all metadata up front (preserving the original fail-before-any-create
        // behavior if a conversion throws), then issue one create call per partition.
        ImmutableList<GoogleCloudDataplexV1Partition> toCreate =
            entry.getValue().stream().map(PartitionMetadata::toDataplexPartition).collect(toImmutableList());
        for (GoogleCloudDataplexV1Partition partition : toCreate) {
            GoogleCloudDataplexV1Partition created =
                client.projects().locations().lakes().zones().entities().partitions().create(entityName, partition).execute();
            LOG.info("Created partition '{}' under entity '{}'", created.getName(), entityName);
        }
    }
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) GoogleCloudDataplexV1Partition(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Partition) HashMap(java.util.HashMap) Collectors.toMap(java.util.stream.Collectors.toMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)

Example 5 with Entities

use of com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities in project DataflowTemplates by GoogleCloudPlatform.

Usage in the class DefaultDataplexClient, method getPartitions.

@Override
public ImmutableList<GoogleCloudDataplexV1Partition> getPartitions(String entityName) throws IOException {
    ImmutableList.Builder<GoogleCloudDataplexV1Partition> result = ImmutableList.builder();
    Partitions partitions = client.projects().locations().lakes().zones().entities().partitions();
    GoogleCloudDataplexV1ListPartitionsResponse response = partitions.list(entityName).execute();
    if (response.getPartitions() == null) {
        // No partitions at all (the API omits the field rather than sending an empty list).
        return ImmutableList.of();
    }
    result.addAll(response.getPartitions());
    // The result of the list is paginated with the default page size being 10.
    while (response.getNextPageToken() != null) {
        response = partitions.list(entityName).setPageToken(response.getNextPageToken()).execute();
        // Bug fix: later pages can also omit the partitions field; the unguarded
        // addAll(response.getPartitions()) would throw NullPointerException here.
        if (response.getPartitions() != null) {
            result.addAll(response.getPartitions());
        }
    }
    return result.build();
}
Also used : Partitions(com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities.Partitions) ImmutableList(com.google.common.collect.ImmutableList) ImmutableList.toImmutableList(com.google.common.collect.ImmutableList.toImmutableList) GoogleCloudDataplexV1ListPartitionsResponse(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1ListPartitionsResponse) GoogleCloudDataplexV1Partition(com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Partition)

Aggregations

GoogleCloudDataplexV1Entity (com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Entity)7 CloudDataplex (com.google.api.services.dataplex.v1.CloudDataplex)4 GoogleCloudDataplexV1Partition (com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Partition)4 Test (org.junit.Test)4 Entities (com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities)3 ImmutableList (com.google.common.collect.ImmutableList)3 ImmutableList.toImmutableList (com.google.common.collect.ImmutableList.toImmutableList)3 HashMap (java.util.HashMap)3 Partitions (com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Entities.Partitions)2 GoogleCloudDataplexV1ListEntitiesResponse (com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1ListEntitiesResponse)2 GoogleCloudDataplexV1ListPartitionsResponse (com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1ListPartitionsResponse)2 EntityMetadata (com.google.cloud.teleport.v2.values.EntityMetadata)2 ImmutableMap (com.google.common.collect.ImmutableMap)2 Map (java.util.Map)2 Collectors.toMap (java.util.stream.Collectors.toMap)2 Zones (com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones)1 Assets (com.google.api.services.dataplex.v1.CloudDataplex.Projects.Locations.Lakes.Zones.Assets)1 GoogleCloudDataplexV1Asset (com.google.api.services.dataplex.v1.model.GoogleCloudDataplexV1Asset)1 NoopTransform (com.google.cloud.teleport.v2.transforms.NoopTransform)1 WriteDispositionException (com.google.cloud.teleport.v2.utils.WriteDisposition.WriteDispositionException)1