Example usage of co.cask.cdap.data2.metadata.dataset.Metadata in the project cdap by caskdata: the class PartitionedFileSetDataset, method onSuccess.
@Override
public void onSuccess() throws DataSetException {
  final String outputPath = FileSetArguments.getOutputPath(runtimeArguments);
  if (outputPath == null) {
    // No output path was configured; there is nothing we can register here.
    return;
  }
  // It's possible that there is no output key when the DynamicPartitioner is used;
  // in that case DynamicPartitioningOutputFormat is responsible for registering the
  // partitions and their metadata itself.
  final PartitionKey outputKey =
    PartitionedFileSetArguments.getOutputPartitionKey(runtimeArguments, getPartitioning());
  if (outputKey != null) {
    addPartition(outputKey, outputPath,
                 PartitionedFileSetArguments.getOutputPartitionMetadata(runtimeArguments),
                 true, false);
  }
  // FileSetDataset#onSuccess is currently a no-op, but delegate anyway in case it
  // gains behavior in the future.
  ((FileSetDataset) files).onSuccess();
}
Example usage of co.cask.cdap.data2.metadata.dataset.Metadata in the project cdap by caskdata: the class DatasetInstanceService, method executeAdmin.
/**
 * Executes an admin operation on a dataset instance.
 *
 * @param instance the instance to execute the admin operation on
 * @param method the type of admin operation to execute
 * @return the {@link DatasetAdminOpResponse} from the HTTP handler
 * @throws NamespaceNotFoundException if the requested namespace was not found
 * @throws IOException if there was a problem in checking if the namespace exists over HTTP
 * @throws UnauthorizedException if perimeter security and authorization are enabled, and the current user does not
 *                               have -
 * <ol>
 *   <li>{@link Action#ADMIN} privileges on the #instance (for "drop" or "truncate") </li>
 *   <li>any privileges on the #instance (for "exists")</li>
 * </ol>
 */
DatasetAdminOpResponse executeAdmin(DatasetId instance, String method) throws Exception {
  ensureNamespaceExists(instance.getParent());
  Object result = null;
  // NOTE: one cannot directly call create and drop, instead this should be called thru
  // POST/DELETE @ /data/datasets/{instance-id}. Because we must create/drop metadata for these at same time
  Principal principal = authenticationContext.getPrincipal();
  switch (method) {
    case "exists":
      // ensure the user has some privilege on the dataset instance if it is not a system dataset
      if (!DatasetsUtil.isSystemDatasetInUserNamespace(instance)) {
        AuthorizationUtil.ensureAccess(instance, authorizationEnforcer, principal);
      }
      result = opExecutorClient.exists(instance);
      break;
    case "truncate":
      enforceAdminAndEnsureExists(instance, principal);
      opExecutorClient.truncate(instance);
      publishAudit(instance, AuditType.TRUNCATE);
      break;
    case "upgrade":
      enforceAdminAndEnsureExists(instance, principal);
      opExecutorClient.upgrade(instance);
      publishAudit(instance, AuditType.UPDATE);
      break;
    default:
      throw new HandlerException(HttpResponseStatus.NOT_FOUND, "Invalid admin operation: " + method);
  }
  return new DatasetAdminOpResponse(result, null);
}

/**
 * Enforces {@link Action#ADMIN} on {@code instance} (unless it is a system dataset in a user
 * namespace) and then verifies the instance exists. Shared by the "truncate" and "upgrade"
 * admin operations, which have identical authorization and existence requirements.
 *
 * @throws DatasetNotFoundException if the instance does not exist
 */
private void enforceAdminAndEnsureExists(DatasetId instance, Principal principal) throws Exception {
  if (!DatasetsUtil.isSystemDatasetInUserNamespace(instance)) {
    authorizationEnforcer.enforce(instance, principal, Action.ADMIN);
  }
  if (instanceManager.get(instance) == null) {
    throw new DatasetNotFoundException(instance);
  }
}
Example usage of co.cask.cdap.data2.metadata.dataset.Metadata in the project cdap by caskdata: the class DatasetInstanceManager, method add.
/**
 * Adds metadata for a dataset instance.
 *
 * @param namespaceId the {@link NamespaceId} in which to register the dataset instance
 * @param spec the {@link DatasetSpecification} describing the dataset instance to add
 */
public void add(final NamespaceId namespaceId, final DatasetSpecification spec) {
  final DatasetInstanceMDS mds = datasetCache.getDataset(DatasetMetaTableUtil.INSTANCE_TABLE_NAME);
  // Persist the spec transactionally via the instance metadata table.
  txExecutorFactory.createExecutor(datasetCache).executeUnchecked(() -> mds.write(namespaceId, spec));
}
Example usage of co.cask.cdap.data2.metadata.dataset.Metadata in the project cdap by caskdata: the class DatasetAdminService, method writeSystemMetadata.
/**
 * Writes system metadata for a user dataset instance. System datasets are skipped entirely.
 * The dataset is instantiated (under the given user's identity) only so that richer metadata
 * can be derived from it; instantiation failure is logged and tolerated.
 */
private void writeSystemMetadata(DatasetId datasetInstanceId, final DatasetSpecification spec,
                                 DatasetProperties props, final DatasetTypeMeta typeMeta,
                                 final DatasetType type, final DatasetContext context,
                                 boolean existing, UserGroupInformation ugi) throws IOException {
  if (!DatasetsUtil.isUserDataset(datasetInstanceId)) {
    // System metadata is only maintained for user datasets.
    return;
  }
  Dataset dataset = null;
  try {
    try {
      dataset = ImpersonationUtils.doAs(
        ugi, () -> type.getDataset(context, spec, DatasetDefinition.NO_ARGUMENTS));
    } catch (Exception e) {
      LOG.warn("Exception while instantiating Dataset {}", datasetInstanceId, e);
    }
    // Even if instantiation above failed (dataset stays null), record whatever
    // system metadata can still be derived.
    SystemMetadataWriter systemMetadataWriter = existing
      ? new DatasetSystemMetadataWriter(metadataStore, datasetInstanceId, props,
                                        dataset, typeMeta.getName(), spec.getDescription())
      : new DatasetSystemMetadataWriter(metadataStore, datasetInstanceId, props,
                                        System.currentTimeMillis(), dataset,
                                        typeMeta.getName(), spec.getDescription());
    systemMetadataWriter.write();
  } finally {
    if (dataset != null) {
      dataset.close();
    }
  }
}
Example usage of co.cask.cdap.data2.metadata.dataset.Metadata in the project cdap by caskdata: the class ArtifactRepository, method writeSystemMetadata.
/** Records system-level metadata for the given artifact. */
private void writeSystemMetadata(co.cask.cdap.proto.id.ArtifactId artifactId, ArtifactInfo artifactInfo) {
  new ArtifactSystemMetadataWriter(metadataStore, artifactId, artifactInfo).write();
}
Aggregations: the usage examples above were collected automatically from the cdap codebase.