Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class AppMetadataStore, method addWorkflowNodeState:
private void addWorkflowNodeState(ProgramRunId programRunId, Map<String, String> systemArgs,
                                  ProgramRunStatus status, @Nullable BasicThrowable failureCause,
                                  byte[] sourceId) {
  String workflowNodeId = systemArgs.get(ProgramOptionConstants.WORKFLOW_NODE_ID);
  String workflowName = systemArgs.get(ProgramOptionConstants.WORKFLOW_NAME);
  String workflowRun = systemArgs.get(ProgramOptionConstants.WORKFLOW_RUN_ID);
  ApplicationId appId = programRunId.getParent().getParent();
  ProgramRunId workflowRunId = appId.workflow(workflowName).run(workflowRun);
  // Node states will be stored with the following key:
  // workflowNodeState.namespace.app.WORKFLOW.workflowName.workflowRun.workflowNodeId
  MDSKey key = getProgramKeyBuilder(TYPE_WORKFLOW_NODE_STATE, workflowRunId).add(workflowNodeId).build();
  WorkflowNodeStateDetail nodeStateDetail =
    new WorkflowNodeStateDetail(workflowNodeId, ProgramRunStatus.toNodeStatus(status),
                                programRunId.getRun(), failureCause);
  write(key, nodeStateDetail);
  // Get the run record of the Workflow that started this program
  key = getProgramKeyBuilder(TYPE_RUN_RECORD_STARTED, workflowRunId).build();
  RunRecordMeta record = get(key, RunRecordMeta.class);
  if (record != null) {
    // Update the parent Workflow run record by adding the node id and program run id to the properties
    Map<String, String> properties = new HashMap<>(record.getProperties());
    properties.put(workflowNodeId, programRunId.getRun());
    write(key, RunRecordMeta.builder(record).setProperties(properties).setSourceId(sourceId).build());
  }
}
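To make the key layout in the comment concrete, here is a hypothetical caller-side sketch; the namespace, app, workflow, and run ids below are invented purely for illustration.

// Hypothetical ids, used only to illustrate the node-state key layout above.
ProgramRunId workflowRunId = new NamespaceId("default")
  .app("PurchaseApp")
  .workflow("PurchaseWorkflow")
  .run("4f6b8a2e-run");
// The resulting row key would then read, logically:
// workflowNodeState.default.PurchaseApp.WORKFLOW.PurchaseWorkflow.4f6b8a2e-run.<workflowNodeId>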
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class DatasetTypeService, method addModule:
/**
 * Adds a new {@link DatasetModule}.
 *
 * @param datasetModuleId the {@link DatasetModuleId} for the module to be added
 * @param className the module class name specified in the HTTP header
 * @param forceUpdate if true, an update will be allowed even if there are conflicts with other modules, or if
 *                    removal of a type would break other modules' dependencies
 * @return a {@link BodyConsumer} to upload the module jar in chunks
 * @throws NotFoundException if the namespace in which the module is being added is not found
 * @throws IOException if there are issues while performing I/O like creating temporary directories, moving/unpacking
 *                     module jar files
 * @throws DatasetModuleConflictException if #forceUpdate is {@code false}, and there are conflicts with other modules
 */
BodyConsumer addModule(final DatasetModuleId datasetModuleId, final String className,
                       final boolean forceUpdate) throws Exception {
  NamespaceId namespaceId = datasetModuleId.getParent();
  final Principal principal = authenticationContext.getPrincipal();
  // enforce that the principal has WRITE access on the namespace
  authorizationEnforcer.enforce(namespaceId, principal, Action.WRITE);
  if (NamespaceId.SYSTEM.equals(namespaceId)) {
    throw new UnauthorizedException(String.format("Cannot add module '%s' to '%s' namespace.",
                                                  datasetModuleId.getModule(), datasetModuleId.getNamespace()));
  }
  ensureNamespaceExists(namespaceId);
  // It is now determined that a new dataset module will be deployed. First grant privileges, then deploy the module.
  // If creation fails, revoke the granted privileges. This ensures that just like delete, there may be orphaned
  // privileges in rare scenarios, but there can never be orphaned datasets.
  // If the module previously existed and was deleted, but revoking privileges somehow failed, there may be orphaned
  // privileges for the module. Revoke them first, so no users unintentionally get privileges on the dataset.
  revokeAllPrivilegesOnModule(datasetModuleId);
  grantAllPrivilegesOnModule(datasetModuleId, principal);
  try {
    return createModuleConsumer(datasetModuleId, className, forceUpdate, principal);
  } catch (Exception e) {
    revokeAllPrivilegesOnModule(datasetModuleId);
    throw e;
  }
}
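The grant-then-create sequence above (revoke stale privileges, grant, create, revoke again on failure) is a reusable idiom. A minimal sketch, with hypothetical revokeAll/grantAll/create helpers and a ResourceId type standing in for the CDAP-specific calls:

// Sketch of the grant-then-create idiom used in addModule() above.
// revokeAll, grantAll, create, and ResourceId are hypothetical stand-ins.
void addResource(ResourceId id, Principal principal) throws Exception {
  revokeAll(id);             // clear privileges possibly orphaned by an earlier failed delete
  grantAll(id, principal);   // grant first, so the resource never exists without an owner
  try {
    create(id);
  } catch (Exception e) {
    revokeAll(id);           // creation failed: roll back the grants
    throw e;
  }
}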
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class AppMetadataStore, method addWorkflowNodeState:
private void addWorkflowNodeState(ProgramId programId, String pid, Map<String, String> systemArgs,
                                  ProgramRunStatus status, @Nullable BasicThrowable failureCause) {
  String workflowNodeId = systemArgs.get(ProgramOptionConstants.WORKFLOW_NODE_ID);
  String workflowName = systemArgs.get(ProgramOptionConstants.WORKFLOW_NAME);
  String workflowRun = systemArgs.get(ProgramOptionConstants.WORKFLOW_RUN_ID);
  ApplicationId appId = Ids.namespace(programId.getNamespace()).app(programId.getApplication());
  ProgramRunId workflowRunId = appId.workflow(workflowName).run(workflowRun);
  // Node states will be stored with the following key:
  // workflowNodeState.namespace.app.WORKFLOW.workflowName.workflowRun.workflowNodeId
  MDSKey key = getProgramKeyBuilder(TYPE_WORKFLOW_NODE_STATE, workflowRunId.getParent())
    .add(workflowRun).add(workflowNodeId).build();
  WorkflowNodeStateDetail nodeStateDetail =
    new WorkflowNodeStateDetail(workflowNodeId, ProgramRunStatus.toNodeStatus(status), pid, failureCause);
  write(key, nodeStateDetail);
  // Get the run record of the Workflow that started this program
  key = getProgramKeyBuilder(TYPE_RUN_RECORD_STARTED, workflowRunId.getParent())
    .add(workflowRunId.getRun()).build();
  RunRecordMeta record = get(key, RunRecordMeta.class);
  if (record != null) {
    // Update the parent Workflow run record by adding the node id and program run id to the properties
    Map<String, String> properties = record.getProperties();
    properties.put(workflowNodeId, pid);
    write(key, new RunRecordMeta(record, properties));
  }
}
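This appears to be an earlier variant of the first addWorkflowNodeState shown above. One difference worth noting: it mutates the map returned by record.getProperties() directly, which only works as long as that map is mutable. The first variant defends against this by copying before mutating:

// Defensive copy, as in the first variant above: if getProperties() ever
// returned an unmodifiable map, calling put() on it would throw
// UnsupportedOperationException.
Map<String, String> properties = new HashMap<>(record.getProperties());
properties.put(workflowNodeId, pid);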
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class DatasetAdminService, method createOrUpdate:
/**
 * Configures and creates or updates a dataset.
 *
 * @param datasetInstanceId dataset instance to be created
 * @param typeMeta type meta for the dataset
 * @param props dataset instance properties
 * @param existing if the dataset already exists (in case of an update), the existing properties
 * @return dataset specification
 */
public DatasetSpecification createOrUpdate(final DatasetId datasetInstanceId, final DatasetTypeMeta typeMeta,
                                           final DatasetProperties props,
                                           @Nullable final DatasetSpecification existing) throws Exception {
  if (existing == null) {
    LOG.info("Creating dataset instance {}, type meta: {}", datasetInstanceId, typeMeta);
  } else {
    LOG.info("Updating dataset instance {}, type meta: {}, existing: {}", datasetInstanceId, typeMeta, existing);
  }
  try (DatasetClassLoaderProvider classLoaderProvider =
         new DirectoryClassLoaderProvider(cConf, locationFactory)) {
    final DatasetContext context = DatasetContext.from(datasetInstanceId.getNamespace());
    UserGroupInformation ugi = getUgiForDataset(impersonator, datasetInstanceId);
    final DatasetType type = ImpersonationUtils.doAs(ugi, () -> {
      DatasetType type1 = dsFramework.getDatasetType(typeMeta, null, classLoaderProvider);
      if (type1 == null) {
        throw new BadRequestException(
          String.format("Cannot instantiate dataset type using provided type meta: %s", typeMeta));
      }
      return type1;
    });
    DatasetSpecification spec = ImpersonationUtils.doAs(ugi, () -> {
      DatasetSpecification spec1 = existing == null
        ? type.configure(datasetInstanceId.getEntityName(), props)
        : type.reconfigure(datasetInstanceId.getEntityName(), props, existing);
      DatasetAdmin admin = type.getAdmin(context, spec1);
      try {
        if (existing != null) {
          if (admin instanceof Updatable) {
            ((Updatable) admin).update(existing);
          } else {
            admin.upgrade();
          }
        } else {
          admin.create();
        }
      } finally {
        Closeables.closeQuietly(admin);
      }
      return spec1;
    });
    // Writing system metadata should be done without impersonation, since the user may not have access
    // to system tables.
    writeSystemMetadata(datasetInstanceId, spec, props, typeMeta, type, context, existing != null, ugi);
    return spec;
  } catch (Exception e) {
    if (e instanceof IncompatibleUpdateException) {
      // this is expected to happen if the user provides bad update properties, so we log this as debug
      LOG.debug("Incompatible update for dataset '{}'", datasetInstanceId, e);
    } else {
      LOG.error("Error {} dataset '{}': {}", existing == null ? "creating" : "updating",
                datasetInstanceId, e.getMessage(), e);
    }
    throw e;
  }
}
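Whether the existing dataset is updated in place or generically upgraded hinges on the admin implementing Updatable. A minimal hypothetical admin (the class name and the method bodies are invented) illustrating which branch createOrUpdate() would take:

import java.io.IOException;
import co.cask.cdap.api.dataset.DatasetAdmin;
import co.cask.cdap.api.dataset.DatasetSpecification;
import co.cask.cdap.api.dataset.Updatable;

// Hypothetical admin: because it implements Updatable, createOrUpdate() above
// calls update(existing) on it instead of the generic upgrade() path.
public class SketchDatasetAdmin implements DatasetAdmin, Updatable {
  @Override public boolean exists() throws IOException { return true; }
  @Override public void create() throws IOException { /* allocate backing storage */ }
  @Override public void drop() throws IOException { /* delete backing storage */ }
  @Override public void truncate() throws IOException { /* clear data, keep schema */ }
  @Override public void upgrade() throws IOException { /* generic upgrade fallback */ }
  @Override public void close() throws IOException { }

  @Override
  public void update(DatasetSpecification oldSpec) throws IOException {
    // compare oldSpec.getProperties() with the new configuration and migrate in place
  }
}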
Use of co.cask.cdap.data2.dataset2.lib.table.Update in project cdap by caskdata.
The class InMemoryDatasetOpExecutor, method update:
@Override
public DatasetSpecification update(DatasetId datasetInstanceId, DatasetTypeMeta typeMeta,
                                   DatasetProperties props, DatasetSpecification existing) throws Exception {
  DatasetType type = client.getDatasetType(typeMeta, null, new ConstantClassLoaderProvider());
  if (type == null) {
    throw new IllegalArgumentException("Dataset type cannot be instantiated for provided type meta: " + typeMeta);
  }
  try {
    DatasetSpecification spec = type.reconfigure(datasetInstanceId.getEntityName(), props, existing);
    DatasetAdmin admin = type.getAdmin(DatasetContext.from(datasetInstanceId.getNamespace()), spec);
    if (admin instanceof Updatable) {
      ((Updatable) admin).update(existing);
    } else {
      admin.create();
    }
    if (spec.getDescription() == null && existing.getDescription() != null) {
      spec.setDescription(existing.getDescription());
    }
    return spec;
  } catch (IncompatibleUpdateException e) {
    throw new ConflictException(e.getMessage());
  }
}
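The reconfigure() call is the step that can reject an incompatible update, which this method converts into a ConflictException. A hedged sketch of how a dataset definition's reconfigure might veto a change; the "partitioning" property name and the configure() delegate are invented for illustration:

// Hypothetical reconfigure(): a dataset definition can refuse property changes
// it cannot apply in place. update() above surfaces that as a ConflictException.
public DatasetSpecification reconfigure(String instanceName, DatasetProperties newProps,
                                        DatasetSpecification currentSpec) throws IncompatibleUpdateException {
  String oldValue = currentSpec.getProperty("partitioning");   // invented property name
  String newValue = newProps.getProperties().get("partitioning");
  if (oldValue != null && !oldValue.equals(newValue)) {
    throw new IncompatibleUpdateException(String.format(
      "Cannot change partitioning of dataset '%s' from '%s' to '%s'", instanceName, oldValue, newValue));
  }
  return configure(instanceName, newProps);   // invented delegate to the create-time path
}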