Usage of co.cask.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class CubeDatasetTest, method getCubeInternal:
/**
 * Returns the {@link Cube} dataset with the given name, creating the instance on first access.
 *
 * @param name dataset instance name within the test namespace
 * @param resolutions cube time resolutions, forwarded to {@code configureProperties}
 * @param aggregations cube aggregations, forwarded to {@code configureProperties}
 */
private Cube getCubeInternal(String name, int[] resolutions, Map<String, ? extends Aggregation> aggregations) throws Exception {
  DatasetProperties datasetProps = configureProperties(resolutions, aggregations);
  DatasetId datasetId = DatasetFrameworkTestUtil.NAMESPACE_ID.dataset(name);
  boolean exists = dsFrameworkUtil.getInstance(datasetId) != null;
  if (!exists) {
    dsFrameworkUtil.createInstance(Cube.class.getName(), datasetId, datasetProps);
  }
  return dsFrameworkUtil.getInstance(datasetId);
}
Usage of co.cask.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class LevelDBStreamConsumerStateStoreFactory, method getLevelDBTableAdmin:
/**
 * Builds a {@link LevelDBTableAdmin} for the given table by configuring a table
 * specification (column family {@code "t"}) through a throwaway table definition.
 *
 * @param tableId identifies the namespace and table name to administer
 * @throws IOException if the admin cannot be created
 */
private LevelDBTableAdmin getLevelDBTableAdmin(TableId tableId) throws IOException {
  DatasetProperties tableProps = TableProperties.builder().setColumnFamily("t").build();
  LevelDBTableDefinition definition = new LevelDBTableDefinition("tableDefinition");
  DatasetSpecification spec = definition.configure(tableId.getTableName(), tableProps);
  DatasetContext datasetContext = DatasetContext.from(tableId.getNamespace());
  return new LevelDBTableAdmin(datasetContext, spec, tableService, cConf);
}
Usage of co.cask.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class AdminApp, method performAdmin:
// this will get called from the worker, also from a custom workflow action
/**
 * Exercises the dataset {@link Admin} API driven by runtime arguments.
 * <p>
 * With {@code dropAll=true}: drops each of the datasets "a", "b", "c", "d" that exists.
 * Otherwise: creates table "a", updates file set "b" to an external dataset rooted at the
 * {@code new.base.path} runtime argument, truncates "c", and drops "d".
 *
 * @param context runtime context supplying the {@link Admin} and runtime arguments
 */
static void performAdmin(RuntimeContext context) {
  Admin admin = context.getAdmin();
  Map<String, String> args = context.getRuntimeArguments();
  try {
    // if invoked with dropAll=true, clean up all datasets (a, b, c, d)
    if ("true".equals(args.get("dropAll"))) {
      for (String name : new String[] { "a", "b", "c", "d" }) {
        if (admin.datasetExists(name)) {
          admin.dropDataset(name);
        }
      }
    } else {
      // create a, update b with /extra in base path, truncate c, drop d
      admin.createDataset("a", Table.class.getName(), DatasetProperties.EMPTY);
      // "b" must already exist as a FileSet with an explicit base path
      String type = admin.getDatasetType("b");
      Assert.assertEquals(FileSet.class.getName(), type);
      DatasetProperties bProps = admin.getDatasetProperties("b");
      String base = bProps.getProperties().get("base.path");
      Assert.assertNotNull(base);
      String newBase = args.get("new.base.path");
      // carry over b's existing properties, then re-root it as an external dataset
      DatasetProperties newBProps = ((FileSetProperties.Builder) FileSetProperties.builder()
          .addAll(bProps.getProperties()))
          .setDataExternal(true)
          .setBasePath(newBase)
          .build();
      admin.updateDataset("b", newBProps);
      admin.truncateDataset("c");
      admin.dropDataset("d");
    }
  } catch (DatasetManagementException e) {
    // use the throw idiom: propagate() always throws, but without the explicit
    // 'throw' the compiler and readers cannot see that this path terminates,
    // and the returned RuntimeException was silently discarded
    throw Throwables.propagate(e);
  }
}
Usage of co.cask.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class PartitionedFileSetDefinition, method configure:
/**
 * Builds the composite specification for a partitioned file set: an embedded file set
 * for the data plus an indexed table for partition metadata.
 */
@Override
public DatasetSpecification configure(String instanceName, DatasetProperties properties) {
  Map<String, String> rawProperties = properties.getProperties();
  Partitioning partitioning = PartitionedFileSetProperties.getPartitioning(rawProperties);
  Preconditions.checkNotNull(partitioning, "Properties do not contain partitioning");
  // define the columns for indexing on the partitionsTable
  DatasetProperties indexedTableProperties = DatasetProperties.builder()
      .addAll(rawProperties)
      .add(IndexedTable.INDEX_COLUMNS_CONF_KEY, INDEXED_COLS)
      .build();
  Map<String, String> pfsProperties = new HashMap<>(rawProperties);
  // this property allows us to distinguish between datasets that were created
  // before base path was explicitly set and those created after.
  // this is important to know when a pfs is updated, as we want to keep the old base path behavior for
  // previously created datasets
  String defaultBasePathStr = rawProperties.get(NAME_AS_BASE_PATH_DEFAULT);
  boolean useNameAsBasePathDefault = defaultBasePathStr == null || Boolean.parseBoolean(defaultBasePathStr);
  DatasetProperties.Builder filesetProperties = DatasetProperties.builder().addAll(rawProperties);
  // and when the dataset is deleted, only the 'files' dir will be deleted and not the dataset name dir.
  if (useNameAsBasePathDefault && !rawProperties.containsKey(FileSetProperties.BASE_PATH)) {
    filesetProperties.add(FileSetProperties.BASE_PATH, instanceName);
    pfsProperties.put(NAME_AS_BASE_PATH_DEFAULT, Boolean.TRUE.toString());
  }
  DatasetSpecification filesetSpec = filesetDef.configure(FILESET_NAME, filesetProperties.build());
  DatasetSpecification partitionsSpec = indexedTableDef.configure(PARTITION_TABLE_NAME, indexedTableProperties);
  return DatasetSpecification.builder(instanceName, getName())
      .properties(pfsProperties)
      .datasets(filesetSpec, partitionsSpec)
      .build();
}
Usage of co.cask.cdap.api.dataset.DatasetProperties in project cdap by caskdata.
From the class PartitionedFileSetDefinition, method reconfigure:
/**
 * Rebuilds the composite specification when a partitioned file set's properties are updated,
 * rejecting any change to the partitioning (stored data encodes the partition key fields).
 *
 * @throws IncompatibleUpdateException if the new properties change the partitioning fields
 */
@Override
public DatasetSpecification reconfigure(String instanceName, DatasetProperties properties, DatasetSpecification currentSpec) throws IncompatibleUpdateException {
// validate that the partitioning is not changing
Partitioning oldPartitioning = PartitionedFileSetProperties.getPartitioning(currentSpec.getProperties());
Partitioning newPartitioning = PartitionedFileSetProperties.getPartitioning(properties.getProperties());
Preconditions.checkNotNull(oldPartitioning, "Existing dataset has no partitioning");
Preconditions.checkNotNull(newPartitioning, "New properties do not contain partitioning");
// field order matters too, hence the iterator-based element-wise comparison rather than map equality
if (!Iterators.elementsEqual(oldPartitioning.getFields().entrySet().iterator(), newPartitioning.getFields().entrySet().iterator())) {
throw new IncompatibleUpdateException(String.format("Partitioning cannot be changed. Existing: %s, new: %s", oldPartitioning, newPartitioning));
}
Map<String, String> pfsProperties = new HashMap<>(properties.getProperties());
// define the columns for indexing on the partitionsTable
DatasetProperties indexedTableProperties = DatasetProperties.builder().addAll(properties.getProperties()).add(IndexedTable.INDEX_COLUMNS_CONF_KEY, INDEXED_COLS).build();
// only set the default base path property if the default was set the last time it was configured,
// and no base path is in the current properties.
DatasetSpecification currentFileSpec = currentSpec.getSpecification(FILESET_NAME);
DatasetProperties.Builder newFileProperties = DatasetProperties.builder().addAll(properties.getProperties());
// NOTE: unlike configure(), an absent flag here counts as false — the default base path is
// only re-applied to datasets that recorded it when they were originally configured
String useNameAsBasePathDefault = currentSpec.getProperty(NAME_AS_BASE_PATH_DEFAULT);
if (Boolean.parseBoolean(useNameAsBasePathDefault) && !properties.getProperties().containsKey(FileSetProperties.BASE_PATH)) {
newFileProperties.add(FileSetProperties.BASE_PATH, instanceName);
pfsProperties.put(NAME_AS_BASE_PATH_DEFAULT, Boolean.TRUE.toString());
}
// delegate to reconfigure() for the embedded datasets so they can validate their own updates
return DatasetSpecification.builder(instanceName, getName()).properties(pfsProperties).datasets(AbstractDatasetDefinition.reconfigure(filesetDef, FILESET_NAME, newFileProperties.build(), currentFileSpec), AbstractDatasetDefinition.reconfigure(indexedTableDef, PARTITION_TABLE_NAME, indexedTableProperties, currentSpec.getSpecification(PARTITION_TABLE_NAME))).build();
}
Aggregations