Use of io.cdap.cdap.api.dataset.lib.Partitioning in project cdap by caskdata: class ExploreTableManager, method generateFileSetCreateStatement.
/**
 * Generates a Hive CREATE TABLE statement for a ((time-)partitioned) file set.
 *
 * @param datasetId the dataset id
 * @param dataset the instantiated dataset
 * @param properties the properties from the dataset specification
 * @param truncating whether this call to create() is part of a truncate() operation. The effect is:
 *                   if "possess existing" is true, then the truncate() has just dropped this
 *                   dataset and that deleted the explore table: we must recreate it.
 * @return a CREATE TABLE statement, or null if the dataset reuses an existing explore table
 * @throws IllegalArgumentException if the schema cannot be parsed, if an unsupported native format
 *         is given, or if a native format (text/csv) is configured without an explore schema
 * @throws ExploreException if the dataset is configured to use an existing explore table but
 *         that table does not exist
 */
@Nullable
private String generateFileSetCreateStatement(DatasetId datasetId, Dataset dataset,
                                              Map<String, String> properties,
                                              boolean truncating)
  throws IllegalArgumentException, ExploreException {
  String tableName = tableNaming.getTableName(datasetId, properties);
  String databaseName = ExploreProperties.getExploreDatabaseName(properties);
  Map<String, String> tableProperties = FileSetProperties.getTableProperties(properties);

  // If this dataset reuses an existing table, do not attempt to create it. For "possess existing"
  // during a truncate(), the table was just dropped, so we fall through and recreate it.
  if (FileSetProperties.isUseExisting(tableProperties)
      || (FileSetProperties.isPossessExisting(tableProperties) && !truncating)) {
    try {
      exploreService.getTableInfo(datasetId.getNamespace(), databaseName, tableName);
      // table exists: do not attempt to create
      return null;
    } catch (TableNotFoundException e) {
      throw new ExploreException(String.format(
        "Dataset '%s' is configured to use an existing explore table, but table '%s' does not "
          + "exist in database '%s'. ", datasetId.getDataset(), tableName, databaseName));
    }
  }

  // Determine the base location and, for partitioned file sets, the partitioning scheme.
  Location baseLocation;
  Partitioning partitioning = null;
  if (dataset instanceof PartitionedFileSet) {
    partitioning = ((PartitionedFileSet) dataset).getPartitioning();
    baseLocation = ((PartitionedFileSet) dataset).getEmbeddedFileSet().getBaseLocation();
  } else {
    baseLocation = ((FileSet) dataset).getBaseLocation();
  }

  CreateStatementBuilder createStatementBuilder =
    new CreateStatementBuilder(datasetId.getDataset(), databaseName, tableName, shouldEscapeColumns)
      .setLocation(baseLocation)
      .setPartitioning(partitioning)
      .setTableProperties(tableProperties);

  String schema = FileSetProperties.getExploreSchema(properties);
  String format = FileSetProperties.getExploreFormat(properties);
  if (format != null) {
    if ("parquet".equals(format)) {
      // reuse the already-read schema instead of re-reading it from the properties
      return createStatementBuilder.setSchema(schema).buildWithFileFormat("parquet");
    }
    // for text and csv, we know what to do
    Preconditions.checkArgument("text".equals(format) || "csv".equals(format),
                                "Only text and csv are supported as native formats");
    Preconditions.checkNotNull(schema, "for native formats, explore schema must be given in dataset properties");
    String delimiter = null;
    if ("text".equals(format)) {
      // text may carry a custom delimiter in the explore format properties
      delimiter = FileSetProperties.getExploreFormatProperties(properties).get("delimiter");
    } else if ("csv".equals(format)) {
      delimiter = ",";
    }
    return createStatementBuilder.setSchema(schema)
      .setRowFormatDelimited(delimiter, null)
      .buildWithFileFormat("TEXTFILE");
  } else {
    // Some SerDes can derive the schema from table properties (e.g. avro.schema.literal),
    // so an explicit explore schema is optional here.
    if (schema != null) {
      createStatementBuilder.setSchema(schema);
    }
    // format not given, look for serde, input format, etc.
    String serde = FileSetProperties.getSerDe(properties);
    String inputFormat = FileSetProperties.getExploreInputFormat(properties);
    String outputFormat = FileSetProperties.getExploreOutputFormat(properties);
    Preconditions.checkArgument(serde != null && inputFormat != null && outputFormat != null,
                                "All of SerDe, InputFormat and OutputFormat must be given in dataset properties");
    return createStatementBuilder.setRowFormatSerde(serde).buildWithFormats(inputFormat, outputFormat);
  }
}
Use of io.cdap.cdap.api.dataset.lib.Partitioning in project cdap by caskdata: class ExploreExecutorHttpHandler, method doPartitionOperation.
// Shared driver for partition add/drop endpoints: instantiates the dataset, validates it is a
// PartitionedFileSet, parses the partition key from the request body, and delegates the actual
// Explore work to the given PartitionOperation, responding with the resulting query handle.
private void doPartitionOperation(FullHttpRequest request, HttpResponder responder, DatasetId datasetId, PartitionOperation partitionOperation) {
// the instantiator is AutoCloseable; try-with-resources guarantees cleanup of its classloaders etc.
try (SystemDatasetInstantiator datasetInstantiator = datasetInstantiatorFactory.create()) {
Dataset dataset;
try {
dataset = datasetInstantiator.getDataset(datasetId);
} catch (Exception e) {
// instantiation failure is a server-side problem, not a client error
LOG.error("Exception instantiating dataset {}.", datasetId, e);
responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, "Exception instantiating dataset " + datasetId);
return;
}
try {
if (!(dataset instanceof PartitionedFileSet)) {
responder.sendString(HttpResponseStatus.BAD_REQUEST, "not a partitioned dataset.");
return;
}
Partitioning partitioning = ((PartitionedFileSet) dataset).getPartitioning();
// NOTE(review): the reader is deliberately not closed here; closing it would close the
// underlying ByteBufInputStream over the request content — confirm buffer release is
// handled by the HTTP framework.
Reader reader = new InputStreamReader(new ByteBufInputStream(request.content()));
// request body is a JSON object of string-to-string properties
Map<String, String> properties = GSON.fromJson(reader, new TypeToken<Map<String, String>>() {
}.getType());
PartitionKey partitionKey;
try {
// derive the partition key from the runtime arguments, validated against the partitioning
partitionKey = PartitionedFileSetArguments.getOutputPartitionKey(properties, partitioning);
} catch (Exception e) {
responder.sendString(HttpResponseStatus.BAD_REQUEST, "invalid partition key: " + e.getMessage());
return;
}
if (partitionKey == null) {
responder.sendString(HttpResponseStatus.BAD_REQUEST, "no partition key was given.");
return;
}
QueryHandle handle = partitionOperation.submitOperation(partitionKey, properties);
// a null handle means the operation already sent its own response; nothing more to do
if (handle == null) {
return;
}
JsonObject json = new JsonObject();
json.addProperty("handle", handle.getHandle());
responder.sendJson(HttpResponseStatus.OK, json.toString());
} finally {
// always release the dataset, regardless of which early return was taken
Closeables.closeQuietly(dataset);
}
} catch (Throwable e) {
// last-resort catch so the client always gets a response instead of a hung connection
LOG.error("Got exception:", e);
responder.sendString(HttpResponseStatus.INTERNAL_SERVER_ERROR, e.getMessage());
}
}
Use of io.cdap.cdap.api.dataset.lib.Partitioning in project cdap by caskdata: class PartitionedFileSetDefinition, method getDataset.
@Override
public PartitionedFileSet getDataset(DatasetContext datasetContext, DatasetSpecification spec,
                                     Map<String, String> arguments,
                                     ClassLoader classLoader) throws IOException {
  // the spec's properties are required to carry the partitioning
  Partitioning partitioning = PartitionedFileSetProperties.getPartitioning(spec.getProperties());
  // give the definition a chance to adjust the runtime arguments before instantiating the parts
  Map<String, String> effectiveArgs = updateArgumentsIfNeeded(arguments, partitioning);
  // instantiate the embedded file set and the partition-index table from their sub-specs
  FileSet embeddedFileSet =
    filesetDef.getDataset(datasetContext, spec.getSpecification(FILESET_NAME), effectiveArgs, classLoader);
  IndexedTable partitionsTable =
    indexedTableDef.getDataset(datasetContext, spec.getSpecification(PARTITION_TABLE_NAME), effectiveArgs, classLoader);
  return new PartitionedFileSetDataset(datasetContext, spec.getName(), partitioning,
                                       embeddedFileSet, partitionsTable, spec, effectiveArgs,
                                       getExploreProvider());
}
Use of io.cdap.cdap.api.dataset.lib.Partitioning in project cdap by caskdata: class PartitionedFileSetDefinition, method reconfigure.
@Override
public DatasetSpecification reconfigure(String instanceName, DatasetProperties properties,
                                        DatasetSpecification currentSpec) throws IncompatibleUpdateException {
  // the partitioning is immutable: reject any reconfiguration that changes it
  Partitioning existingPartitioning = PartitionedFileSetProperties.getPartitioning(currentSpec.getProperties());
  Partitioning requestedPartitioning = PartitionedFileSetProperties.getPartitioning(properties.getProperties());
  Preconditions.checkNotNull(existingPartitioning, "Existing dataset has no partitioning");
  Preconditions.checkNotNull(requestedPartitioning, "New properties do not contain partitioning");
  boolean samePartitioning = Iterators.elementsEqual(
    existingPartitioning.getFields().entrySet().iterator(),
    requestedPartitioning.getFields().entrySet().iterator());
  if (!samePartitioning) {
    throw new IncompatibleUpdateException(
      String.format("Partitioning cannot be changed. Existing: %s, new: %s",
                    existingPartitioning, requestedPartitioning));
  }

  Map<String, String> pfsProperties = new HashMap<>(properties.getProperties());

  // the partitions table is indexed on the partition columns
  DatasetProperties indexedTableProperties = DatasetProperties.builder()
    .addAll(properties.getProperties())
    .add(IndexedTable.INDEX_COLUMNS_CONF_KEY, INDEXED_COLS)
    .build();

  // only set the default base path property if the default was set the last time it was configured,
  // and no base path is in the current properties.
  DatasetSpecification currentFileSpec = currentSpec.getSpecification(FILESET_NAME);
  DatasetProperties.Builder fileSetPropsBuilder = DatasetProperties.builder().addAll(properties.getProperties());
  boolean defaultedBasePathBefore = Boolean.parseBoolean(currentSpec.getProperty(NAME_AS_BASE_PATH_DEFAULT));
  if (defaultedBasePathBefore && !properties.getProperties().containsKey(FileSetProperties.BASE_PATH)) {
    fileSetPropsBuilder.add(FileSetProperties.BASE_PATH, instanceName);
    pfsProperties.put(NAME_AS_BASE_PATH_DEFAULT, Boolean.TRUE.toString());
  }

  return DatasetSpecification.builder(instanceName, getName())
    .properties(pfsProperties)
    .datasets(
      AbstractDatasetDefinition.reconfigure(filesetDef, FILESET_NAME,
                                            fileSetPropsBuilder.build(), currentFileSpec),
      AbstractDatasetDefinition.reconfigure(indexedTableDef, PARTITION_TABLE_NAME,
                                            indexedTableProperties,
                                            currentSpec.getSpecification(PARTITION_TABLE_NAME)))
    .build();
}
Use of io.cdap.cdap.api.dataset.lib.Partitioning in project cdap by caskdata: class PartitioningTest, method testFieldOrder.
@Test
public void testFieldOrder() {
  // getFields() must preserve the order in which fields were added to the builder
  Partitioning ascending = Partitioning.builder()
    .addIntField("1").addLongField("2").addStringField("3").build();
  assertKeyOrder(ascending, "1", "2", "3");
  // the previous order may have been preserved by chance. Now try the reverse order
  Partitioning descending = Partitioning.builder()
    .addIntField("3").addLongField("2").addStringField("1").build();
  assertKeyOrder(descending, "3", "2", "1");
}

// asserts that iterating the partitioning's fields yields exactly the given keys, in order
private static void assertKeyOrder(Partitioning partitioning, String... expectedKeys) {
  Iterator<Map.Entry<String, FieldType>> iterator = partitioning.getFields().entrySet().iterator();
  for (String expectedKey : expectedKeys) {
    Assert.assertEquals(expectedKey, iterator.next().getKey());
  }
  Assert.assertFalse(iterator.hasNext());
}
Aggregations