Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method convertShowPartitions.
private Operation convertShowPartitions(HiveParserASTNode ast) {
    String tableName =
            HiveParserBaseSemanticAnalyzer.getUnescapedName((HiveParserASTNode) ast.getChild(0));
    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
    // We can only have a single partition spec
    assert (partSpecs.size() <= 1);
    Map<String, String> partSpec = null;
    if (partSpecs.size() > 0) {
        partSpec = partSpecs.get(0);
    }
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tableName);
    CatalogPartitionSpec spec = null;
    if (partSpec != null && !partSpec.isEmpty()) {
        spec = new CatalogPartitionSpec(new HashMap<>(partSpec));
    }
    return new ShowPartitionsOperation(tableIdentifier, spec);
}
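For orientation, the operation produced above boils down to a table identifier plus an optional CatalogPartitionSpec. A minimal sketch of that mapping, assuming a SHOW PARTITIONS statement with a PARTITION clause; the catalog, database and partition values below are placeholders, not taken from the snippet:

    // Sketch: what "SHOW PARTITIONS tbl PARTITION (p1='2020', p2='05')" reduces to.
    Map<String, String> parsedSpec = new HashMap<>();
    parsedSpec.put("p1", "2020");
    parsedSpec.put("p2", "05");

    ObjectIdentifier table = ObjectIdentifier.of("myCatalog", "myDb", "tbl");
    CatalogPartitionSpec spec = new CatalogPartitionSpec(new HashMap<>(parsedSpec));

    // With no PARTITION clause the operation is created with a null spec instead,
    // which lists all partitions of the table.
    ShowPartitionsOperation op = new ShowPartitionsOperation(table, spec);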
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method convertAlterTableAddParts.
/**
* Add one or more partitions to a table. Useful when the data has been copied to the right
* location by some other process.
*/
private Operation convertAlterTableAddParts(String[] qualified, CommonTree ast) {
    // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists?
    // alterStatementSuffixAddPartitionsElement+)
    boolean ifNotExists = ast.getChild(0).getType() == HiveASTParser.TOK_IFNOTEXISTS;
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    boolean isView = tab.isView();
    validateAlterTableType(tab);
    int numCh = ast.getChildCount();
    int start = ifNotExists ? 1 : 0;
    String currentLocation = null;
    Map<String, String> currentPartSpec = null;
    // Parser has done some verification, so the order of tokens doesn't need to be verified
    // here.
    List<CatalogPartitionSpec> specs = new ArrayList<>();
    List<CatalogPartition> partitions = new ArrayList<>();
    for (int num = start; num < numCh; num++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(num);
        switch (child.getToken().getType()) {
            case HiveASTParser.TOK_PARTSPEC:
                if (currentPartSpec != null) {
                    specs.add(new CatalogPartitionSpec(currentPartSpec));
                    Map<String, String> props = new HashMap<>();
                    if (currentLocation != null) {
                        props.put(TABLE_LOCATION_URI, currentLocation);
                    }
                    partitions.add(new CatalogPartitionImpl(props, null));
                    currentLocation = null;
                }
                currentPartSpec = getPartSpec(child);
                // validate reserved values
                validatePartitionValues(currentPartSpec);
                break;
            case HiveASTParser.TOK_PARTITIONLOCATION:
                // if location specified, set in partition
                if (isView) {
                    throw new ValidationException("LOCATION clause illegal for view partition");
                }
                currentLocation =
                        HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
                break;
            default:
                throw new ValidationException("Unknown child: " + child);
        }
    }
    // add the last one
    if (currentPartSpec != null) {
        specs.add(new CatalogPartitionSpec(currentPartSpec));
        Map<String, String> props = new HashMap<>();
        if (currentLocation != null) {
            props.put(TABLE_LOCATION_URI, currentLocation);
        }
        partitions.add(new CatalogPartitionImpl(props, null));
    }
    ObjectIdentifier tableIdentifier =
            tab.getDbName() == null
                    ? parseObjectIdentifier(tab.getTableName())
                    : catalogManager.qualifyIdentifier(
                            UnresolvedIdentifier.of(tab.getDbName(), tab.getTableName()));
    return new AddPartitionsOperation(tableIdentifier, ifNotExists, specs, partitions);
}
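The resulting AddPartitionsOperation carries two parallel lists, one CatalogPartitionSpec and one CatalogPartition per PARTITION element, with an optional location stored under TABLE_LOCATION_URI (the same statically imported constant the method uses). A hedged sketch of the output for a two-element ADD PARTITION statement; the identifier and location values are illustrative:

    // ALTER TABLE tbl ADD IF NOT EXISTS
    //   PARTITION (p='2020-05-01') LOCATION '/data/p0'
    //   PARTITION (p='2020-05-02')
    ObjectIdentifier table = ObjectIdentifier.of("myCatalog", "myDb", "tbl");

    List<CatalogPartitionSpec> specs = new ArrayList<>();
    List<CatalogPartition> partitions = new ArrayList<>();

    specs.add(new CatalogPartitionSpec(Collections.singletonMap("p", "2020-05-01")));
    partitions.add(
            new CatalogPartitionImpl(
                    Collections.singletonMap(TABLE_LOCATION_URI, "/data/p0"), null));

    specs.add(new CatalogPartitionSpec(Collections.singletonMap("p", "2020-05-02")));
    partitions.add(new CatalogPartitionImpl(Collections.emptyMap(), null));

    AddPartitionsOperation op = new AddPartitionsOperation(table, true, specs, partitions);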
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class HiveDialectITCase, method testAlterPartition.
@Test
public void testAlterPartition() throws Exception {
    tableEnv.executeSql(
            "create table tbl (x tinyint,y string) partitioned by (p1 bigint,p2 date)");
    tableEnv.executeSql(
            "alter table tbl add partition (p1=1000,p2='2020-05-01') partition (p1=2000,p2='2020-01-01')");
    CatalogPartitionSpec spec1 =
            new CatalogPartitionSpec(
                    new LinkedHashMap<String, String>() {
                        {
                            put("p1", "1000");
                            put("p2", "2020-05-01");
                        }
                    });
    CatalogPartitionSpec spec2 =
            new CatalogPartitionSpec(
                    new LinkedHashMap<String, String>() {
                        {
                            put("p1", "2000");
                            put("p2", "2020-01-01");
                        }
                    });
    ObjectPath tablePath = new ObjectPath("default", "tbl");
    Table hiveTable = hiveCatalog.getHiveTable(tablePath);
    // change location
    String location = warehouse + "/new_part_location";
    tableEnv.executeSql(
            String.format(
                    "alter table tbl partition (p1=1000,p2='2020-05-01') set location '%s'",
                    location));
    Partition partition = hiveCatalog.getHivePartition(hiveTable, spec1);
    assertEquals(location, locationPath(partition.getSd().getLocation()));
    // change file format
    tableEnv.executeSql(
            "alter table tbl partition (p1=2000,p2='2020-01-01') set fileformat rcfile");
    partition = hiveCatalog.getHivePartition(hiveTable, spec2);
    assertEquals(
            LazyBinaryColumnarSerDe.class.getName(),
            partition.getSd().getSerdeInfo().getSerializationLib());
    assertEquals(RCFileInputFormat.class.getName(), partition.getSd().getInputFormat());
    assertEquals(RCFileOutputFormat.class.getName(), partition.getSd().getOutputFormat());
    // change serde
    tableEnv.executeSql(
            String.format(
                    "alter table tbl partition (p1=1000,p2='2020-05-01') set serde '%s' with serdeproperties('%s'='%s')",
                    LazyBinarySerDe.class.getName(), serdeConstants.LINE_DELIM, "\n"));
    partition = hiveCatalog.getHivePartition(hiveTable, spec1);
    assertEquals(
            LazyBinarySerDe.class.getName(),
            partition.getSd().getSerdeInfo().getSerializationLib());
    assertEquals(
            "\n",
            partition.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM));
}
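The same CatalogPartitionSpec values also work against the generic Catalog interface, not just HiveCatalog's Hive-specific accessors. A rough sketch, assuming a Catalog instance named catalog and the table created above (checked-exception handling omitted):

    // Look the partition up through the catalog-agnostic interface.
    ObjectPath path = new ObjectPath("default", "tbl");
    LinkedHashMap<String, String> keys = new LinkedHashMap<>();
    keys.put("p1", "1000");
    keys.put("p2", "2020-05-01");
    CatalogPartitionSpec spec = new CatalogPartitionSpec(keys);

    if (catalog.partitionExists(path, spec)) {
        CatalogPartition part = catalog.getPartition(path, spec);
        // For Hive-backed partitions the properties include e.g. the location URI.
        System.out.println(part.getProperties());
    }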
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class TestManagedCommittable, method combine.
public static TestManagedCommittable combine(List<TestManagedCommittable> committables) {
    Map<CatalogPartitionSpec, List<RowData>> toAdd = new HashMap<>();
    Map<CatalogPartitionSpec, Set<Path>> toDelete = new HashMap<>();
    for (TestManagedCommittable committable : committables) {
        Map<CatalogPartitionSpec, List<RowData>> partialAdd = committable.toAdd;
        Map<CatalogPartitionSpec, Set<Path>> partialDelete = committable.toDelete;
        for (Map.Entry<CatalogPartitionSpec, List<RowData>> entry : partialAdd.entrySet()) {
            CatalogPartitionSpec partitionSpec = entry.getKey();
            List<RowData> elements = toAdd.getOrDefault(partitionSpec, new ArrayList<>());
            elements.addAll(entry.getValue());
            toAdd.put(partitionSpec, elements);
        }
        for (Map.Entry<CatalogPartitionSpec, Set<Path>> entry : partialDelete.entrySet()) {
            CatalogPartitionSpec partitionSpec = entry.getKey();
            Set<Path> paths = toDelete.getOrDefault(partitionSpec, new HashSet<>());
            paths.addAll(entry.getValue());
            toDelete.put(partitionSpec, paths);
        }
    }
    return new TestManagedCommittable(toAdd, toDelete);
}
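This merge relies on CatalogPartitionSpec implementing equals/hashCode over its partition map, so entries from different committables that target the same partition land under one key. A small sketch of that value semantics; the partition key and row values are illustrative:

    CatalogPartitionSpec a =
            new CatalogPartitionSpec(Collections.singletonMap("dt", "2022-01-01"));
    CatalogPartitionSpec b =
            new CatalogPartitionSpec(Collections.singletonMap("dt", "2022-01-01"));

    // Distinct instances with equal values hash to the same bucket,
    // so rows for the same partition end up appended under one entry.
    Map<CatalogPartitionSpec, List<RowData>> toAdd = new HashMap<>();
    toAdd.computeIfAbsent(a, k -> new ArrayList<>()).add(GenericRowData.of(StringData.fromString("x")));
    toAdd.computeIfAbsent(b, k -> new ArrayList<>()).add(GenericRowData.of(StringData.fromString("y")));
    // toAdd.size() == 1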
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class TestManagedSinkCommittableSerializer, method deserialize.
@Override
public TestManagedCommittable deserialize(int version, byte[] serialized) throws IOException {
    if (version == VERSION) {
        final DataInputDeserializer in = new DataInputDeserializer(serialized);
        int newFileSize = in.readInt();
        Map<CatalogPartitionSpec, List<RowData>> toCommit = new HashMap<>(newFileSize);
        for (int i = 0; i < newFileSize; i++) {
            CatalogPartitionSpec partitionSpec = deserializePartitionSpec(in);
            List<RowData> elements = deserializeRowDataElements(in);
            toCommit.put(partitionSpec, elements);
        }
        int cleanupFileSize = in.readInt();
        Map<CatalogPartitionSpec, Set<Path>> toCleanup = new HashMap<>(cleanupFileSize);
        for (int i = 0; i < cleanupFileSize; i++) {
            CatalogPartitionSpec partitionSpec = deserializePartitionSpec(in);
            Set<Path> paths = deserializePaths(in);
            toCleanup.put(partitionSpec, paths);
        }
        return new TestManagedCommittable(toCommit, toCleanup);
    }
    throw new IOException(String.format("Unknown version %d", version));
}
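The deserializePartitionSpec, deserializeRowDataElements and deserializePaths helpers are not part of this excerpt. Purely as an assumption about their shape, a spec reader that consumes an entry count followed by UTF key/value pairs could look like the sketch below; the project's actual wire format may differ:

    // Hypothetical sketch only: the real TestManagedSinkCommittableSerializer helper
    // is not reproduced above, and its wire format may differ.
    private static CatalogPartitionSpec deserializePartitionSpec(DataInputDeserializer in)
            throws IOException {
        int numKeys = in.readInt();
        LinkedHashMap<String, String> partitionKVs = new LinkedHashMap<>(numKeys);
        for (int i = 0; i < numKeys; i++) {
            String key = in.readUTF();
            String value = in.readUTF();
            partitionKVs.put(key, value);
        }
        return new CatalogPartitionSpec(partitionKVs);
    }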