use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
the class TestManagedSinkCommittableSerializer method serialize.
@Override
public byte[] serialize(TestManagedCommittable committable) throws IOException {
    final DataOutputSerializer out = new DataOutputSerializer(64);
    // Encode the rows to add, grouped by partition spec.
    out.writeInt(committable.getToAdd().size());
    for (Map.Entry<CatalogPartitionSpec, List<RowData>> entry :
            committable.getToAdd().entrySet()) {
        serializePartitionSpec(out, entry.getKey());
        serializeRowDataElements(out, entry.getValue());
    }
    // Encode the files to delete, grouped by partition spec.
    out.writeInt(committable.getToDelete().size());
    for (Map.Entry<CatalogPartitionSpec, Set<Path>> entry :
            committable.getToDelete().entrySet()) {
        serializePartitionSpec(out, entry.getKey());
        serializePaths(out, entry.getValue());
    }
    final byte[] result = out.getCopyOfBuffer();
    out.clear();
    return result;
}
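The serializePartitionSpec helper is not included in this snippet. A minimal sketch of such a helper, assuming the spec is encoded as a size-prefixed list of key/value strings (the encoding shown here is hypothetical, not necessarily the one used by the Flink test class), could look like this:
private void serializePartitionSpec(DataOutputSerializer out, CatalogPartitionSpec partitionSpec)
        throws IOException {
    // CatalogPartitionSpec exposes its underlying key/value map via getPartitionSpec().
    Map<String, String> spec = partitionSpec.getPartitionSpec();
    out.writeInt(spec.size());
    for (Map.Entry<String, String> entry : spec.entrySet()) {
        out.writeUTF(entry.getKey());
        out.writeUTF(entry.getValue());
    }
}
The matching deserializer would read the entries back in the same order and rebuild the map before wrapping it in a new CatalogPartitionSpec.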
use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
the class TestManagedSinkWriter method write.
@Override
public void write(RowData element, Context context) throws IOException, InterruptedException {
    assert element.getArity() == 3;
    String partition = element.getString(0).toString();
    Path filePath = new Path(element.getString(1).toString());
    RowData rowData = GenericRowData.of(element.getString(2));
    // Resolve the partition spec from the file path the first time this partition key is seen.
    CatalogPartitionSpec currentPartitionSpec =
            processedPartitions.getOrDefault(
                    partition,
                    new CatalogPartitionSpec(
                            PartitionPathUtils.extractPartitionSpecFromPath(filePath)));
    processedPartitions.put(partition, currentPartitionSpec);
    // Stage the row under its partition and remember the original file for deletion on commit.
    List<RowData> elements = stagingElements.getOrDefault(currentPartitionSpec, new ArrayList<>());
    elements.add(rowData);
    stagingElements.put(currentPartitionSpec, elements);
    Set<Path> old = toDelete.getOrDefault(currentPartitionSpec, new HashSet<>());
    old.add(filePath);
    toDelete.put(currentPartitionSpec, old);
}
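For context, PartitionPathUtils.extractPartitionSpecFromPath walks the path components and collects every key=value directory segment. A small illustration (the path below is invented for the example):
// Hypothetical file path under a partitioned directory layout.
Path filePath = new Path("/tmp/warehouse/MyTable/season=summer/part-0.data");
LinkedHashMap<String, String> spec = PartitionPathUtils.extractPartitionSpecFromPath(filePath);
// spec is now {season=summer}; wrapping it gives the partition's CatalogPartitionSpec.
CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(spec);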
use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
the class TestManagedTableFactory method resolveCompactFileBasePath.
private static Optional<String> resolveCompactFileBasePath(ObjectIdentifier tableIdentifier) {
    AtomicReference<Map<CatalogPartitionSpec, List<Path>>> reference =
            MANAGED_TABLE_FILE_ENTRIES.get(tableIdentifier);
    if (reference != null) {
        Map<CatalogPartitionSpec, List<Path>> managedTableFileEntries = reference.get();
        for (Map.Entry<CatalogPartitionSpec, List<Path>> entry : managedTableFileEntries.entrySet()) {
            List<Path> partitionFiles = entry.getValue();
            if (partitionFiles.size() > 0) {
                Path file = partitionFiles.get(0);
                LinkedHashMap<String, String> partitionSpec =
                        PartitionPathUtils.extractPartitionSpecFromPath(file);
                if (partitionSpec.isEmpty()) {
                    // Non-partitioned table: files sit directly under the table directory.
                    return Optional.of(file.getParent().getPath());
                } else {
                    // Partitioned table: cut the path right after the table identifier.
                    String tableName = tableIdentifier.asSummaryString();
                    int index = file.getPath().indexOf(tableName);
                    if (index != -1) {
                        return Optional.of(file.getPath().substring(0, index + tableName.length()));
                    }
                }
            }
        }
    }
    return Optional.empty();
}
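To make the partitioned branch concrete, the string arithmetic simply truncates the file path right after the identifier's summary string. An illustration with made-up values (the directory layout is assumed, not taken from the Flink test):
String tableName = "default_catalog.default_database.MyTable";  // asSummaryString() form
String filePath = "/tmp/default_catalog.default_database.MyTable/season=summer/part-0";  // invented layout
int index = filePath.indexOf(tableName);
String basePath = filePath.substring(0, index + tableName.length());
// basePath == "/tmp/default_catalog.default_database.MyTable"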
use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
the class CompactManagedTableITCase method testCompactSinglePartitionedTable.
@Test
public void testCompactSinglePartitionedTable() throws Exception {
    String sql =
            "CREATE TABLE MyTable (\n"
                    + " id BIGINT,\n"
                    + " content STRING,\n"
                    + " season STRING\n"
                    + ") PARTITIONED BY (season)";
    prepare(sql, Arrays.asList(of("season", "'spring'"), of("season", "'summer'")));
    Set<CatalogPartitionSpec> resolvedPartitionSpecsHaveBeenOrToBeCompacted = new HashSet<>();
    // test compact one partition
    CatalogPartitionSpec unresolvedPartitionSpec =
            new CatalogPartitionSpec(of("season", "'summer'"));
    resolvedPartitionSpecsHaveBeenOrToBeCompacted.add(new CatalogPartitionSpec(of("season", "summer")));
    executeAndCheck(unresolvedPartitionSpec, resolvedPartitionSpecsHaveBeenOrToBeCompacted);
    // test compact the whole table
    unresolvedPartitionSpec = new CatalogPartitionSpec(Collections.emptyMap());
    resolvedPartitionSpecsHaveBeenOrToBeCompacted.add(new CatalogPartitionSpec(of("season", "spring")));
    executeAndCheck(unresolvedPartitionSpec, resolvedPartitionSpecsHaveBeenOrToBeCompacted);
}
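Note how the specs are quoted: the unresolved spec passed to executeAndCheck keeps the SQL string literal ('summer'), while the resolved spec expected after compaction uses the bare value. Assuming `of` is a test helper that builds a single-entry map (it is not shown in this snippet), the distinction amounts to:
// Unresolved spec as written in the SQL statement: the value still carries its quotes.
CatalogPartitionSpec unresolved = new CatalogPartitionSpec(Collections.singletonMap("season", "'summer'"));
// Resolved spec after planning: quotes stripped, matching the partition directory season=summer.
CatalogPartitionSpec resolved = new CatalogPartitionSpec(Collections.singletonMap("season", "summer"));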
use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
the class CompactManagedTableITCase method testCompactNonPartitionedTable.
@Test
public void testCompactNonPartitionedTable() throws Exception {
    String sql = "CREATE TABLE MyTable (id BIGINT, content STRING)";
    prepare(sql, Collections.emptyList());
    // test compact table
    CatalogPartitionSpec unresolvedDummySpec = new CatalogPartitionSpec(Collections.emptyMap());
    Set<CatalogPartitionSpec> resolvedPartitionSpecsHaveBeenOrToBeCompacted =
            Collections.singleton(unresolvedDummySpec);
    executeAndCheck(unresolvedDummySpec, resolvedPartitionSpecsHaveBeenOrToBeCompacted);
}
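executeAndCheck is a helper of the ITCase and is not shown here; for a non-partitioned managed table the empty dummy spec corresponds to compacting the whole table. A hedged sketch of the statement such a helper would presumably issue (tEnv is an assumed TableEnvironment field of the test, not part of the snippet above):
// Hypothetical sketch: whole-table compaction for the managed table created above.
tEnv.executeSql("ALTER TABLE MyTable COMPACT").await();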