use of org.apache.drill.metastore.components.tables.TableMetadataUnit in project drill by apache.
the class TestTablesMetadataMapper method testToDeleteConditionsPartitions.
@Test
public void testToDeleteConditionsPartitions() {
  TableMetadataUnit basicUnit = TestData.basicTableMetadataUnit();
  List<TableMetadataUnit> units = Arrays.asList(
      basicUnit.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation").metadataKey("2008").metadataIdentifier("2008/01").build(),
      basicUnit.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation").metadataKey("2008").metadataIdentifier("2008/02").build(),
      basicUnit.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation").metadataKey("2009").metadataIdentifier("2009/01").build());
  Condition[] expectedConditions = new Condition[] {
      DSL.and(Tables.PARTITIONS.STORAGE_PLUGIN.eq("dfs"), Tables.PARTITIONS.WORKSPACE.eq("tmp"),
          Tables.PARTITIONS.TABLE_NAME.eq("nation"), Tables.PARTITIONS.METADATA_KEY.eq("2008")),
      DSL.and(Tables.PARTITIONS.STORAGE_PLUGIN.eq("dfs"), Tables.PARTITIONS.WORKSPACE.eq("tmp"),
          Tables.PARTITIONS.TABLE_NAME.eq("nation"), Tables.PARTITIONS.METADATA_KEY.eq("2009"))
  };
  List<Condition> actualConditions = TablesMetadataMapper.PartitionMapper.get().toDeleteConditions(units);
  assertEquals(expectedConditions.length, actualConditions.size());
  assertThat(actualConditions, hasItems(expectedConditions));
}
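The three units above collapse into two delete conditions because the partition mapper keys them by storage plugin, workspace, table name, and metadata key, and only two distinct combinations occur ("2008" and "2009"). A minimal sketch of that grouping idea, assuming TableMetadataUnit exposes accessors matching its builder methods (this is not the mapper's actual implementation):
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.drill.metastore.components.tables.TableMetadataUnit;

public class DeleteConditionGroupingSketch {
  // Groups units by the composite key used for partition delete conditions;
  // each distinct key would correspond to one DSL.and(...) condition.
  static Map<List<String>, List<TableMetadataUnit>> groupByDeleteKey(List<TableMetadataUnit> units) {
    return units.stream().collect(Collectors.groupingBy(unit -> Arrays.asList(
        unit.storagePlugin(), unit.workspace(), unit.tableName(), unit.metadataKey())));
  }
}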
use of org.apache.drill.metastore.components.tables.TableMetadataUnit in project drill by apache.
the class TestTablesOperationTransformer method testToOverwriteOperation.
@Test
public void testToOverwriteOperation() {
  TableMetadataUnit unit = TableMetadataUnit.builder()
      .storagePlugin("dfs").workspace("tmp").tableName("nation")
      .metadataType(MetadataType.TABLE.name()).metadataIdentifier("s1").build();
  List<Overwrite> operations = transformer.toOverwrite(Collections.singletonList(unit));
  InputDataTransformer<TableMetadataUnit> inputDataTransformer =
      ((MongoTables) metastore.tables()).transformer().inputData();
  Document expected = new Document();
  expected.append("storagePlugin", "dfs");
  expected.append("workspace", "tmp");
  expected.append("tableName", "nation");
  expected.append("metadataType", MetadataType.TABLE.name());
  expected.append("metadataIdentifier", "s1");
  assertEquals(new Document().append(MongoConfigConstants.ID, inputDataTransformer.createId(expected)),
      operations.get(0).filter());
  assertEquals(expected, operations.get(0).data().get(MongoConfigConstants.ID));
}
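Each Overwrite pairs a filter document (keyed by the generated _id) with a data document. Conceptually, applying such a pair against a MongoDB collection amounts to an upserting replaceOne; the sketch below uses the standard MongoDB Java driver and is not Drill's actual executor:
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.ReplaceOptions;
import org.bson.Document;

public class OverwriteApplySketch {
  // Replaces the document matching the filter, inserting it if absent (upsert),
  // which mirrors the overwrite semantics asserted in the test above.
  static void apply(MongoCollection<Document> collection, Document filter, Document data) {
    collection.replaceOne(filter, data, new ReplaceOptions().upsert(true));
  }
}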
use of org.apache.drill.metastore.components.tables.TableMetadataUnit in project drill by apache.
the class TestTablesTransformer method testToOverwriteAbsentMetadataType.
@Test
public void testToOverwriteAbsentMetadataType() {
  TableMetadataUnit basicUnit = TestData.basicTableMetadataUnit();
  List<TableMetadataUnit> units = Arrays.asList(
      basicUnit.toBuilder().metadataType(MetadataType.TABLE.name()).build(),
      basicUnit.toBuilder().metadataType(MetadataType.VIEW.name()).build());
  try {
    TRANSFORMER.toOverwrite(units);
    fail();
  } catch (RdbmsMetastoreException e) {
    assertThat(e.getMessage(), startsWith("Metadata mapper is absent for type"));
  }
}
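The same expectation can be written more compactly with assertThrows, available from JUnit 4.13 and in JUnit 5; this is an alternative phrasing, not the project's code:
// Alternative to the try/fail/catch pattern, assuming JUnit 4.13+ (org.junit.Assert.assertThrows)
// or JUnit 5 (org.junit.jupiter.api.Assertions.assertThrows) is on the classpath.
RdbmsMetastoreException e = assertThrows(RdbmsMetastoreException.class,
    () -> TRANSFORMER.toOverwrite(units));
assertThat(e.getMessage(), startsWith("Metadata mapper is absent for type"));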
use of org.apache.drill.metastore.components.tables.TableMetadataUnit in project drill by apache.
the class TestTablesTransformer method testToOverwriteOneUnit.
@Test
public void testToOverwriteOneUnit() {
  TableMetadataUnit basicUnit = TestData.basicTableMetadataUnit();
  List<TableMetadataUnit> units = Collections.singletonList(
      basicUnit.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation")
          .metadataType(MetadataType.TABLE.name()).build());
  List<RdbmsOperation.Overwrite> overwrites = TRANSFORMER.toOverwrite(units);
  assertEquals(1, overwrites.size());
  RdbmsOperation.Overwrite overwrite = overwrites.get(0);
  assertEquals(Tables.TABLES, overwrite.table());
  assertEquals(1, overwrite.deleteConditions().size());
  assertEquals(
      DSL.and(Tables.TABLES.STORAGE_PLUGIN.eq("dfs"), Tables.TABLES.WORKSPACE.eq("tmp"),
          Tables.TABLES.TABLE_NAME.eq("nation")),
      overwrite.deleteConditions().get(0));
}
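The delete conditions produced here are plain jOOQ Condition objects, so an executor can apply them directly through a DSLContext. The following is a sketch using jOOQ's standard API, not Drill's actual RDBMS metastore executor:
import java.util.List;
import org.jooq.Condition;
import org.jooq.DSLContext;
import org.jooq.Table;

public class DeleteConditionApplySketch {
  // Deletes all rows matching each condition from the target table,
  // e.g. Tables.TABLES with the condition asserted in the test above.
  static void deleteAll(DSLContext context, Table<?> table, List<Condition> conditions) {
    conditions.forEach(condition -> context.deleteFrom(table).where(condition).execute());
  }
}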
use of org.apache.drill.metastore.components.tables.TableMetadataUnit in project drill by apache.
the class TestTablesInputDataTransformer method testValidDataOneRecord.
@Test
public void testValidDataOneRecord() {
  Map<String, String> partitionKeys = new HashMap<>();
  partitionKeys.put("dir0", "2018");
  partitionKeys.put("dir1", "2019");
  List<String> partitionValues = Arrays.asList("a", "b", "c");
  Long lastModifiedTime = System.currentTimeMillis();
  TableMetadataUnit unit = TableMetadataUnit.builder()
      .storagePlugin("dfs").workspace("tmp").tableName("nation")
      .metadataKey(MetadataInfo.GENERAL_INFO_KEY)
      .partitionKeys(partitionKeys)
      .partitionValues(partitionValues)
      .lastModifiedTime(lastModifiedTime)
      .build();
  WriteData writeData = new InputDataTransformer<TableMetadataUnit>(metastoreSchema, partitionSchema, unitGetters)
      .units(Collections.singletonList(unit))
      .execute();
  Record tableRecord = GenericRecord.create(metastoreSchema);
  tableRecord.setField("storagePlugin", "dfs");
  tableRecord.setField("workspace", "tmp");
  tableRecord.setField("tableName", "nation");
  tableRecord.setField("metadataKey", MetadataInfo.GENERAL_INFO_KEY);
  tableRecord.setField("partitionKeys", partitionKeys);
  tableRecord.setField("partitionValues", partitionValues);
  tableRecord.setField("lastModifiedTime", lastModifiedTime);
  Record partitionRecord = GenericRecord.create(partitionSchema);
  partitionRecord.setField("storagePlugin", "dfs");
  partitionRecord.setField("workspace", "tmp");
  partitionRecord.setField("tableName", "nation");
  partitionRecord.setField("metadataKey", MetadataInfo.GENERAL_INFO_KEY);
  assertEquals(Collections.singletonList(tableRecord), writeData.records());
  assertEquals(partitionRecord, writeData.partition());
}
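The expected partition record carries only the four key fields (storagePlugin, workspace, tableName, metadataKey), which is consistent with identity partitioning over those columns in the underlying Iceberg table. A sketch of how such a partition spec could be declared with Iceberg's API follows; the actual Drill Metastore configuration may differ:
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;

public class MetastorePartitionSpecSketch {
  // Builds an identity partition spec over the four fields that appear
  // in the expected partition record above (assumed, not Drill's actual setup).
  static PartitionSpec buildSpec(Schema metastoreSchema) {
    return PartitionSpec.builderFor(metastoreSchema)
        .identity("storagePlugin")
        .identity("workspace")
        .identity("tableName")
        .identity("metadataKey")
        .build();
  }
}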