Example usage of org.apache.drill.metastore.components.tables.TableMetadataUnit in the Apache Drill project: the execute() method of the TablesOutputDataTransformer class.
@Override
public List<TableMetadataUnit> execute() {
  // Materializes one TableMetadataUnit per map of builder-setter handles to values.
  List<TableMetadataUnit> units = new ArrayList<>();
  for (Map<MethodHandle, Object> setterValues : valuesToSet()) {
    TableMetadataUnit.Builder unitBuilder = TableMetadataUnit.builder();
    setterValues.forEach((setter, value) -> {
      try {
        // Each handle targets a builder setter; invoke it as builder.<field>(value).
        setter.invokeWithArguments(unitBuilder, value);
      } catch (Throwable e) {
        // MethodHandle.invokeWithArguments declares Throwable, hence the broad catch;
        // the cause is preserved in the wrapping exception.
        throw new MongoMetastoreException(String.format("Unable to invoke setter for [%s] using [%s]", TableMetadataUnit.Builder.class.getSimpleName(), setter), e);
      }
    });
    units.add(unitBuilder.build());
  }
  return units;
}
Example usage of org.apache.drill.metastore.components.tables.TableMetadataUnit in the Apache Drill project: the testToOverwriteSeveralUnitsSameType() method of the TestTablesTransformer class.
@Test
public void testToOverwriteSeveralUnitsSameType() {
  // Two units of the same metadata type (TABLE) but different table names.
  TableMetadataUnit prototype = TestData.basicTableMetadataUnit();
  TableMetadataUnit regionUnit = prototype.toBuilder()
    .storagePlugin("dfs")
    .workspace("tmp")
    .tableName("region")
    .metadataType(MetadataType.TABLE.name())
    .build();
  TableMetadataUnit nationUnit = prototype.toBuilder()
    .storagePlugin("dfs")
    .workspace("tmp")
    .tableName("nation")
    .metadataType(MetadataType.TABLE.name())
    .build();

  List<RdbmsOperation.Overwrite> overwrites = TRANSFORMER.toOverwrite(Arrays.asList(regionUnit, nationUnit));

  // Same-type units collapse into a single overwrite targeting the TABLES table...
  assertEquals(1, overwrites.size());
  RdbmsOperation.Overwrite overwrite = overwrites.get(0);
  assertEquals(Tables.TABLES, overwrite.table());
  // ...while each unit still contributes its own delete condition.
  assertEquals(2, overwrite.deleteConditions().size());
}
Example usage of org.apache.drill.metastore.components.tables.TableMetadataUnit in the Apache Drill project: the testToOverwriteSeveralUnitsDifferentTypes() method of the TestTablesTransformer class.
@Test
public void testToOverwriteSeveralUnitsDifferentTypes() {
  // Two TABLE-type units plus one SEGMENT-type unit.
  TableMetadataUnit prototype = TestData.basicTableMetadataUnit();
  TableMetadataUnit regionTable = prototype.toBuilder()
    .storagePlugin("dfs")
    .workspace("tmp")
    .tableName("region")
    .metadataType(MetadataType.TABLE.name())
    .build();
  TableMetadataUnit nationTable = prototype.toBuilder()
    .storagePlugin("dfs")
    .workspace("tmp")
    .tableName("nation")
    .metadataType(MetadataType.TABLE.name())
    .build();
  TableMetadataUnit nationSegment = prototype.toBuilder()
    .storagePlugin("dfs")
    .workspace("tmp")
    .tableName("nation")
    .metadataType(MetadataType.SEGMENT.name())
    .build();

  List<RdbmsOperation.Overwrite> overwrites =
    TRANSFORMER.toOverwrite(Arrays.asList(regionTable, nationTable, nationSegment));

  // Units are grouped by metadata type: one overwrite for TABLE, one for SEGMENT.
  assertEquals(2, overwrites.size());
}
Example usage of org.apache.drill.metastore.components.tables.TableMetadataUnit in the Apache Drill project: the testToDeleteConditionsTables() method of the TestTablesMetadataMapper class.
@Test
public void testToDeleteConditionsTables() {
  TableMetadataUnit prototype = TestData.basicTableMetadataUnit();
  List<TableMetadataUnit> units = Arrays.asList(
    prototype.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("region").build(),
    prototype.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation").build());

  // For the TABLES table, each unit maps to a condition on (storage plugin, workspace, table name).
  Condition[] expectedConditions = new Condition[] {
    DSL.and(
      Tables.TABLES.STORAGE_PLUGIN.eq("dfs"),
      Tables.TABLES.WORKSPACE.eq("tmp"),
      Tables.TABLES.TABLE_NAME.eq("region")),
    DSL.and(
      Tables.TABLES.STORAGE_PLUGIN.eq("dfs"),
      Tables.TABLES.WORKSPACE.eq("tmp"),
      Tables.TABLES.TABLE_NAME.eq("nation"))
  };

  List<Condition> actualConditions = TablesMetadataMapper.TableMapper.get().toDeleteConditions(units);

  assertEquals(expectedConditions.length, actualConditions.size());
  assertThat(actualConditions, hasItems(expectedConditions));
}
Example usage of org.apache.drill.metastore.components.tables.TableMetadataUnit in the Apache Drill project: the testToDeleteConditionsSegments() method of the TestTablesMetadataMapper class.
@Test
public void testToDeleteConditionsSegments() {
  TableMetadataUnit prototype = TestData.basicTableMetadataUnit();
  // Three segment units: two share metadata key "2008" (different identifiers), one uses "2009".
  List<TableMetadataUnit> units = Arrays.asList(
    prototype.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation")
      .metadataKey("2008").metadataIdentifier("2008").build(),
    prototype.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation")
      .metadataKey("2008").metadataIdentifier("2008/Q1").build(),
    prototype.toBuilder().storagePlugin("dfs").workspace("tmp").tableName("nation")
      .metadataKey("2009").metadataIdentifier("2009").build());

  // Delete conditions for SEGMENTS are keyed by metadata key, so the two "2008"
  // units produce a single condition — two conditions total.
  Condition[] expectedConditions = new Condition[] {
    DSL.and(
      Tables.SEGMENTS.STORAGE_PLUGIN.eq("dfs"),
      Tables.SEGMENTS.WORKSPACE.eq("tmp"),
      Tables.SEGMENTS.TABLE_NAME.eq("nation"),
      Tables.SEGMENTS.METADATA_KEY.eq("2008")),
    DSL.and(
      Tables.SEGMENTS.STORAGE_PLUGIN.eq("dfs"),
      Tables.SEGMENTS.WORKSPACE.eq("tmp"),
      Tables.SEGMENTS.TABLE_NAME.eq("nation"),
      Tables.SEGMENTS.METADATA_KEY.eq("2009"))
  };

  List<Condition> actualConditions = TablesMetadataMapper.SegmentMapper.get().toDeleteConditions(units);

  assertEquals(expectedConditions.length, actualConditions.size());
  assertThat(actualConditions, hasItems(expectedConditions));
}
Aggregations