Example usage of org.apache.beam.sdk.extensions.sql.meta.Table in the Apache Beam project, from the method initializeBeamTableProvider in the class ZetaSQLPushDownTest.
/**
 * Creates the shared {@code TestTableProvider} with two in-memory tables — one supporting
 * project push-down and one supporting both project and filter push-down — and seeds each
 * table with the same two rows.
 */
private static void initializeBeamTableProvider() {
  Table withProjectPushDown = getTable("InMemoryTableProject", PushDownOptions.PROJECT);
  Table withFullPushDown = getTable("InMemoryTableBoth", PushDownOptions.BOTH);

  tableProvider = new TestTableProvider();
  tableProvider.createTable(withProjectPushDown);
  tableProvider.createTable(withFullPushDown);

  Row[] seedRows = {
    row(BASIC_SCHEMA, 100L, 1L, "one", 100L),
    row(BASIC_SCHEMA, 200L, 2L, "two", 200L)
  };
  tableProvider.addRows(withProjectPushDown.getName(), seedRows);
  tableProvider.addRows(withFullPushDown.getName(), seedRows);
}
Example usage of org.apache.beam.sdk.extensions.sql.meta.Table in the Apache Beam project, from the method registerAllTablesByInMemoryMetaStore in the class BeamSqlEnvRunner.
/**
 * Registers every TPC-DS table into the given {@link InMemoryMetaStore}, setting each table's
 * schema and the location where its corresponding data file is stored.
 *
 * @param inMemoryMetaStore the metastore that receives the table definitions
 * @param dataSize the dataset scale directory name (e.g. "1G") used to build each data path
 * @throws Exception if a table cannot be created in the metastore
 */
private static void registerAllTablesByInMemoryMetaStore(InMemoryMetaStore inMemoryMetaStore, String dataSize) throws Exception {
  JSONObject properties = new JSONObject();
  properties.put("csvformat", "InformixUnload");
  Map<String, Schema> schemaMap = TpcdsSchemas.getTpcdsSchemas();
  for (Map.Entry<String, Schema> entry : schemaMap.entrySet()) {
    String tableName = entry.getKey();
    // Use the entry's value directly — re-looking the key up in the map is redundant.
    Schema tableSchema = entry.getValue();
    checkArgumentNotNull(tableSchema, "Table schema can't be null for table: " + tableName);
    String dataLocation = DATA_DIRECTORY + "/" + dataSize + "/" + tableName + ".dat";
    Table table =
        Table.builder()
            .name(tableName)
            .schema(tableSchema)
            .location(dataLocation)
            .properties(properties)
            .type("text")
            .build();
    inMemoryMetaStore.createTable(table);
  }
}
Example usage of org.apache.beam.sdk.extensions.sql.meta.Table in the Apache Beam project, from the method buildUp in the class BeamAggregateProjectMergeRuleTest.
/**
 * Builds a fresh in-memory SQL environment backed by three test tables, one per push-down
 * mode (PROJECT, FILTER, NONE), each seeded with a single row so the cost-based optimizer
 * rules have statistics to work with.
 */
@Before
public void buildUp() {
  TestTableProvider provider = new TestTableProvider();
  String[] names = {"TEST_PROJECT", "TEST_FILTER", "TEST_NONE"};
  PushDownOptions[] modes = {
    PushDownOptions.PROJECT, PushDownOptions.FILTER, PushDownOptions.NONE
  };
  for (int i = 0; i < names.length; i++) {
    provider.createTable(getTable(names[i], modes[i]));
    // Rules are cost based, need rows to optimize!
    provider.addRows(names[i], Row.withSchema(BASIC_SCHEMA).addValues(1, 2, "3", 4).build());
  }
  sqlEnv = BeamSqlEnv.inMemory(provider);
}
Example usage of org.apache.beam.sdk.extensions.sql.meta.Table in the Apache Beam project, from the method buildUp in the class BigQueryFilterTest.
/**
 * Sets up a SQL environment over a single in-memory table named "TEST" (no push-down
 * support), pre-loaded with two rows.
 */
@Before
public void buildUp() {
  TestTableProvider provider = new TestTableProvider();
  Table testTable = getTable("TEST", PushDownOptions.NONE);
  provider.createTable(testTable);
  provider.addRows(
      testTable.getName(),
      row(BASIC_SCHEMA, 100, 1, "one", (short) 100, true),
      row(BASIC_SCHEMA, 200, 2, "two", (short) 200, false));
  sqlEnv =
      BeamSqlEnv.builder(provider)
          .setPipelineOptions(PipelineOptionsFactory.create())
          .build();
}
Example usage of org.apache.beam.sdk.extensions.sql.meta.Table in the Apache Beam project, from the method testEmptyTable in the class BigQueryRowCountIT.
/**
 * Verifies that row-count statistics are available for an empty BigQuery table and report
 * (approximately) zero rows.
 */
@Test
public void testEmptyTable() {
  BigQueryTableProvider tableProvider = new BigQueryTableProvider();
  Table emptyTable = getTable("testTable", bigQuery.tableSpec());
  BeamSqlTable sqlTable = tableProvider.buildBeamSqlTable(emptyTable);
  BeamTableStatistics stats = sqlTable.getTableStatistics(TestPipeline.testingPipelineOptions());
  assertNotNull(stats);
  assertEquals(0d, stats.getRowCount(), 0.1);
}
Aggregations