Use of org.apache.beam.sdk.extensions.sql.impl.BeamSqlEnv in project beam by apache: class ThreeTablesSchema, method testSystemNotReorderingMediumSmallLarge.
@Test
public void testSystemNotReorderingMediumSmallLarge() {
  TestTableProvider tableProvider = new TestTableProvider();
  createThreeTables(tableProvider);
  BeamSqlEnv env = BeamSqlEnv.withTableProvider(tableProvider);
  // This join is already correctly ordered (the large table is at the top),
  // so the planner should not reorder it.
  BeamRelNode parsedQuery =
      env.parseQuery(
          "select * from medium_table "
              + " JOIN small_table on medium_table.small_key = small_table.medium_key "
              + " JOIN large_table on large_table.medium_key = medium_table.large_key ");
  assertTopTableInJoins(parsedQuery, "large_table");
}
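The helpers createThreeTables and assertTopTableInJoins are defined elsewhere in ThreeTablesSchema and are not shown here. A minimal sketch of what the setup could look like, reusing the CREATE EXTERNAL TABLE ... TYPE 'text' DDL style seen later on this page; the column lists are inferred from the join keys in the query above, and everything about table sizing is an illustrative assumption:

// Hypothetical sketch: register three tables whose relative sizes
// (small < medium < large) let the planner's cost model tell them apart.
private void createThreeTables(TestTableProvider tableProvider) {
  BeamSqlEnv env = BeamSqlEnv.withTableProvider(tableProvider);
  env.executeDdl(
      "CREATE EXTERNAL TABLE small_table (id INTEGER, medium_key INTEGER) TYPE 'text'");
  env.executeDdl(
      "CREATE EXTERNAL TABLE medium_table (id INTEGER, small_key INTEGER, large_key INTEGER) TYPE 'text'");
  env.executeDdl(
      "CREATE EXTERNAL TABLE large_table (id INTEGER, medium_key INTEGER) TYPE 'text'");
  // Rows would then be added (e.g. through TestTableProvider's in-memory storage)
  // so that the three tables actually differ in size.
}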
Use of org.apache.beam.sdk.extensions.sql.impl.BeamSqlEnv in project beam by apache: class BeamDDLNestedTypesTest, method executeCreateTableWith.
private Table executeCreateTableWith(String fieldType) throws SqlParseException {
  String createTable =
      "CREATE EXTERNAL TABLE tablename ( "
          + "fieldName "
          + fieldType
          + " ) "
          + "TYPE 'text' "
          + "LOCATION '/home/admin/person'\n";
  System.out.println(createTable);
  TestTableProvider tableProvider = new TestTableProvider();
  BeamSqlEnv env = BeamSqlEnv.withTableProvider(tableProvider);
  env.executeDdl(createTable);
  return tableProvider.getTables().get("tablename");
}
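A hedged example of how a test might call this helper with a nested field type; the ARRAY type string and the assertions are illustrative, not taken from BeamDDLNestedTypesTest itself:

// Illustrative call: declare the single field as an array of integers,
// then check that the parsed table metadata recorded the expected field.
Table table = executeCreateTableWith("ARRAY<INTEGER>");
assertEquals("tablename", table.getName());
assertEquals("fieldName", table.getSchema().getField(0).getName());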
Use of org.apache.beam.sdk.extensions.sql.impl.BeamSqlEnv in project beam by apache: class BigtableTableFlatTest, method testSimpleInsert.
@Test
public void testSimpleInsert() {
  final String tableId = "beamWriteTable";
  emulatorWrapper.createTable(tableId, FAMILY_TEST);
  BeamSqlEnv sqlEnv = BeamSqlEnv.inMemory(new BigtableTableProvider());
  sqlEnv.executeDdl(createFlatTableString(tableId, location(tableId)));
  String query =
      "INSERT INTO beamWriteTable(key, boolColumn, longColumn, stringColumn, doubleColumn) "
          + "VALUES ('key', TRUE, CAST(10 AS bigint), 'stringValue', 5.5)";
  BeamSqlRelUtils.toPCollection(writePipeline, sqlEnv.parseQuery(query));
  writePipeline.run().waitUntilFinish();
  PCollection<com.google.bigtable.v2.Row> bigTableRows =
      readPipeline
          .apply(readTransform(tableId))
          .apply(MapElements.via(new ReplaceCellTimestamp()));
  PAssert.that(bigTableRows).containsInAnyOrder(bigTableRow());
  readPipeline.run().waitUntilFinish();
}
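createFlatTableString, location, readTransform, ReplaceCellTimestamp, and bigTableRow are helpers defined elsewhere in BigtableTableFlatTest. A plausible reconstruction of the DDL helper, with column names and types inferred from the INSERT statement above (the NOT NULL modifiers and exact formatting are assumptions):

// Hypothetical reconstruction: flat-table DDL matching the columns the INSERT uses.
private String createFlatTableString(String tableId, String location) {
  return "CREATE EXTERNAL TABLE " + tableId + " ( \n"
      + "  key VARCHAR NOT NULL, \n"
      + "  boolColumn BOOLEAN NOT NULL, \n"
      + "  longColumn BIGINT NOT NULL, \n"
      + "  stringColumn VARCHAR NOT NULL, \n"
      + "  doubleColumn DOUBLE NOT NULL \n"
      + ") \n"
      + "TYPE 'bigtable' \n"
      + "LOCATION '" + location + "'";
}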
Use of org.apache.beam.sdk.extensions.sql.impl.BeamSqlEnv in project beam by apache: class DataStoreReadWriteIT, method testDataStoreV1SqlWriteRead_withoutKey.
@Test
public void testDataStoreV1SqlWriteRead_withoutKey() {
  BeamSqlEnv sqlEnv = BeamSqlEnv.inMemory(new DataStoreV1TableProvider());
  String projectId = options.getProject();
  String createTableStatement =
      "CREATE EXTERNAL TABLE TEST( \n"
          + " `content` VARCHAR \n"
          + ") \n"
          + "TYPE 'datastoreV1' \n"
          + "LOCATION '" + projectId + "/" + KIND + "'";
  sqlEnv.executeDdl(createTableStatement);
  String insertStatement = "INSERT INTO TEST VALUES ( '3000' )";
  BeamSqlRelUtils.toPCollection(writePipeline, sqlEnv.parseQuery(insertStatement));
  writePipeline.run().waitUntilFinish();
  String selectTableStatement = "SELECT * FROM TEST";
  PCollection<Row> output =
      BeamSqlRelUtils.toPCollection(readPipeline, sqlEnv.parseQuery(selectTableStatement));
  assertThat(output.getSchema(), equalTo(SOURCE_SCHEMA_WITHOUT_KEY));
  PipelineResult.State state = readPipeline.run().waitUntilFinish(Duration.standardMinutes(5));
  assertThat(state, equalTo(State.DONE));
}
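SOURCE_SCHEMA_WITHOUT_KEY is a constant defined elsewhere in DataStoreReadWriteIT. Given the DDL above it should describe a single `content` VARCHAR column; a minimal sketch, where the field's nullability is an assumption:

// Hypothetical reconstruction: the expected read schema for the single `content` column.
private static final Schema SOURCE_SCHEMA_WITHOUT_KEY =
    Schema.builder().addNullableField("content", Schema.FieldType.STRING).build();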
Use of org.apache.beam.sdk.extensions.sql.impl.BeamSqlEnv in project beam by apache: class KafkaTableProviderIT, method testFake.
@Test
public void testFake() throws InterruptedException {
  pipeline.getOptions().as(DirectOptions.class).setBlockOnRun(false);
  String createTableString =
      String.format(
          "CREATE EXTERNAL TABLE kafka_table(\n"
              + "f_long BIGINT NOT NULL, \n"
              + "f_int INTEGER NOT NULL, \n"
              + "f_string VARCHAR NOT NULL \n"
              + ") \n"
              + "TYPE 'kafka' \n"
              + "LOCATION '%s'\n"
              + "TBLPROPERTIES '%s'",
          buildLocation(), objectsProvider.getKafkaPropertiesString());
  TableProvider tb = new KafkaTableProvider();
  BeamSqlEnv env = BeamSqlEnv.inMemory(tb);
  env.executeDdl(createTableString);
  PCollection<Row> queryOutput =
      BeamSqlRelUtils.toPCollection(pipeline, env.parseQuery("SELECT * FROM kafka_table"));
  queryOutput
      .apply(ParDo.of(new FakeKvPair()))
      .setCoder(KvCoder.of(StringUtf8Coder.of(), RowCoder.of(TEST_TABLE_SCHEMA)))
      .apply(
          "waitForSuccess",
          ParDo.of(
              new StreamAssertEqual(
                  ImmutableSet.of(generateRow(0), generateRow(1), generateRow(2)))));
  queryOutput.apply(logRecords(""));
  pipeline.run();
  TimeUnit.SECONDS.sleep(4);
  produceSomeRecords(3);
  // Poll for up to ~18 seconds (200 * 90 ms) for StreamAssertEqual to set the success flag.
  for (int i = 0; i < 200; i++) {
    if (FLAG.getOrDefault(pipeline.getOptions().getOptionsId(), false)) {
      return;
    }
    TimeUnit.MILLISECONDS.sleep(90);
  }
  Assert.fail();
}
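FakeKvPair, StreamAssertEqual, FLAG, generateRow, logRecords, and produceSomeRecords are test utilities defined elsewhere in KafkaTableProviderIT. A hedged sketch of the pattern the assertion DoFn implements, assuming FLAG is a shared map keyed by the pipeline's options id; the state wiring and names below are illustrative, not the class's actual implementation:

// Hypothetical sketch: accumulate rows in per-key state and flip the shared
// success flag once every expected row has been observed.
private static class StreamAssertEqual extends DoFn<KV<String, Row>, Void> {
  private final Set<Row> expected;

  StreamAssertEqual(Set<Row> expected) {
    this.expected = expected;
  }

  @StateId("seen")
  private final StateSpec<BagState<Row>> seenSpec = StateSpecs.bag();

  @ProcessElement
  public void process(ProcessContext context, @StateId("seen") BagState<Row> seen) {
    seen.add(context.element().getValue());
    Set<Row> seenSoFar = Sets.newHashSet(seen.read());
    if (seenSoFar.containsAll(expected)) {
      // Lets the polling loop in testFake observe success.
      FLAG.put(context.getPipelineOptions().getOptionsId(), true);
    }
  }
}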