Search in sources:

Example 1 with NullDBWritable

Use of org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable in project hadoop by apache.

From the class TestDbClasses, method testDataDrivenDBInputFormat.

@Test(timeout = 10000)
public void testDataDrivenDBInputFormat() throws Exception {
    // With a single map task, getSplits must produce exactly one split
    // whose bounding clauses are the trivial "1=1" predicates.
    JobContext ctx = mock(JobContext.class);
    Configuration conf = new Configuration();
    conf.setInt(MRJobConfig.NUM_MAPS, 1);
    when(ctx.getConfiguration()).thenReturn(conf);
    DataDrivenDBInputFormat<NullDBWritable> inputFormat = new DataDrivenDBInputFormat<NullDBWritable>();
    List<InputSplit> splitList = inputFormat.getSplits(ctx);
    assertEquals(1, splitList.size());
    DataDrivenDBInputSplit firstSplit = (DataDrivenDBInputSplit) splitList.get(0);
    assertEquals("1=1", firstSplit.getLowerClause());
    assertEquals("1=1", firstSplit.getUpperClause());
    // Second scenario: verify that the bounding query set directly on the
    // Configuration is later overwritten by setInput's bounding query.
    conf.setInt(MRJobConfig.NUM_MAPS, 2);
    DataDrivenDBInputFormat.setBoundingQuery(conf, "query");
    assertEquals("query", conf.get(DBConfiguration.INPUT_BOUNDING_QUERY));
    Job mockJob = mock(Job.class);
    when(mockJob.getConfiguration()).thenReturn(conf);
    DataDrivenDBInputFormat.setInput(mockJob, NullDBWritable.class, "query", "Bounding Query");
    assertEquals("Bounding Query", conf.get(DBConfiguration.INPUT_BOUNDING_QUERY));
}
Also used : DataDrivenDBInputSplit(org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit) Configuration(org.apache.hadoop.conf.Configuration) NullDBWritable(org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable) JobContext(org.apache.hadoop.mapreduce.JobContext) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapreduce.InputSplit) DBInputSplit(org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit) DataDrivenDBInputSplit(org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit) Test(org.junit.Test)

Example 2 with NullDBWritable

Use of org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable in project hadoop by apache.

From the class TestDbClasses, method testOracleDBRecordReader.

/**
   * Verifies the SQL generated by OracleDBRecordReader, including the
   * ROWNUM-based pagination wrapper produced from the split boundaries.
   */
@Test(timeout = 20000)
public void testOracleDBRecordReader() throws Exception {
    // Split covering rows (1, 10] — drives the rownum bounds in the query.
    DBInputSplit split = new DBInputSplit(1, 10);
    Configuration conf = new Configuration();
    Connection connection = DriverForTest.getConnection();
    DBConfiguration dbConf = new DBConfiguration(conf);
    dbConf.setInputOrderBy("Order");
    String[] fieldNames = { "f1", "f2" };
    OracleDBRecordReader<NullDBWritable> reader = new OracleDBRecordReader<NullDBWritable>(split, NullDBWritable.class, conf, connection, dbConf, "condition", fieldNames, "table");
    // The inner SELECT carries the fields, condition and ORDER BY; the two
    // outer layers apply the Oracle ROWNUM pagination idiom.
    assertEquals("SELECT * FROM (SELECT a.*,ROWNUM dbif_rno FROM ( SELECT f1, f2 FROM table WHERE condition ORDER BY Order ) a WHERE rownum <= 10 ) WHERE dbif_rno > 1", reader.getSelectQuery());
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) NullDBWritable(org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable) Connection(java.sql.Connection) DBInputSplit(org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit) DataDrivenDBInputSplit(org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit) Test(org.junit.Test)

Aggregations

Configuration (org.apache.hadoop.conf.Configuration)2 DBInputSplit (org.apache.hadoop.mapreduce.lib.db.DBInputFormat.DBInputSplit)2 NullDBWritable (org.apache.hadoop.mapreduce.lib.db.DBInputFormat.NullDBWritable)2 DataDrivenDBInputSplit (org.apache.hadoop.mapreduce.lib.db.DataDrivenDBInputFormat.DataDrivenDBInputSplit)2 Test (org.junit.Test)2 Connection (java.sql.Connection)1 InputSplit (org.apache.hadoop.mapreduce.InputSplit)1 Job (org.apache.hadoop.mapreduce.Job)1 JobContext (org.apache.hadoop.mapreduce.JobContext)1