Use of java.sql.Statement in project groovy by apache.
The class Sql, method executeInsert.
/**
 * Executes the given SQL statement (typically an INSERT statement).
 * Use this variant when you want to receive the values of any
 * auto-generated columns, such as an autoincrement ID field.
 * See {@link #executeInsert(GString)} for more details.
 * <p>
 * Resource handling is performed automatically where appropriate.
 *
 * @param sql The SQL statement to execute
 * @return A list of the auto-generated column values for each
 *         inserted row (typically auto-generated keys)
 * @throws SQLException if a database access error occurs
 */
public List<List<Object>> executeInsert(String sql) throws SQLException {
    Connection connection = createConnection();
    Statement statement = null;
    try {
        statement = getStatement(connection, sql);
        this.updateCount = statement.executeUpdate(sql, Statement.RETURN_GENERATED_KEYS);
        ResultSet keys = statement.getGeneratedKeys();
        return calculateKeys(keys);
    } catch (SQLException e) {
        LOG.warning("Failed to execute: " + sql + " because: " + e.getMessage());
        throw e;
    } finally {
        closeResources(connection, statement);
    }
}
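For context, here is a minimal usage sketch of executeInsert, written in Java against groovy.sql.Sql. The database URL, driver, and person table are illustrative assumptions, not taken from the source; any table with an auto-generated key column would behave the same way.

import groovy.sql.Sql;
import java.util.List;

public class ExecuteInsertDemo {
    public static void main(String[] args) throws Exception {
        // Hypothetical in-memory HSQLDB database (assumed for the example).
        Sql sql = Sql.newInstance("jdbc:hsqldb:mem:demo", "sa", "", "org.hsqldb.jdbcDriver");
        try {
            sql.execute("CREATE TABLE person (id INTEGER IDENTITY, name VARCHAR(100))");
            // One inner list of generated key(s) per inserted row.
            List<List<Object>> keys =
                    sql.executeInsert("INSERT INTO person (name) VALUES ('Paul')");
            System.out.println(keys);   // e.g. [[0]]
        } finally {
            sql.close();
        }
    }
}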
Use of java.sql.Statement in project groovy by apache.
The class Sql, method getStatement.
private Statement getStatement(Connection connection, String sql) throws SQLException {
    LOG.fine(sql);
    Statement stmt = getAbstractStatement(new CreateStatementCommand(), connection, sql);
    configure(stmt);
    return stmt;
}
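The configure(stmt) call is a hook for adjusting the raw JDBC statement before it runs. Below is a brief sketch of the kind of per-statement settings such a hook might apply; the values and the helper name are illustrative assumptions, not the actual behaviour of Sql#configure.

import java.sql.SQLException;
import java.sql.Statement;

class StatementTuning {
    // Hypothetical example of common java.sql.Statement tuning knobs.
    static void configureExample(Statement stmt) throws SQLException {
        stmt.setFetchSize(100);     // rows fetched per driver round trip
        stmt.setMaxRows(10000);     // hard cap on rows returned
        stmt.setQueryTimeout(30);   // seconds before the driver cancels
    }
}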
Use of java.sql.Statement in project groovy by apache.
The class Sql, method execute.
/**
 * Executes the given piece of SQL.
 * Also saves the updateCount, if any, for subsequent examination.
 * <p>
 * Example usages:
 * <pre>
 * sql.execute "DROP TABLE IF EXISTS person"
 *
 * sql.execute """
 *     CREATE TABLE person (
 *         id INTEGER NOT NULL,
 *         firstname VARCHAR(100),
 *         lastname VARCHAR(100),
 *         location_id INTEGER
 *     )
 * """
 *
 * sql.execute """
 *     INSERT INTO person (id, firstname, lastname, location_id) VALUES (4, 'Paul', 'King', 40)
 * """
 * assert sql.updateCount == 1
 * </pre>
 * <p>
 * Resource handling is performed automatically where appropriate.
 *
 * @param sql the SQL to execute
 * @return <code>true</code> if the first result is a <code>ResultSet</code>
 *         object; <code>false</code> if it is an update count or there are
 *         no results
 * @throws SQLException if a database access error occurs
 */
public boolean execute(String sql) throws SQLException {
    Connection connection = createConnection();
    Statement statement = null;
    try {
        statement = getStatement(connection, sql);
        boolean isResultSet = statement.execute(sql);
        this.updateCount = statement.getUpdateCount();
        return isResultSet;
    } catch (SQLException e) {
        LOG.warning("Failed to execute: " + sql + " because: " + e.getMessage());
        throw e;
    } finally {
        closeResources(connection, statement);
    }
}
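The returned boolean mirrors the java.sql.Statement#execute contract: true means the first result is a ResultSet, false means an update count (or no result at all). A plain JDBC sketch of that contract follows; the connection and SQL text are assumed inputs.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class ExecuteContractDemo {
    static void run(Connection connection, String sqlText) throws SQLException {
        try (Statement stmt = connection.createStatement()) {
            boolean isResultSet = stmt.execute(sqlText);
            if (isResultSet) {
                try (ResultSet rs = stmt.getResultSet()) {
                    while (rs.next()) {
                        System.out.println(rs.getObject(1));
                    }
                }
            } else {
                // getUpdateCount() returns -1 when there are no more results.
                System.out.println("update count: " + stmt.getUpdateCount());
            }
        }
    }
}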
Use of java.sql.Statement in project hadoop by apache.
The class TestDataDrivenDBInputFormat, method testDateSplits.
@Test
public void testDateSplits() throws Exception {
    Statement s = connection.createStatement();
    final String DATE_TABLE = "datetable";
    final String COL = "foo";
    try {
        // delete the table if it already exists.
        s.executeUpdate("DROP TABLE " + DATE_TABLE);
    } catch (SQLException e) {
    }
    // Create the table.
    s.executeUpdate("CREATE TABLE " + DATE_TABLE + "(" + COL + " DATE)");
    s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-01')");
    s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-04-02')");
    s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2010-05-01')");
    s.executeUpdate("INSERT INTO " + DATE_TABLE + " VALUES('2011-04-01')");
    // commit this tx.
    connection.commit();
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "file:///");
    FileSystem fs = FileSystem.getLocal(conf);
    fs.delete(new Path(OUT_DIR), true);
    // now do a dd import
    Job job = Job.getInstance(conf);
    job.setMapperClass(ValMapper.class);
    job.setReducerClass(Reducer.class);
    job.setMapOutputKeyClass(DateCol.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputKeyClass(DateCol.class);
    job.setOutputValueClass(NullWritable.class);
    job.setNumReduceTasks(1);
    job.getConfiguration().setInt("mapreduce.map.tasks", 2);
    FileOutputFormat.setOutputPath(job, new Path(OUT_DIR));
    DBConfiguration.configureDB(job.getConfiguration(), DRIVER_CLASS, DB_URL, null, null);
    DataDrivenDBInputFormat.setInput(job, DateCol.class, DATE_TABLE, null, COL, COL);
    boolean ret = job.waitForCompletion(true);
    assertTrue("job failed", ret);
    // Check to see that we imported as much as we thought we did.
    assertEquals("Did not get all the records", 4,
            job.getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS).getValue());
}
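Conceptually, DataDrivenDBInputFormat bounds its splits by querying the minimum and maximum of the split column and then dividing that range among map tasks. The following is a rough JDBC sketch of that bounding query for the table above; it is a simplification for illustration, not the splitter's actual implementation.

import java.sql.Connection;
import java.sql.Date;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class SplitBoundsDemo {
    // Prints the range a date-based splitter would divide into splits.
    static void printBounds(Connection connection) throws SQLException {
        try (Statement s = connection.createStatement();
             ResultSet rs = s.executeQuery("SELECT MIN(foo), MAX(foo) FROM datetable")) {
            if (rs.next()) {
                Date lo = rs.getDate(1);
                Date hi = rs.getDate(2);
                System.out.println("split range: " + lo + " .. " + hi);
            }
        }
    }
}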
Use of java.sql.Statement in project hadoop by apache.
The class DBCountPageView, method createTables.
private void createTables() throws SQLException {
    String dataType = "BIGINT NOT NULL";
    if (isOracle) {
        dataType = "NUMBER(19) NOT NULL";
    }
    String createAccess = "CREATE TABLE "
            + "HAccess(url VARCHAR(100) NOT NULL,"
            + " referrer VARCHAR(100),"
            + " time " + dataType + ", "
            + " PRIMARY KEY (url, time))";
    String createPageview = "CREATE TABLE "
            + "Pageview(url VARCHAR(100) NOT NULL,"
            + " pageview " + dataType + ", "
            + " PRIMARY KEY (url))";
    Statement st = connection.createStatement();
    try {
        st.executeUpdate(createAccess);
        st.executeUpdate(createPageview);
        connection.commit();
    } finally {
        st.close();
    }
}
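For completeness, here is a hedged sketch of populating the HAccess table created above with a PreparedStatement; the column values and helper name are invented for the example, and the real test data is generated elsewhere in DBCountPageView.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

class PopulateAccessDemo {
    // Illustrative insert only; assumes auto-commit is disabled, as in the
    // surrounding class, so the explicit commit is needed.
    static void insertAccess(Connection connection, String url,
                             String referrer, long time) throws SQLException {
        String sql = "INSERT INTO HAccess (url, referrer, time) VALUES (?, ?, ?)";
        try (PreparedStatement ps = connection.prepareStatement(sql)) {
            ps.setString(1, url);
            ps.setString(2, referrer);
            ps.setLong(3, time);
            ps.executeUpdate();
        }
        connection.commit();
    }
}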