Use of org.hibernate.engine.jdbc.batch.internal.BasicBatchKey in the hibernate-orm project (by Hibernate).
The following snippet is from the class AbstractEntityPersister, method insert().
/**
 * Perform an SQL INSERT.
 * <p/>
 * This form is used for all non-root tables, as well as for the root table
 * in cases where the identifier value is known before the insert occurs.
 *
 * @param id the identifier of the entity row being written
 * @param fields the entity state to write (the snapshot taken when the insert was issued)
 * @param notNull per-property "include in this insert" flags, passed through to dehydrate
 * @param j index of the table being written (0 = the root/primary table)
 * @param sql the INSERT statement text for table {@code j}
 * @param object the entity instance (not referenced in this method body)
 * @param session source of the JDBC coordinator used to prepare/batch the statement
 * @throws HibernateException wrapping any SQLException raised by the JDBC work
 */
protected void insert(final Serializable id, final Object[] fields, final boolean[] notNull, final int j, final String sql, final Object object, final SharedSessionContractImplementor session) throws HibernateException {
// inverse tables are owned (and written) by the other side of the mapping
if (isInverseTable(j)) {
return;
}
// for a nullable secondary table, an all-null state means "no row" -- skip the insert
if (isNullableTable(j) && isAllNull(fields, j)) {
return;
}
if (LOG.isTraceEnabled()) {
LOG.tracev("Inserting entity: {0}", MessageHelper.infoString(this, id, getFactory()));
if (j == 0 && isVersioned()) {
LOG.tracev("Version: {0}", Versioning.getVersion(fields, this));
}
}
// TODO : shouldn't inserts be Expectations.NONE?
final Expectation expectation = Expectations.appropriateExpectation(insertResultCheckStyles[j]);
// we can't batch joined inserts, *especially* not if it is an identity insert;
// nor can we batch statements where the expectation is based on an output param
// NOTE(review): unlike delete(), this does not consult isBatchable() -- presumably
// intentional for inserts, but worth confirming against the other write paths.
final boolean useBatch = j == 0 && expectation.canBeBatched();
// NOTE(review): "inserBatchKey" (missing 't') is the field's declared name elsewhere
// in this class, so it cannot be renamed here without touching every other use.
if (useBatch && inserBatchKey == null) {
inserBatchKey = new BasicBatchKey(getEntityName() + "#INSERT", expectation);
}
final boolean callable = isInsertCallable(j);
try {
// obtain the statement: from the shared batch when batching, otherwise a fresh one
final PreparedStatement insert;
if (useBatch) {
insert = session.getJdbcCoordinator().getBatch(inserBatchKey).getBatchStatement(sql, callable);
} else {
insert = session.getJdbcCoordinator().getStatementPreparer().prepareStatement(sql, callable);
}
try {
// parameter positions are 1-based; the expectation may claim leading parameters
int index = 1;
index += expectation.prepare(insert);
// Write the values of fields onto the prepared statement - we MUST use the state at the time the
// insert was issued (cos of foreign key constraints). Not necessarily the object's current state
dehydrate(id, fields, null, notNull, propertyColumnInsertable, j, insert, session, index, false);
if (useBatch) {
// defer execution; the JDBC coordinator flushes the batch when it fills up
session.getJdbcCoordinator().getBatch(inserBatchKey).addToBatch();
} else {
expectation.verifyOutcome(session.getJdbcCoordinator().getResultSetReturn().executeUpdate(insert), insert, -1);
}
} catch (SQLException e) {
if (useBatch) {
// a failed statement poisons the whole batch, so discard it
session.getJdbcCoordinator().abortBatch();
}
throw e;
} finally {
// batched statements are released when the batch itself executes/aborts
if (!useBatch) {
session.getJdbcCoordinator().getResourceRegistry().release(insert);
session.getJdbcCoordinator().afterStatementExecution();
}
}
} catch (SQLException e) {
throw getFactory().getSQLExceptionHelper().convert(e, "could not insert: " + MessageHelper.infoString(this), sql);
}
}
Use of org.hibernate.engine.jdbc.batch.internal.BasicBatchKey in the hibernate-orm project (by Hibernate).
The following snippet is from the class BatchingTest, method testSessionBatchingUsage().
/**
 * Verifies that the session-level JDBC batch size (Session#setJdbcBatchSize) takes
 * precedence over the BatchBuilder-level size: the builder is created with size 2,
 * the session is set to 3, and the implicit batch execution is observed only after
 * the third addToBatch() -- not after the second.
 */
@Test
public void testSessionBatchingUsage() throws Exception {
Session session = openSession();
session.setJdbcBatchSize(3);
SessionImplementor sessionImpl = (SessionImplementor) session;
final JdbcCoordinator jdbcCoordinator = sessionImpl.getJdbcCoordinator();
LogicalConnectionImplementor logicalConnection = jdbcCoordinator.getLogicalConnection();
// set up some tables to use
Statement statement = jdbcCoordinator.getStatementPreparer().createStatement();
String dropSql = getDialect().getDropTableString("SANDBOX_JDBC_TST");
try {
jdbcCoordinator.getResultSetReturn().execute(statement, dropSql);
} catch (Exception e) {
// ignore if the DB doesn't support "if exists" and the table doesn't exist
}
jdbcCoordinator.getResultSetReturn().execute(statement, "create table SANDBOX_JDBC_TST ( ID integer, NAME varchar(100) )");
// the DDL statement is still registered as an open resource, and the connection is live
assertTrue(jdbcCoordinator.getResourceRegistry().hasRegisteredResources());
assertTrue(logicalConnection.isPhysicallyConnected());
jdbcCoordinator.getResourceRegistry().release(statement);
assertFalse(jdbcCoordinator.getResourceRegistry().hasRegisteredResources());
// after_transaction specified
assertTrue(logicalConnection.isPhysicallyConnected());
// ok, now we can get down to it...
// same as Session#getTransaction
Transaction txn = session.getTransaction();
txn.begin();
// builder-level size is 2; the session-level size of 3 (set above) should win
final BatchBuilder batchBuilder = new BatchBuilderImpl(2);
final BatchKey batchKey = new BasicBatchKey("this", Expectations.BASIC);
final Batch insertBatch = batchBuilder.buildBatch(batchKey, jdbcCoordinator);
assertTrue("unexpected Batch impl", BatchingBatch.class.isInstance(insertBatch));
final JournalingBatchObserver batchObserver = new JournalingBatchObserver();
insertBatch.addObserver(batchObserver);
final String insertSql = "insert into SANDBOX_JDBC_TST( ID, NAME ) values ( ?, ? )";
// first row: nothing executes yet, neither explicitly nor implicitly
PreparedStatement insert = insertBatch.getBatchStatement(insertSql, false);
insert.setLong(1, 1);
insert.setString(2, "name");
assertEquals(0, batchObserver.getExplicitExecutionCount());
assertEquals(0, batchObserver.getImplicitExecutionCount());
insertBatch.addToBatch();
assertEquals(0, batchObserver.getExplicitExecutionCount());
assertEquals(0, batchObserver.getImplicitExecutionCount());
assertTrue(jdbcCoordinator.getResourceRegistry().hasRegisteredResources());
// second row: the batch hands back the SAME PreparedStatement; still no execution
PreparedStatement insert2 = insertBatch.getBatchStatement(insertSql, false);
assertSame(insert, insert2);
insert = insert2;
insert.setLong(1, 2);
insert.setString(2, "another name");
assertEquals(0, batchObserver.getExplicitExecutionCount());
assertEquals(0, batchObserver.getImplicitExecutionCount());
insertBatch.addToBatch();
assertEquals(0, batchObserver.getExplicitExecutionCount());
assertEquals(0, batchObserver.getImplicitExecutionCount());
assertTrue(jdbcCoordinator.getResourceRegistry().hasRegisteredResources());
// third row: adding it fills the (session-sized) batch of 3 and triggers ONE implicit execution
PreparedStatement insert3 = insertBatch.getBatchStatement(insertSql, false);
assertSame(insert, insert3);
insert = insert3;
insert.setLong(1, 3);
insert.setString(2, "yet another name");
assertEquals(0, batchObserver.getExplicitExecutionCount());
assertEquals(0, batchObserver.getImplicitExecutionCount());
insertBatch.addToBatch();
assertEquals(0, batchObserver.getExplicitExecutionCount());
assertEquals(1, batchObserver.getImplicitExecutionCount());
assertTrue(jdbcCoordinator.getResourceRegistry().hasRegisteredResources());
// explicit execute() flushes whatever remains and releases the statement resources
insertBatch.execute();
assertEquals(1, batchObserver.getExplicitExecutionCount());
assertEquals(1, batchObserver.getImplicitExecutionCount());
assertFalse(jdbcCoordinator.getResourceRegistry().hasRegisteredResources());
insertBatch.release();
txn.commit();
session.close();
}
Use of org.hibernate.engine.jdbc.batch.internal.BasicBatchKey in the hibernate-orm project (by Hibernate).
The following snippet is from the class AbstractEntityPersister, method delete().
/**
 * Perform an SQL DELETE for one row of table {@code j}.
 * <p/>
 * Binds the identifier and, depending on the locking strategy, either the version
 * (versioned root-table delete) or the loaded state of each versionable property
 * of this table (all/dirty optimistic locking).
 *
 * @param id identifier of the row to delete
 * @param version current version value; bound only when {@code j == 0} and the entity is versioned
 * @param j index of the table being written (0 = the root/primary table)
 * @param object the entity instance (not referenced in this method body)
 * @param sql the DELETE statement text for table {@code j}
 * @param session source of the JDBC coordinator used to prepare/batch the statement
 * @param loadedState property snapshot used for optimistic-lock columns; may be null
 * @throws HibernateException wrapping any SQLException raised by the JDBC work
 */
protected void delete(final Serializable id, final Object version, final int j, final Object object, final String sql, final SharedSessionContractImplementor session, final Object[] loadedState) throws HibernateException {
// inverse tables are owned (and written) by the other side of the mapping
if (isInverseTable(j)) {
return;
}
final boolean useVersion = j == 0 && isVersioned();
final boolean callable = isDeleteCallable(j);
final Expectation expectation = Expectations.appropriateExpectation(deleteResultCheckStyles[j]);
// only root-table, batchable deletes with a batch-friendly expectation get batched
final boolean useBatch = j == 0 && isBatchable() && expectation.canBeBatched();
if (useBatch && deleteBatchKey == null) {
deleteBatchKey = new BasicBatchKey(getEntityName() + "#DELETE", expectation);
}
final boolean traceEnabled = LOG.isTraceEnabled();
if (traceEnabled) {
LOG.tracev("Deleting entity: {0}", MessageHelper.infoString(this, id, getFactory()));
if (useVersion) {
LOG.tracev("Version: {0}", version);
}
}
// the database cascades this delete from the owning row; nothing to execute here
if (isTableCascadeDeleteEnabled(j)) {
if (traceEnabled) {
LOG.tracev("Delete handled by foreign key constraint: {0}", getTableName(j));
}
//EARLY EXIT!
return;
}
try {
//Render the SQL query
PreparedStatement delete;
int index = 1;
if (useBatch) {
delete = session.getJdbcCoordinator().getBatch(deleteBatchKey).getBatchStatement(sql, callable);
} else {
delete = session.getJdbcCoordinator().getStatementPreparer().prepareStatement(sql, callable);
}
try {
// parameter positions are 1-based; the expectation may claim leading parameters
index += expectation.prepare(delete);
// Do the key. The key is immutable so we can use the _current_ object state - not necessarily
// the state at the time the delete was issued
getIdentifierType().nullSafeSet(delete, id, index, session);
index += getIdentifierColumnSpan();
if (useVersion) {
// versioned delete: the WHERE clause checks the expected version value
getVersionType().nullSafeSet(delete, version, index, session);
} else if (isAllOrDirtyOptLocking() && loadedState != null) {
// all/dirty optimistic locking: bind the loaded state of every versionable
// property of this table, advancing the index by the settable column count
boolean[] versionability = getPropertyVersionability();
Type[] types = getPropertyTypes();
for (int i = 0; i < entityMetamodel.getPropertySpan(); i++) {
if (isPropertyOfTable(i, j) && versionability[i]) {
// this property belongs to the table and it is not specifically
// excluded from optimistic locking by optimistic-lock="false"
boolean[] settable = types[i].toColumnNullness(loadedState[i], getFactory());
types[i].nullSafeSet(delete, loadedState[i], index, settable, session);
index += ArrayHelper.countTrue(settable);
}
}
}
if (useBatch) {
// defer execution; the JDBC coordinator flushes the batch when it fills up
session.getJdbcCoordinator().getBatch(deleteBatchKey).addToBatch();
} else {
check(session.getJdbcCoordinator().getResultSetReturn().executeUpdate(delete), id, j, expectation, delete);
}
} catch (SQLException sqle) {
if (useBatch) {
// a failed statement poisons the whole batch, so discard it
session.getJdbcCoordinator().abortBatch();
}
throw sqle;
} finally {
// batched statements are released when the batch itself executes/aborts
if (!useBatch) {
session.getJdbcCoordinator().getResourceRegistry().release(delete);
session.getJdbcCoordinator().afterStatementExecution();
}
}
} catch (SQLException sqle) {
throw getFactory().getSQLExceptionHelper().convert(sqle, "could not delete: " + MessageHelper.infoString(this, id, getFactory()), sql);
}
}
Use of org.hibernate.engine.jdbc.batch.internal.BasicBatchKey in the hibernate-orm project (by Hibernate).
The following snippet is from the class OneToManyPersister, method writeIndex().
/**
 * Writes (updates) the index column for each existing collection entry.
 * Executes one row-update per entry, batching when the expectation allows it.
 *
 * @param collection the persistent collection whose rows are being indexed
 * @param entries iterator over the collection entries to index
 * @param id the collection owner's identifier (used to compute the starting index)
 * @param resetIndex true to start indexing from 0; false to append after the current size
 * @param session source of the JDBC coordinator used to prepare/batch the statement
 */
private void writeIndex(PersistentCollection collection, Iterator entries, Serializable id, boolean resetIndex, SharedSessionContractImplementor session) {
// If one-to-many and inverse, still need to create the index. See HHH-5732.
if (isInverse && hasIndex && !indexContainsFormula && ArrayHelper.countTrue(indexColumnIsSettable) > 0) {
try {
if (entries.hasNext()) {
// starting index: 0 when re-indexing from scratch, otherwise after the current row count
int nextIndex = resetIndex ? 0 : getSize(id, session);
Expectation expectation = Expectations.appropriateExpectation(getUpdateCheckStyle());
while (entries.hasNext()) {
final Object entry = entries.next();
if (entry != null && collection.entryExists(entry, nextIndex)) {
// parameter positions are 1-based; the expectation may claim leading parameters
int offset = 1;
PreparedStatement st = null;
boolean callable = isUpdateCallable();
boolean useBatch = expectation.canBeBatched();
String sql = getSQLUpdateRowString();
if (useBatch) {
// NOTE(review): the batch key is suffixed "#RECREATE" although the SQL here is
// the row-update statement -- presumably historical; confirm before renaming.
if (recreateBatchKey == null) {
recreateBatchKey = new BasicBatchKey(getRole() + "#RECREATE", expectation);
}
st = session.getJdbcCoordinator().getBatch(recreateBatchKey).getBatchStatement(sql, callable);
} else {
st = session.getJdbcCoordinator().getStatementPreparer().prepareStatement(sql, callable);
}
try {
offset += expectation.prepare(st);
// bind in statement-parameter order: [identifier,] index, element
if (hasIdentifier) {
offset = writeIdentifier(st, collection.getIdentifier(entry, nextIndex), offset, session);
}
offset = writeIndex(st, collection.getIndex(entry, nextIndex, this), offset, session);
offset = writeElement(st, collection.getElement(entry), offset, session);
if (useBatch) {
// defer execution; the JDBC coordinator flushes the batch when it fills up
session.getJdbcCoordinator().getBatch(recreateBatchKey).addToBatch();
} else {
expectation.verifyOutcome(session.getJdbcCoordinator().getResultSetReturn().executeUpdate(st), st, -1);
}
} catch (SQLException sqle) {
if (useBatch) {
// a failed statement poisons the whole batch, so discard it
session.getJdbcCoordinator().abortBatch();
}
throw sqle;
} finally {
// batched statements are released when the batch itself executes/aborts
if (!useBatch) {
session.getJdbcCoordinator().getResourceRegistry().release(st);
session.getJdbcCoordinator().afterStatementExecution();
}
}
}
nextIndex++;
}
}
} catch (SQLException sqle) {
throw sqlExceptionHelper.convert(sqle, "could not update collection: " + MessageHelper.collectionInfoString(this, collection, id, session), getSQLUpdateRowString());
}
}
}
Use of org.hibernate.engine.jdbc.batch.internal.BasicBatchKey in the hibernate-orm project (by Hibernate).
The following snippet is from the class AbstractCollectionPersister, method remove().
@Override
public void remove(Serializable id, SharedSessionContractImplementor session) throws HibernateException {
	// Inverse collections and collections with row-deletes disabled are not written from here.
	if (isInverse || !isRowDeleteEnabled()) {
		return;
	}
	if (LOG.isDebugEnabled()) {
		LOG.debugf("Deleting collection: %s", MessageHelper.collectionInfoString(this, id, getFactory()));
	}
	try {
		final Expectation expectation = Expectations.appropriateExpectation(getDeleteAllCheckStyle());
		final boolean callable = isDeleteAllCallable();
		final boolean batched = expectation.canBeBatched();
		final String sql = getSQLDeleteString();
		final PreparedStatement statement;
		if (batched) {
			// Lazily create the shared batch key the first time a batched delete-all runs.
			if (removeBatchKey == null) {
				removeBatchKey = new BasicBatchKey(getRole() + "#REMOVE", expectation);
			}
			statement = session.getJdbcCoordinator().getBatch(removeBatchKey).getBatchStatement(sql, callable);
		} else {
			statement = session.getJdbcCoordinator().getStatementPreparer().prepareStatement(sql, callable);
		}
		try {
			// Parameter positions are 1-based; the expectation may claim leading parameters.
			int paramIndex = 1 + expectation.prepare(statement);
			// Only the collection key is bound: this statement removes every row of the collection.
			writeKey(statement, id, paramIndex, session);
			if (batched) {
				session.getJdbcCoordinator().getBatch(removeBatchKey).addToBatch();
			} else {
				expectation.verifyOutcome(session.getJdbcCoordinator().getResultSetReturn().executeUpdate(statement), statement, -1);
			}
		} catch (SQLException e) {
			if (batched) {
				// A failed statement poisons the whole batch, so discard it.
				session.getJdbcCoordinator().abortBatch();
			}
			throw e;
		} finally {
			// Batched statements are released when the batch itself executes/aborts.
			if (!batched) {
				session.getJdbcCoordinator().getResourceRegistry().release(statement);
				session.getJdbcCoordinator().afterStatementExecution();
			}
		}
		LOG.debug("Done deleting collection");
	} catch (SQLException e) {
		throw sqlExceptionHelper.convert(e, "could not delete collection: " + MessageHelper.collectionInfoString(this, id, getFactory()), getSQLDeleteString());
	}
}
Aggregations