Use of javax.jdo.JDODataStoreException in project tests by datanucleus.
Class SQLQueryTest, method testInvalidQueryAllowedByConfiguration.
/**
 * Test of a query not starting with "SELECT", allowed because the SQL allow-all property is set.
 */
public void testInvalidQueryAllowedByConfiguration() throws Exception {
    addClassesToSchema(new Class[] { Person.class, Manager.class, Employee.class, Developer.class });
    // Try a query
    PersistenceManager pm = pmf.getPersistenceManager();
    pm.setProperty(PropertyNames.PROPERTY_QUERY_SQL_ALLOWALL, "true");
    Transaction tx = pm.currentTransaction();
    try {
        tx.begin();
        String sqlText = "EXECUTE SELECT 1 FROM PERSON";
        Query query = pm.newQuery("javax.jdo.query.SQL", sqlText);
        query.execute();
        // expected: with the allow-all property set, the statement is passed through to the datastore
    } catch (JDODataStoreException dse) {
        // also expected, if the datastore itself rejects the query as invalid
    } finally {
        if (tx.isActive()) {
            tx.rollback();
        }
        pm.close();
    }
}
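The same behaviour can also be enabled for every PersistenceManager by configuring it on the factory, rather than per-PersistenceManager as the test does. A minimal sketch, assuming the DataNucleus property name datanucleus.query.sql.allowAll and placeholder connection settings:

import java.util.Properties;
import javax.jdo.JDOHelper;
import javax.jdo.PersistenceManagerFactory;

public class AllowAllSqlConfig {
    // Hypothetical helper: builds a PMF that accepts SQL statements other than SELECT.
    public static PersistenceManagerFactory createFactory() {
        Properties props = new Properties();
        props.setProperty("javax.jdo.PersistenceManagerFactoryClass",
                "org.datanucleus.api.jdo.JDOPersistenceManagerFactory");
        // Placeholder connection URL; replace with the real datastore settings.
        props.setProperty("javax.jdo.option.ConnectionURL", "jdbc:h2:mem:test");
        // Factory-wide equivalent of the per-PersistenceManager property set in the test above.
        props.setProperty("datanucleus.query.sql.allowAll", "true");
        return JDOHelper.getPersistenceManagerFactory(props);
    }
}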
Use of javax.jdo.JDODataStoreException in project tests by datanucleus.
Class SQLQueryTest, method testQueryWithTimeout.
/**
 * Test of a query with a timeout.
 */
public void testQueryWithTimeout() throws Exception {
    // Try a query
    PersistenceManager pm = pmf.getPersistenceManager();
    Transaction tx = pm.currentTransaction();
    try {
        tx.begin();
        // TODO Change this to a query that will take a LONG time and check for it
        String sqlText = "SELECT count(*) FROM PERSON";
        Query query = pm.newQuery("javax.jdo.query.SQL", sqlText);
        query.addExtension("org.jpox.query.timeout", "1");
        query.execute();
        tx.commit();
    } catch (JDODataStoreException dse) {
        fail("JDODataStoreException thrown when using query timeout : " + dse.getCause().getMessage());
    } catch (JDOUserException ue) {
        // Do nothing, since this is expected
    } finally {
        if (tx.isActive()) {
            tx.rollback();
        }
        pm.close();
    }
}
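The extension key used above carries the legacy JPOX prefix; JDO 3.x also provides a standard timeout setter on Query. A minimal sketch of catching the resulting JDODataStoreException, assuming an open PersistenceManager pm, a PERSON table, and a datastore that honours query timeouts:

import javax.jdo.JDODataStoreException;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class QueryTimeoutExample {
    // Sketch only: pm is assumed to be an open PersistenceManager.
    public static void countPersonsWithTimeout(PersistenceManager pm) {
        Query query = pm.newQuery("javax.jdo.query.SQL", "SELECT count(*) FROM PERSON");
        // Standard JDO 3.x API; the test above uses the vendor extension instead.
        query.setDatastoreReadTimeoutMillis(1000);
        try {
            Object result = query.execute();
            System.out.println("count(*) = " + result);
        } catch (JDODataStoreException dse) {
            // Thrown if the datastore reports that the timeout elapsed.
            System.out.println("Query timed out : " + dse.getMessage());
        }
    }
}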
Use of javax.jdo.JDODataStoreException in project datanucleus-api-jdo by datanucleus.
Class JDOQuery, method deletePersistentInternal.
protected long deletePersistentInternal() {
    try {
        if (parameterValues != null) {
            return query.deletePersistentAll(parameterValues);
        } else if (parameterValueByName != null) {
            return query.deletePersistentAll(parameterValueByName);
        }
        return query.deletePersistentAll();
    } catch (NoQueryResultsException nqre) {
        return 0;
    } catch (QueryTimeoutException qte) {
        throw new JDODataStoreException("Query has timed out : " + qte.getMessage());
    } catch (QueryInterruptedException qie) {
        throw new JDOQueryInterruptedException("Query has been cancelled : " + qie.getMessage());
    } catch (NucleusException jpe) {
        throw NucleusJDOHelper.getJDOExceptionForNucleusException(jpe);
    } finally {
        // Parameter values are not retained beyond subsequent execute/deletePersistentAll
        this.parameterValueByName = null;
        this.parameterValues = null;
    }
}
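From the caller's side, the mapping above means a bulk delete that times out in the datastore surfaces as JDODataStoreException. A minimal sketch, assuming an open PersistenceManager pm and a hypothetical persistent class Person with a boolean field active:

import javax.jdo.JDODataStoreException;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class BulkDeleteExample {
    // Sketch only: Person is a placeholder persistent class, not part of the JDOQuery source above.
    public static long deleteInactive(PersistenceManager pm) {
        Query query = pm.newQuery(Person.class, "active == false");
        try {
            // Delegates internally to deletePersistentInternal() shown above.
            return query.deletePersistentAll();
        } catch (JDODataStoreException dse) {
            // Raised when the underlying datastore query timed out; -1 is a sketch-only sentinel.
            return -1;
        }
    }
}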
Use of javax.jdo.JDODataStoreException in project hive by apache.
Class Hive, method getPartition.
/**
* Returns partition metadata
*
* @param tbl
* the partition's table
* @param partSpec
* partition keys and values
* @param forceCreate
* if this is true and partition doesn't exist then a partition is
* created
* @param partPath the path where the partition data is located
 * @param inheritTableSpecs whether to copy over the table specs for InputFormat/OutputFormat/SerDe (if/of/serde)
* @return result partition object or null if there is no partition
* @throws HiveException
*/
public Partition getPartition(Table tbl, Map<String, String> partSpec, boolean forceCreate, String partPath, boolean inheritTableSpecs) throws HiveException {
  tbl.validatePartColumnNames(partSpec, true);
  List<String> pvals = new ArrayList<String>();
  for (FieldSchema field : tbl.getPartCols()) {
    String val = partSpec.get(field.getName());
    // enable dynamic partitioning
    if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) || (val != null && val.length() == 0)) {
      throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty");
    } else if (val != null) {
      pvals.add(val);
    }
  }
  org.apache.hadoop.hive.metastore.api.Partition tpart = null;
  try {
    tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
  } catch (NoSuchObjectException nsoe) {
    // this means no partition exists for the given partition
    // key value pairs - thrift cannot handle null return values, hence
    // getPartition() throws NoSuchObjectException to indicate null partition
    tpart = null;
  } catch (Exception e) {
    LOG.error("Failed getPartitionWithAuthInfo", e);
    throw new HiveException(e);
  }
  try {
    if (forceCreate) {
      if (tpart == null) {
        LOG.debug("creating partition for table " + tbl.getTableName() + " with partition spec : " + partSpec);
        try {
          tpart = getSynchronizedMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
        } catch (AlreadyExistsException aee) {
          LOG.debug("Caught already exists exception, trying to alter partition instead");
          tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
          alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
        } catch (Exception e) {
          if (CheckJDOException.isJDODataStoreException(e)) {
            // Using utility method above, so that JDODataStoreException doesn't
            // have to be used here. This helps avoid adding jdo dependency for
            // hcatalog client uses
            LOG.debug("Caught JDO exception, trying to alter partition instead");
            tpart = getSynchronizedMSC().getPartitionWithAuthInfo(tbl.getDbName(), tbl.getTableName(), pvals, getUserName(), getGroupNames());
            if (tpart == null) {
              // The exception was caused by something other than a race condition
              // in creating the partition, since the partition still doesn't exist.
              throw e;
            }
            alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
          } else {
            throw e;
          }
        }
      } else {
        alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
        fireInsertEvent(tbl, partSpec, true, null);
      }
    }
    if (tpart == null) {
      return null;
    }
  } catch (Exception e) {
    LOG.error("Failed getPartition", e);
    throw new HiveException(e);
  }
  return new Partition(tbl, tpart);
}
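The comment in the JDO branch above refers to detecting JDODataStoreException without importing it. A minimal sketch of one way such a check can work, by walking the cause chain and comparing class names; this is an illustration, not necessarily Hive's actual CheckJDOException implementation:

public final class JdoExceptionCheck {
    private static final String JDO_DATASTORE_EXCEPTION = "javax.jdo.JDODataStoreException";

    private JdoExceptionCheck() {
    }

    // Returns true if the throwable, or any of its causes, is a JDODataStoreException.
    // Comparing class names avoids a compile-time dependency on the JDO API.
    public static boolean isJDODataStoreException(Throwable t) {
        for (Throwable current = t; current != null; current = current.getCause()) {
            if (JDO_DATASTORE_EXCEPTION.equals(current.getClass().getName())) {
                return true;
            }
        }
        return false;
    }
}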
Use of javax.jdo.JDODataStoreException in project datanucleus-api-jdo by datanucleus.
Class JDOQuery, method executeInternal.
protected Object executeInternal() {
    try {
        if (parameterValues != null) {
            return query.executeWithArray(parameterValues);
        } else if (parameterValueByName != null) {
            return query.executeWithMap(parameterValueByName);
        }
        return query.execute();
    } catch (NoQueryResultsException nqre) {
        return null;
    } catch (QueryTimeoutException qte) {
        throw new JDODataStoreException("Query has timed out : " + qte.getMessage());
    } catch (QueryInterruptedException qie) {
        throw new JDOQueryInterruptedException("Query has been cancelled : " + qie.getMessage());
    } catch (NucleusException jpe) {
        // Convert any exceptions into what JDO expects
        throw NucleusJDOHelper.getJDOExceptionForNucleusException(jpe);
    } finally {
        // Parameter values are not retained beyond subsequent execute/deletePersistentAll
        this.parameterValueByName = null;
        this.parameterValues = null;
    }
}
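Seen from application code, the conversions above decide which JDO exception a failed execute() produces. A minimal sketch distinguishing the timeout and cancellation cases, assuming an open PersistenceManager pm and a JDOQL query string:

import javax.jdo.JDODataStoreException;
import javax.jdo.JDOQueryInterruptedException;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class ExecuteExceptionHandling {
    // Sketch only: pm is assumed to be an open PersistenceManager.
    public static Object runQuery(PersistenceManager pm, String jdoql) {
        Query query = pm.newQuery(jdoql);
        try {
            return query.execute();
        } catch (JDODataStoreException dse) {
            // Produced by the QueryTimeoutException branch of executeInternal() above.
            throw dse;
        } catch (JDOQueryInterruptedException qie) {
            // Produced when another thread cancelled the query, e.g. via Query.cancelAll().
            return null;
        }
    }
}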