Use of bio.terra.common.DaoKeyHolder in project jade-data-repo by DataBiosphere.
The class DatasetDao, method createAndLock:
/**
* Create a new dataset object and lock it. An exception is thrown if the dataset already exists.
* The correct order to call the DatasetDao methods when creating a dataset is: createAndLock, unlock.
* @param dataset the dataset object to create
* @return the id of the new dataset
* @throws SQLException
* @throws IOException
* @throws InvalidDatasetException if a row already exists with this dataset name
*/
@Transactional(propagation = Propagation.REQUIRED, isolation = Isolation.SERIALIZABLE)
public UUID createAndLock(Dataset dataset, String flightId) throws IOException, SQLException {
    logger.debug("createAndLock dataset " + dataset.getName());
    String sql = "INSERT INTO dataset "
        + "(name, default_profile_id, flightid, description, additional_profile_ids, sharedlock) "
        + "VALUES (:name, :default_profile_id, :flightid, :description, :additional_profile_ids, ARRAY[]::TEXT[]) ";
    Array additionalProfileIds = DaoUtils.createSqlUUIDArray(connection, dataset.getAdditionalProfileIds());
    MapSqlParameterSource params = new MapSqlParameterSource()
        .addValue("name", dataset.getName())
        .addValue("default_profile_id", dataset.getDefaultProfileId())
        .addValue("flightid", flightId)
        .addValue("description", dataset.getDescription())
        .addValue("additional_profile_ids", additionalProfileIds);
    DaoKeyHolder keyHolder = new DaoKeyHolder();
    try {
        jdbcTemplate.update(sql, params, keyHolder);
    } catch (DuplicateKeyException dkEx) {
        throw new InvalidDatasetException("Dataset name already exists: " + dataset.getName(), dkEx);
    }
    UUID datasetId = keyHolder.getId();
    dataset.id(datasetId);
    dataset.createdDate(keyHolder.getCreatedDate());
    tableDao.createTables(dataset.getId(), dataset.getTables());
    relationshipDao.createDatasetRelationships(dataset);
    assetDao.createAssets(dataset);
    return datasetId;
}
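DaoKeyHolder is the piece that turns the row returned for this INSERT back into typed values (keyHolder.getId() and keyHolder.getCreatedDate() above). As a rough orientation only, here is a minimal sketch of such a key holder, assuming it wraps Spring's GeneratedKeyHolder; the class name DaoKeyHolderSketch and the method signatures are inferred from the calls in these snippets, not taken from bio.terra.common.

import java.sql.Timestamp;
import java.time.Instant;
import java.util.Map;
import java.util.UUID;
import org.springframework.jdbc.support.GeneratedKeyHolder;

// Illustrative sketch only; the real bio.terra.common.DaoKeyHolder may differ.
public class DaoKeyHolderSketch extends GeneratedKeyHolder {

    // Read the generated "id" column as a UUID, as used by createAndLock above.
    public UUID getId() {
        return getField("id", UUID.class);
    }

    // Read the "created_date" column as an Instant, as used for dataset.createdDate(...).
    public Instant getCreatedDate() {
        Timestamp timestamp = getField("created_date", Timestamp.class);
        return timestamp == null ? null : timestamp.toInstant();
    }

    public String getString(String fieldName) {
        return getField(fieldName, String.class);
    }

    // Generic typed accessor over the key map Spring collects from the INSERT.
    public <T> T getField(String fieldName, Class<T> type) {
        Map<String, Object> keys = getKeys();
        if (keys == null) {
            return null;
        }
        Object value = keys.get(fieldName);
        return type.isInstance(value) ? type.cast(value) : null;
    }
}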
The class DatasetTableDao, method createTables:
// Assumes transaction propagation from parent's create
public void createTables(UUID parentId, List<DatasetTable> tableList) throws IOException {
    MapSqlParameterSource params = new MapSqlParameterSource();
    DaoKeyHolder keyHolder = new DaoKeyHolder();
    params.addValue("dataset_id", parentId);
    for (DatasetTable table : tableList) {
        params.addValue("name", table.getName());
        params.addValue("raw_table_name", table.getRawTableName());
        params.addValue("soft_delete_table_name", table.getSoftDeleteTableName());
        params.addValue("bigquery_partition_config",
            objectMapper.writeValueAsString(table.getBigQueryPartitionConfig()));
        List<String> naturalKeyStringList = table.getPrimaryKey().stream()
            .map(Column::getName)
            .collect(Collectors.toList());
        try (Connection connection = jdbcDataSource.getConnection()) {
            params.addValue("primary_key", DaoUtils.createSqlStringArray(connection, naturalKeyStringList));
        } catch (SQLException e) {
            logger.error("Failed to convert primary key list to SQL array", e);
            throw new IllegalArgumentException("Failed to convert primary key list to SQL array", e);
        }
        jdbcTemplate.update(sqlInsertTable, params, keyHolder);
        UUID tableId = keyHolder.getId();
        table.id(tableId);
        createColumns(tableId, table.getColumns());
    }
}
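Both methods above hand a live java.sql.Connection to DaoUtils to build Postgres array bind values (createSqlUUIDArray for additional_profile_ids, createSqlStringArray for primary_key). The following is a hedged sketch of what such helpers typically look like, built only on the standard JDBC Connection.createArrayOf call; the real DaoUtils may handle nulls and type names differently.

import java.sql.Array;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.List;
import java.util.UUID;

// Illustrative sketch only; the real bio.terra.common.DaoUtils may differ.
public final class DaoUtilsSketch {

    private DaoUtilsSketch() {
    }

    // Build a Postgres text[] bind value from a list of strings.
    public static Array createSqlStringArray(Connection connection, List<String> list) throws SQLException {
        return connection.createArrayOf("text", list.toArray());
    }

    // Build a Postgres uuid[] bind value from a list of UUIDs.
    public static Array createSqlUUIDArray(Connection connection, List<UUID> list) throws SQLException {
        return connection.createArrayOf("uuid", list.toArray());
    }
}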
The class AssetDao, method create:
/**
* Create a new AssetSpecification. If you try to create an asset with the same name as an existing
* one for the same dataset, this method throws an InvalidAssetException.
* @param assetSpecification the AssetSpecification being created
* @param datasetId the ID of the dataset corresponding to the AssetSpecification being created
* @return the UUID of the newly created asset specification
*/
@Transactional(propagation = Propagation.REQUIRED, isolation = Isolation.SERIALIZABLE)
public UUID create(AssetSpecification assetSpecification, UUID datasetId) {
    String sql = "INSERT INTO asset_specification (dataset_id, name, root_table_id, root_column_id) "
        + "VALUES (:dataset_id, :name, :root_table_id, :root_column_id)";
    MapSqlParameterSource params = new MapSqlParameterSource();
    params.addValue("dataset_id", datasetId);
    params.addValue("name", assetSpecification.getName());
    params.addValue("root_table_id", assetSpecification.getRootTable().getTable().getId());
    params.addValue("root_column_id", assetSpecification.getRootColumn().getDatasetColumn().getId());
    DaoKeyHolder keyHolder = new DaoKeyHolder();
    try {
        jdbcTemplate.update(sql, params, keyHolder);
    } catch (DuplicateKeyException e) {
        throw new InvalidAssetException("Asset name already exists: " + assetSpecification.getName(), e);
    }
    UUID assetSpecId = keyHolder.getId();
    assetSpecification.id(assetSpecId);
    createAssetColumns(assetSpecification);
    createAssetRelationships(assetSpecification);
    return assetSpecId;
}
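A short usage sketch of the contract described in the Javadoc above: the caller receives the id of the new asset specification, and a duplicate name within the same dataset surfaces as InvalidAssetException. The surrounding service method and its fields (assetDao, logger) are hypothetical.

// Hypothetical caller; assetDao and logger are fields of the enclosing service class.
public UUID addAssetSpecification(AssetSpecification spec, UUID datasetId) {
    try {
        UUID assetSpecId = assetDao.create(spec, datasetId);
        logger.info("Created asset specification {} for dataset {}", assetSpecId, datasetId);
        return assetSpecId;
    } catch (InvalidAssetException ex) {
        // Thrown when an asset with this name already exists for the dataset.
        logger.warn("Asset name conflict for dataset {}: {}", datasetId, spec.getName());
        throw ex;
    }
}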
The class AssetDao, method createAssetColumns:
private void createAssetColumns(AssetSpecification assetSpec) {
    assetSpec.getAssetTables().forEach(assetTable -> {
        assetTable.getColumns().forEach(assetCol -> {
            String sql = "INSERT INTO asset_column (asset_id, dataset_column_id) "
                + "VALUES (:asset_id, :dataset_column_id)";
            MapSqlParameterSource params = new MapSqlParameterSource();
            params.addValue("asset_id", assetSpec.getId());
            params.addValue("dataset_column_id", assetCol.getDatasetColumn().getId());
            DaoKeyHolder keyHolder = new DaoKeyHolder();
            jdbcTemplate.update(sql, params, keyHolder);
            UUID assetColumnId = keyHolder.getId();
            assetCol.id(assetColumnId);
        });
    });
}
The class LoadDao, method lockLoad:
// -- load tags public methods --
// This must be serializable so that conflicting updates of the locked state and flightid
// are detected. We lock the table so that we avoid serialization errors.
/**
* We implement a rule that one load job can use one load tag at a time. That rule is needed to control
* concurrent operations. For example, a delete-by-load-tag cannot compete with a load; two loads cannot
* run in parallel with the same load tag - it confuses the algorithm for re-running a load with a load tag
* and skipping already-loaded files.
*
* This call and the unlock call use a load table in the database to record that a load tag is in use.
* The load tag is associated with a load id (a guid); that guid is a foreign key to the load_file table
* that maintains the state of files being loaded.
*
* We expect conflicts on load tags to be rare. The typical case is a load that starts, runs, and ends
* without conflict, even when it is re-run.
*
* We learned from the first implementation of this code that when there were conflicts, we would get
* serialization errors from Postgres. Those require building retry logic. Instead, we chose to use
* table locks to serialize access to the load table while we are setting and freeing our load
* lock state.
*
* A lock is taken by creating the load tag row and storing the flight id holding the lock.
* The lock is freed by deleting the load tag row. Code can safely re-lock a load tag it already holds
* and unlock a load tag it has already freed.
*
* There is never a case where a lock row is updated. They are only ever inserted or deleted.
*
* @param loadTag tag identifying this load
* @param flightId flight id taking the lock
* @return Load object including the load id
*/
@Transactional(propagation = Propagation.REQUIRED, isolation = Isolation.SERIALIZABLE)
public Load lockLoad(String loadTag, String flightId) throws InterruptedException {
    jdbcTemplate.getJdbcTemplate().execute("LOCK TABLE load IN EXCLUSIVE MODE");
    String upsert = "INSERT INTO load (load_tag, locked, locking_flight_id)"
        + " VALUES (:load_tag, true, :flight_id)"
        + " ON CONFLICT ON CONSTRAINT load_load_tag_key DO NOTHING";
    MapSqlParameterSource params = new MapSqlParameterSource()
        .addValue("load_tag", loadTag)
        .addValue("flight_id", flightId);
    DaoKeyHolder keyHolder = new DaoKeyHolder();
    int rows = jdbcTemplate.update(upsert, params, keyHolder);
    Load load;
    if (rows == 0) {
        // We did not insert. Therefore, someone has the load tag locked.
        // Retrieve it, in case it is us re-locking.
        load = lookupLoadByTag(loadTag);
        if (load == null) {
            throw new CorruptMetadataException("Load row should exist! Load tag: " + loadTag);
        }
        // If it is locked by a different flight, fail.
        if (!StringUtils.equals(load.getLockingFlightId(), flightId)) {
            throw new LoadLockedException(
                "Load '" + loadTag + "' is locked by flight '" + load.getLockingFlightId() + "'");
        }
    } else {
        load = new Load()
            .id(keyHolder.getId())
            .loadTag(keyHolder.getString("load_tag"))
            .locked(keyHolder.getField("locked", Boolean.class))
            .lockingFlightId(keyHolder.getString("locking_flight_id"));
    }
    return load;
}
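The comment above says the lock is freed by deleting the load tag row for the flight that holds it. Below is a minimal sketch of that unlock path, assuming a method named unlockLoad on the same LoadDao and the column names used in lockLoad; the real implementation may differ.

@Transactional(propagation = Propagation.REQUIRED, isolation = Isolation.SERIALIZABLE)
public void unlockLoad(String loadTag, String flightId) {
    // Serialize against lockLoad, which takes the same table lock.
    jdbcTemplate.getJdbcTemplate().execute("LOCK TABLE load IN EXCLUSIVE MODE");
    // Deleting the row frees the lock. Filtering on locking_flight_id makes the call a no-op when
    // this flight has already freed the lock, so the unlock is safe to re-run.
    String sql = "DELETE FROM load WHERE load_tag = :load_tag AND locking_flight_id = :flight_id";
    MapSqlParameterSource params = new MapSqlParameterSource()
        .addValue("load_tag", loadTag)
        .addValue("flight_id", flightId);
    jdbcTemplate.update(sql, params);
}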