Use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
In class PartitionedFileSetDataset, method postTxCommit:
@Override
public void postTxCommit() {
  // simply delete the quarantine directory for this transaction
  try {
    Location quarantine = getQuarantineLocation();
    if (quarantine.exists()) {
      boolean deleteSuccess = quarantine.delete(true);
      if (!deleteSuccess) {
        throw new DataSetException(String.format("Error deleting quarantine location %s.", quarantine));
      }
    }
  } catch (IOException e) {
    throw new DataSetException(
      String.format("Error deleting quarantine location for tx %s.", tx.getWritePointer()), e);
  }
  this.tx = null;
  super.postTxCommit();
}
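Note the idiom: a failed cleanup is surfaced as an unchecked DataSetException, using either the message-only or the message-plus-cause constructor, so transaction drivers never have to handle IOException directly. Below is a minimal, self-contained sketch of the same wrap-and-rethrow idiom using java.nio.file in place of the Twill Location API; the QuarantineCleanup class and the path value are illustrative assumptions, not part of CDAP.

import io.cdap.cdap.api.dataset.DataSetException;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public final class QuarantineCleanup {

  // Delete a quarantine entry and wrap any I/O failure in an unchecked DataSetException,
  // mirroring the wrap-and-rethrow done in postTxCommit above. The path is illustrative.
  static void deleteQuarantine(Path quarantinePath) {
    try {
      // throws IOException if the entry exists but cannot be removed
      Files.deleteIfExists(quarantinePath);
    } catch (IOException e) {
      throw new DataSetException(
        String.format("Error deleting quarantine location %s.", quarantinePath), e);
    }
  }

  public static void main(String[] args) {
    deleteQuarantine(Paths.get("/tmp/quarantine-example"));
  }
}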
Use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
In class PartitionedFileSetDataset, method onFailure:
@Override
public void onFailure() throws DataSetException {
  try {
    rollbackPartitionOperations();
    ((FileSetDataset) files).onFailure();
  } catch (Throwable caught) {
    Throwables.propagateIfPossible(caught, DataSetException.class);
    throw new DataSetException("Unable to rollback", caught);
  }
}
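The rollback relies on Guava's Throwables.propagateIfPossible(Throwable, Class): the caught throwable is rethrown as-is if it is a RuntimeException, an Error, or the declared type, so only checked throwables fall through to be wrapped in DataSetException. A minimal sketch of that pattern, with a hypothetical rollback() step standing in for rollbackPartitionOperations():

import com.google.common.base.Throwables;
import io.cdap.cdap.api.dataset.DataSetException;

public final class RollbackFailureExample {

  // Hypothetical rollback step standing in for rollbackPartitionOperations().
  private static void rollback() throws Exception {
    throw new Exception("simulated checked failure during rollback");
  }

  // Rethrow RuntimeExceptions, Errors and DataSetExceptions untouched;
  // wrap everything else (checked throwables) in a DataSetException.
  static void onFailure() {
    try {
      rollback();
    } catch (Throwable caught) {
      Throwables.propagateIfPossible(caught, DataSetException.class);
      throw new DataSetException("Unable to rollback", caught);
    }
  }

  public static void main(String[] args) {
    try {
      onFailure();
    } catch (DataSetException e) {
      System.out.println("wrapped cause: " + e.getCause().getMessage());
    }
  }
}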
Use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
In class PartitionedFileSetDataset, method addPartition:
public void addPartition(PartitionKey key, String path, Map<String, String> metadata,
                         boolean filesCreated, boolean allowAppend) {
  byte[] rowKey = generateRowKey(key, partitioning);
  Row row = partitionsTable.get(rowKey);
  boolean appending = !row.isEmpty();
  if (appending && !allowAppend) {
    throw new PartitionAlreadyExistsException(getName(), key);
  }
  if (appending) {
    // this can happen if the user originally created the partition with a custom relative path
    String existingPath = Bytes.toString(row.get(RELATIVE_PATH));
    if (!path.equals(existingPath)) {
      throw new DataSetException(String.format(
        "Attempting to append to Dataset '%s', to partition '%s' with a different path. "
          + "Original path: '%s'. New path: '%s'",
        getName(), key.toString(), existingPath, path));
    }
  }
  LOG.debug("{} partition with key {} and path {} to dataset {}",
            appending ? "Appending to" : "Creating", key, path, getName());
  AddPartitionOperation operation = new AddPartitionOperation(key, path, filesCreated);
  operationsInThisTx.add(operation);
  Put put = new Put(rowKey);
  byte[] nowInMillis = Bytes.toBytes(System.currentTimeMillis());
  if (!appending) {
    put.add(RELATIVE_PATH, Bytes.toBytes(path));
    put.add(CREATION_TIME_COL, nowInMillis);
  }
  put.add(LAST_MODIFICATION_TIME_COL, nowInMillis);
  // we allow updates, because an update will only happen if it's an append
  addMetadataToPut(row, metadata, put, true);
  // index each row by its transaction's write pointer
  put.add(WRITE_PTR_COL, tx.getWritePointer());
  partitionsTable.put(put);
  if (!appending) {
    addPartitionToExplore(key, path);
    operation.setExplorePartitionCreated();
  }
}
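From a caller's perspective the two failure modes above are the interesting part: PartitionAlreadyExistsException when the key is already registered and append is not allowed, and DataSetException for other conflicts such as a mismatched relative path. A minimal caller-side sketch, assuming a PartitionedFileSet obtained elsewhere, a single string partition field named "league", and the public addPartition(PartitionKey, String) convenience method; the class name, field name, and values are illustrative assumptions:

import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.api.dataset.lib.PartitionAlreadyExistsException;
import io.cdap.cdap.api.dataset.lib.PartitionKey;
import io.cdap.cdap.api.dataset.lib.PartitionedFileSet;

public final class AddPartitionCaller {

  // Register a partition for data already written under the given relative path.
  // The "league" field and the path value are purely illustrative.
  static void registerPartition(PartitionedFileSet pfs, String league, String relativePath) {
    PartitionKey key = PartitionKey.builder()
      .addStringField("league", league)
      .build();
    try {
      pfs.addPartition(key, relativePath);
    } catch (PartitionAlreadyExistsException e) {
      // the key is already registered and appending was not requested
      System.out.println("Partition already exists: " + e.getMessage());
    } catch (DataSetException e) {
      // e.g. a conflicting relative path or a metadata-table failure
      throw new DataSetException("Could not register partition for league " + league, e);
    }
  }
}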
Use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
In class BufferingTable, method get:
@ReadOnly
@Override
public Row get(byte[] row, byte[] startColumn, byte[] stopColumn, int limit) {
  ensureTransactionIsStarted();
  reportRead(1);
  // checking if the row was deleted inside this tx
  NavigableMap<byte[], Update> buffCols = buff.get(row);
  // potential improvement: do not fetch columns available in the in-memory buffer (we know them at this point)
  try {
    Map<byte[], byte[]> persistedCols = getPersisted(row, startColumn, stopColumn, limit);
    // add the persisted columns first, then override with buffered values
    NavigableMap<byte[], byte[]> result = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    if (persistedCols != null) {
      result.putAll(persistedCols);
    }
    if (buffCols != null) {
      buffCols = getRange(buffCols, startColumn, stopColumn, limit);
      // null-valued columns in the in-memory buffer are deletes, so we need to remove them from the result map
      mergeToPersisted(result, buffCols, null);
    }
    // applying limit
    return new Result(row, head(result, limit));
  } catch (Exception e) {
    LOG.debug("get failed for table: " + getTransactionAwareName() + ", row: " + Bytes.toStringBinary(row), e);
    throw new DataSetException("get failed", e);
  }
}
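Because DataSetException is unchecked, a caller of this column-range get does not have to declare it, but can still catch it to add context before rethrowing. A minimal caller-side sketch against the Table API signature shown above; the row key, column bounds, and limit are illustrative assumptions:

import io.cdap.cdap.api.common.Bytes;
import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.api.dataset.table.Row;
import io.cdap.cdap.api.dataset.table.Table;

public final class ColumnRangeGetExample {

  // Read at most 10 columns of one row between two column bounds.
  // The row key ("user-42") and the column bounds are purely illustrative.
  static Row readRange(Table table) {
    try {
      return table.get(Bytes.toBytes("user-42"),  // row key
                       Bytes.toBytes("a"),        // start column bound
                       Bytes.toBytes("z"),        // stop column bound
                       10);                       // column limit
    } catch (DataSetException e) {
      // add caller context before letting the unchecked exception propagate
      throw new DataSetException("range get for row 'user-42' failed", e);
    }
  }
}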
Use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
In class BufferingTable, method get (multi-get):
@ReadOnly
@Override
public List<Row> get(List<Get> gets) {
  ensureTransactionIsStarted();
  try {
    // get persisted, then overwrite with what's buffered
    List<Map<byte[], byte[]>> persistedRows = getPersisted(gets);
    // gets and rows lists are always of the same size
    Preconditions.checkArgument(gets.size() == persistedRows.size(),
      "Invalid number of rows fetched when performing multi-get. There must be one row for each get.");
    List<Row> result = Lists.newArrayListWithCapacity(persistedRows.size());
    Iterator<Map<byte[], byte[]>> persistedRowsIter = persistedRows.iterator();
    Iterator<Get> getIter = gets.iterator();
    while (persistedRowsIter.hasNext() && getIter.hasNext()) {
      Get get = getIter.next();
      Map<byte[], byte[]> persistedRow = persistedRowsIter.next();
      // navigable copy of the persisted data. Implementation may return immutable or unmodifiable maps,
      // so we make a copy here.
      NavigableMap<byte[], byte[]> rowColumns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
      rowColumns.putAll(persistedRow);
      byte[] row = get.getRow();
      NavigableMap<byte[], Update> buffCols = buff.get(row);
      // merge what was in the buffer and what was persisted
      if (buffCols != null) {
        List<byte[]> getColumns = get.getColumns();
        byte[][] columns = getColumns == null ? null : getColumns.toArray(new byte[getColumns.size()][]);
        mergeToPersisted(rowColumns, buffCols, columns);
      }
      result.add(new Result(row, unwrapDeletes(rowColumns)));
    }
    return result;
  } catch (Exception e) {
    LOG.debug("multi-get failed for table: " + getTransactionAwareName(), e);
    throw new DataSetException("multi-get failed", e);
  }
}
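As the merge loop above shows, a multi-get returns one Row per Get, in the same order, and a missing row comes back as an empty Row rather than null. A minimal caller-side sketch; the row keys and the wrapping message are illustrative assumptions:

import io.cdap.cdap.api.common.Bytes;
import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.api.dataset.table.Get;
import io.cdap.cdap.api.dataset.table.Row;
import io.cdap.cdap.api.dataset.table.Table;

import java.util.Arrays;
import java.util.List;

public final class MultiGetExample {

  // Fetch two rows in one call; the row keys are purely illustrative.
  static void readTwoRows(Table table) {
    List<Get> gets = Arrays.asList(
      new Get(Bytes.toBytes("user-1")),
      new Get(Bytes.toBytes("user-2")));
    try {
      List<Row> rows = table.get(gets);
      for (Row row : rows) {
        // one Row per Get, in the same order; a missing row is empty, not null
        System.out.println(Bytes.toString(row.getRow()) + (row.isEmpty() ? ": no columns" : ": found"));
      }
    } catch (DataSetException e) {
      // wraps failures from the underlying persisted read, as in BufferingTable.get above
      throw new DataSetException("multi-get of user rows failed", e);
    }
  }
}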