Use of io.cdap.cdap.api.annotation.ReadOnly in project cdap by caskdata.
Class PartitionedFileSetDataset, method assertNotExists.
/**
 * Throws an exception if the partition key already exists: PartitionAlreadyExistsException in the
 * transactional path, DataSetException in the non-transactional filesystem fallback.
 * Otherwise, returns the row key corresponding to the PartitionKey.
 */
@ReadOnly
byte[] assertNotExists(PartitionKey key, boolean supportNonTransactional) {
  byte[] rowKey = generateRowKey(key, partitioning);
  if (tx == null && supportNonTransactional) {
    if (LOG.isWarnEnabled()) {
      StringBuilder sb = new StringBuilder();
      for (StackTraceElement stackTraceElement : Thread.currentThread().getStackTrace()) {
        sb.append("\n\tat ").append(stackTraceElement.toString());
      }
      SAMPLING_LOG.warn("Operation should be performed within a transaction. "
          + "This operation may require a transaction in the future. {}", sb);
    }
    // To handle backwards compatibility (the user might have called PartitionedFileSet#getPartitionOutput
    // outside of a transaction), we can't check partition existence via the partitionsTable.
    // As a fallback approach, check the filesystem.
    Location partitionLocation = files.getLocation(getOutputPath(key));
    if (exists(partitionLocation)) {
      throw new DataSetException(
          String.format("Location %s for partition key %s already exists", partitionLocation, key));
    }
  } else {
    Row row = partitionsTable.get(rowKey);
    if (!row.isEmpty()) {
      throw new PartitionAlreadyExistsException(getName(), key);
    }
  }
  return rowKey;
}
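For context, this check runs when a caller asks the dataset for a new partition output. The following caller-side sketch is not from the project; the helper name, dataset handle, and partition fields are assumptions. It only illustrates the transactional path, in which the existence check goes through partitionsTable rather than the filesystem fallback.

// Hypothetical caller (helper name, dataset handle, and partition fields are assumptions):
private void writeSeasonResults(PartitionedFileSet results) {
  PartitionKey key = PartitionKey.builder()
      .addStringField("league", "nfl")
      .addIntField("season", 2017)
      .build();
  PartitionOutput output = results.getPartitionOutput(key);  // triggers the existence check shown above
  Location dir = output.getLocation();                       // write the partition's files under this location
  // ... write files to dir ...
  output.addPartition();                                     // then register the partition in the partitions table
}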
Use of io.cdap.cdap.api.annotation.ReadOnly in project cdap by caskdata.
Class BufferingTable, method get.
/**
 * NOTE: Depending on the use case, calling this method may be much less efficient than calling the
 * same method with columns as parameters, because it may always require a round trip to the
 * persistent store.
 */
@ReadOnly
@Override
public Row get(byte[] row) {
  ensureTransactionIsStarted();
  reportRead(1);
  try {
    return new Result(row, getRowMap(row));
  } catch (Exception e) {
    LOG.debug("get failed for table: " + getTransactionAwareName() + ", row: " + Bytes.toStringBinary(row), e);
    throw new DataSetException("get failed", e);
  }
}
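The note above suggests passing the needed columns explicitly whenever they are known. Below is a minimal comparison sketch; the table handle, row key, and column names are assumptions, and the actual savings depend on the underlying table implementation.

// Hypothetical comparison (table handle, row, and column names are assumptions), inside a transaction:
private void readUser(Table table) {
  // Full-row read: may always require a round trip to the persistent store.
  Row fullRow = table.get(Bytes.toBytes("user-123"));
  byte[] nameFromFullRow = fullRow.get(Bytes.toBytes("name"));

  // Column-specific read: the buffering layer only needs to fetch and merge the requested columns.
  Row narrowRow = table.get(Bytes.toBytes("user-123"),
      new byte[][] { Bytes.toBytes("name"), Bytes.toBytes("email") });
  byte[] name = narrowRow.get(Bytes.toBytes("name"));
}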
Use of io.cdap.cdap.api.annotation.ReadOnly in project cdap by cdapio.
Class PartitionedFileSetDataset, method getPartition.
@ReadOnly
@Override
public PartitionDetail getPartition(PartitionKey key) {
  byte[] rowKey = generateRowKey(key, partitioning);
  Row row = partitionsTable.get(rowKey);
  if (row.isEmpty()) {
    return null;
  }
  byte[] pathBytes = row.get(RELATIVE_PATH);
  if (pathBytes == null) {
    return null;
  }
  return new BasicPartitionDetail(this, Bytes.toString(pathBytes), key, metadataFromRow(row));
}
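A caller-side sketch for getPartition follows; the dataset handle and partition fields are assumptions. It reflects the null contract above: null is returned when no row exists for the key, or when the stored row carries no relative path.

// Hypothetical caller (dataset handle and partition fields are assumptions), inside a transaction:
private void describePartition(PartitionedFileSet pfs) {
  PartitionKey key = PartitionKey.builder()
      .addStringField("league", "nfl")
      .addIntField("season", 2017)
      .build();
  PartitionDetail detail = pfs.getPartition(key);
  if (detail == null) {
    // either no row exists for this key, or the stored row carried no relative path
    return;
  }
  String relativePath = detail.getRelativePath();    // path of the partition relative to the fileset's base
  PartitionMetadata metadata = detail.getMetadata(); // user metadata stored alongside the partition
}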
Use of io.cdap.cdap.api.annotation.ReadOnly in project cdap by cdapio.
Class BufferingTable, method get.
@ReadOnly
@Override
public Row get(byte[] row, byte[] startColumn, byte[] stopColumn, int limit) {
  ensureTransactionIsStarted();
  reportRead(1);
  // checking if the row was deleted inside this tx
  NavigableMap<byte[], Update> buffCols = buff.get(row);
  // potential improvement: do not fetch columns available in the in-memory buffer (we know them at this point)
  try {
    Map<byte[], byte[]> persistedCols = getPersisted(row, startColumn, stopColumn, limit);
    // adding server cols, and then overriding with buffered values
    NavigableMap<byte[], byte[]> result = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
    if (persistedCols != null) {
      result.putAll(persistedCols);
    }
    if (buffCols != null) {
      buffCols = getRange(buffCols, startColumn, stopColumn, limit);
      // null-valued columns in the in-memory buffer are deletes, so we need to delete them from the result list
      mergeToPersisted(result, buffCols, null);
    }
    // applying limit
    return new Result(row, head(result, limit));
  } catch (Exception e) {
    LOG.debug("get failed for table: " + getTransactionAwareName() + ", row: " + Bytes.toStringBinary(row), e);
    throw new DataSetException("get failed", e);
  }
}
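A usage sketch for this column-range variant follows; the row key and column-naming scheme are assumptions. Columns written earlier in the same transaction are merged into the result, as the buffer handling above shows.

// Hypothetical usage (row key and column naming scheme are assumptions), inside a transaction:
private void printColumnSlice(Table table) {
  Row slice = table.get(Bytes.toBytes("events:2017-10-04"),
      Bytes.toBytes("c:00"),   // startColumn of the range
      Bytes.toBytes("c:50"),   // stopColumn of the range
      10);                     // return at most 10 columns
  for (Map.Entry<byte[], byte[]> entry : slice.getColumns().entrySet()) {
    LOG.info("{} = {}", Bytes.toString(entry.getKey()), Bytes.toString(entry.getValue()));
  }
}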
Use of io.cdap.cdap.api.annotation.ReadOnly in project cdap by cdapio.
Class BufferingTable, method get.
@ReadOnly
@Override
public List<Row> get(List<Get> gets) {
  ensureTransactionIsStarted();
  try {
    // get persisted, then overwrite with what's buffered
    List<Map<byte[], byte[]>> persistedRows = getPersisted(gets);
    // gets and rows lists are always of the same size
    Preconditions.checkArgument(gets.size() == persistedRows.size(),
        "Invalid number of rows fetched when performing multi-get. There must be one row for each get.");
    List<Row> result = Lists.newArrayListWithCapacity(persistedRows.size());
    Iterator<Map<byte[], byte[]>> persistedRowsIter = persistedRows.iterator();
    Iterator<Get> getIter = gets.iterator();
    while (persistedRowsIter.hasNext() && getIter.hasNext()) {
      Get get = getIter.next();
      Map<byte[], byte[]> persistedRow = persistedRowsIter.next();
      // navigable copy of the persisted data. The implementation may return immutable or unmodifiable maps,
      // so we make a copy here.
      NavigableMap<byte[], byte[]> rowColumns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
      rowColumns.putAll(persistedRow);
      byte[] row = get.getRow();
      NavigableMap<byte[], Update> buffCols = buff.get(row);
      // merge what was in the buffer and what was persisted
      if (buffCols != null) {
        List<byte[]> getColumns = get.getColumns();
        byte[][] columns = getColumns == null ? null : getColumns.toArray(new byte[getColumns.size()][]);
        mergeToPersisted(rowColumns, buffCols, columns);
      }
      result.add(new Result(row, unwrapDeletes(rowColumns)));
    }
    return result;
  } catch (Exception e) {
    LOG.debug("multi-get failed for table: " + getTransactionAwareName(), e);
    throw new DataSetException("multi-get failed", e);
  }
}
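Finally, a caller-side sketch for the multi-get path; the builder-style use of Get and the row and column names are assumptions, not part of the snippet. The returned list lines up one Row per Get, which is exactly what the Preconditions check above enforces.

// Hypothetical usage (row and column names are assumptions), inside a transaction:
private void readUsers(Table table) {
  List<Get> gets = new ArrayList<>();
  gets.add(new Get("user-1"));                           // whole row
  gets.add(new Get("user-2").add("name"));               // single column
  gets.add(new Get("user-3").add("name").add("email"));  // two columns
  List<Row> rows = table.get(gets);                      // one Row per Get, in the same order
  for (Row r : rows) {
    byte[] name = r.get("name");                         // null if the column is absent or deleted in this tx
  }
}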