Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata:
class PartitionedFileSetDataset, method scannerToPartitions.
/**
 * Reads partitions from the scanner, applying the filter predicate and appending matches to the
 * given list until the limit is reached. Because several partitions can share a single
 * transaction write pointer, partitions are consumed in whole write-pointer groups: the limit is
 * only enforced at a group boundary, so the list may grow beyond {@code limit}.
 *
 * @param scanner scanner over the partitions table from which to read partitions
 * @param partitions output list that qualifying partitions are appended to
 * @param limit soft cap on the number of partitions to collect; checked only when the
 *              transaction write pointer changes, so the total number of partitions consumed
 *              may exceed this value
 * @param predicate filter applied to each partition before it is added to the list
 * @return the transaction id of the first row that was scanned but not consumed once the limit
 *         was hit, or {@code null} if the scanner was exhausted; this value can be useful as a
 *         marker for future scans
 */
@Nullable
private Long scannerToPartitions(Scanner scanner, List<PartitionDetail> partitions, int limit, Predicate<PartitionDetail> predicate) {
Long lastSeenTxId = null;
for (Row row = scanner.next(); row != null; row = scanner.next()) {
PartitionKey partitionKey = parseRowKey(row.getRow(), partitioning);
String path = Bytes.toString(row.get(RELATIVE_PATH));
Long writePointer = Bytes.toLong(row.get(WRITE_PTR_COL));
// Only stop at a write-pointer boundary: either every partition of a transaction is
// consumed or none of them are, so the resume marker stays aligned to transaction ids.
boolean crossedTxBoundary = lastSeenTxId != null && !lastSeenTxId.equals(writePointer);
if (crossedTxBoundary && partitions.size() >= limit) {
return writePointer;
}
lastSeenTxId = writePointer;
BasicPartitionDetail detail =
new BasicPartitionDetail(PartitionedFileSetDataset.this, path, partitionKey, metadataFromRow(row));
if (predicate.apply(detail)) {
partitions.add(detail);
}
}
return null;
}
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata:
class MetadataStoreDataset, method deleteAll.
/**
 * Deletes the value column of every row whose key starts with the given id's key prefix,
 * optionally restricted to rows whose key passes the filter.
 *
 * @param id key whose byte representation is used as the scan prefix
 * @param filter optional key filter; rows whose keys it rejects are left untouched
 */
public void deleteAll(MDSKey id, @Nullable Predicate<MDSKey> filter) {
byte[] startKey = id.getKey();
byte[] endKey = Bytes.stopKeyForPrefix(startKey);
try {
try (Scanner scanner = table.scan(startKey, endKey)) {
for (Row row = scanner.next(); row != null; row = scanner.next()) {
// Rows with no value in the metadata column are skipped, not deleted.
if (row.getString(COLUMN) == null) {
continue;
}
MDSKey rowKey = new MDSKey(row.getRow());
if (filter == null || filter.apply(rowKey)) {
table.delete(new Delete(row.getRow()).add(COLUMN));
}
}
}
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata:
class MetadataStoreDataset, method listKV.
/**
 * Scans rows and builds an insertion-ordered map from row key to deserialized value, applying
 * the optional key and value filters, and stopping once {@code limit} entries have been
 * collected.
 *
 * @param runScan the scan describing the row range to read
 * @param typeOfT type to deserialize each row's column value into
 * @param limit maximum number of entries to return
 * @param keyFilter optional filter on row keys; rejected rows are skipped
 * @param valueFilter optional filter on deserialized values; rejected rows are skipped
 * @return map of qualifying keys to deserialized values, in scan order
 */
private <T> Map<MDSKey, T> listKV(Scan runScan, Type typeOfT, int limit, @Nullable Predicate<MDSKey> keyFilter, @Nullable Predicate<T> valueFilter) {
try {
Map<MDSKey, T> map = Maps.newLinkedHashMap();
try (Scanner scan = table.scan(runScan)) {
Row next;
while ((limit > 0) && (next = scan.next()) != null) {
MDSKey key = new MDSKey(next.getRow());
// Apply the key filter before touching the value: rows rejected by key
// never need to be deserialized.
if (keyFilter != null && !keyFilter.apply(key)) {
continue;
}
byte[] columnValue = next.get(COLUMN);
// Rows with no value in the metadata column are skipped.
if (columnValue == null) {
continue;
}
T value = deserialize(columnValue, typeOfT);
// If the value filter doesn't pass, skip the row.
if (valueFilter != null && !valueFilter.apply(value)) {
continue;
}
map.put(key, value);
limit--;
}
return map;
}
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata:
class MetadataStoreDataset, method exists.
/**
 * Checks whether a non-null value exists in the metadata column for the given key.
 *
 * @param id the key to look up
 * @return {@code true} if the row exists and carries a value in the metadata column
 */
public boolean exists(MDSKey id) {
Row row = table.get(id.getKey());
// An absent row and a row without the column both count as "does not exist".
return !row.isEmpty() && row.get(COLUMN) != null;
}
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata:
class MetadataStoreDataset, method listCombinedFilterKV.
/**
 * Scans rows and builds an insertion-ordered map from row key to deserialized value, applying
 * an optional combined key/value filter, and stopping once {@code limit} entries have been
 * collected.
 *
 * @param runScan the scan describing the row range to read
 * @param typeOfT type to deserialize each row's column value into
 * @param limit maximum number of entries to return
 * @param combinedFilter optional filter over the key/value pair; rejected rows are skipped
 * @return map of qualifying keys to deserialized values, in scan order
 */
private <T> Map<MDSKey, T> listCombinedFilterKV(Scan runScan, Type typeOfT, int limit, @Nullable Predicate<KeyValue<T>> combinedFilter) {
try {
Map<MDSKey, T> result = Maps.newLinkedHashMap();
try (Scanner scanner = table.scan(runScan)) {
int remaining = limit;
Row row;
while (remaining > 0 && (row = scanner.next()) != null) {
MDSKey key = new MDSKey(row.getRow());
byte[] rawValue = row.get(COLUMN);
// Rows with no value in the metadata column are skipped.
if (rawValue == null) {
continue;
}
T value = deserialize(rawValue, typeOfT);
KeyValue<T> pair = new KeyValue<>(key, value);
if (combinedFilter == null || combinedFilter.apply(pair)) {
result.put(pair.getKey(), pair.getValue());
remaining--;
}
}
return result;
}
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
Aggregations