Use of io.cdap.cdap.spi.data.table.field.Range in the cdap project by caskdata.
Example: the listConnections method of the ConnectionStore class.
/**
 * Retrieves every connection stored under the supplied namespace.
 *
 * @param namespace the namespace whose connections should be listed
 * @return all connections found in that namespace
 */
public List<Connection> listConnections(NamespaceSummary namespace) {
  return TransactionRunners.run(transactionRunner, context -> {
    StructuredTable connectionTable = context.getTable(TABLE_ID);
    // A singleton range over the namespace key prefix matches every row in the namespace.
    Range namespaceRange = Range.singleton(getNamespaceKeys(namespace));
    List<Connection> result = new ArrayList<>();
    try (CloseableIterator<StructuredRow> rows = connectionTable.scan(namespaceRange, Integer.MAX_VALUE)) {
      while (rows.hasNext()) {
        result.add(GSON.fromJson(rows.next().getString(CONNECTION_DATA_FIELD), Connection.class));
      }
    }
    return result;
  });
}
Use of io.cdap.cdap.spi.data.table.field.Range in the cdap project by caskdata.
Example: the getJobs method of the JobQueueTable class.
@Override
public CloseableIterator<Job> getJobs(int partition, @Nullable Job lastJobProcessed) throws IOException {
  // The partition id alone is both the default scan start and, always, the scan end
  // (so the scan stays within this partition).
  Set<Field<?>> partitionKey =
      Collections.singleton(Fields.intField(StoreDefinition.JobQueueStore.PARTITION_ID, partition));
  Collection<Field<?>> scanStart;
  Range.Bound startBound;
  if (lastJobProcessed != null) {
    // The job to resume from must belong to the requested partition.
    Preconditions.checkArgument(
        partition == getPartition(lastJobProcessed.getSchedule().getScheduleId()),
        "Job is not from partition '%s': %s", partition, lastJobProcessed);
    scanStart = getJobScanKeys(lastJobProcessed.getSchedule().getScheduleId(),
                               lastJobProcessed.getGenerationId());
    // Exclusive bound so the already-processed job itself is skipped.
    startBound = Range.Bound.EXCLUSIVE;
  } else {
    scanStart = partitionKey;
    startBound = Range.Bound.INCLUSIVE;
  }
  return createJobIterator(
      jobQueueTable.scan(Range.create(scanStart, startBound, partitionKey, Range.Bound.INCLUSIVE),
                         Integer.MAX_VALUE));
}
Use of io.cdap.cdap.spi.data.table.field.Range in the cdap project by caskdata.
Example: the scan method of the ProgramHeartbeatTable class.
/**
 * Scans the heartbeat table over the half-open time range
 * [startTimestampInSeconds, endTimestampInSeconds) for each of the given namespaces and
 * returns the latest {@link RunRecordDetail} per {@link ProgramRunId}. A single run id can
 * have multiple rows because of repeated state changes and heartbeat messages; only the
 * most recent record for each run is retained.
 *
 * @param startTimestampInSeconds inclusive start of the scanned time range
 * @param endTimestampInSeconds exclusive end of the scanned time range
 * @param namespaces the namespaces to scan
 * @return the latest run records, one per program run
 */
public Collection<RunRecordDetail> scan(long startTimestampInSeconds, long endTimestampInSeconds, Set<String> namespaces) throws IOException {
  List<RunRecordDetail> records = new ArrayList<>();
  Collection<Range> scanRanges = new ArrayList<>();
  // One [start, end) range per namespace; all of them are scanned in a single multi-scan.
  for (String ns : namespaces) {
    scanRanges.add(Range.create(getScanKey(ns, startTimestampInSeconds), Range.Bound.INCLUSIVE,
                                getScanKey(ns, endTimestampInSeconds), Range.Bound.EXCLUSIVE));
  }
  performMultiScanAddToList(scanRanges, records);
  return records;
}
Use of io.cdap.cdap.spi.data.table.field.Range in the cdap project by caskdata.
Example: the getFiles method of the FileMetaDataReader class.
/**
 * Fetches all file metadata rows for the given log context whose event time does not
 * exceed {@code endTimestampMs}, converting each row to a {@link LogLocation}.
 *
 * Scan bounds (both inclusive):
 *   start = context : event-time(0) : create-time(0)
 *   end   = context : event-time(endTimestamp)  — create time is irrelevant for the end key
 */
private List<LogLocation> getFiles(StructuredTable metaTable, LogPathIdentifier logPathIdentifier, long endTimestampMs) throws IOException {
  Range scanRange = Range.create(
      getKeyFields(logPathIdentifier.getRowkey(), 0L, 0L), Range.Bound.INCLUSIVE,
      getPartialKey(logPathIdentifier.getRowkey(), endTimestampMs), Range.Bound.INCLUSIVE);
  List<LogLocation> locations = new ArrayList<>();
  try (CloseableIterator<StructuredRow> rows = metaTable.scan(scanRange, Integer.MAX_VALUE)) {
    while (rows.hasNext()) {
      locations.add(fromRow(rows.next(), logPathIdentifier.getNamespaceId()));
    }
  }
  return locations;
}
Use of io.cdap.cdap.spi.data.table.field.Range in the cdap project by cdapio.
Example: the multiScan method of the PostgreSqlStructuredTable class.
/**
 * Scans the table over all of the given key ranges, returning at most {@code limit} rows.
 * Singleton ranges are grouped per key field so they can be issued as IN conditions;
 * all other ranges become genuine range scans. A range that is open on both ends
 * means the whole table must be scanned, so a plain full scan is issued instead.
 */
@Override
public CloseableIterator<StructuredRow> multiScan(Collection<Range> keyRanges, int limit) throws InvalidFieldException, IOException {
  LOG.trace("Table {}: MultiScan ranges {} with limit {}", tableSchema.getTableId(), keyRanges, limit);
  if (keyRanges.isEmpty()) {
    return CloseableIterator.empty();
  }
  Map<String, Set<Field<?>>> inConditionFields = new LinkedHashMap<>();
  List<Range> nonSingletonRanges = new ArrayList<>();
  boolean fullScanRequested = false;
  for (Range keyRange : keyRanges) {
    // Every range is validated up front, even if a full scan is detected along the way.
    fieldValidator.validatePrimaryKeys(keyRange.getBegin(), true);
    fieldValidator.validatePrimaryKeys(keyRange.getEnd(), true);
    if (keyRange.isSingleton()) {
      for (Field<?> field : keyRange.getBegin()) {
        inConditionFields.computeIfAbsent(field.getName(), name -> new LinkedHashSet<>()).add(field);
      }
    } else {
      if (keyRange.getBegin().isEmpty() && keyRange.getEnd().isEmpty()) {
        fullScanRequested = true;
      }
      nonSingletonRanges.add(keyRange);
    }
  }
  if (fullScanRequested) {
    return scan(Range.all(), limit);
  }
  try {
    // The statement is deliberately left open here; ResultSetIterator.close() releases it.
    PreparedStatement statement = prepareMultiScanQuery(inConditionFields, nonSingletonRanges, limit);
    LOG.trace("MultiScan SQL statement: {}", statement);
    return new ResultSetIterator(statement, statement.executeQuery(), tableSchema);
  } catch (SQLException e) {
    throw new IOException(String.format("Failed to scan from table %s with ranges %s", tableSchema.getTableId().getName(), keyRanges), e);
  }
}
Aggregations