Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.
In the class WorkflowDataset, the method delete:
public void delete(ApplicationId id) {
  MDSKey mdsKey = new MDSKey.Builder().add(id.getNamespace()).add(id.getApplication()).build();
  // scan every row whose key starts with the <namespace, application> prefix and delete it
  Scanner scanner = table.scan(mdsKey.getKey(), Bytes.stopKeyForPrefix(mdsKey.getKey()));
  Row row;
  try {
    while ((row = scanner.next()) != null) {
      table.delete(row.getRow());
    }
  } finally {
    scanner.close();
  }
}
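For comparison, a minimal self-contained sketch of the same prefix-scan-and-delete pattern against an arbitrary Table. The class PrefixDelete and its helper deleteByPrefix are hypothetical names, not part of CDAP; the sketch assumes the caller already holds a transactional Table instance.

import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.api.dataset.table.Row;
import co.cask.cdap.api.dataset.table.Scanner;
import co.cask.cdap.api.dataset.table.Table;

public final class PrefixDelete {

  private PrefixDelete() { }

  /** Deletes every row whose key starts with the given prefix. */
  public static void deleteByPrefix(Table table, byte[] prefix) {
    // stopKeyForPrefix yields the smallest key greater than every key with this prefix,
    // so the scan is bounded to exactly the prefixed rows
    Scanner scanner = table.scan(prefix, Bytes.stopKeyForPrefix(prefix));
    try {
      Row row;
      while ((row = scanner.next()) != null) {
        table.delete(row.getRow());
      }
    } finally {
      scanner.close();
    }
  }
}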
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.
In the class WorkflowDataset, the method scan:
/**
 * This function scans the workflow.stats dataset for a list of workflow runs in a time range.
 *
 * @param id The workflow id
 * @param timeRangeStart Start of the time range that the scan should begin from
 * @param timeRangeEnd End of the time range that the scan should end at
 * @return List of WorkflowRunRecords
 */
private List<WorkflowRunRecord> scan(WorkflowId id, long timeRangeStart, long timeRangeEnd) {
  byte[] startRowKey = getRowKeyBuilder(id, timeRangeStart).build().getKey();
  byte[] endRowKey = getRowKeyBuilder(id, timeRangeEnd).build().getKey();
  Scan scan = new Scan(startRowKey, endRowKey);
  Scanner scanner = table.scan(scan);
  Row indexRow;
  List<WorkflowRunRecord> workflowRunRecordList = new ArrayList<>();
  while ((indexRow = scanner.next()) != null) {
    Map<byte[], byte[]> columns = indexRow.getColumns();
    String workflowRunId = Bytes.toString(columns.get(RUNID));
    long timeTaken = Bytes.toLong(columns.get(TIME_TAKEN));
    List<ProgramRun> programRunList = GSON.fromJson(Bytes.toString(columns.get(NODES)), PROGRAM_RUNS_TYPE);
    WorkflowRunRecord workflowRunRecord = new WorkflowRunRecord(workflowRunId, timeTaken, programRunList);
    workflowRunRecordList.add(workflowRunRecord);
  }
  return workflowRunRecordList;
}
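The scan above follows a general pattern: bound the scan with start and stop row keys, then decode each row's columns into a record. A minimal sketch of that pattern follows; RunSummary and the "runId"/"timeTaken" column names are hypothetical stand-ins for the dataset's RUNID and TIME_TAKEN constants, and building the row keys (getRowKeyBuilder in the snippet) is left to the caller.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import co.cask.cdap.api.common.Bytes;
import co.cask.cdap.api.dataset.table.Row;
import co.cask.cdap.api.dataset.table.Scan;
import co.cask.cdap.api.dataset.table.Scanner;
import co.cask.cdap.api.dataset.table.Table;

public final class TimeRangeScan {

  /** Scans the rows between the two keys and decodes each row into a RunSummary. */
  public List<RunSummary> scanRange(Table table, byte[] startRowKey, byte[] stopRowKey) {
    List<RunSummary> results = new ArrayList<>();
    Scanner scanner = table.scan(new Scan(startRowKey, stopRowKey));
    try {
      Row row;
      while ((row = scanner.next()) != null) {
        Map<byte[], byte[]> columns = row.getColumns();
        String runId = Bytes.toString(columns.get(Bytes.toBytes("runId")));
        long timeTaken = Bytes.toLong(columns.get(Bytes.toBytes("timeTaken")));
        results.add(new RunSummary(runId, timeTaken));
      }
    } finally {
      scanner.close();
    }
    return results;
  }

  public static final class RunSummary {
    final String runId;
    final long timeTaken;

    RunSummary(String runId, long timeTaken) {
      this.runId = runId;
      this.timeTaken = timeTaken;
    }
  }
}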
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.
In the class MockSink, the method clear:
/**
 * Clear any records written to this sink.
 *
 * @param tableManager dataset manager used to get the sink dataset
 */
public static void clear(DataSetManager<Table> tableManager) {
  tableManager.flush();
  Table table = tableManager.get();
  try (Scanner scanner = table.scan(null, null)) {
    Row row;
    while ((row = scanner.next()) != null) {
      table.delete(row.getRow());
    }
  }
  tableManager.flush();
}
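A hypothetical test fragment showing when clear is useful: reading the sink's output after one pipeline run and resetting it before the next. It assumes a test class extending CDAP's HydratorTestBase, where getDataset returns a DataSetManager<Table>; the dataset name "outputDataset" is made up.

// inside a test method; "outputDataset" is a made-up sink dataset name
DataSetManager<Table> sinkManager = getDataset("outputDataset");
List<StructuredRecord> firstOutput = MockSink.readOutput(sinkManager);
// ... assertions on firstOutput ...
MockSink.clear(sinkManager);  // the next run now starts from an empty sink table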
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.
In the class MetricsDataMigrator, the method migrateMetricsData:
private void migrateMetricsData(EntityTable entityTable, MetricsTable metricsTable, String scope, Version version) {
  MetricsEntityCodec codec = getEntityCodec(entityTable);
  int idSize = getIdSize(version);
  Row row;
  long rowCount = 0;
  try {
    Scanner scanner = metricsTable.scan(null, null, null);
    while ((row = scanner.next()) != null) {
      byte[] rowKey = row.getRow();
      // the row key encodes context, metric name and run id back to back;
      // decode each part and advance the offset by its encoded size
      int offset = 0;
      String context = codec.decode(MetricsEntityType.CONTEXT, rowKey, offset, idSize);
      context = getContextBasedOnVersion(context, version);
      offset += codec.getEncodedSize(MetricsEntityType.CONTEXT, idSize);
      String metricName = codec.decode(MetricsEntityType.METRIC, rowKey, offset, idSize);
      offset += codec.getEncodedSize(MetricsEntityType.METRIC, idSize);
      scope = getScopeBasedOnVersion(scope, metricName, version);
      metricName = getMetricNameBasedOnVersion(metricName, version);
      String runId = codec.decode(MetricsEntityType.RUN, rowKey, offset, idSize);
      // hand the decoded parts and the row's column values to the new-format writer
      parseAndAddNewMetricValue(scope, context, metricName, runId, row.getColumns());
      rowCount++;
      printStatus(rowCount);
    }
    System.out.println("Migrated " + rowCount + " records");
  } catch (Exception e) {
    LOG.warn("Exception during data-transfer in aggregates table", e);
    // no-op
  }
}
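Stripped of the version handling and the internal codec, the migration loop is a full-table scan with per-row processing. Below is a minimal sketch of that pattern against the public Table API; FullTableMigration and processRow are hypothetical names, and the scanner is explicitly closed in a finally block.

import java.util.Map;
import co.cask.cdap.api.dataset.table.Row;
import co.cask.cdap.api.dataset.table.Scanner;
import co.cask.cdap.api.dataset.table.Table;

public final class FullTableMigration {

  /** Scans every row of the source table and returns the number of rows processed. */
  public long migrate(Table source) {
    long rowCount = 0;
    Scanner scanner = source.scan(null, null);  // null bounds scan the whole table
    try {
      Row row;
      while ((row = scanner.next()) != null) {
        processRow(row.getRow(), row.getColumns());
        rowCount++;
        if (rowCount % 10000 == 0) {
          System.out.println("Processed " + rowCount + " rows");
        }
      }
    } finally {
      scanner.close();
    }
    return rowCount;
  }

  // hypothetical hook standing in for the per-row decode/re-encode work done by the migrator
  private void processRow(byte[] rowKey, Map<byte[], byte[]> columns) {
    // no-op in this sketch
  }
}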
Use of co.cask.cdap.api.dataset.table.Row in project cdap by caskdata.
In the class PartitionedFileSetDataset, the method addMetadata:
@WriteOnly
@Override
public void addMetadata(PartitionKey key, Map<String, String> metadata) {
  final byte[] rowKey = generateRowKey(key, partitioning);
  Row row = partitionsTable.get(rowKey);
  if (row.isEmpty()) {
    throw new PartitionNotFoundException(key, getName());
  }
  // ensure that none of the entries already exist in the metadata
  for (Map.Entry<String, String> metadataEntry : metadata.entrySet()) {
    String metadataKey = metadataEntry.getKey();
    byte[] columnKey = columnKeyFromMetadataKey(metadataKey);
    if (row.get(columnKey) != null) {
      throw new DataSetException(String.format("Entry already exists for metadata key: %s", metadataKey));
    }
  }
  Put put = new Put(rowKey);
  addMetadataToPut(metadata, put);
  partitionsTable.put(put);
}
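From the caller's side this surfaces through the PartitionedFileSet API (the @Override above indicates the interface declares the method). A hypothetical fragment, with a made-up partitioning field, value, and metadata entry:

import java.util.HashMap;
import java.util.Map;
import co.cask.cdap.api.dataset.lib.PartitionKey;
import co.cask.cdap.api.dataset.lib.PartitionedFileSet;

public class TagPartitionExample {

  // pfs would typically be obtained via @UseDataSet or getDataset(...) in program code
  public void tagPartition(PartitionedFileSet pfs) {
    PartitionKey key = PartitionKey.builder()
      .addLongField("time", 1234567890L)   // made-up partitioning field and value
      .build();
    Map<String, String> metadata = new HashMap<>();
    metadata.put("source", "nightly-batch");
    // throws PartitionNotFoundException if the partition does not exist and
    // DataSetException if any metadata key is already present (see above)
    pfs.addMetadata(key, metadata);
  }
}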