Use of co.cask.cdap.api.dataset.table.Put in project cdap by caskdata.
Example: the write method of the AbstractTable class.
/**
 * Writes a {@link StructuredRecord} to this table by converting it to a {@link Put}.
 * Requires the record-to-put transformer to be configured; otherwise the table
 * cannot act as a RecordWritable.
 *
 * @param structuredRecord the record to persist
 * @throws IOException if the underlying write fails
 * @throws IllegalStateException if the table is missing the schema properties
 *         needed to build the transformer
 */
@WriteOnly
@Override
public void write(StructuredRecord structuredRecord) throws IOException {
  if (recordPutTransformer == null) {
    String message = String.format(
        "Table must have both '%s' and '%s' properties set in order to be used as a RecordWritable.",
        Table.PROPERTY_SCHEMA, Table.PROPERTY_SCHEMA_ROW_FIELD);
    throw new IllegalStateException(message);
  }
  put(recordPutTransformer.toPut(structuredRecord));
}
Use of co.cask.cdap.api.dataset.table.Put in project cdap by caskdata.
Example: the addPartition method of the PartitionedFileSetDataset class.
/**
 * Adds a new partition row to the partitions table and registers it with Explore.
 * Fails with a {@link DataSetException} if a partition with the same key already exists.
 *
 * @param key          the partition key; one column per key field is written
 * @param path         the partition's path, stored relative to the dataset
 * @param metadata     user metadata to attach to the partition row
 * @param filesCreated whether files were already created for this partition
 */
private void addPartition(PartitionKey key, String path, Map<String, String> metadata, boolean filesCreated) {
// NOTE(review): the operation is recorded in operationsInThisTx BEFORE the duplicate
// check below — presumably so rollback handling can undo partial work; confirm.
AddPartitionOperation operation = new AddPartitionOperation(key, path, filesCreated);
operationsInThisTx.add(operation);
byte[] rowKey = generateRowKey(key, partitioning);
// Reject duplicates: a non-empty row means this key was already added.
Row row = partitionsTable.get(rowKey);
if (!row.isEmpty()) {
throw new DataSetException(String.format("Dataset '%s' already has a partition with the same key: %s", getName(), key.toString()));
}
LOG.debug("Adding partition with key {} and path {} to dataset {}", key, path, getName());
Put put = new Put(rowKey);
put.add(RELATIVE_PATH, Bytes.toBytes(path));
byte[] nowInMillis = Bytes.toBytes(System.currentTimeMillis());
put.add(CREATION_TIME_COL, nowInMillis);
// Persist each partition-key field as its own column so the key can be reconstructed.
for (Map.Entry<String, ? extends Comparable> entry : key.getFields().entrySet()) {
// "f.<field name>"
put.add(// "f.<field name>"
Bytes.add(FIELD_PREFIX, Bytes.toBytes(entry.getKey())), // "<string rep. of value>"
Bytes.toBytes(entry.getValue().toString()));
}
addMetadataToPut(metadata, put);
// index each row by its transaction's write pointer
put.add(WRITE_PTR_COL, tx.getWritePointer());
partitionsTable.put(put);
// Make the new partition visible to Explore, then mark that step as done on the
// operation so it can be reversed if the transaction later rolls back.
addPartitionToExplore(key, path);
operation.setExplorePartitionCreated();
}
Use of co.cask.cdap.api.dataset.table.Put in project cdap by caskdata.
Example: the putBytes method of the MetricsTableOnTable class.
/**
 * Writes a batch of raw byte updates: for every row in {@code updates}, builds one
 * {@link Put} containing all of that row's column/value pairs and writes it to the
 * underlying table.
 *
 * @param updates map of row key to (column key to value) updates
 */
@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
  for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> rowEntry : updates.entrySet()) {
    SortedMap<byte[], byte[]> columns = rowEntry.getValue();
    Put rowPut = new Put(rowEntry.getKey());
    for (Map.Entry<byte[], byte[]> column : columns.entrySet()) {
      rowPut.add(column.getKey(), column.getValue());
    }
    table.put(rowPut);
  }
}
Use of co.cask.cdap.api.dataset.table.Put in project cdap by caskdata.
Example: the testRest method of the DatasetOpExecutorServiceTest class.
/**
 * Exercises the dataset admin REST operations end to end: existence checks,
 * instance creation, truncate, upgrade, and deletion.
 */
@Test
public void testRest() throws Exception {
  // A dataset that was never created reports 404 on "exists".
  testAdminOp(bob, "exists", 404, null);

  // Creating the instance makes the "exists" check succeed for it —
  // but only for the exact id; the plain name "bob" still gets 404.
  dsFramework.addInstance("table", bob, DatasetProperties.EMPTY);
  testAdminOp(bob, "exists", 200, true);
  testAdminOp("bob", "exists", 404, null);

  // Obtain the dataset and a transaction executor over it so we can read/write.
  final Table table = dsFramework.getDataset(bob, DatasetDefinition.NO_ARGUMENTS, null);
  Assert.assertNotNull(table);
  TransactionExecutor executor = new DefaultTransactionExecutor(
      new InMemoryTxSystemClient(txManager), ImmutableList.of((TransactionAware) table));

  // Write a single cell inside a transaction.
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      table.put(new Put("key1", "col1", "val1"));
    }
  });

  // Confirm the cell is readable.
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Assert.assertEquals("val1", table.get(new Get("key1", "col1")).getString("col1"));
    }
  });

  // Truncate through the admin endpoint, then verify the data is gone.
  testAdminOp(bob, "truncate", 200, null);
  executor.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Assert.assertTrue(table.get(new Get("key1", "col1")).isEmpty());
    }
  });

  // Upgrade should succeed on an existing instance.
  testAdminOp(bob, "upgrade", 200, null);

  // After deletion, existence checks return 404 again.
  dsFramework.deleteInstance(bob);
  testAdminOp(bob, "exists", 404, null);
}
Use of co.cask.cdap.api.dataset.table.Put in project cdap by caskdata.
Example: the toPut method of the RecordPutTransformer class.
/**
 * Converts a {@link StructuredRecord} into a {@link Put}: the record's key field
 * becomes the row key, and every other field becomes a column. Fields absent from
 * the configured output schema (when one is set) are skipped.
 *
 * @param record the record to convert; its schema must be of type RECORD
 * @return a Put containing the record's non-key fields as columns
 */
public Put toPut(StructuredRecord record) {
  Schema recordSchema = record.getSchema();
  Preconditions.checkArgument(recordSchema.getType() == Schema.Type.RECORD, "input must be a record.");
  Schema.Field keyField = getKeyField(recordSchema);
  Preconditions.checkArgument(keyField != null, "Could not find key field in record.");

  Put output = createPut(record, keyField);
  for (Schema.Field field : recordSchema.getFields()) {
    String fieldName = field.getName();
    boolean isKeyField = fieldName.equals(keyField.getName());
    // When an output schema is configured, only fields it declares are written.
    boolean inOutputSchema = outputSchema == null || outputSchema.getField(fieldName) != null;
    if (!isKeyField && inOutputSchema) {
      setField(output, field, record.get(fieldName));
    }
  }
  return output;
}
Aggregations