Usage of io.cdap.cdap.api.dataset.table.Put in project cdap by caskdata.
From class AbstractTable, method write().
@WriteOnly
@Override
public void write(StructuredRecord structuredRecord) throws IOException {
// The transformer is only present when the table was configured with a schema
// and a row field; without it a record cannot be mapped to a Put.
if (recordPutTransformer == null) {
String message = String.format("Table must have both '%s' and '%s' properties set in " + "order to be used as a RecordWritable.", Table.PROPERTY_SCHEMA, Table.PROPERTY_SCHEMA_ROW_FIELD);
throw new IllegalStateException(message);
}
// Convert the record and write it in one step.
put(recordPutTransformer.toPut(structuredRecord));
}
Usage of io.cdap.cdap.api.dataset.table.Put in project cdap by caskdata.
From class PartitionedFileSetDataset, method setMetadata().
/**
 * Writes metadata for an existing partition identified by the given key.
 * Throws {@link PartitionNotFoundException} if no partition row exists for the key.
 * Whether existing metadata entries may be overwritten is controlled by allowUpdates
 * (enforced inside addMetadataToPut).
 */
private void setMetadata(PartitionKey key, Map<String, String> metadata, boolean allowUpdates) {
byte[] partitionRowKey = generateRowKey(key, partitioning);
Row existing = partitionsTable.get(partitionRowKey);
// Metadata can only be attached to a partition that is already registered.
if (existing.isEmpty()) {
throw new PartitionNotFoundException(key, getName());
}
Put metadataPut = new Put(partitionRowKey);
addMetadataToPut(existing, metadata, metadataPut, allowUpdates);
partitionsTable.put(metadataPut);
}
Usage of io.cdap.cdap.api.dataset.table.Put in project cdap by caskdata.
From class PartitionedFileSetDataset, method addPartition().
/**
 * Registers a partition under the given key, or appends to an existing one.
 *
 * @param key the partition key; the table row key is derived from it
 * @param path the partition's relative path; must match the existing path when appending
 * @param metadata metadata entries to store on the partition row
 * @param filesCreated passed through to the AddPartitionOperation recorded for this
 *                     transaction — presumably indicates whether files already exist on
 *                     disk for rollback purposes; TODO confirm against AddPartitionOperation
 * @param allowAppend whether an already-existing partition with this key is tolerated
 * @throws PartitionAlreadyExistsException if the partition exists and appends are not allowed
 * @throws DataSetException if appending with a path different from the original
 */
public void addPartition(PartitionKey key, String path, Map<String, String> metadata, boolean filesCreated, boolean allowAppend) {
byte[] rowKey = generateRowKey(key, partitioning);
Row row = partitionsTable.get(rowKey);
// A non-empty row means a partition with this key is already registered.
boolean appending = !row.isEmpty();
if (appending && !allowAppend) {
throw new PartitionAlreadyExistsException(getName(), key);
}
if (appending) {
// this can happen if user originally created the partition with a custom relative path
String existingPath = Bytes.toString(row.get(RELATIVE_PATH));
if (!path.equals(existingPath)) {
throw new DataSetException(String.format("Attempting to append to Dataset '%s', to partition '%s' with a " + "different path. Original path: '%s'. New path: '%s'", getName(), key.toString(), existingPath, path));
}
}
LOG.debug("{} partition with key {} and path {} to dataset {}", appending ? "Appending to" : "Creating", key, path, getName());
// Record the operation before mutating the table so the transaction can undo it.
AddPartitionOperation operation = new AddPartitionOperation(key, path, filesCreated);
operationsInThisTx.add(operation);
Put put = new Put(rowKey);
byte[] nowInMillis = Bytes.toBytes(System.currentTimeMillis());
// Path and creation time are written once, on initial creation only.
if (!appending) {
put.add(RELATIVE_PATH, Bytes.toBytes(path));
put.add(CREATION_TIME_COL, nowInMillis);
}
// Last-modification time is refreshed on both create and append.
put.add(LAST_MODIFICATION_TIME_COL, nowInMillis);
// we allow updates, because an update will only happen if its an append
addMetadataToPut(row, metadata, put, true);
// index each row by its transaction's write pointer
put.add(WRITE_PTR_COL, tx.getWritePointer());
partitionsTable.put(put);
// Explore registration happens only for brand-new partitions, and only after the
// table put succeeded; the operation is marked so rollback knows to undo it.
if (!appending) {
addPartitionToExplore(key, path);
operation.setExplorePartitionCreated();
}
}
Usage of io.cdap.cdap.api.dataset.table.Put in project cdap by caskdata.
From class TestFrameworkTestRun, method testAppRedeployKeepsData().
@Test
public void testAppRedeployKeepsData() throws Exception {
deployApplication(testSpace, AppWithTable.class);
DataSetManager<Table> firstManager = getDataset(testSpace.dataset("my_table"));
firstManager.get().put(new Put("key1", "column1", "value1"));
firstManager.flush();
// A dataset manager obtained after the flush must observe the committed write.
DataSetManager<Table> secondManager = getDataset(testSpace.dataset("my_table"));
Assert.assertEquals("value1", secondManager.get().get(new Get("key1", "column1")).getString("column1"));
// Redeploying the application must not wipe the table's data.
deployApplication(AppWithTable.class);
DataSetManager<Table> thirdManager = getDataset(testSpace.dataset("my_table"));
Assert.assertEquals("value1", thirdManager.get().get(new Get("key1", "column1")).getString("column1"));
// Flush a second time to verify commit can be invoked repeatedly.
firstManager.get().put(new Put("key1", "column1", "value2"));
firstManager.flush();
// NOTE(review): this still expects "value1" even though "value2" was just flushed;
// thirdManager was obtained before that write — confirm whether manager instances
// need to be re-fetched (or flushed) to observe later commits.
Assert.assertEquals("value1", thirdManager.get().get(new Get("key1", "column1")).getString("column1"));
}
Usage of io.cdap.cdap.api.dataset.table.Put in project cdap by caskdata.
From class AbstractMockSink, method transform().
@Override
public void transform(StructuredRecord input, Emitter<KeyValue<byte[], Put>> emitter) throws Exception {
// Build a unique row key from a timestamp, a monotonically increasing counter,
// and a random UUID so successive records never collide.
byte[] timestampPart = Bytes.toBytes(System.currentTimeMillis());
byte[] counterPart = Bytes.toBytes(inputCounter.incrementAndGet());
byte[] uuidPart = Bytes.toBytes(UUID.randomUUID());
byte[] rowkey = Bytes.concat(timestampPart, counterPart, uuidPart);
// Store both the record's schema and its JSON form under that key.
Put record = new Put(rowkey);
record.add(SCHEMA_COL, input.getSchema().toString());
record.add(RECORD_COL, StructuredRecordStringConverter.toJsonString(input));
emitter.emit(new KeyValue<>(rowkey, record));
}
Aggregations