Example use of org.apache.tephra.TransactionAware in the CDAP project (caskdata):
the testUpdateMetadata method of the class PartitionedFileSetTest.
@Test
public void testUpdateMetadata() throws Exception {
  // Verifies partition metadata semantics: addMetadata() merges new keys into the
  // metadata supplied at partition creation, refuses to overwrite an existing key,
  // and refuses to operate on a partition that does not exist.
  final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
  dsFrameworkUtil.newTransactionExecutor((TransactionAware) dataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // create a partition that carries initial metadata
      PartitionOutput partitionOutput = dataset.getPartitionOutput(PARTITION_KEY);
      ImmutableMap<String, String> originalEntries = ImmutableMap.of("key1", "value1");
      partitionOutput.setMetadata(originalEntries);
      partitionOutput.addPartition();
      // add a second, disjoint metadata entry after the partition exists
      ImmutableMap<String, String> updatedMetadata = ImmutableMap.of("key2", "value2");
      dataset.addMetadata(PARTITION_KEY, updatedMetadata);
      PartitionDetail partitionDetail = dataset.getPartition(PARTITION_KEY);
      Assert.assertNotNull(partitionDetail);
      // the partition's metadata must now be the union of both maps
      HashMap<String, String> combinedEntries = Maps.newHashMap();
      combinedEntries.putAll(originalEntries);
      combinedEntries.putAll(updatedMetadata);
      Assert.assertEquals(combinedEntries, partitionDetail.getMetadata().asMap());
      // adding an entry for a key that already exists will throw an Exception
      try {
        dataset.addMetadata(PARTITION_KEY, "key2", "value3");
        Assert.fail("Expected not to be able to update an existing metadata entry");
      } catch (DataSetException expected) {
      }
      PartitionKey nonexistentPartitionKey = PartitionKey.builder().addIntField("i", 42).addLongField("l", 17L).addStringField("s", "nonexistent").build();
      try {
        // adding metadata for a partition that does not exist will throw an Exception
        dataset.addMetadata(nonexistentPartitionKey, "key2", "value3");
        Assert.fail("Expected not to be able to add metadata for a nonexistent partition");
      } catch (DataSetException expected) {
      }
    }
  });
}
Example use of org.apache.tephra.TransactionAware in the CDAP project (caskdata):
the testAddRemoveGetPartitions method of the class PartitionedFileSetTest.
@Test
@Category(SlowTests.class)
public void testAddRemoveGetPartitions() throws Exception {
  // Builds a 4x4x4 grid of partitions over the fields (s, i, l), then verifies exact-key
  // lookups and filter queries, and re-verifies all filters after dropping partitions.
  final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
  // keys and relative paths are remembered by loop index so lookups can be verified later
  final PartitionKey[][][] keys = new PartitionKey[4][4][4];
  final String[][][] paths = new String[4][4][4];
  final Set<BasicPartition> allPartitionDetails = Sets.newHashSet();
  // add a bunch of partitions
  for (int s = 0; s < 4; s++) {
    for (int i = 0; i < 4; i++) {
      for (int l = 0; l < 4; l++) {
        // field values are derived from the indices; note "l" decreases as the index
        // grows, so ordering by field value differs from ordering by index
        final PartitionKey key = PartitionKey.builder().addField("s", String.format("%c-%d", 'a' + s, s)).addField("i", i * 100).addField("l", 15L - 10 * l).build();
        // each partition is created in its own transaction
        BasicPartition basicPartition = dsFrameworkUtil.newTransactionExecutor((TransactionAware) dataset).execute(new Callable<BasicPartition>() {
          @Override
          public BasicPartition call() throws Exception {
            PartitionOutput p = dataset.getPartitionOutput(key);
            p.addPartition();
            return new BasicPartition((PartitionedFileSetDataset) dataset, p.getRelativePath(), p.getPartitionKey());
          }
        });
        keys[s][i][l] = key;
        paths[s][i][l] = basicPartition.getRelativePath();
        allPartitionDetails.add(basicPartition);
      }
    }
  }
  // validate getPartition with exact partition key
  for (int s = 0; s < 4; s++) {
    for (int i = 0; i < 4; i++) {
      for (int l = 0; l < 4; l++) {
        final PartitionKey key = keys[s][i][l];
        final String path = paths[s][i][l];
        dsFrameworkUtil.newTransactionExecutor((TransactionAware) dataset).execute(new TransactionExecutor.Subroutine() {
          @Override
          public void apply() throws Exception {
            PartitionDetail partitionDetail = dataset.getPartition(key);
            Assert.assertNotNull(partitionDetail);
            Assert.assertEquals(path, partitionDetail.getRelativePath());
          }
        });
        // also test getPartitionPaths() and getPartitions() for the filter matching this
        // single key exactly (a value condition on every partitioning field)
        @SuppressWarnings({ "unchecked", "unused" }) boolean success = testFilter(dataset, allPartitionDetails, PartitionFilter.builder().addValueCondition("l", key.getField("l")).addValueCondition("s", key.getField("s")).addValueCondition("i", key.getField("i")).build());
      }
    }
  }
  // test whether query works without filter
  testFilter(dataset, allPartitionDetails, null);
  // generate a list of partition filters with exhaustive coverage
  List<PartitionFilter> filters = generateFilters();
  // test all kinds of filters
  testAllFilters(dataset, allPartitionDetails, filters);
  // remove a few of the partitions and test again, repeatedly
  PartitionKey[] keysToRemove = { keys[1][2][3], keys[0][1][0], keys[2][3][2], keys[3][1][2] };
  for (final PartitionKey key : keysToRemove) {
    // remove in a transaction
    dsFrameworkUtil.newTransactionExecutor((TransactionAware) dataset).execute(new TransactionExecutor.Procedure<PartitionKey>() {
      @Override
      public void apply(PartitionKey partitionKey) throws Exception {
        dataset.dropPartition(partitionKey);
      }
    }, key);
    // drop the removed partition from the expected set, then re-verify all filters
    BasicPartition toRemove = Iterables.tryFind(allPartitionDetails, new com.google.common.base.Predicate<BasicPartition>() {
      @Override
      public boolean apply(BasicPartition partition) {
        return key.equals(partition.getPartitionKey());
      }
    }).get();
    allPartitionDetails.remove(toRemove);
    testAllFilters(dataset, allPartitionDetails, filters);
  }
}
Example use of org.apache.tephra.TransactionAware in the CDAP project (caskdata):
the testInvalidPartitionFilter method of the class PartitionedFileSetTest.
@Test
public void testInvalidPartitionFilter() throws Exception {
  final PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance);
  dsFrameworkUtil.newTransactionExecutor((TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // A filter on a field that is not part of the partitioning must not fail;
      // it simply matches nothing (a warning is logged by the dataset).
      PartitionFilter unknownFieldFilter = PartitionFilter.builder().addValueCondition("me-not-there", 42).build();
      Assert.assertEquals(Collections.EMPTY_SET, pfs.getPartitions(unknownFieldFilter));
    }
  });
}
Example use of org.apache.tephra.TransactionAware in the CDAP project (caskdata):
the testInvalidPartitionKey method of the class PartitionedFileSetTest.
@Test
public void testInvalidPartitionKey() throws Exception {
  final PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsInstance);
  dsFrameworkUtil.newTransactionExecutor((TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // Each key below violates the dataset's partitioning (i, l, s) in one specific
      // way, and every operation taking a key must reject it.

      // key lacks the "s" field entirely
      PartitionKey missingField = PartitionKey.builder().addField("i", 1).addField("l", 2L).build();
      try {
        pfs.getPartitionOutput(missingField);
        Assert.fail("should have thrown exception due to missing field");
      } catch (IllegalArgumentException e) {
        // expected
      }

      // "l" is supplied as a String instead of a long
      PartitionKey wrongTypeForAdd = PartitionKey.builder().addField("i", 1).addField("l", "2").addField("s", "a").build();
      try {
        pfs.addPartition(wrongTypeForAdd, "some/location");
        Assert.fail("should have thrown exception due to incompatible field");
      } catch (IllegalArgumentException e) {
        // expected
      }

      // key carries an extra field "x" that the partitioning does not declare
      PartitionKey extraFieldForAdd = PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", "a").addField("x", "x").build();
      try {
        pfs.addPartition(extraFieldForAdd, "some/location", ImmutableMap.of("a", "b"));
        Assert.fail("should have thrown exception due to extra field");
      } catch (IllegalArgumentException e) {
        // expected
      }

      // a well-formed key is accepted
      PartitionKey validKey = PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", "a").build();
      pfs.addPartition(validKey, "some/location", ImmutableMap.of("a", "b"));

      // metadata operations validate the key the same way: extra field is rejected
      PartitionKey extraFieldForMetadata = PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", "a").addField("x", "x").build();
      try {
        pfs.addMetadata(extraFieldForMetadata, ImmutableMap.of("abc", "xyz"));
        Assert.fail("should have thrown exception due to extra field");
      } catch (IllegalArgumentException e) {
        // expected
      }

      // dropPartition also rejects an incompatible field type ("s" given as an int)
      PartitionKey wrongTypeForDrop = PartitionKey.builder().addField("i", 1).addField("l", 2L).addField("s", 0).build();
      try {
        pfs.dropPartition(wrongTypeForDrop);
        Assert.fail("should have thrown exception due to incompatible field");
      } catch (IllegalArgumentException e) {
        // expected
      }
    }
  });
}
Example use of org.apache.tephra.TransactionAware in the CDAP project (caskdata):
the testAddRemoveGetPartitionExternal method of the class PartitionedFileSetTest.
@Test
public void testAddRemoveGetPartitionExternal() throws Exception {
  // Validates a PartitionedFileSet created with setDataExternal(true): it manages only
  // metadata, so it refuses to create files itself, accepts externally created files as
  // partitions, and leaves the external files untouched when a partition (or the whole
  // dataset) is dropped.
  // Note: tmpFolder.newFolder() already creates the directory, so the previous
  // mkdirs() call (whose boolean result was silently ignored) was redundant.
  final File absolutePath = tmpFolder.newFolder();
  dsFrameworkUtil.createInstance("partitionedFileSet", pfsExternalInstance, PartitionedFileSetProperties.builder().setPartitioning(PARTITIONING_1).setBasePath(absolutePath.getPath()).setDataExternal(true).build());
  final PartitionedFileSet pfs = dsFrameworkUtil.getInstance(pfsExternalInstance);
  dsFrameworkUtil.newTransactionExecutor((TransactionAware) pfs).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      Assert.assertTrue(pfsBaseLocation.exists());
      // attempt to write a new partition - should fail
      try {
        pfs.getPartitionOutput(PARTITION_KEY);
        Assert.fail("External partitioned file set should not allow writing files");
      } catch (UnsupportedOperationException e) {
        // expected
      }
      // create an empty external file and add it as a partition; try-with-resources
      // guarantees the stream is closed even if an exception is thrown
      File someFile = new File(absolutePath, "some.file");
      try (OutputStream out = new FileOutputStream(someFile)) {
        // no content needed - only the file's existence matters
      }
      Assert.assertTrue(someFile.exists());
      pfs.addPartition(PARTITION_KEY, "some.file");
      Assert.assertNotNull(pfs.getPartition(PARTITION_KEY));
      Assert.assertTrue(pfs.getPartition(PARTITION_KEY).getLocation().exists());
      // now drop the partition and validate the file is still there
      pfs.dropPartition(PARTITION_KEY);
      Assert.assertNull(pfs.getPartition(PARTITION_KEY));
      Assert.assertTrue(someFile.exists());
    }
  });
  // drop the dataset and validate that the base dir still exists
  dsFrameworkUtil.deleteInstance(pfsExternalInstance);
  Assert.assertTrue(pfsBaseLocation.exists());
  Assert.assertTrue(absolutePath.isDirectory());
}
Aggregations