Use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
The class AbstractTransactionContext, method persist().
/**
* Calls {@link TransactionAware#commitTx()} on all registered {@link TransactionAware}s to persist pending changes.
*/
private void persist() throws TransactionFailureException {
  for (TransactionAware txAware : getTransactionAwares()) {
    boolean success = false;
    Throwable cause = null;
    try {
      success = txAware.commitTx();
    } catch (Throwable e) {
      cause = e;
    }
    if (!success) {
      abort(new TransactionFailureException(
        String.format("Unable to persist changes of transaction-aware '%s' for transaction %d.",
                      txAware.getTransactionAwareName(), currentTx.getTransactionId()), cause));
    }
  }
}
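persist() aborts the transaction as soon as any participant fails to commit. For illustration, here is a minimal sketch of a TransactionAware whose commitTx() flushes an in-memory buffer, assuming Tephra's TransactionAware interface; the class name and buffering logic are hypothetical, not CDAP code:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.tephra.Transaction;
import org.apache.tephra.TransactionAware;

// Illustrative participant: buffers change keys in memory and "persists" them
// in commitTx(), which is exactly the call persist() makes on each registered
// TransactionAware.
public class BufferingTransactionAware implements TransactionAware {
  private final List<byte[]> buffer = new ArrayList<>();
  private Transaction tx;

  void put(byte[] changeKey) {
    buffer.add(changeKey);
  }

  @Override
  public void startTx(Transaction tx) {
    this.tx = tx;
    buffer.clear();
  }

  @Override
  public void updateTx(Transaction tx) {
    this.tx = tx;
  }

  @Override
  public Collection<byte[]> getTxChanges() {
    // change keys, used by Tephra for conflict detection
    return buffer;
  }

  @Override
  public boolean commitTx() throws Exception {
    // Flush buffered changes to the underlying store here. Returning false
    // (or throwing) causes persist() to abort the whole transaction.
    buffer.clear();
    return true;
  }

  @Override
  public void postTxCommit() {
    tx = null;
  }

  @Override
  public boolean rollbackTx() throws Exception {
    buffer.clear();
    return true;
  }

  @Override
  public String getTransactionAwareName() {
    return "buffering-tx-aware";
  }
}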
Use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
The class MetadataDatasetTest, method testSearchDifferentEntityScope().
@Test
public void testSearchDifferentEntityScope() throws InterruptedException, TransactionFailureException {
  final ArtifactId sysArtifact = NamespaceId.SYSTEM.artifact("artifact", "1.0");
  final ArtifactId nsArtifact = new ArtifactId("ns1", "artifact", "1.0");
  final String multiWordKey = "multiword";
  final String multiWordValue = "aV1 av2 , - , av3 - av4_av5 av6";
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      dataset.setProperty(nsArtifact, multiWordKey, multiWordValue);
      dataset.setProperty(sysArtifact, multiWordKey, multiWordValue);
    }
  });
  final MetadataEntry systemArtifactEntry = new MetadataEntry(sysArtifact, multiWordKey, multiWordValue);
  final MetadataEntry nsArtifactEntry = new MetadataEntry(nsArtifact, multiWordKey, multiWordValue);
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      List<MetadataEntry> results =
        dataset.search("ns1", "aV5", ImmutableSet.of(EntityTypeSimpleName.ALL), SortInfo.DEFAULT,
                       0, Integer.MAX_VALUE, 1, null, false, EnumSet.of(EntityScope.USER)).getResults();
      // the result should not contain system entities
      Assert.assertEquals(Sets.newHashSet(nsArtifactEntry), Sets.newHashSet(results));
      results =
        dataset.search("ns1", "aV5", ImmutableSet.of(EntityTypeSimpleName.ALL), SortInfo.DEFAULT,
                       0, Integer.MAX_VALUE, 1, null, false, EnumSet.of(EntityScope.SYSTEM)).getResults();
      // the result should not contain user entities
      Assert.assertEquals(Sets.newHashSet(systemArtifactEntry), Sets.newHashSet(results));
      results =
        dataset.search("ns1", "aV5", ImmutableSet.of(EntityTypeSimpleName.ALL), SortInfo.DEFAULT,
                       0, Integer.MAX_VALUE, 1, null, false, EnumSet.allOf(EntityScope.class)).getResults();
      // the result should contain entities from both scopes
      Assert.assertEquals(Sets.newHashSet(nsArtifactEntry, systemArtifactEntry), Sets.newHashSet(results));
    }
  });
  // clean up
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      dataset.removeProperties(nsArtifact);
      dataset.removeProperties(sysArtifact);
    }
  });
}
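Since TransactionExecutor.Subroutine declares a single apply() method, the anonymous classes above can be collapsed into lambdas on Java 8+. A sketch of the setup step, assuming the same txnl executor:

// Equivalent to the first anonymous Subroutine above (Java 8+).
txnl.execute(() -> {
  dataset.setProperty(nsArtifact, multiWordKey, multiWordValue);
  dataset.setProperty(sysArtifact, multiWordKey, multiWordValue);
});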
Use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
The class MetadataDatasetTest, method testSearchIncludesSystemEntities().
@Test
public void testSearchIncludesSystemEntities() throws InterruptedException, TransactionFailureException {
  // Use the same artifact in two different namespaces - system and ns2
  final ArtifactId sysArtifact = NamespaceId.SYSTEM.artifact("artifact", "1.0");
  final ArtifactId ns2Artifact = new ArtifactId("ns2", "artifact", "1.0");
  final String multiWordKey = "multiword";
  final String multiWordValue = "aV1 av2 , - , av3 - av4_av5 av6";
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      dataset.setProperty(flow1, multiWordKey, multiWordValue);
      dataset.setProperty(sysArtifact, multiWordKey, multiWordValue);
      dataset.setProperty(ns2Artifact, multiWordKey, multiWordValue);
    }
  });
  // Perform the exact same multiword search in the 'ns1' namespace. It should return the system artifact
  // along with the matched entities in the 'ns1' namespace.
  final MetadataEntry flowMultiWordEntry = new MetadataEntry(flow1, multiWordKey, multiWordValue);
  final MetadataEntry systemArtifactEntry = new MetadataEntry(sysArtifact, multiWordKey, multiWordValue);
  final MetadataEntry ns2ArtifactEntry = new MetadataEntry(ns2Artifact, multiWordKey, multiWordValue);
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      List<MetadataEntry> results = searchByDefaultIndex("ns1", "aV5", ImmutableSet.of(EntityTypeSimpleName.ALL));
      Assert.assertEquals(Sets.newHashSet(flowMultiWordEntry, systemArtifactEntry), Sets.newHashSet(results));
      // search only programs - should only return the flow
      results = searchByDefaultIndex("ns1", multiWordKey + MetadataDataset.KEYVALUE_SEPARATOR + "aV5",
                                     ImmutableSet.of(EntityTypeSimpleName.PROGRAM));
      Assert.assertEquals(ImmutableList.of(flowMultiWordEntry), results);
      // search only artifacts - should only return the system artifact
      results = searchByDefaultIndex("ns1", multiWordKey + MetadataDataset.KEYVALUE_SEPARATOR + multiWordValue,
                                     ImmutableSet.of(EntityTypeSimpleName.ARTIFACT));
      // this query returns the system artifact 4 times, since the dataset returns a list with duplicates
      // for scoring purposes; convert to a Set for comparison
      Assert.assertEquals(Sets.newHashSet(systemArtifactEntry), Sets.newHashSet(results));
      // search all entities in namespace 'ns2' - should return the system artifact and the same artifact in ns2
      results = searchByDefaultIndex("ns2", multiWordKey + MetadataDataset.KEYVALUE_SEPARATOR + "aV4",
                                     ImmutableSet.of(EntityTypeSimpleName.ALL));
      Assert.assertEquals(Sets.newHashSet(systemArtifactEntry, ns2ArtifactEntry), Sets.newHashSet(results));
      // search only programs in namespace 'ns2' - should return empty
      results = searchByDefaultIndex("ns2", "aV*", ImmutableSet.of(EntityTypeSimpleName.PROGRAM));
      Assert.assertTrue(results.isEmpty());
      // search all entities in namespace 'ns3' - should return only the system artifact
      results = searchByDefaultIndex("ns3", "av*", ImmutableSet.of(EntityTypeSimpleName.ALL));
      Assert.assertEquals(Sets.newHashSet(systemArtifactEntry), Sets.newHashSet(results));
      // search the system namespace for all entities - should return only the system artifact
      results = searchByDefaultIndex(NamespaceId.SYSTEM.getEntityName(), "av*", ImmutableSet.of(EntityTypeSimpleName.ALL));
      Assert.assertEquals(Sets.newHashSet(systemArtifactEntry), Sets.newHashSet(results));
    }
  });
  // clean up
  txnl.execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      dataset.removeProperties(flow1);
      dataset.removeProperties(sysArtifact);
    }
  });
}
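The scoped searches above glue the property key and a value fragment together with MetadataDataset.KEYVALUE_SEPARATOR. A sketch of the resulting query strings, assuming the separator is the ':' character (an assumption; check the MetadataDataset source for the actual constant):

// Hypothetical expansion of the query terms used above, assuming ':' as the separator.
String programQuery  = multiWordKey + MetadataDataset.KEYVALUE_SEPARATOR + "aV5";          // "multiword:aV5"
String artifactQuery = multiWordKey + MetadataDataset.KEYVALUE_SEPARATOR + multiWordValue; // "multiword:aV1 av2 , - , av3 - av4_av5 av6"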
Use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
The class DatasetBasedStreamSizeScheduleStore, method list().
/**
* @return a list of all the schedules and their states present in the store
*/
public synchronized List<StreamSizeScheduleState> list() throws InterruptedException, TransactionFailureException {
  final List<StreamSizeScheduleState> scheduleStates = Lists.newArrayList();
  factory.createExecutor(ImmutableList.of((TransactionAware) table)).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      try (Scanner scan = getScannerWithPrefix(table, KEY_PREFIX)) {
        Row row;
        while ((row = scan.next()) != null) {
          byte[] scheduleBytes = row.get(SCHEDULE_COL);
          byte[] baseSizeBytes = row.get(BASE_SIZE_COL);
          byte[] baseTsBytes = row.get(BASE_TS_COL);
          byte[] lastRunSizeBytes = row.get(LAST_RUN_SIZE_COL);
          byte[] lastRunTsBytes = row.get(LAST_RUN_TS_COL);
          byte[] activeBytes = row.get(ACTIVE_COL);
          byte[] propertyBytes = row.get(PROPERTIES_COL);
          if (isInvalidRow(row)) {
            LIMITED_LOG.debug("StreamSizeSchedule entry with row key {} does not have all columns.",
                              Bytes.toString(row.getRow()));
            continue;
          }
          String rowKey = Bytes.toString(row.getRow());
          String[] splits = rowKey.split(":");
          ProgramId program;
          if (splits.length == 7) {
            // New row key format (with application version):
            // streamSizeSchedule:namespace:application:version:type:program:schedule
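            // e.g. (hypothetical key, for illustration only):
            // streamSizeSchedule:ns1:PurchaseApp:-SNAPSHOT:FLOW:PurchaseFlow:tenMbSchedule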
            program = new ApplicationId(splits[1], splits[2], splits[3])
              .program(ProgramType.valueOf(splits[4]), splits[5]);
          } else if (splits.length == 6) {
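            // Old row key format (without application version), deduced from the split indices below:
            // streamSizeSchedule:namespace:application:type:program:schedule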
            program = new ApplicationId(splits[1], splits[2])
              .program(ProgramType.valueOf(splits[3]), splits[4]);
          } else {
            continue;
          }
          SchedulableProgramType programType = program.getType().getSchedulableType();
          StreamSizeSchedule schedule = GSON.fromJson(Bytes.toString(scheduleBytes), StreamSizeSchedule.class);
          long baseSize = Bytes.toLong(baseSizeBytes);
          long baseTs = Bytes.toLong(baseTsBytes);
          long lastRunSize = Bytes.toLong(lastRunSizeBytes);
          long lastRunTs = Bytes.toLong(lastRunTsBytes);
          boolean active = Bytes.toBoolean(activeBytes);
          Map<String, String> properties = Maps.newHashMap();
          if (propertyBytes != null) {
            properties = GSON.fromJson(Bytes.toString(propertyBytes), STRING_MAP_TYPE);
          }
          StreamSizeScheduleState scheduleState =
            new StreamSizeScheduleState(program, programType, schedule, properties,
                                        baseSize, baseTs, lastRunSize, lastRunTs, active);
          scheduleStates.add(scheduleState);
          LOG.debug("StreamSizeSchedule found in store: {}", scheduleState);
        }
      }
    }
  });
  return scheduleStates;
}
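A usage sketch, assuming a DatasetBasedStreamSizeScheduleStore instance named scheduleStore; like the LOG.debug call above, it relies only on the state's toString():

// Hypothetical caller: enumerate and print every stored schedule state.
for (StreamSizeScheduleState state : scheduleStore.list()) {
  System.out.println(state);
}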
Use of org.apache.tephra.TransactionFailureException in project cdap by caskdata.
The class DatasetBasedStreamSizeScheduleStore, method upgrade().
/**
 * Adds the application version to the StreamSizeSchedule row keys in the SchedulerStore.
 *
 * @throws InterruptedException
 * @throws IOException
 * @throws DatasetManagementException
 */
public void upgrade() throws InterruptedException, IOException, DatasetManagementException {
  // Wait until the store is initialized.
  // Use a new instance of the table, since Table is not thread-safe.
  Table metaTable = null;
  while (metaTable == null) {
    try {
      metaTable = tableUtil.getMetaTable();
    } catch (Exception e) {
      // ignore exception and retry
    }
    TimeUnit.SECONDS.sleep(10);
  }
  if (isUpgradeComplete()) {
    LOG.info("{} is already upgraded.", NAME);
    return;
  }
  final AtomicInteger maxNumberUpdateRows = new AtomicInteger(1000);
  final AtomicInteger sleepTimeInSecs = new AtomicInteger(60);
  LOG.info("Starting upgrade of {}.", NAME);
  while (true) {
    sleepTimeInSecs.set(60);
    try {
      if (executeUpgradeInTransaction(table, maxNumberUpdateRows)) {
        break;
      }
    } catch (TransactionFailureException e) {
      if (e instanceof TransactionConflictException) {
        LOG.debug("Upgrade step faced a transaction conflict. Retrying operation now.", e);
        sleepTimeInSecs.set(10);
      } else if (e instanceof TransactionNotInProgressException) {
        int currMaxRows = maxNumberUpdateRows.get();
        if (currMaxRows > 500) {
          maxNumberUpdateRows.decrementAndGet();
        } else {
          LOG.warn("Could not complete upgrade of {}, tried for 500 times", NAME);
          return;
        }
        sleepTimeInSecs.set(10);
        LOG.debug("Upgrade step faced a transaction timeout. Max update rows is now set to: {}; "
                  + "retrying the operation.", maxNumberUpdateRows.get(), e);
      } else {
        LOG.error("Upgrade step faced exception. Will retry operation after some delay.", e);
        sleepTimeInSecs.set(60);
      }
    }
    TimeUnit.SECONDS.sleep(sleepTimeInSecs.get());
  }
  LOG.info("Upgrade of {} is complete.", NAME);
}
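The loop above is an adaptive retry pattern: conflicts and timeouts shorten the sleep to 10 seconds, timeouts also shrink the per-transaction batch size, and unexpected failures back off for the full minute. A self-contained sketch of that pattern (all names here are hypothetical, not CDAP's):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public final class AdaptiveRetrySketch {

  /** One batch of upgrade work; returns true when nothing is left to do. */
  interface Step {
    boolean run(int batchSize) throws Exception;
  }

  // Hypothetical re-creation of the loop in upgrade(): retry until done,
  // sleep less after retryable failures, and shrink the batch size on failure.
  static void runToCompletion(Step step) throws InterruptedException {
    AtomicInteger batchSize = new AtomicInteger(1000);
    while (true) {
      int sleepSecs = 60;
      try {
        if (step.run(batchSize.get())) {
          return; // all rows upgraded
        }
      } catch (Exception e) {
        if (batchSize.get() > 500) {
          batchSize.decrementAndGet(); // mirrors the timeout branch above
        }
        sleepSecs = 10; // retry sooner after a failure
      }
      TimeUnit.SECONDS.sleep(sleepSecs);
    }
  }
}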