Use of com.palantir.common.exception.PalantirRuntimeException in the atlasdb project by Palantir.
Example: the runWithLockShouldTimeoutIfLockIsTaken method of the SchemaMutationLockIntegrationTest class.
@Test
public void runWithLockShouldTimeoutIfLockIsTaken() throws InterruptedException, ExecutionException, TimeoutException {
    // Only meaningful when check-and-set is available; otherwise skip the test.
    Assume.assumeTrue(casEnabled);
    // Hold the schema mutation lock so the second lock instance cannot acquire it.
    Semaphore blockingLock = blockSchemaMutationLock();
    try {
        secondSchemaMutationLock.runWithLock(() -> fail("The schema mutation lock should have been acquired by the first thread"));
        fail("Should have thrown an exception");
    } catch (PalantirRuntimeException exception) {
        // Catch the expected type directly instead of catch(Exception) + assertTrue(instanceof):
        // an unexpected exception now propagates with its full stack trace rather than failing
        // as an uninformative bare assertion error.
        assertThat(exception.getMessage(), containsString(expectedTimeoutErrorMessage));
    } finally {
        // Always release so later tests are not deadlocked by a leaked lock.
        blockingLock.release();
    }
}
Use of com.palantir.common.exception.PalantirRuntimeException in the atlasdb project by Palantir.
Example: the getCfDef method of the ColumnFamilyDefinitions class.
/**
 * Builds a Cassandra column family definition for the given table, applying AtlasDB tuning
 * (compression, bloom filter false-positive chance, compaction strategy, cache priority) on
 * top of the standard defaults. The raw metadata is generally obtained from the _metadata table.
 *
 * Warning to developers: you must update CKVS.isMatchingCf if you update this method
 */
@SuppressWarnings("CyclomaticComplexity")
static CfDef getCfDef(String keyspace, TableReference tableRef, int gcGraceSeconds, byte[] rawMetadata) {
    CfDef cf = getStandardCfDef(keyspace, AbstractKeyValueService.internalTableName(tableRef));

    // Tuning knobs start at their defaults and are overridden from table metadata when present.
    boolean negativeLookups = false;
    boolean appendHeavyAndReadLight = false;
    int explicitCompressionBlockSizeKb = 0;
    TableMetadataPersistence.CachePriority cachePriority = TableMetadataPersistence.CachePriority.WARM;
    if (!CassandraKeyValueServices.isEmptyOrInvalidMetadata(rawMetadata)) {
        TableMetadata tableMetadata = TableMetadata.BYTES_HYDRATOR.hydrateFromBytes(rawMetadata);
        negativeLookups = tableMetadata.hasNegativeLookups();
        appendHeavyAndReadLight = tableMetadata.isAppendHeavyAndReadLight();
        explicitCompressionBlockSizeKb = tableMetadata.getExplicitCompressionBlockSizeKB();
        cachePriority = tableMetadata.getCachePriority();
    }

    // Compression is always on. When the table requested no explicit block size we don't
    // really need compression nor anticipate it will garner us any gains (hence the minimal
    // chunk size), but this is how we can get "free" CRC checking.
    Map<String, String> compressionOptions = Maps.newHashMap();
    compressionOptions.put(CassandraConstants.CFDEF_COMPRESSION_TYPE_KEY, CassandraConstants.DEFAULT_COMPRESSION_TYPE);
    int chunkLengthKb = explicitCompressionBlockSizeKb != 0
            ? explicitCompressionBlockSizeKb
            : AtlasDbConstants.MINIMUM_COMPRESSION_BLOCK_SIZE_KB;
    compressionOptions.put(CassandraConstants.CFDEF_COMPRESSION_CHUNK_LENGTH_KEY, Integer.toString(chunkLengthKb));

    // Pick the bloom filter false-positive chance; append-heavy tables switch to size-tiered
    // compaction, which comes with its own pair of FP-chance constants.
    double falsePositiveChance;
    if (appendHeavyAndReadLight) {
        cf.setCompaction_strategy(CassandraConstants.SIZE_TIERED_COMPACTION_STRATEGY);
        // clear out the now nonsensical "keep it at 80MB per sstable" option from LCS
        cf.setCompaction_strategy_optionsIsSet(false);
        falsePositiveChance = negativeLookups
                ? CassandraConstants.NEGATIVE_LOOKUPS_SIZE_TIERED_BLOOM_FILTER_FP_CHANCE
                : CassandraConstants.DEFAULT_SIZE_TIERED_COMPACTION_BLOOM_FILTER_FP_CHANCE;
    } else if (negativeLookups) {
        falsePositiveChance = CassandraConstants.NEGATIVE_LOOKUPS_BLOOM_FILTER_FP_CHANCE;
    } else {
        falsePositiveChance = CassandraConstants.DEFAULT_LEVELED_COMPACTION_BLOOM_FILTER_FP_CHANCE;
    }

    switch (cachePriority) {
        case HOTTEST:
            cf.setPopulate_io_cache_on_flushIsSet(true);
            break;
        case COLDEST:
        case COLD:
        case WARM:
        case HOT:
            // no cache tuning for these priorities
            break;
        default:
            throw new PalantirRuntimeException("Unknown cache priority: " + cachePriority);
    }

    cf.setGc_grace_seconds(gcGraceSeconds);
    cf.setBloom_filter_fp_chance(falsePositiveChance);
    cf.setCompression_options(compressionOptions);
    return cf;
}
Aggregations