Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class OnlineIndexerMultiTargetTest, method testMultiTargetPartlyBuildFailure.
@Test
public void testMultiTargetPartlyBuildFailure() {
    // Throw when one index has a different type stamp
    final FDBStoreTimer timer = new FDBStoreTimer();
    final int numRecords = 107;
    final int chunkSize = 17;

    List<Index> indexes = new ArrayList<>();
    indexes.add(new Index("indexD", new GroupingKeyExpression(EmptyKeyExpression.EMPTY, 0), IndexTypes.COUNT));
    indexes.add(new Index("indexA", field("num_value_2"), EmptyKeyExpression.EMPTY, IndexTypes.VALUE, IndexOptions.UNIQUE_OPTIONS));
    indexes.add(new Index("indexB", field("num_value_3_indexed"), IndexTypes.VALUE));
    indexes.add(new Index("indexC", field("num_value_unique"), EmptyKeyExpression.EMPTY, IndexTypes.VALUE, IndexOptions.UNIQUE_OPTIONS));

    openSimpleMetaData();
    populateData(numRecords);
    FDBRecordStoreTestBase.RecordMetaDataHook hook = allIndexesHook(indexes);
    openSimpleMetaData(hook);
    disableAll(indexes);

    // 1. partly build multi
    buildIndexAndCrashHalfway(chunkSize, 2, timer, OnlineIndexer.newBuilder()
            .setTargetIndexes(indexes));

    // 2. let one index continue ahead
    timer.reset();
    buildIndexAndCrashHalfway(chunkSize, 2, timer, OnlineIndexer.newBuilder()
            .setIndex(indexes.get(2)));

    // 3. assert mismatch type stamp
    try (OnlineIndexer indexBuilder = OnlineIndexer.newBuilder()
            .setDatabase(fdb).setMetaData(metaData).setSubspace(subspace)
            .setTargetIndexes(indexes)
            .setTimer(timer)
            .setLimit(chunkSize)
            .setIndexingPolicy(OnlineIndexer.IndexingPolicy.newBuilder()
                    .setIfMismatchPrevious(OnlineIndexer.IndexingPolicy.DesiredAction.ERROR)
                    .build())
            .build()) {
        RecordCoreException e = assertThrows(RecordCoreException.class, indexBuilder::buildIndex);
        assertTrue(e.getMessage().contains("This index was partly built by another method"));
    }
}
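The ERROR action above makes a resumed build fail fast when the persisted indexing type stamp no longer matches the requested build method. A caller that prefers to recover could catch the RecordCoreException and retry with a policy that discards the partial progress. Below is a minimal sketch of that pattern, reusing only the builder calls shown in the test; the helper name and parameters are hypothetical, and it assumes the IndexingPolicy enum also offers DesiredAction.REBUILD.

// Hypothetical helper: try to continue a multi-target build; if a previous
// partial build used a different method, fall back to rebuilding from scratch.
// fdb, metaData, and subspace stand in for the test fixture's fields.
static void buildOrRebuild(FDBDatabase fdb, RecordMetaData metaData, Subspace subspace, List<Index> indexes) {
    try (OnlineIndexer indexer = OnlineIndexer.newBuilder()
            .setDatabase(fdb).setMetaData(metaData).setSubspace(subspace)
            .setTargetIndexes(indexes)
            .setIndexingPolicy(OnlineIndexer.IndexingPolicy.newBuilder()
                    .setIfMismatchPrevious(OnlineIndexer.IndexingPolicy.DesiredAction.ERROR)
                    .build())
            .build()) {
        indexer.buildIndex();
    } catch (RecordCoreException e) {
        // Assumed recovery path: DesiredAction.REBUILD restarts the build instead of erroring.
        try (OnlineIndexer indexer = OnlineIndexer.newBuilder()
                .setDatabase(fdb).setMetaData(metaData).setSubspace(subspace)
                .setTargetIndexes(indexes)
                .setIndexingPolicy(OnlineIndexer.IndexingPolicy.newBuilder()
                        .setIfMismatchPrevious(OnlineIndexer.IndexingPolicy.DesiredAction.REBUILD)
                        .build())
                .build()) {
            indexer.buildIndex();
        }
    }
}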
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class OnlineIndexerSimpleTest, method reincreaseLimit.
@Test
public void reincreaseLimit() {
    // Non-retriable error that is in lessen work codes.
    Supplier<RuntimeException> createException =
            () -> new RecordCoreException("Non-retriable", new FDBException("transaction_too_large", 2101));
    Queue<Pair<Integer, Supplier<RuntimeException>>> queue = new LinkedList<>();
    // failures until it hits 1
    for (int i = 100; i > 1; i = (3 * i) / 4) {
        queue.add(Pair.of(i, createException));
    }
    // success for a while
    for (int i = 0; i < 10; i++) {
        queue.add(Pair.of(1, null));
    }
    // queue size = 23
    // now starts re-increasing
    queue.add(Pair.of(2, null));
    queue.add(Pair.of(3, null));
    queue.add(Pair.of(4, null));
    for (int i = 5; i < 100; i = (i * 4) / 3) {
        queue.add(Pair.of(i, null));
    }
    // queue size = 38
    // does not pass original max
    queue.add(Pair.of(100, null));
    queue.add(Pair.of(100, null));
    queue.add(Pair.of(100, null));
    for (int i = 100; i > 42; i = (3 * i) / 4) {
        queue.add(Pair.of(i, createException));
    }
    // success for a while
    for (int i = 0; i < 10; i++) {
        queue.add(Pair.of(42, null));
    }
    // queue size = 54
    // fail once
    queue.add(Pair.of(56, createException));
    for (int i = 0; i < 10; i++) {
        queue.add(Pair.of(42, null));
    }
    // queue size = 65
    queue.add(Pair.of(56, createException));
    queue.add(Pair.of(42, null));
    reincreaseLimit(queue, index -> OnlineIndexer.newBuilder()
            .setDatabase(fdb).setMetaData(metaData).setIndex(index).setSubspace(subspace)
            .setLimit(100)
            .setMaxRetries(queue.size() + 3)
            .setRecordsPerSecond(10000)
            .setIncreaseLimitAfter(10)
            .setMdcContext(ImmutableMap.of("mdcKey", "my cool mdc value"))
            .setMaxAttempts(3)
            .setProgressLogIntervalMillis(0)
            .build());
}
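The queue above encodes the limit schedule the indexer is expected to follow: on a failure whose error code is in the lessen-work set, the limit drops to roughly three quarters of its previous value (never below 1); after setIncreaseLimitAfter(10) consecutive successes it grows again by roughly four thirds (at least +1), capped at the original setLimit(100). The standalone simulation below reproduces that schedule; the exact formulas are an assumption inferred from the queued values, not code copied from OnlineIndexer's internals.

// Reproduces the decrease/increase arithmetic that the expected-limit queue encodes.
public final class LimitScheduleDemo {
    public static void main(String[] args) {
        final int maxLimit = 100;
        int limit = maxLimit;
        // Repeated failures: limit decays by 3/4, but never below 1.
        // Prints 75, 56, 42, 31, 23, 17, 12, 9, 6, 4, 3, 2, 1 -- the first loop of the test.
        while (limit > 1) {
            limit = Math.max(1, (3 * limit) / 4);
            System.out.println("after failure: " + limit);
        }
        // Repeated success streaks: limit grows by 4/3 (at least +1), capped at the max.
        // Prints 2, 3, 4, 5, 6, 8, 10, 13, 17, 22, 29, 38, 50, 66, 88, 100 -- the re-increase phase.
        while (limit < maxLimit) {
            limit = Math.min(maxLimit, Math.max(limit + 1, (4 * limit) / 3));
            System.out.println("after 10 successes: " + limit);
        }
    }
}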
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class SplitHelperTest, method loadSingleRecords.
private void loadSingleRecords(boolean splitLongRecords, boolean omitUnsplitSuffix,
                               @Nonnull LoadRecordFunction loadRecordFunction) throws Exception {
    final byte[] globalVersion = "-hastings-".getBytes(Charsets.US_ASCII);
    try (FDBRecordContext context = openContext()) {
        // No record
        loadRecordFunction.load(context, Tuple.from(1042L), null, null);

        // One unsplit record
        FDBStoredSizes sizes1 = writeDummyRecord(context, Tuple.from(1066L), 1, omitUnsplitSuffix);
        assertThat(sizes1.isSplit(), is(false));
        loadRecordFunction.load(context, Tuple.from(1066L), sizes1, HUMPTY_DUMPTY);

        if (!omitUnsplitSuffix) {
            // One record with version
            FDBRecordVersion version2 = FDBRecordVersion.complete(globalVersion, context.claimLocalVersion());
            FDBStoredSizes sizes2 = writeDummyRecord(context, Tuple.from(1087L), version2, 1);
            assertThat(sizes2.isVersionedInline(), is(true));
            loadRecordFunction.load(context, Tuple.from(1087L), sizes2, HUMPTY_DUMPTY, version2);

            // One version but missing record
            FDBRecordVersion version3 = FDBRecordVersion.complete(globalVersion, context.claimLocalVersion());
            writeDummyRecord(context, Tuple.from(1100L), version3, 1);
            context.ensureActive().clear(subspace.pack(Tuple.from(1100L, SplitHelper.UNSPLIT_RECORD)));
            assertThrows(SplitHelper.FoundSplitWithoutStartException.class,
                    () -> loadRecordFunction.load(context, Tuple.from(1100L), null, null, version3));
        }
        if (splitLongRecords) {
            // One split record
            FDBStoredSizes sizes4 = writeDummyRecord(context, Tuple.from(1135L), MEDIUM_COPIES, false);
            assertEquals(MEDIUM_COPIES, sizes4.getKeyCount());
            loadRecordFunction.load(context, Tuple.from(1135L), sizes4, MEDIUM_STRING);

            // One split record but then delete the last split point (no way to distinguish this from just inserting one fewer split)
            writeDummyRecord(context, Tuple.from(1135L), MEDIUM_COPIES + 1, false);
            context.ensureActive().clear(subspace.pack(Tuple.from(1135L, SplitHelper.START_SPLIT_RECORD + MEDIUM_COPIES)));
            loadRecordFunction.load(context, Tuple.from(1135L), sizes4, MEDIUM_STRING);

            // One split record then delete the first split point
            writeDummyRecord(context, Tuple.from(1189L), MEDIUM_COPIES, false);
            context.ensureActive().clear(subspace.pack(Tuple.from(1189L, SplitHelper.START_SPLIT_RECORD)));
            assertThrows(SplitHelper.FoundSplitWithoutStartException.class,
                    () -> loadRecordFunction.load(context, Tuple.from(1189L), null, null));
            // One split record then delete a middle split point
            writeDummyRecord(context, Tuple.from(1199L), MEDIUM_COPIES, false);
            context.ensureActive().clear(subspace.pack(Tuple.from(1199L, SplitHelper.START_SPLIT_RECORD + 2)));
            RecordCoreException err7 = assertThrows(RecordCoreException.class,
                    () -> loadRecordFunction.load(context, Tuple.from(1199L), null, null));
            assertThat(err7.getMessage(), containsString("Split record segments out of order"));

            // One split record then add an extra key in the middle
            writeDummyRecord(context, Tuple.from(1216L), MEDIUM_COPIES, false);
            context.ensureActive().set(subspace.pack(Tuple.from(1216L, SplitHelper.START_SPLIT_RECORD + 2, 0L)), HUMPTY_DUMPTY);
            RecordCoreException err8 = assertThrows(RecordCoreException.class,
                    () -> loadRecordFunction.load(context, Tuple.from(1216L), null, null));
            assertThat(err8.getMessage(), anyOf(
                    containsString("Expected only a single key extension"),
                    containsString("Split record segments out of order")));

            // One split record with version then delete the first split point
            FDBRecordVersion version9 = FDBRecordVersion.complete(globalVersion, context.claimLocalVersion());
            writeDummyRecord(context, Tuple.from(1272L), version9, MEDIUM_COPIES);
            context.ensureActive().clear(subspace.pack(Tuple.from(1272L, SplitHelper.START_SPLIT_RECORD)));
            assertThrows(SplitHelper.FoundSplitWithoutStartException.class,
                    () -> loadRecordFunction.load(context, Tuple.from(1272L), null, null, version9));
        }
        commit(context);
    }
}
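All of the corruption cases above manipulate SplitHelper's on-disk layout, where a record lives at keys of the form (primaryKey, splitPoint): one suffix for an unsplit record, and suffixes counting up from a start value for the chunks of a split one, which is why clearing the first split point triggers FoundSplitWithoutStartException. Below is a standalone sketch of that key layout using only the tuple layer; the constant values mirror what SplitHelper.UNSPLIT_RECORD and SplitHelper.START_SPLIT_RECORD appear to be, but they and the 100-byte chunk size are illustrative assumptions, not SplitHelper's actual configuration.

import com.apple.foundationdb.tuple.Tuple;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

// Illustrates the (primaryKey, splitPoint) key layout that the test clears and overwrites.
public final class SplitLayoutDemo {
    static final long UNSPLIT_RECORD = 0L;      // assumed value of SplitHelper.UNSPLIT_RECORD
    static final long START_SPLIT_RECORD = 1L;  // assumed value of SplitHelper.START_SPLIT_RECORD
    static final int CHUNK_SIZE = 100;          // toy value; the real split size is much larger

    // Build the key/value pairs that would store one serialized record.
    static Map<Tuple, byte[]> layOut(long recNo, byte[] serialized) {
        Map<Tuple, byte[]> kvs = new LinkedHashMap<>();
        if (serialized.length <= CHUNK_SIZE) {
            // Short records get a single key with the unsplit suffix.
            kvs.put(Tuple.from(recNo, UNSPLIT_RECORD), serialized);
        } else {
            // Long records are chunked under consecutive split points.
            long splitPoint = START_SPLIT_RECORD;
            for (int off = 0; off < serialized.length; off += CHUNK_SIZE) {
                int end = Math.min(off + CHUNK_SIZE, serialized.length);
                kvs.put(Tuple.from(recNo, splitPoint++), Arrays.copyOfRange(serialized, off, end));
            }
        }
        return kvs;
    }

    public static void main(String[] args) {
        // Prints ("1135", 1), ("1135", 2), ("1135", 3): deleting the suffix-1 key
        // would leave exactly the "split without start" shape the test asserts on.
        layOut(1135L, new byte[250]).keySet().forEach(System.out::println);
    }
}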
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class OnlineIndexerBuildVersionIndexTest, method versionRebuild.
private void versionRebuild(@Nonnull List<TestRecords1Proto.MySimpleRecord> records,
                            @Nullable List<TestRecords1Proto.MySimpleRecord> recordsWhileBuilding,
                            int agents, boolean overlap) {
    final Index index = new Index("newVersionIndex", concat(field("num_value_2"), VersionKeyExpression.VERSION), IndexTypes.VERSION);
    final Function<FDBQueriedRecord<Message>, Tuple> projection = rec -> {
        TestRecords1Proto.MySimpleRecord simple = TestRecords1Proto.MySimpleRecord.newBuilder().mergeFrom(rec.getRecord()).build();
        Integer numValue2 = simple.hasNumValue2() ? simple.getNumValue2() : null;
        FDBRecordVersion version = rec.hasVersion() ? rec.getVersion() : null;
        if (version != null) {
            assertTrue(version.isComplete());
        }
        return Tuple.from(numValue2, (version == null) ? null : version.toVersionstamp());
    };
    List<RecordQuery> queries = records.stream().map(record -> {
        Integer value2 = record.hasNumValue2() ? record.getNumValue2() : null;
        return RecordQuery.newBuilder()
                .setRecordType("MySimpleRecord")
                .setFilter(value2 != null
                           ? Query.field("num_value_2").equalsValue(record.getNumValue2())
                           : Query.field("num_value_2").isNull())
                .setSort(VersionKeyExpression.VERSION)
                .build();
    }).collect(Collectors.toList());
    Function<TestRecords1Proto.MySimpleRecord, Integer> indexValue = msg -> msg.hasNumValue2() ? msg.getNumValue2() : null;
    Map<Integer, List<Message>> valueMap = group(records, indexValue);
    Map<Long, FDBRecordVersion> versionMap = new HashMap<>(records.size() + (recordsWhileBuilding == null ? 0 : recordsWhileBuilding.size()));
    AtomicReference<FDBRecordVersion> greatestVersion = new AtomicReference<>(null);
    final Runnable beforeBuild = () -> {
        try (FDBRecordContext context = openContext()) {
            for (int i = 0; i < queries.size(); i++) {
                Integer value2 = records.get(i).hasNumValue2() ? records.get(i).getNumValue2() : null;
                try {
                    executeQuery(queries.get(i), "Index(newVersionIndex [[" + value2 + "],[" + value2 + "]])", valueMap.get(value2));
fail("somehow executed query with new index before build");
} catch (RecordCoreException e) {
assertEquals("Cannot sort without appropriate index: Version", e.getMessage());
}
}
// Load all the version information for the records that were initially there.
for (TestRecords1Proto.MySimpleRecord simple : records) {
recordStore.loadRecordVersion(Tuple.from(simple.getRecNo())).ifPresent(version -> {
versionMap.put(simple.getRecNo(), version);
if (greatestVersion.get() == null || version.compareTo(greatestVersion.get()) > 0) {
greatestVersion.set(version);
}
});
}
context.commit();
}
};
List<TestRecords1Proto.MySimpleRecord> updatedRecords;
List<RecordQuery> updatedQueries;
Map<Integer, List<Message>> updatedValueMap;
if (recordsWhileBuilding == null || recordsWhileBuilding.size() == 0) {
updatedRecords = records;
updatedQueries = queries;
updatedValueMap = valueMap;
} else {
updatedRecords = updated(records, recordsWhileBuilding);
updatedQueries = updatedRecords.stream().map(record -> {
Integer value2 = (record.hasNumValue2()) ? record.getNumValue2() : null;
return RecordQuery.newBuilder().setRecordType("MySimpleRecord").setFilter(value2 != null ? Query.field("num_value_2").equalsValue(record.getNumValue2()) : Query.field("num_value_2").isNull()).setSort(VersionKeyExpression.VERSION).build();
}).collect(Collectors.toList());
updatedValueMap = group(updatedRecords, indexValue);
}
Map<Long, FDBRecordVersion> updatedVersionMap = new HashMap<>(versionMap.size());
Set<Long> newRecordKeys = (recordsWhileBuilding == null) ? Collections.emptySet() : recordsWhileBuilding.stream().map(TestRecords1Proto.MySimpleRecord::getRecNo).collect(Collectors.toSet());
Runnable afterBuild = new Runnable() {
@SuppressWarnings("try")
@Override
public void run() {
try (FDBRecordContext context = openContext()) {
// The build job shouldn't affect the reads.
for (int i = 0; i < updatedQueries.size(); i++) {
Integer value2 = (updatedRecords.get(i).hasNumValue2()) ? updatedRecords.get(i).getNumValue2() : null;
try {
                        executeQuery(updatedQueries.get(i), "Index(newVersionIndex [[" + value2 + "],[" + value2 + "]])", updatedValueMap.get(value2));
fail("somehow executed query with new index before readable");
} catch (RecordCoreException e) {
assertEquals("Cannot sort without appropriate index: Version", e.getMessage());
}
}
// Load all the version information for records that are there now and that values are sane.
for (TestRecords1Proto.MySimpleRecord simple : updatedRecords) {
recordStore.loadRecordVersion(Tuple.from(simple.getRecNo())).ifPresent(version -> {
assertTrue(version.isComplete());
if (newRecordKeys.contains(simple.getRecNo())) {
assertThat(version, greaterThan(greatestVersion.get()));
if (versionMap.containsKey(simple.getRecNo())) {
assertThat(version, greaterThan(versionMap.get(simple.getRecNo())));
}
} else {
if (versionMap.containsKey(simple.getRecNo())) {
assertEquals(versionMap.get(simple.getRecNo()), version);
}
}
updatedVersionMap.put(simple.getRecNo(), version);
});
}
}
}
};
Runnable afterReadable = () -> {
Descriptors.FieldDescriptor recNoFieldDescriptor = TestRecords1Proto.MySimpleRecord.getDescriptor().findFieldByName("rec_no");
try (FDBRecordContext context = openContext()) {
for (int i = 0; i < updatedQueries.size(); i++) {
Integer value2 = (updatedRecords.get(i).hasNumValue2()) ? updatedRecords.get(i).getNumValue2() : null;
List<Tuple> sortedValues = updatedValueMap.get(value2).stream().map(msg -> {
FDBRecordVersion version = updatedVersionMap.get(((Number) msg.getField(recNoFieldDescriptor)).longValue());
return Tuple.from(value2, version == null ? null : version.toVersionstamp());
}).sorted().collect(Collectors.toList());
executeQuery(updatedQueries.get(i), "Index(newVersionIndex [[" + value2 + "],[" + value2 + "]])", sortedValues, projection);
}
context.commit();
}
};
singleRebuild(records, recordsWhileBuilding, agents, overlap, false, index, beforeBuild, afterBuild, afterReadable);
}
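The beforeBuild hook's version bookkeeping (remembering each record's stored version plus the greatest one seen) is what later lets afterBuild assert that records written during the build received strictly newer versions. Below is a minimal sketch of that bookkeeping factored into a reusable helper, using only loadRecordVersion and compareTo as they appear above; the class, its name, and its parameters are hypothetical, and the caller is assumed to hold an open record context.

import com.apple.foundationdb.record.provider.foundationdb.FDBRecordStore;
import com.apple.foundationdb.record.provider.foundationdb.FDBRecordVersion;
import com.apple.foundationdb.tuple.Tuple;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

// Hypothetical helper mirroring the beforeBuild loop's bookkeeping.
public final class VersionSnapshot {
    // Primary key -> stored version, for the records that currently have one.
    public final Map<Long, FDBRecordVersion> versionMap = new HashMap<>();
    // The greatest version seen, or null if no record had a version.
    public FDBRecordVersion greatestVersion;

    public static VersionSnapshot take(FDBRecordStore recordStore, Collection<Long> recNos) {
        VersionSnapshot snapshot = new VersionSnapshot();
        for (long recNo : recNos) {
            recordStore.loadRecordVersion(Tuple.from(recNo)).ifPresent(version -> {
                snapshot.versionMap.put(recNo, version);
                if (snapshot.greatestVersion == null || version.compareTo(snapshot.greatestVersion) > 0) {
                    snapshot.greatestVersion = version;
                }
            });
        }
        return snapshot;
    }
}

A version written after the build began should then compare greater than snapshot.greatestVersion, which is exactly the assertThat(version, greaterThan(greatestVersion.get())) check in afterBuild.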
Use of com.apple.foundationdb.record.RecordCoreException in project fdb-record-layer by FoundationDB.
From the class FileSorter, method saveToNextFile.
@SuppressWarnings("PMD.CompareObjectsWithEquals")
private void saveToNextFile(int maxNumFiles) {
    final long startTime = System.nanoTime();
    final boolean compress = adapter.isCompressed();
    final java.security.Key encryptionKey = adapter.getEncryptionKey();
    Cipher cipher = null;
    if (!mapSorter.getMap().isEmpty()) {
        File file;
        try {
            file = adapter.generateFilename();
            try (FileOutputStream fileStream = new FileOutputStream(file)) {
                final FileChannel fileChannel = fileStream.getChannel();
                final CodedOutputStream headerStream = CodedOutputStream.newInstance(fileStream);
                // To stay the same size, field existence must not change.
                final RecordSortingProto.SortFileHeader.Builder fileHeader = RecordSortingProto.SortFileHeader.newBuilder()
                        .setVersion(SORT_FILE_VERSION)
                        .setMetaDataVersion(adapter.getMetaDataVersion())
                        .setNumberOfRecords(0)
                        .setNumberOfSections(0);
                headerStream.writeMessageNoTag(fileHeader.build());
                final RecordSortingProto.SortSectionHeader.Builder sectionHeader = RecordSortingProto.SortSectionHeader.newBuilder()
                        .setNumberOfRecords(0)
                        .setNumberOfBytes(0);
                if (encryptionKey != null) {
                    final String cipherName = adapter.getEncryptionCipherName();
                    if (cipherName != null) {
                        cipher = CipherPool.borrowCipher(cipherName);
                        initCipherEncrypt(cipher, encryptionKey, adapter.getSecureRandom(), sectionHeader);
                    }
                }
                headerStream.writeMessageNoTag(sectionHeader.build());
                final long headerEnd = headerStream.getTotalBytesWritten();
                final OutputStream outputStream;
                final CodedOutputStream entryStream;
                if (compress || cipher != null) {
                    headerStream.flush();
                    outputStream = wrapOutputStream(fileStream, cipher, compress);
                    entryStream = CodedOutputStream.newInstance(outputStream);
                } else {
                    outputStream = fileStream;
                    entryStream = headerStream;
                }
                if (timer != null) {
                    timer.recordSinceNanoTime(SortEvents.Events.FILE_SORT_OPEN_FILE, startTime);
                }
                int numberOfRecords = 0;
                for (Map.Entry<K, V> keyAndValue : mapSorter.getMap().entrySet()) {
                    final long recordStartTime = System.nanoTime();
                    entryStream.writeByteArrayNoTag(adapter.serializeKey(keyAndValue.getKey()));
                    adapter.writeValue(keyAndValue.getValue(), entryStream);
                    numberOfRecords++;
                    if (timer != null) {
                        timer.recordSinceNanoTime(SortEvents.Events.FILE_SORT_SAVE_RECORD, recordStartTime);
                    }
                }
                entryStream.flush();
                if (outputStream != fileStream) {
                    outputStream.close();
                }
                final long fileLength = fileChannel.position();
                fileChannel.position(0);
                fileHeader.setNumberOfSections(1).setNumberOfRecords(numberOfRecords);
                headerStream.writeMessageNoTag(fileHeader.build());
                sectionHeader.setNumberOfRecords(numberOfRecords).setNumberOfBytes(fileLength - headerEnd);
                headerStream.writeMessageNoTag(sectionHeader.build());
                headerStream.flush();
                if (fileChannel.position() != headerEnd) {
                    throw new RecordCoreException("header size changed");
                }
                fileChannel.position(fileLength);
                if (timer != null) {
                    timer.increment(SortEvents.Counts.FILE_SORT_FILE_BYTES, (int) fileLength);
                }
            }
        } catch (IOException | GeneralSecurityException ex) {
            throw new RecordCoreException(ex);
        } finally {
            if (cipher != null) {
                CipherPool.returnCipher(cipher);
            }
        }
        files.add(file);
        mapSorter.getMap().clear();
    }
    if (files.size() > maxNumFiles) {
        File file;
        try {
            file = adapter.generateFilename();
            merge(files, file);
        } catch (IOException | GeneralSecurityException ex) {
            throw new RecordCoreException(ex);
        }
        files.clear();
        files.add(file);
    }
}
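The trickiest invariant here is flagged by the comment "To stay the same size, field existence must not change": the headers are first written with placeholder counts, the section body is streamed, and the channel then seeks back to rewrite both headers in place, so the rewritten protobuf messages must serialize to exactly the same number of bytes (hence the explicit "header size changed" check, since protobuf varints grow with larger values unless every field is pre-populated). Below is a standalone sketch of the same seek-back pattern using a fixed-width binary header instead of protobuf, which sidesteps the varint size hazard entirely; the file name and entries are illustrative.

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public final class SeekBackHeaderDemo {
    public static void main(String[] args) throws IOException {
        try (FileOutputStream fileStream = new FileOutputStream("sorted.dat")) {
            FileChannel channel = fileStream.getChannel();
            DataOutputStream out = new DataOutputStream(fileStream);
            // 1. Write a placeholder header with fixed-width fields (always 12 bytes).
            out.writeInt(0);   // number of records, not yet known
            out.writeLong(0L); // number of body bytes, not yet known
            out.flush();
            long headerEnd = channel.position();
            // 2. Stream the body entries, counting as we go.
            int numberOfRecords = 0;
            for (String entry : new String[] {"lion", "tigress", "bear"}) {
                out.writeUTF(entry);
                numberOfRecords++;
            }
            out.flush();
            long fileLength = channel.position();
            // 3. Rewrite the header in place; fixed-width fields guarantee it
            //    occupies exactly the same byte range as the placeholder did.
            ByteBuffer header = ByteBuffer.allocate(12);
            header.putInt(numberOfRecords).putLong(fileLength - headerEnd).flip();
            channel.write(header, 0L);
        }
    }
}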