Use of com.torodb.core.exceptions.user.UserException in project torodb by torodb.
The class InsertImplementation, method apply.
@Override
public Status<InsertResult> apply(Request req,
    Command<? super InsertArgument, ? super InsertResult> command, InsertArgument arg,
    WriteMongodTransaction context) {
  mongodMetrics.getInserts().mark(arg.getDocuments().size());
  Stream<KvDocument> docsToInsert = arg.getDocuments().stream()
      .map(FromBsonValueTranslator.getInstance())
      .map((v) -> (KvDocument) v);
  try {
    if (!context.getTorodTransaction().existsCollection(req.getDatabase(), arg.getCollection())) {
      context.getTorodTransaction().createIndex(req.getDatabase(), arg.getCollection(),
          Constants.ID_INDEX,
          ImmutableList.<IndexFieldInfo>of(new IndexFieldInfo(
              new AttributeReference(Arrays.asList(new Key[] { new ObjectKey(Constants.ID) })),
              FieldIndexOrdering.ASC.isAscending())),
          true);
    }
    context.getTorodTransaction().insert(req.getDatabase(), arg.getCollection(), docsToInsert);
  } catch (UserException ex) {
    //TODO: Improve error reporting
    return Status.from(ErrorCode.COMMAND_FAILED, ex.getLocalizedMessage());
  }
  return Status.ok(new InsertResult(arg.getDocuments().size()));
}
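The catch block above is the pattern this page indexes: ToroDB command implementations convert the storage layer's checked UserException into a non-OK Status rather than letting it propagate to the wire-protocol layer. A minimal, self-contained sketch of that contract (Status, ErrorCode, and UserException below are simplified stand-ins, not torodb's real classes):

import java.util.function.Supplier;

public class StatusSketch {

  // Simplified stand-ins for torodb's Status/ErrorCode/UserException types.
  enum ErrorCode { OK, COMMAND_FAILED }

  static class UserException extends Exception {
    UserException(String msg) { super(msg); }
  }

  record Status<R>(ErrorCode code, R result, String errorMsg) {
    static <R> Status<R> ok(R result) { return new Status<>(ErrorCode.OK, result, null); }
    static <R> Status<R> from(ErrorCode code, String msg) { return new Status<>(code, null, msg); }
    boolean isOk() { return code == ErrorCode.OK; }
  }

  // A command body that may fail with a checked UserException.
  interface UserThrowingSupplier<R> { R get() throws UserException; }

  // The recurring pattern: run the write, map UserException to a non-OK Status.
  static <R> Status<R> translatingUserErrors(UserThrowingSupplier<R> body) {
    try {
      return Status.ok(body.get());
    } catch (UserException ex) {
      return Status.from(ErrorCode.COMMAND_FAILED, ex.getLocalizedMessage());
    }
  }

  public static void main(String[] args) {
    Status<Integer> ok = translatingUserErrors(() -> 3);
    Status<Integer> failed = translatingUserErrors(() -> {
      throw new UserException("duplicate key");
    });
    System.out.println(ok.isOk() + " / " + failed.errorMsg());
  }
}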
Use of com.torodb.core.exceptions.user.UserException in project torodb by torodb.
The class UpdateImplementation, method apply.
@Override
public Status<UpdateResult> apply(Request req,
    Command<? super UpdateArgument, ? super UpdateResult> command, UpdateArgument arg,
    WriteMongodTransaction context) {
  UpdateStatus updateStatus = new UpdateStatus();
  try {
    if (!context.getTorodTransaction().existsCollection(req.getDatabase(), arg.getCollection())) {
      context.getTorodTransaction().createIndex(req.getDatabase(), arg.getCollection(),
          Constants.ID_INDEX,
          ImmutableList.<IndexFieldInfo>of(new IndexFieldInfo(
              new AttributeReference(Arrays.asList(new Key[] { new ObjectKey(Constants.ID) })),
              FieldIndexOrdering.ASC.isAscending())),
          true);
    }
    for (UpdateStatement updateStatement : arg.getStatements()) {
      BsonDocument query = updateStatement.getQuery();
      UpdateAction updateAction = UpdateActionTranslator.translate(updateStatement.getUpdate());
      Cursor<ToroDocument> candidatesCursor;
      switch (query.size()) {
        case 0: {
          candidatesCursor = context.getTorodTransaction()
              .findAll(req.getDatabase(), arg.getCollection())
              .asDocCursor();
          break;
        }
        case 1: {
          try {
            candidatesCursor = findByAttribute(context.getTorodTransaction(), req.getDatabase(),
                arg.getCollection(), query);
          } catch (CommandFailed ex) {
            return Status.from(ex);
          }
          break;
        }
        default: {
          return Status.from(ErrorCode.COMMAND_FAILED, "The given query is not supported right now");
        }
      }
      if (candidatesCursor.hasNext()) {
        try {
          Stream<List<ToroDocument>> candidatesBatchStream;
          if (updateStatement.isMulti()) {
            candidatesBatchStream = StreamSupport.stream(
                Spliterators.spliteratorUnknownSize(candidatesCursor.batch(100), Spliterator.ORDERED),
                false);
          } else {
            candidatesBatchStream = Stream.of(ImmutableList.of(candidatesCursor.next()));
          }
          Stream<KvDocument> updatedCandidates = candidatesBatchStream.map(candidates -> {
            updateStatus.increaseCandidates(candidates.size());
            context.getTorodTransaction().delete(req.getDatabase(), arg.getCollection(), candidates);
            return candidates;
          }).flatMap(l -> l.stream()).map(candidate -> {
            try {
              updateStatus.increaseUpdated();
              return update(updateAction, candidate);
            } catch (UserException userException) {
              // Stream lambdas cannot throw checked exceptions, so tunnel the
              // UserException in an unchecked wrapper and unwrap it below.
              throw new UserWrappedException(userException);
            }
          });
          context.getTorodTransaction().insert(req.getDatabase(), arg.getCollection(), updatedCandidates);
        } catch (UserWrappedException userWrappedException) {
          throw userWrappedException.getCause();
        }
      } else if (updateStatement.isUpsert()) {
        KvDocument toInsertCandidate;
        if (updateAction instanceof SetDocumentUpdateAction) {
          toInsertCandidate = ((SetDocumentUpdateAction) updateAction).getNewValue();
        } else {
          toInsertCandidate = update(updateAction,
              new ToroDocument(-1, (KvDocument) MongoWpConverter.translate(query)));
        }
        if (!toInsertCandidate.containsKey(Constants.ID)) {
          KvDocument.Builder builder = new KvDocument.Builder();
          for (DocEntry<?> entry : toInsertCandidate) {
            builder.putValue(entry.getKey(), entry.getValue());
          }
          builder.putValue(Constants.ID, MongoWpConverter.translate(objectIdFactory.consumeObjectId()));
          toInsertCandidate = builder.build();
        }
        updateStatus.increaseCandidates(1);
        updateStatus.increaseCreated(toInsertCandidate.get(Constants.ID));
        Stream<KvDocument> toInsertCandidates = Stream.of(toInsertCandidate);
        context.getTorodTransaction().insert(req.getDatabase(), arg.getCollection(), toInsertCandidates);
      }
    }
  } catch (UserException ex) {
    //TODO: Improve error reporting
    return Status.from(ErrorCode.COMMAND_FAILED, ex.getLocalizedMessage());
  }
  mongodMetrics.getUpdateModified().mark(updateStatus.updated);
  mongodMetrics.getUpdateMatched().mark(updateStatus.candidates);
  mongodMetrics.getUpdateUpserted().mark(updateStatus.upsertResults.size());
  return Status.ok(new UpdateResult(updateStatus.updated, updateStatus.candidates,
      ImmutableList.copyOf(updateStatus.upsertResults)));
}
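Note the UserWrappedException round trip inside the stream pipeline above: java.util.stream lambdas cannot declare checked exceptions, so the update step wraps the checked UserException in an unchecked carrier and the catch around the pipeline unwraps it. A minimal, self-contained sketch of the same tunneling technique (class and method names below are illustrative, not torodb's):

import java.util.List;
import java.util.stream.Stream;

public class ExceptionTunnelSketch {

  static class UserException extends Exception {
    UserException(String msg) { super(msg); }
  }

  // Unchecked carrier that smuggles the checked exception out of the stream.
  static class UserWrappedException extends RuntimeException {
    UserWrappedException(UserException cause) { super(cause); }
    @Override
    public UserException getCause() {
      return (UserException) super.getCause();
    }
  }

  static String update(String doc) throws UserException {
    if (doc.isEmpty()) {
      throw new UserException("cannot update empty document");
    }
    return doc.toUpperCase();
  }

  static List<String> updateAll(Stream<String> docs) throws UserException {
    try {
      // map() cannot declare "throws UserException", so wrap...
      return docs.map(doc -> {
        try {
          return update(doc);
        } catch (UserException ex) {
          throw new UserWrappedException(ex);
        }
      }).toList();
    } catch (UserWrappedException ex) {
      // ...and unwrap once the terminal operation has run.
      throw ex.getCause();
    }
  }

  public static void main(String[] args) throws UserException {
    System.out.println(updateAll(Stream.of("a", "b")));
  }
}

Because insert(...) in the real code consumes the updated-documents Stream lazily, the wrapped exception actually surfaces during that terminal consumption, which is why the catch encloses the insert call rather than just the map().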
Use of com.torodb.core.exceptions.user.UserException in project torodb by torodb.
The class CreateIndexesImplementation, method apply.
@Override
public Status<CreateIndexesResult> apply(Request req,
    Command<? super CreateIndexesArgument, ? super CreateIndexesResult> command,
    CreateIndexesArgument arg, WriteMongodTransaction context) {
  int indexesBefore = (int) context.getTorodTransaction()
      .getIndexesInfo(req.getDatabase(), arg.getCollection()).count();
  int indexesAfter = indexesBefore;
  try {
    boolean existsCollection = context.getTorodTransaction()
        .existsCollection(req.getDatabase(), arg.getCollection());
    if (!existsCollection) {
      context.getTorodTransaction().createIndex(req.getDatabase(), arg.getCollection(),
          Constants.ID_INDEX,
          ImmutableList.<IndexFieldInfo>of(new IndexFieldInfo(
              new AttributeReference(Arrays.asList(new Key[] { new ObjectKey(Constants.ID) })),
              FieldIndexOrdering.ASC.isAscending())),
          true);
    }
    boolean createdCollectionAutomatically = !existsCollection;
    for (IndexOptions indexOptions : arg.getIndexesToCreate()) {
      if (indexOptions.getKeys().size() < 1) {
        return Status.from(ErrorCode.CANNOT_CREATE_INDEX, "Index keys cannot be empty.");
      }
      if (indexOptions.isBackground()) {
        throw new CommandFailed("createIndexes", "Building index in background is not supported right now");
      }
      if (indexOptions.isSparse()) {
        throw new CommandFailed("createIndexes", "Sparse index are not supported right now");
      }
      List<IndexFieldInfo> fields = new ArrayList<>(indexOptions.getKeys().size());
      for (IndexOptions.Key indexKey : indexOptions.getKeys()) {
        AttributeReference.Builder attRefBuilder = new AttributeReference.Builder();
        for (String key : indexKey.getKeys()) {
          attRefBuilder.addObjectKey(key);
        }
        IndexType indexType = indexKey.getType();
        if (!KnownType.contains(indexType)) {
          return Status.from(ErrorCode.CANNOT_CREATE_INDEX,
              "bad index key pattern: Unknown index plugin '" + indexKey.getType().getName() + "'");
        }
        Optional<FieldIndexOrdering> ordering = indexType.accept(filedIndexOrderingConverterVisitor, null);
        if (!ordering.isPresent()) {
          throw new CommandFailed("createIndexes",
              "Index of type " + indexType.getName() + " is not supported right now");
        }
        fields.add(new IndexFieldInfo(attRefBuilder.build(), ordering.get().isAscending()));
      }
      if (context.getTorodTransaction().createIndex(req.getDatabase(), arg.getCollection(),
          indexOptions.getName(), fields, indexOptions.isUnique())) {
        indexesAfter++;
      }
    }
    String note = null;
    if (indexesAfter == indexesBefore) {
      note = "all indexes already exist";
    }
    return Status.ok(new CreateIndexesResult(indexesBefore, indexesAfter, note,
        createdCollectionAutomatically));
  } catch (UserException ex) {
    return Status.from(ErrorCode.COMMAND_FAILED, ex.getLocalizedMessage());
  } catch (CommandFailed ex) {
    return Status.from(ex);
  }
}
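The "create the default _id index if the collection does not yet exist" block appears verbatim in the insert, update, and createIndexes implementations above. A plausible refactoring, sketched here against torodb's own types (the helper name ensureIdIndex is ours, so this only compiles inside that codebase; it uses only calls already visible in the snippets):

// Hypothetical shared helper extracted from the three duplicated blocks above.
private void ensureIdIndex(WriteMongodTransaction context, String database, String collection)
    throws UserException {
  if (!context.getTorodTransaction().existsCollection(database, collection)) {
    // Every new collection gets an ascending unique index on "_id".
    context.getTorodTransaction().createIndex(database, collection, Constants.ID_INDEX,
        ImmutableList.<IndexFieldInfo>of(new IndexFieldInfo(
            new AttributeReference(Arrays.asList(new Key[] { new ObjectKey(Constants.ID) })),
            FieldIndexOrdering.ASC.isAscending())),
        true);
  }
}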
Use of com.torodb.core.exceptions.user.UserException in project torodb by torodb.
The class RecoveryService, method initialSync.
private boolean initialSync() throws TryAgainException, FatalErrorException {
  /*
   * 1.  store that data is inconsistent
   * 2.  decide a sync source
   * 3.  lastRemoteOptime1 = get the last optime of the sync source
   * 4.  clone all databases except local
   * 5.  lastRemoteOptime2 = get the last optime of the sync source
   * 6.  apply remote oplog from lastRemoteOptime1 to lastRemoteOptime2
   * 7.  lastRemoteOptime3 = get the last optime of the sync source
   * 8.  apply remote oplog from lastRemoteOptime2 to lastRemoteOptime3
   * 9.  rebuild indexes
   * 10. store lastRemoteOptime3 as the last applied operation optime
   * 11. store that data is consistent
   * 12. change replication state to SECONDARY
   */
  //TODO: Support fastsync (used to restore a node by copying the data from other up-to-date node)
  LOGGER.info("Starting initial sync");
  callback.setConsistentState(false);
  HostAndPort syncSource;
  try {
    syncSource = syncSourceProvider.newSyncSource();
    LOGGER.info("Using node " + syncSource + " to replicate from");
  } catch (NoSyncSourceFoundException ex) {
    throw new TryAgainException("No sync source");
  }
  MongoClient remoteClient;
  try {
    remoteClient = remoteClientFactory.createClient(syncSource);
  } catch (UnreachableMongoServerException ex) {
    throw new TryAgainException(ex);
  }
  try {
    LOGGER.debug("Remote client obtained");
    MongoConnection remoteConnection = remoteClient.openConnection();
    try (OplogReader reader = oplogReaderProvider.newReader(remoteConnection)) {
      OplogOperation lastClonedOp = reader.getLastOp();
      OpTime lastRemoteOptime1 = lastClonedOp.getOpTime();
      try (WriteOplogTransaction oplogTransaction = oplogManager.createWriteTransaction()) {
        LOGGER.info("Remote database cloning started");
        oplogTransaction.truncate();
        LOGGER.info("Local databases dropping started");
        Status<?> status = dropDatabases();
        if (!status.isOk()) {
          throw new TryAgainException("Error while trying to drop collections: " + status);
        }
        LOGGER.info("Local databases dropping finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        LOGGER.info("Remote database cloning started");
        cloneDatabases(remoteClient);
        LOGGER.info("Remote database cloning finished");
        oplogTransaction.forceNewValue(lastClonedOp.getHash(), lastClonedOp.getOpTime());
      }
      if (!isRunning()) {
        LOGGER.warn("Recovery stopped before it can finish");
        return false;
      }
      TorodServer torodServer = server.getTorodServer();
      try (TorodConnection connection = torodServer.openConnection();
          SharedWriteTorodTransaction trans = connection.openWriteTransaction(false)) {
        OpTime lastRemoteOptime2 = reader.getLastOp().getOpTime();
        LOGGER.info("First oplog application started");
        applyOplog(reader, lastRemoteOptime1, lastRemoteOptime2);
        trans.commit();
        LOGGER.info("First oplog application finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        OplogOperation lastOperation = reader.getLastOp();
        OpTime lastRemoteOptime3 = lastOperation.getOpTime();
        LOGGER.info("Second oplog application started");
        applyOplog(reader, lastRemoteOptime2, lastRemoteOptime3);
        trans.commit();
        LOGGER.info("Second oplog application finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        LOGGER.info("Index rebuild started");
        rebuildIndexes();
        trans.commit();
        LOGGER.info("Index rebuild finished");
        if (!isRunning()) {
          LOGGER.warn("Recovery stopped before it can finish");
          return false;
        }
        trans.commit();
      }
    } catch (OplogStartMissingException ex) {
      throw new TryAgainException(ex);
    } catch (OplogOperationUnsupported ex) {
      throw new TryAgainException(ex);
    } catch (MongoException | RollbackException ex) {
      throw new TryAgainException(ex);
    } catch (OplogManagerPersistException ex) {
      throw new FatalErrorException();
    } catch (UserException ex) {
      throw new FatalErrorException(ex);
    }
    callback.setConsistentState(true);
    LOGGER.info("Initial sync finished");
  } finally {
    remoteClient.close();
  }
  return true;
}
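initialSync() reports failure through two checked exceptions: TryAgainException for transient problems (no sync source, unreachable server, oplog rollback) and FatalErrorException for unrecoverable ones. A self-contained sketch of the retry policy such a contract implies (the RecoveryDriver class and fixed retry delay are our assumptions, not torodb's actual scheduling code):

public class RecoveryDriver {

  // Simplified stand-ins for the exceptions thrown by initialSync().
  static class TryAgainException extends Exception {
    TryAgainException(String msg) { super(msg); }
  }

  static class FatalErrorException extends Exception {}

  interface InitialSync {
    // Mirrors initialSync(): true = finished, false = stopped early.
    boolean run() throws TryAgainException, FatalErrorException;
  }

  static boolean recover(InitialSync sync, long retryDelayMillis)
      throws FatalErrorException, InterruptedException {
    while (true) {
      try {
        return sync.run();
      } catch (TryAgainException ex) {
        // Transient failure: wait, then restart the whole initial sync.
        System.err.println("Initial sync failed, retrying: " + ex.getMessage());
        Thread.sleep(retryDelayMillis);
      }
      // FatalErrorException is not caught: it propagates and the
      // recovery service must stop.
    }
  }
}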
Use of com.torodb.core.exceptions.user.UserException in project torodb by torodb.
The class RecoveryService, method cloneDatabases.
private void cloneDatabases(@Nonnull MongoClient remoteClient)
    throws CloningException, MongoException, UserException {
  enableDataImportMode();
  try {
    Stream<String> dbNames;
    try (MongoConnection remoteConnection = remoteClient.openConnection()) {
      RemoteCommandResponse<ListDatabasesReply> remoteResponse = remoteConnection.execute(
          ListDatabasesCommand.INSTANCE, "admin", true, Empty.getInstance());
      if (!remoteResponse.isOk()) {
        throw remoteResponse.asMongoException();
      }
      dbNames = remoteResponse.getCommandReply().get().getDatabases().stream()
          .map(db -> db.getName());
    }
    dbNames.filter(this::isReplicable).forEach(databaseName -> {
      MyWritePermissionSupplier writePermissionSupplier = new MyWritePermissionSupplier(databaseName);
      CloneOptions options = new CloneOptions(
          true, true, true, false, databaseName,
          Collections.<String>emptySet(), writePermissionSupplier,
          (colName) -> replFilters.getCollectionPredicate().test(databaseName, colName),
          (collection, indexName, unique, keys) ->
              replFilters.getIndexPredicate().test(databaseName, collection, indexName, unique, keys));
      try {
        cloner.cloneDatabase(databaseName, remoteClient, server, options);
      } catch (MongoException ex) {
        throw new CloningException(ex);
      }
    });
  } finally {
    disableDataImportMode();
  }
}
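cloneDatabases wires three layers of filtering into the clone: a database-level filter (isReplicable), a per-collection predicate, and a per-index predicate, the latter two delegated to replFilters. A self-contained sketch of how the database- and collection-level predicates can compose (ReplicationFilters and its accessors here are illustrative, not torodb's real filter API):

import java.util.List;
import java.util.function.BiPredicate;
import java.util.function.Predicate;

public class CloneFilterSketch {

  // Illustrative filter bundle; not torodb's real ReplicationFilters API.
  record ReplicationFilters(Predicate<String> databasePredicate,
                            BiPredicate<String, String> collectionPredicate) {

    // The "local" database is never replicated (see step 4 of the initial
    // sync plan above); user-configured filters narrow the set further.
    boolean isReplicable(String databaseName) {
      return !"local".equals(databaseName) && databasePredicate.test(databaseName);
    }
  }

  public static void main(String[] args) {
    ReplicationFilters filters = new ReplicationFilters(
        db -> !db.equals("admin"),
        (db, col) -> !col.startsWith("system."));

    List<String> remoteDbs = List.of("local", "admin", "shop");
    remoteDbs.stream()
        .filter(filters::isReplicable)
        .forEach(db -> System.out.println("would clone: " + db)); // prints "shop"

    System.out.println("clone shop.orders? "
        + filters.collectionPredicate().test("shop", "orders")); // true
  }
}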