use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class DocumentBuilder method toDocument.
/**
 * Convert a SolrInputDocument to a Lucene Document.
 *
 * This function should go elsewhere. This builds the Document without an
 * extra Map<> checking for multiple values. For more discussion, see:
 * http://www.nabble.com/Re%3A-svn-commit%3A-r547493---in--lucene-solr-trunk%3A-.--src-java-org-apache-solr-common--src-java-org-apache-solr-schema--src-java-org-apache-solr-update--src-test-org-apache-solr-common--tf3931539.html
 *
 * TODO: /!\ NOTE /!\ The semantics of this function are still in flux.
 * Something somewhere needs to be able to fill up a SolrDocument from
 * a Lucene document - this is one place that may happen. It may also be
 * moved to an independent function.
 *
 * @since solr 1.3
 *
 * @param doc SolrInputDocument from which the document has to be built
 * @param schema Schema instance
 * @param forInPlaceUpdate Whether the output document will be used for an in-place update.
 *        When this is true, default field values and copyField targets are not populated.
 * @return Built Lucene document
 */
public static Document toDocument(SolrInputDocument doc, IndexSchema schema, boolean forInPlaceUpdate) {
  final SchemaField uniqueKeyField = schema.getUniqueKeyField();
  final String uniqueKeyFieldName = null == uniqueKeyField ? null : uniqueKeyField.getName();
  Document out = new Document();
  Set<String> usedFields = Sets.newHashSet();

  // Load fields from SolrDocument to Document
  for (SolrInputField field : doc) {
    String name = field.getName();
    SchemaField sfield = schema.getFieldOrNull(name);
    boolean used = false;

    // Make sure a non-multiValued field does not receive multiple values
    if (sfield != null && !sfield.multiValued() && field.getValueCount() > 1) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "ERROR: " + getID(doc, schema) + "multiple values encountered for non multiValued field "
              + sfield.getName() + ": " + field.getValue());
    }

    List<CopyField> copyFields = schema.getCopyFieldsList(name);
    if (copyFields.size() == 0)
      copyFields = null;

    // load each field value
    boolean hasField = false;
    try {
      for (Object v : field) {
        if (v == null) {
          continue;
        }
        hasField = true;
        if (sfield != null) {
          used = true;
          addField(out, sfield, v, name.equals(uniqueKeyFieldName) ? false : forInPlaceUpdate);
          // record the field as having a value
          usedFields.add(sfield.getName());
        }

        // Check if we should copy this field value to any other fields.
        // This could happen whether it is explicit or not.
        if (copyFields != null) {
          // Do not copy this field if the document is to be used for an in-place update
          // and this is the uniqueKey field (the uniqueKey can't change, so there is no
          // need to "update" the copyField).
          if (!(forInPlaceUpdate && name.equals(uniqueKeyFieldName))) {
            for (CopyField cf : copyFields) {
              SchemaField destinationField = cf.getDestination();
              final boolean destHasValues = usedFields.contains(destinationField.getName());

              // check whether the copyField target is multiValued
              if (!destinationField.multiValued() && destHasValues) {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "ERROR: " + getID(doc, schema) + "multiple values encountered for non multiValued copy field "
                        + destinationField.getName() + ": " + v);
              }
              used = true;

              // Perhaps trim the length of a copy field
              Object val = v;
              if (val instanceof String && cf.getMaxChars() > 0) {
                val = cf.getLimitedValue((String) val);
              }
              addField(out, destinationField, val,
                  destinationField.getName().equals(uniqueKeyFieldName) ? false : forInPlaceUpdate);
              // record the field as having a value
              usedFields.add(destinationField.getName());
            }
          }
        }
      }
    } catch (SolrException ex) {
      throw ex;
    } catch (Exception ex) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "ERROR: " + getID(doc, schema) + "Error adding field '" + field.getName() + "'='"
              + field.getValue() + "' msg=" + ex.getMessage(), ex);
    }

    // make sure the field was used somehow...
    if (!used && hasField) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "ERROR: " + getID(doc, schema) + "unknown field '" + name + "'");
    }
  }

  // Now validate required fields and add default values. This can be skipped for
  // in-place updates, since it already happened during the full indexing initially.
  if (!forInPlaceUpdate) {
    for (SchemaField field : schema.getRequiredFields()) {
      if (out.getField(field.getName()) == null) {
        if (field.getDefaultValue() != null) {
          addField(out, field, field.getDefaultValue(), false);
        } else {
          String msg = getID(doc, schema) + "missing required field: " + field.getName();
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg);
        }
      }
    }
  }
  if (!forInPlaceUpdate) {
    moveLargestFieldLast(out);
  }
  return out;
}
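For context, a minimal usage sketch (not part of the original source): the core variable is an assumed, already-initialized SolrCore, and the field names are illustrative.

SolrInputDocument sdoc = new SolrInputDocument();
sdoc.addField("id", "doc-1");
sdoc.addField("title", "Hello, Solr");
IndexSchema schema = core.getLatestSchema();
// false: regular full indexing, so default values and copyField targets are populated
Document luceneDoc = DocumentBuilder.toDocument(sdoc, schema, false);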
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class CdcrTransactionLog method reopenOutputStream.
/**
 * Re-open the output stream of the tlog and position
 * the file pointer at the end of the file. It assumes
 * that the tlog is non-empty and that the tlog's header
 * has already been read.
 */
synchronized void reopenOutputStream() {
  try {
    if (debug) {
      log.debug("Re-opening tlog's output stream: " + this);
    }
    raf = new RandomAccessFile(this.tlogFile, "rw");
    channel = raf.getChannel();
    long start = raf.length();
    raf.seek(start);
    os = Channels.newOutputStream(channel);
    fos = new FastOutputStream(os, new byte[65536], 0);
    // reflect that we aren't starting at the beginning
    fos.setWritten(start);
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
  }
}
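The essential trick here is that opening a RandomAccessFile in "rw" mode does not truncate the file, and seeking to raf.length() moves the underlying channel to the end so later writes append; fos.setWritten(start) then keeps the stream's logical byte count in sync with what is already on disk. A standalone sketch of the same append-reopen pattern, assuming the file already exists:

static OutputStream reopenForAppend(File f) throws IOException {
  RandomAccessFile raf = new RandomAccessFile(f, "rw"); // "rw" opens without truncating
  raf.seek(raf.length());                               // move the file pointer (and channel position) to EOF
  return Channels.newOutputStream(raf.getChannel());    // writes through this stream now append
}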
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class CdcrTransactionLog method close.
@Override
public void close() {
  try {
    if (debug) {
      log.debug("Closing tlog " + this);
    }
    synchronized (this) {
      if (fos != null) {
        fos.flush();
        fos.close();
        // dereference these variables for GC
        fos = null;
        os = null;
        channel = null;
        raf = null;
      }
    }
    if (deleteOnClose) {
      try {
        Files.deleteIfExists(tlogFile.toPath());
      } catch (IOException e) {
        // TODO: should this class care if a file couldn't be deleted?
        // this just emulates previous behavior, where only SecurityException would be handled.
      }
    }
  } catch (IOException e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
  } finally {
    assert ObjectReleaseTracker.release(this);
  }
}
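The assert ObjectReleaseTracker.release(this) in the finally block pairs with an ObjectReleaseTracker.track(this) call made when the object is created; both return true, so the tracking is active only when assertions are enabled (e.g. in tests). A minimal sketch of the pattern, with a hypothetical Resource class:

import org.apache.solr.common.util.ObjectReleaseTracker;

class Resource implements AutoCloseable {
  Resource() {
    assert ObjectReleaseTracker.track(this); // no-op unless -ea is set
  }
  @Override
  public void close() {
    assert ObjectReleaseTracker.release(this); // lets tests detect leaked or double-closed objects
  }
}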
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class CdcrUpdateLog method copyBufferedUpdates.
/**
 * <p>
 * Read the entries from the given tlog file and replay them as buffered updates.
 * The buffered tlog being copied might contain operations that duplicate those in the
 * current update log: during tlog replication, the replica might buffer update operations
 * that are also present in the tlog files downloaded from the leader. To remove these
 * duplicates, any operation with a version lower than the latest known version is skipped.
 */
private void copyBufferedUpdates(File tlogSrc, long offsetSrc, long latestVersion) {
  recoveryInfo = new RecoveryInfo();
  recoveryInfo.positionOfStart = tlog == null ? 0 : tlog.snapshot();
  state = State.BUFFERING;
  operationFlags |= FLAG_GAP;
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM,
      DistributedUpdateProcessor.DistribPhase.FROMLEADER.toString());
  SolrQueryRequest req = new LocalSolrQueryRequest(uhandler.core, params);
  CdcrTransactionLog src = new CdcrTransactionLog(tlogSrc, null, true);
  TransactionLog.LogReader tlogReader = src.getReader(offsetSrc);
  try {
    int operationAndFlags = 0;
    for (; ; ) {
      Object o = tlogReader.next();
      // we reached the end of the tlog
      if (o == null)
        break;
      // should currently be a List<Oper,Ver,Doc/Id>
      List entry = (List) o;
      operationAndFlags = (Integer) entry.get(0);
      int oper = operationAndFlags & OPERATION_MASK;
      long version = (Long) entry.get(1);
      if (version <= latestVersion) {
        // probably a buffered update that is also present in a tlog file coming from the leader,
        // skip it.
        log.debug("Dropping buffered operation - version {} <= {}", version, latestVersion);
        continue;
      }
      switch (oper) {
        case UpdateLog.ADD: {
          SolrInputDocument sdoc = (SolrInputDocument) entry.get(entry.size() - 1);
          AddUpdateCommand cmd = new AddUpdateCommand(req);
          cmd.solrDoc = sdoc;
          cmd.setVersion(version);
          cmd.setFlags(UpdateCommand.BUFFERING);
          this.add(cmd);
          break;
        }
        case UpdateLog.DELETE: {
          byte[] idBytes = (byte[]) entry.get(2);
          DeleteUpdateCommand cmd = new DeleteUpdateCommand(req);
          cmd.setIndexedId(new BytesRef(idBytes));
          cmd.setVersion(version);
          cmd.setFlags(UpdateCommand.BUFFERING);
          this.delete(cmd);
          break;
        }
        case UpdateLog.DELETE_BY_QUERY: {
          String query = (String) entry.get(2);
          DeleteUpdateCommand cmd = new DeleteUpdateCommand(req);
          cmd.query = query;
          cmd.setVersion(version);
          cmd.setFlags(UpdateCommand.BUFFERING);
          this.deleteByQuery(cmd);
          break;
        }
        default:
          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Invalid Operation! " + oper);
      }
    }
  } catch (Exception e) {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to copy buffered updates", e);
  } finally {
    try {
      tlogReader.close();
    } finally {
      this.doClose(src);
    }
  }
}
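Each tlog entry packs the operation type and its flags into a single int, with the low bits selecting the operation (operationAndFlags & OPERATION_MASK). A small decoding sketch; the constants below are local stand-ins for the UpdateLog ones, and their concrete values are assumptions for illustration:

// Stand-ins mirroring UpdateLog; the concrete values are assumed, not from the original source.
static final int OPERATION_MASK = 0x0f;
static final int ADD = 1, DELETE = 2, DELETE_BY_QUERY = 3;

static String describe(int operationAndFlags) {
  int oper = operationAndFlags & OPERATION_MASK;   // low bits: operation type
  int flags = operationAndFlags & ~OPERATION_MASK; // remaining bits: flags such as FLAG_GAP
  switch (oper) {
    case ADD:             return "add (flags=0x" + Integer.toHexString(flags) + ")";
    case DELETE:          return "delete";
    case DELETE_BY_QUERY: return "deleteByQuery";
    default:              return "unknown operation " + oper;
  }
}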
use of org.apache.solr.common.SolrException in project lucene-solr by apache.
the class TransactionLog method writeCommit.
public long writeCommit(CommitUpdateCommand cmd, int flags) {
  LogCodec codec = new LogCodec(resolver);
  synchronized (this) {
    try {
      // if we had flushed, this should be equal to channel.position()
      long pos = fos.size();
      if (pos == 0) {
        writeLogHeader(codec);
        pos = fos.size();
      }
      codec.init(fos);
      codec.writeTag(JavaBinCodec.ARR, 3);
      // should just take one byte
      codec.writeInt(UpdateLog.COMMIT | flags);
      codec.writeLong(cmd.getVersion());
      // ensure these bytes are (almost) last in the file
      codec.writeStr(END_MESSAGE);
      endRecord(pos);
      // flush since this will be the last record in a log file
      fos.flush();
      assert fos.size() == channel.size();
      return pos;
    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    }
  }
}
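A hedged usage sketch: on commit, the caller writes a terminating commit record so the tlog ends with END_MESSAGE and is fully flushed. Here req is an assumed active SolrQueryRequest and tlog an assumed open transaction log.

CommitUpdateCommand cmd = new CommitUpdateCommand(req, false); // false = plain commit, not an optimize
long recordStart = tlog.writeCommit(cmd, 0);                   // 0 = no extra flags
// The file now ends with END_MESSAGE and has been flushed, so a reader
// can recognize a cleanly terminated log.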