use of org.solbase.common.SolbaseException in project Solbase by Photobucket.
the class IndexWriter method updateDocument.
public void updateDocument(Put documentPut, Document doc) {
    String uniqId = doc.get("global_uniq_id");
    Put mappingPut = new Put(Bytes.toBytes(uniqId));
    mappingPut.add(SolbaseUtil.docIdColumnFamilyName, SolbaseUtil.tombstonedColumnFamilyQualifierBytes, Bytes.toBytes(0));
    updateDocKeyIdMap(mappingPut);
    // for remote server update via solr update, we want to use
    // getDocTable(), but for now map/reduce can use the local htable
    HTableInterface docTable = SolbaseUtil.getDocTable();
    // insert the document into the doc table
    try {
        documentPut.add(SolbaseUtil.timestampColumnFamilyName, SolbaseUtil.tombstonedColumnFamilyQualifierBytes, Bytes.toBytes(0));
        docTable.put(documentPut);
    } catch (IOException e) {
        throw new SolbaseException(SolbaseException.ErrorCode.SERVER_ERROR, e.getMessage());
    } finally {
        SolbaseUtil.releaseTable(docTable);
    }
}
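Taken together, updateDocument resets the tombstone flag to 0 (live) on both the global_uniq_id-to-docId mapping row and the document row itself, then writes the document Put. A minimal caller sketch is shown below; the row-key choice (the serialized doc id), the field names other than global_uniq_id, and the indexWriter variable are assumptions for illustration, not the project's actual helpers.
// hedged sketch of a caller; row-key layout and field names are assumed, not Solbase's exact ones
Document doc = new Document();
doc.add(new Field("global_uniq_id", "photo-1234", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("title", "vacation photo", Field.Store.YES, Field.Index.ANALYZED));
int docId = 1234;
Put documentPut = new Put(Bytes.toBytes(docId)); // assumed: doc table rows keyed by the doc id
// the document's stored fields would normally be serialized into documentPut here
indexWriter.updateDocument(documentPut, doc);    // indexWriter: an instance of the IndexWriter class shown above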
use of org.solbase.common.SolbaseException in project Solbase by Photobucket.
the class IndexWriter method deleteTermVector.
/**
 *
 * @param termDocMeta - term vector to be deleted
 * @param startDocId - start of the shard's doc id range
 * @param endDocId - end of the shard's doc id range
 * @param compare - if true, compare the new and old term vectors and skip the delete when they are identical
 * @return boolean - whether the term vector was deleted
 */
public boolean deleteTermVector(TermDocMetadata termDocMeta, int startDocId, int endDocId, boolean compare) {
    // to update, we should first delete the existing term doc metadata.
    // getting termVector and doc tables
    HTableInterface termVectorTable = SolbaseUtil.getTermVectorTable();
    ResultScanner fieldScanner = null;
    try {
        byte[] key = termDocMeta.getFieldTermKey();
        int docNumber = termDocMeta.getDocId();
        Delete delete = null;
        switch (TermDocMetadataLoader.storageType) {
            case KEY_ONLY: {
                byte[] termBeginKey = Bytes.add(key, SolbaseUtil.delimiter, Bytes.toBytes(docNumber));
                byte[] termEndKey = Bytes.add(key, SolbaseUtil.delimiter, Bytes.toBytes(docNumber + 1));
                Scan fieldScan = new Scan(termBeginKey, termEndKey);
                fieldScan.addFamily(SolbaseUtil.termVectorDocColumnFamilyName);
                fieldScanner = termVectorTable.getScanner(fieldScan);
                Result termDoc = fieldScanner.next();
                fieldScanner.close();
                if (termDoc != null && !termDoc.isEmpty()) {
                    if (compare) {
                        byte[] oldRow = termDoc.getRow();
                        ByteBuffer buf = termDocMeta.serialize();
                        byte[] newRow = Bytes.add(Bytes.add(key, SolbaseUtil.delimiter, Bytes.toBytes(docNumber)), Bytes.toBytes(buf));
                        // if the term vector hasn't changed, don't bother deleting
                        if (!ArrayUtils.isEquals(oldRow, newRow)) {
                            delete = new Delete(termDoc.getRow());
                        }
                    } else {
                        delete = new Delete(termDoc.getRow());
                    }
                }
            }
            break;
            case WIDE_ROW:
                int chunkId = TermDocMetadataLoader.getChunkId(docNumber);
                delete = new Delete(Bytes.add(key, SolbaseUtil.delimiter, Bytes.toBytes(chunkId)));
                break;
            case NARROW_ROW:
            default: {
                delete = new Delete(Bytes.add(key, SolbaseUtil.delimiter, Bytes.toBytes(docNumber)));
            }
        }
        if (delete != null) {
            termVectorTable.delete(delete);
            logger.info("deleting term vector: " + termDocMeta.getTerm().toString() + " docId: " + docNumber);
            return true;
        }
    } catch (IOException e) {
        throw new SolbaseException(SolbaseException.ErrorCode.SERVER_ERROR, e.getMessage());
    } finally {
        if (fieldScanner != null) {
            fieldScanner.close();
        }
        SolbaseUtil.releaseTable(termVectorTable);
    }
    return false;
}
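The switch above reduces to three term-vector row-key layouts. The fragment below restates just the key construction for each storage type, reusing the same calls as the method; it is a summary sketch rather than additional project code.
byte[] fieldTermKey = termDocMeta.getFieldTermKey();
int docId = termDocMeta.getDocId();
// KEY_ONLY: the stored row is fieldTermKey + delimiter + docId + serialized metadata,
// so the exact row has to be found by scanning [prefix(docId), prefix(docId + 1))
byte[] keyOnlyPrefix = Bytes.add(fieldTermKey, SolbaseUtil.delimiter, Bytes.toBytes(docId));
// WIDE_ROW: many docs share one row, addressed by the chunk the doc id falls into
byte[] wideRowKey = Bytes.add(fieldTermKey, SolbaseUtil.delimiter, Bytes.toBytes(TermDocMetadataLoader.getChunkId(docId)));
// NARROW_ROW (and default): one row per (term, doc) pair, keyed directly by the doc id
byte[] narrowRowKey = Bytes.add(fieldTermKey, SolbaseUtil.delimiter, Bytes.toBytes(docId));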
use of org.solbase.common.SolbaseException in project Solbase by Photobucket.
the class SolbaseIndexWriter method editDoc.
/**
 * Edit logic lives here. Instead of blindly inserting, we compare the new doc with the old doc
 * and apply the appropriate modifications to the term vectors and the doc.
 * @param newDoc
 * @param indexName
 * @param docNumber
 * @param updateStore
 * @return true if the update was applied (or the document no longer exists), false if it failed
 */
public boolean editDoc(Document newDoc, String indexName, int docNumber, boolean updateStore) {
    try {
        CachedObjectWrapper<Document, Long> cachedObj = ReaderCache.getDocument(docNumber, null, indexName, 0, 0);
        if (cachedObj == null || cachedObj.getValue() == null) {
            // document doesn't exist, so let's just bail out here
            return true;
        }
        ParsedDoc parsedDoc = new ParsedDoc(newDoc);
        parsedDoc.setIndexName(indexName);
        parsedDoc.setIndexUtil(indexUtil);
        parsedDoc.setIndexWriter(writer);
        parsedDoc.setUpdateStore(updateStore);
        int shardNum = SolbaseShardUtil.getShardNum(indexName);
        int startDocId = SolbaseShardUtil.getStartDocId(shardNum);
        int endDocId = SolbaseShardUtil.getEndDocId(shardNum);
        ReaderCache.updateDocument(docNumber, parsedDoc, indexName, writer, LayeredCache.ModificationType.UPDATE, updateStore, startDocId, endDocId);
        return true;
    } catch (IOException e) {
        logger.info("edit doc failed: " + docNumber);
        logger.info(e.toString());
    } catch (InterruptedException e) {
        logger.info("edit doc failed: " + docNumber);
        logger.info(e.toString());
    } catch (MemcachedException e) {
        logger.info("edit doc failed: " + docNumber);
        logger.info(e.toString());
    } catch (TimeoutException e) {
        logger.info("edit doc failed: " + docNumber);
        logger.info(e.toString());
    } catch (SolbaseException e) {
        logger.info("edit doc failed: " + docNumber);
        logger.info(e.toString());
    }
    return false;
}
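A typical caller hands editDoc the replacement Document together with the index name and the existing doc id. A rough usage sketch follows; the field names, index name, and the solbaseIndexWriter variable are illustrative assumptions.
// hedged usage sketch; names and values are illustrative
Document newDoc = new Document();
newDoc.add(new Field("global_uniq_id", "photo-1234", Field.Store.YES, Field.Index.NOT_ANALYZED));
newDoc.add(new Field("title", "updated title", Field.Store.YES, Field.Index.ANALYZED));
boolean ok = solbaseIndexWriter.editDoc(newDoc, "myindex", 1234, true); // true: also propagate to hbase
if (!ok) {
    // failures are logged and swallowed inside editDoc, so the caller only sees false here
}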
use of org.solbase.common.SolbaseException in project Solbase by Photobucket.
the class SolbaseIndexWriter method delete.
public void delete(DeleteUpdateCommand cmd) throws IOException {
    deleteByIdCommands.incrementAndGet();
    deleteByIdCommandsCumulative.incrementAndGet();
    if (!cmd.fromPending && !cmd.fromCommitted) {
        numErrors.incrementAndGet();
        numErrorsCumulative.incrementAndGet();
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "meaningless command: " + cmd);
    }
    // only deleting from both pending and committed state is supported
    if (!cmd.fromPending || !cmd.fromCommitted) {
        numErrors.incrementAndGet();
        numErrorsCumulative.incrementAndGet();
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "operation not supported: " + cmd);
    }
    // Delete all terms/fields/etc
    String indexName = core.getName();
    writer.setIndexName(indexName);
    writer.setIndexUtil(indexUtil);
    int docId = Integer.parseInt(cmd.id);
    logger.info("deleting doc: " + docId);
    try {
        CachedObjectWrapper<Document, Long> wrapper = ReaderCache.getDocument(docId, null, indexName, 0, 0);
        boolean updateStore = cmd.getUpdateStore();
        ParsedDoc parsedDoc = new ParsedDoc();
        parsedDoc.setIndexName(indexName);
        parsedDoc.setIndexUtil(indexUtil);
        parsedDoc.setIndexWriter(writer);
        parsedDoc.setUpdateStore(updateStore);
        int shardNum = SolbaseShardUtil.getShardNum(indexName);
        int startDocId = SolbaseShardUtil.getStartDocId(shardNum);
        int endDocId = SolbaseShardUtil.getEndDocId(shardNum);
        ReaderCache.updateDocument(docId, parsedDoc, indexName, writer, LayeredCache.ModificationType.DELETE, updateStore, startDocId, endDocId);
    } catch (InterruptedException e) {
        logger.info("delete doc failed: " + docId);
        logger.info(e.toString());
    } catch (MemcachedException e) {
        logger.info("delete doc failed: " + docId);
        logger.info(e.toString());
    } catch (TimeoutException e) {
        logger.info("delete doc failed: " + docId);
        logger.info(e.toString());
    } catch (SolbaseException e) {
        logger.info("delete doc failed: " + docId);
        logger.info(e.toString());
    }
}
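Given the two checks at the top, the only command delete accepts is one flagged for both pending and committed state. A sketch of a well-formed call is below; it relies only on the public fields the method itself reads, assumes the Solr 1.4-era no-arg DeleteUpdateCommand constructor, and leaves the Solbase-specific updateStore flag at its default.
// hedged sketch: both flags must be true or delete() throws BAD_REQUEST
DeleteUpdateCommand cmd = new DeleteUpdateCommand(); // assumed no-arg constructor (Solr 1.4 era)
cmd.id = "1234";          // parsed with Integer.parseInt, so it has to be a numeric doc id
cmd.fromPending = true;
cmd.fromCommitted = true;
solbaseIndexWriter.delete(cmd);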
use of org.solbase.common.SolbaseException in project Solbase by Photobucket.
the class SolbaseIndexWriter method addDoc.
public int addDoc(AddUpdateCommand cmd) throws IOException {
    addCommands.incrementAndGet();
    addCommandsCumulative.incrementAndGet();
    int rc = -1;
    // no duplicates allowed
    SchemaField uniqueField = core.getSchema().getUniqueKeyField();
    if (uniqueField == null)
        throw new IOException("Solbase requires a unique field");
    // an id field is required as well (there is no allowDups fallback)
    if (idField == null) {
        throw new IOException("Solbase requires a unique field");
    }
    try {
        String indexName = core.getName();
        writer.setIndexName(indexName);
        Document doc = cmd.getLuceneDocument(schema);
        String idFieldName = idTerm.field();
        // solbase-specific control fields; they are stripped below once consumed
        boolean updateStore = false;
        String updateVal = doc.get("updateStore");
        if (updateVal != null) {
            // update hbase after the cache is updated
            updateStore = true;
        }
        int docNumber = Integer.parseInt(doc.get(idFieldName));
        // if the edit field is present, this is a modification rather than a blind add
        String editVal = doc.get("edit");
        // the following fields are only used by the update api and are not indexed
        doc.removeField("docId");
        doc.removeField("edit");
        doc.removeField("updateStore");
        // set indexutil to writer
        writer.setIndexUtil(indexUtil);
        String globalId = doc.getField("global_uniq_id").stringValue();
        int shardNum = SolbaseShardUtil.getShardNum(indexName);
        int startDocId = SolbaseShardUtil.getStartDocId(shardNum);
        int endDocId = SolbaseShardUtil.getEndDocId(shardNum);
        if (editVal != null) {
            logger.info("updating doc: " + docNumber);
            if (editDoc(doc, indexName, docNumber, updateStore)) {
                rc = 1;
            }
        } else {
            try {
                logger.info("adding doc: " + docNumber);
                ParsedDoc parsedDoc = writer.parseDoc(doc, schema.getAnalyzer(), indexName, docNumber, indexUtil.getSortFieldNames());
                List<TermDocMetadata> termDocMetas = parsedDoc.getTermDocMetadatas();
                // TODO: possible problem
                // if the doc is not in cache and this cluster isn't responsible for updating the store,
                // the doc never gets updated in hbase or in cache, yet the loop below still updates
                // the term vectors with this new doc. a search would then hit a null pointer exception
                // on this doc, so update the store first when adding a doc
                // (replication can still cause this issue if it is backed up)
                ReaderCache.updateDocument(docNumber, parsedDoc, indexName, writer, LayeredCache.ModificationType.ADD, updateStore, startDocId, endDocId);
                for (TermDocMetadata termDocMeta : termDocMetas) {
                    ReaderCache.updateTermDocsMetadata(termDocMeta.getTerm(), termDocMeta, indexName, writer, LayeredCache.ModificationType.ADD, updateStore, startDocId, endDocId);
                }
                rc = 1;
                logger.info("added doc: " + docNumber);
            } catch (NumberFormatException e) {
                logger.info("adding doc failed: " + docNumber);
                logger.info(e.toString());
            } catch (InterruptedException e) {
                logger.info("adding doc failed: " + docNumber);
                logger.info(e.toString());
            } catch (MemcachedException e) {
                logger.info("adding doc failed: " + docNumber);
                logger.info(e.toString());
            } catch (TimeoutException e) {
                logger.info("adding doc failed: " + docNumber);
                logger.info(e.toString());
            } catch (SolbaseException e) {
                logger.info("adding doc failed: " + docNumber);
                logger.info(e.toString());
            }
        }
    } finally {
        if (rc != 1) {
            numErrors.incrementAndGet();
            numErrorsCumulative.incrementAndGet();
        }
    }
    return rc;
}
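In practice the Lucene Document reaches addDoc through Solr's update chain, but it helps to spell out the Solbase-specific control fields the method consumes. The sketch below shows a document that would take the edit path; the id field name ("docId" here) and all values are illustrative assumptions.
// hedged sketch of the control fields addDoc reads; names and values are illustrative
Document doc = new Document();
doc.add(new Field("docId", "1234", Field.Store.YES, Field.Index.NOT_ANALYZED));       // parsed into docNumber via the schema's id field
doc.add(new Field("global_uniq_id", "photo-1234", Field.Store.YES, Field.Index.NOT_ANALYZED));
doc.add(new Field("edit", "true", Field.Store.YES, Field.Index.NOT_ANALYZED));        // mere presence routes to editDoc
doc.add(new Field("updateStore", "true", Field.Store.YES, Field.Index.NOT_ANALYZED)); // mere presence turns on updateStore
doc.add(new Field("title", "new title", Field.Store.YES, Field.Index.ANALYZED));
// addDoc strips docId, edit, and updateStore before the document is parsed and indexed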