Use of org.alfresco.httpclient.AuthenticationException in project SearchServices by Alfresco.
The class CascadeTracker, method processCascades.
private void processCascades() throws IOException {
    // System.out.println("######### processCascades()");
    int num = 50;
    List<Transaction> txBatch = null;
    do {
        try {
            getWriteLock().acquire();
            txBatch = infoSrv.getCascades(num);
            if (txBatch.isEmpty()) {
                // No transactions to process for cascades.
                return;
            }
            List<Long> txIds = new ArrayList<>();
            Set<Long> txIdSet = new HashSet<>();
            for (Transaction tx : txBatch) {
                txIds.add(tx.getId());
                txIdSet.add(tx.getId());
            }
            List<NodeMetaData> nodeMetaDatas = infoSrv.getCascadeNodes(txIds);
            // System.out.println("########### Cascade node meta datas:" + nodeMetaDatas.size());
            if (!nodeMetaDatas.isEmpty()) {
                LinkedList<NodeMetaData> stack = new LinkedList<>(nodeMetaDatas);
                int batchSize = 10;
                do {
                    List<NodeMetaData> batch = new ArrayList<>();
                    while (batch.size() < batchSize && !stack.isEmpty()) {
                        batch.add(stack.removeFirst());
                    }
                    CascadeIndexWorkerRunnable worker = new CascadeIndexWorkerRunnable(this.threadHandler, batch, infoSrv);
                    this.threadHandler.scheduleTask(worker);
                } while (!stack.isEmpty());
            }
            // Update the transaction records.
            updateTransactionsAfterAsynchronous(txBatch);
            // System.out.println("######################: Finished Cascade Run #########");
        } catch (AuthenticationException | JSONException | InterruptedException e) {
            throw new IOException(e);
        } finally {
            // System.out.println("###################: Releasing Cascade write lock");
            getWriteLock().release();
        }
    } while (!txBatch.isEmpty());
}
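Note that processCascades never surfaces AuthenticationException directly: it is wrapped, together with JSONException and InterruptedException, in an IOException. A caller that wants to treat authentication failures specially therefore has to inspect the cause. A minimal, hypothetical sketch (the method is private in the real class, so this handling would have to live inside CascadeTracker itself):

try {
    processCascades();
} catch (IOException e) {
    // Unwrap the cause to distinguish an authentication failure from plain I/O trouble.
    if (e.getCause() instanceof AuthenticationException) {
        // Credentials or ticket rejected by the repository; re-authenticate before retrying.
    } else {
        // Genuine I/O, JSON, or interruption problem; log and back off.
    }
}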
Use of org.alfresco.httpclient.AuthenticationException in project SearchServices by Alfresco.
The class SolrInformationServer, method indexNode.
@Override
public void indexNode(Node node, boolean overwrite) throws IOException, AuthenticationException, JSONException {
    SolrQueryRequest request = null;
    UpdateRequestProcessor processor = null;
    try {
        request = getLocalSolrQueryRequest();
        processor = this.core.getUpdateProcessingChain(null).createProcessor(request, new SolrQueryResponse());
        long start = System.nanoTime();
        if ((node.getStatus() == SolrApiNodeStatus.DELETED) || (node.getStatus() == SolrApiNodeStatus.NON_SHARD_DELETED) || (node.getStatus() == SolrApiNodeStatus.NON_SHARD_UPDATED) || (node.getStatus() == SolrApiNodeStatus.UNKNOWN)) {
            // fix up any secondary paths
            NodeMetaDataParameters nmdp = new NodeMetaDataParameters();
            nmdp.setFromNodeId(node.getId());
            nmdp.setToNodeId(node.getId());
            List<NodeMetaData> nodeMetaDatas;
            if ((node.getStatus() == SolrApiNodeStatus.DELETED) || (node.getStatus() == SolrApiNodeStatus.NON_SHARD_DELETED) || (node.getStatus() == SolrApiNodeStatus.NON_SHARD_UPDATED)) {
                // Fake the empty node metadata for this parent deleted node
                NodeMetaData nodeMetaData = createDeletedNodeMetaData(node);
                nodeMetaDatas = Collections.singletonList(nodeMetaData);
            } else {
                nodeMetaDatas = repositoryClient.getNodesMetaData(nmdp, Integer.MAX_VALUE);
            }
            NodeMetaData nodeMetaData = null;
            if (!nodeMetaDatas.isEmpty()) {
                nodeMetaData = nodeMetaDatas.get(0);
                if (!(nodeMetaData.getTxnId() > node.getTxnId())) {
                    if (node.getStatus() == SolrApiNodeStatus.DELETED) {
                        try {
                            // Lock the node to ensure that no other trackers work with this node until this code completes.
                            if (!spinLock(nodeMetaData.getId(), 120000)) {
                                // We haven't acquired the lock in over 2 minutes. This really shouldn't happen unless something has gone wrong.
                                throw new Exception("Unable to acquire lock on nodeId:" + nodeMetaData.getId());
                            }
                            solrContentStore.removeDocFromContentStore(nodeMetaData);
                        } finally {
                            unlock(nodeMetaData.getId());
                        }
                    }
                }
                // else: the node has moved on to a later transaction, and it will be indexed later.
            }
            if (log.isDebugEnabled()) {
                log.debug(".. deleting");
            }
            deleteNode(processor, request, node);
        }
        if ((node.getStatus() == SolrApiNodeStatus.UPDATED) || (node.getStatus() == SolrApiNodeStatus.UNKNOWN) || (node.getStatus() == SolrApiNodeStatus.NON_SHARD_UPDATED)) {
            log.info(".. updating");
            long nodeId = node.getId();
            try {
                if (!spinLock(nodeId, 120000)) {
                    // We haven't acquired the lock in over 2 minutes. This really shouldn't happen unless something has gone wrong.
                    throw new Exception("Unable to acquire lock on nodeId:" + nodeId);
                }
                NodeMetaDataParameters nmdp = new NodeMetaDataParameters();
                nmdp.setFromNodeId(node.getId());
                nmdp.setToNodeId(node.getId());
                List<NodeMetaData> nodeMetaDatas = repositoryClient.getNodesMetaData(nmdp, Integer.MAX_VALUE);
                AddUpdateCommand addDocCmd = new AddUpdateCommand(request);
                addDocCmd.overwrite = overwrite;
                if (!nodeMetaDatas.isEmpty()) {
                    NodeMetaData nodeMetaData = nodeMetaDatas.get(0);
                    if (!(nodeMetaData.getTxnId() > node.getTxnId())) {
                        /*
                         * if (mayHaveChildren(nodeMetaData)) {
                         *     cascadeUpdate(nodeMetaData, overwrite, request, processor);
                         * }
                         */
                    }
                    if (node.getTxnId() == Long.MAX_VALUE) {
                        // This is a re-index: clear this transaction from the clean-content cache.
                        this.cleanContentCache.remove(nodeMetaData.getTxnId());
                    }
                    if ((node.getStatus() == SolrApiNodeStatus.UPDATED) || (node.getStatus() == SolrApiNodeStatus.UNKNOWN)) {
                        // check index control
                        Map<QName, PropertyValue> properties = nodeMetaData.getProperties();
                        StringPropertyValue pValue = (StringPropertyValue) properties.get(ContentModel.PROP_IS_INDEXED);
                        if (pValue != null) {
                            boolean isIndexed = Boolean.parseBoolean(pValue.getValue());
                            if (!isIndexed) {
                                if (log.isDebugEnabled()) {
                                    log.debug(".. clearing unindexed");
                                }
                                deleteNode(processor, request, node);
                                SolrInputDocument doc = createNewDoc(nodeMetaData, DOC_TYPE_UNINDEXED_NODE);
                                solrContentStore.storeDocOnSolrContentStore(nodeMetaData, doc);
                                addDocCmd.solrDoc = doc;
                                processor.processAdd(addDocCmd);
                                long end = System.nanoTime();
                                this.trackerStats.addNodeTime(end - start);
                                return;
                            }
                        }
                        // Make sure any unindexed or error doc is removed.
                        if (log.isDebugEnabled()) {
                            log.debug(".. deleting node " + node.getId());
                        }
                        deleteNode(processor, request, node);
                        SolrInputDocument doc = createNewDoc(nodeMetaData, DOC_TYPE_NODE);
                        addToNewDocAndCache(nodeMetaData, doc);
                        addDocCmd.solrDoc = doc;
                        processor.processAdd(addDocCmd);
                    }
                }
                // Ends checking for a nodeMetaData.
            } finally {
                unlock(nodeId);
            }
        }
        // Ends checking for updated or unknown node status.
        long end = System.nanoTime();
        this.trackerStats.addNodeTime(end - start);
    } catch (Exception e) {
        log.warn("Node index failed and skipped for " + node.getId() + " in Tx " + node.getTxnId(), e);
        if (processor == null) {
            if (request == null) {
                request = getLocalSolrQueryRequest();
            }
            processor = this.core.getUpdateProcessingChain(null).createProcessor(request, new SolrQueryResponse());
        }
        if (log.isDebugEnabled()) {
            log.debug(".. deleting on exception");
        }
        deleteNode(processor, request, node);
        AddUpdateCommand addDocCmd = new AddUpdateCommand(request);
        addDocCmd.overwrite = overwrite;
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField(FIELD_SOLR4_ID, PREFIX_ERROR + node.getId());
        doc.addField(FIELD_VERSION, "0");
        doc.addField(FIELD_DBID, node.getId());
        doc.addField(FIELD_INTXID, node.getTxnId());
        doc.addField(FIELD_EXCEPTION_MESSAGE, e.getMessage());
        doc.addField(FIELD_DOC_TYPE, DOC_TYPE_ERROR_NODE);
        StringWriter stringWriter = new StringWriter(4096);
        PrintWriter printWriter = new PrintWriter(stringWriter, true);
        try {
            e.printStackTrace(printWriter);
            String stack = stringWriter.toString();
            // Trim the stack trace so it stays below Lucene's maximum term length (32766).
            doc.addField(FIELD_EXCEPTION_STACK, stack.length() < 32766 ? stack : stack.substring(0, 32765));
        } finally {
            printWriter.close();
        }
        addDocCmd.solrDoc = doc;
        processor.processAdd(addDocCmd);
    } finally {
        if (processor != null) {
            processor.finish();
        }
        if (request != null) {
            request.close();
        }
    }
}
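indexNode serialises trackers on a single node with the spinLock/unlock pair, whose implementation is not part of this snippet. Below is a minimal, hypothetical sketch of what such a pair could look like, assuming a ConcurrentHashMap-based lock registry; the shipped SolrInformationServer code may differ.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Hypothetical per-node spin lock; NOT the actual SearchServices implementation.
class NodeLockRegistry {
    private final ConcurrentMap<Long, Thread> locks = new ConcurrentHashMap<>();

    // Poll until the lock for nodeId is free or timeoutMs elapses.
    boolean spinLock(long nodeId, long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (locks.putIfAbsent(nodeId, Thread.currentThread()) == null) {
                return true; // lock acquired
            }
            Thread.sleep(100); // back off briefly before retrying
        }
        return false; // timed out; indexNode treats this as an error
    }

    void unlock(long nodeId) {
        locks.remove(nodeId);
    }
}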
Use of org.alfresco.httpclient.AuthenticationException in project SearchServices by Alfresco.
The class MetadataTracker, method getFullNodesForDbTransaction.
public List<Node> getFullNodesForDbTransaction(Long txid) {
    try {
        GetNodesParameters gnp = new GetNodesParameters();
        ArrayList<Long> txs = new ArrayList<>();
        txs.add(txid);
        gnp.setTransactionIds(txs);
        gnp.setStoreProtocol(storeRef.getProtocol());
        gnp.setStoreIdentifier(storeRef.getIdentifier());
        return client.getNodes(gnp, Integer.MAX_VALUE);
    } catch (IOException | JSONException | AuthenticationException e) {
        throw new AlfrescoRuntimeException("Failed to get nodes", e);
    }
}
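A hedged usage sketch tying this method to checkNode below; the helper name and the idea of auditing a whole transaction are assumptions, not part of SearchServices:

// Hypothetical helper: re-check every node touched by one repository transaction.
// Assumes NodeReport exposes getters matching the setters shown in checkNode.
static void auditTransaction(MetadataTracker tracker, long txid) {
    for (Node n : tracker.getFullNodesForDbTransaction(txid)) {
        NodeReport report = tracker.checkNode(n.getId());
        System.out.println("node " + n.getId() + " status=" + report.getDbNodeStatus() + " dbTx=" + report.getDbTx());
    }
}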
Use of org.alfresco.httpclient.AuthenticationException in project SearchServices by Alfresco.
The class MetadataTracker, method checkNode.
public NodeReport checkNode(Long dbid) {
    NodeReport nodeReport = new NodeReport();
    nodeReport.setDbid(dbid);
    // In DB
    GetNodesParameters parameters = new GetNodesParameters();
    parameters.setFromNodeId(dbid);
    parameters.setToNodeId(dbid);
    List<Node> dbnodes;
    try {
        dbnodes = client.getNodes(parameters, 1);
        if (dbnodes.size() == 1) {
            Node dbnode = dbnodes.get(0);
            nodeReport.setDbNodeStatus(dbnode.getStatus());
            nodeReport.setDbTx(dbnode.getTxnId());
        } else {
            nodeReport.setDbNodeStatus(SolrApiNodeStatus.UNKNOWN);
            nodeReport.setDbTx(-1L);
        }
    } catch (IOException e) {
        nodeReport.setDbNodeStatus(SolrApiNodeStatus.UNKNOWN);
        nodeReport.setDbTx(-2L);
    } catch (JSONException e) {
        nodeReport.setDbNodeStatus(SolrApiNodeStatus.UNKNOWN);
        nodeReport.setDbTx(-3L);
    } catch (AuthenticationException e) {
        nodeReport.setDbNodeStatus(SolrApiNodeStatus.UNKNOWN);
        nodeReport.setDbTx(-4L);
    }
    this.infoSrv.addCommonNodeReportInfo(nodeReport);
    return nodeReport;
}
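The catch blocks encode the failure cause in the dbTx field: -1 when the node is missing from the database, -2 for an I/O failure, -3 for a malformed JSON response, and -4 for an authentication failure. A small hypothetical helper makes that mapping explicit:

// Hypothetical helper mirroring the sentinel values assigned in checkNode above.
static String describeDbTx(long dbTx) {
    if (dbTx == -1L) return "node not found in the database";
    if (dbTx == -2L) return "I/O failure while calling the repository";
    if (dbTx == -3L) return "malformed JSON in the repository response";
    if (dbTx == -4L) return "authentication against the repository failed";
    return "committed in transaction " + dbTx;
}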
Use of org.alfresco.httpclient.AuthenticationException in project SearchServices by Alfresco.
The class MetadataTracker, method trackTransactions.
protected void trackTransactions() throws AuthenticationException, IOException, JSONException, EncoderException {
    long startElapsed = System.nanoTime();
    boolean upToDate = false;
    Transactions transactions;
    BoundedDeque<Transaction> txnsFound = new BoundedDeque<>(100);
    Set<Transaction> txsIndexed = new LinkedHashSet<>();
    long totalUpdatedDocs = 0;
    int docCount = 0;
    do {
        try {
            getWriteLock().acquire();
            /*
             * We acquire the tracker state again here and set it globally. This is because the
             * tracker state could have been invalidated due to a rollback by the CommitTracker.
             * In this case the state will revert to the last transaction state record in the index.
             */
            this.state = getTrackerState();
            Long fromCommitTime = getTxFromCommitTime(txnsFound, state.getLastGoodTxCommitTimeInIndex());
            transactions = getSomeTransactions(txnsFound, fromCommitTime, TIME_STEP_1_HR_IN_MS, 2000, state.getTimeToStopIndexing());
            setLastTxCommitTimeAndTxIdInTrackerState(transactions, state);
            log.info("Scanning transactions ...");
            if (transactions.getTransactions().size() > 0) {
                log.info(".... from " + transactions.getTransactions().get(0));
                log.info(".... to " + transactions.getTransactions().get(transactions.getTransactions().size() - 1));
            } else {
                log.info(".... none found after lastTxCommitTime " + ((txnsFound.size() > 0) ? txnsFound.getLast().getCommitTimeMs() : state.getLastIndexedTxCommitTime()));
            }
            ArrayList<Transaction> txBatch = new ArrayList<>();
            for (Transaction info : transactions.getTransactions()) {
                boolean isInIndex = (infoSrv.txnInIndex(info.getId(), true) && info.getCommitTimeMs() <= state.getLastIndexedTxCommitTime());
                if (isInIndex) {
                    txnsFound.add(info);
                } else {
                    // Stop before we pass the time we are supposed to stop indexing; any gap will be checked correctly next time.
                    if (info.getCommitTimeMs() > state.getTimeToStopIndexing()) {
                        upToDate = true;
                        break;
                    }
                    txBatch.add(info);
                    if (getUpdateAndDeleteCount(txBatch) > this.transactionDocsBatchSize) {
                        // Count this batch once, in both the running count and the grand total.
                        int batchDocs = indexBatchOfTransactions(txBatch);
                        docCount += batchDocs;
                        totalUpdatedDocs += batchDocs;
                        for (Transaction scheduledTx : txBatch) {
                            txnsFound.add(scheduledTx);
                            txsIndexed.add(scheduledTx);
                        }
                        txBatch.clear();
                    }
                }
                if (docCount > batchCount) {
                    indexTransactionsAfterAsynchronous(txsIndexed, state);
                    long endElapsed = System.nanoTime();
                    trackerStats.addElapsedNodeTime(docCount, endElapsed - startElapsed);
                    startElapsed = endElapsed;
                    docCount = 0;
                    // Release the write lock, allowing the commit tracker to run.
                    this.getWriteLock().release();
                    // Re-acquire the write lock and keep indexing.
                    this.getWriteLock().acquire();
                }
                checkShutdown();
            }
            if (!txBatch.isEmpty()) {
                if (this.getUpdateAndDeleteCount(txBatch) > 0) {
                    int batchDocs = indexBatchOfTransactions(txBatch);
                    docCount += batchDocs;
                    totalUpdatedDocs += batchDocs;
                }
                for (Transaction scheduledTx : txBatch) {
                    txnsFound.add(scheduledTx);
                    txsIndexed.add(scheduledTx);
                }
                txBatch.clear();
            }
            if (txsIndexed.size() > 0) {
                indexTransactionsAfterAsynchronous(txsIndexed, state);
                long endElapsed = System.nanoTime();
                trackerStats.addElapsedNodeTime(docCount, endElapsed - startElapsed);
                startElapsed = endElapsed;
                docCount = 0;
            }
        } catch (Exception e) {
            throw new IOException(e);
        } finally {
            getWriteLock().release();
        }
    } while ((transactions.getTransactions().size() > 0) && !upToDate);
    log.info("total number of docs with metadata updated: " + totalUpdatedDocs);
}
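trackTransactions remembers recently seen transactions in a BoundedDeque so it can detect holes without the buffer growing without limit. The class is Alfresco's own and is not shown here; the sketch below captures the contract the snippet appears to rely on (add and getLast on a size-capped FIFO), as an assumption rather than the shipped implementation:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.Iterator;

// Hypothetical sketch of the BoundedDeque contract used above: a FIFO buffer
// that evicts its oldest element once the size limit is reached.
class BoundedDeque<T> implements Iterable<T> {
    private final Deque<T> deque = new ArrayDeque<>();
    private final int limit;

    BoundedDeque(int limit) {
        this.limit = limit;
    }

    void add(T element) {
        if (deque.size() == limit) {
            deque.removeFirst(); // drop the oldest entry
        }
        deque.addLast(element);
    }

    T getLast() {
        return deque.peekLast(); // the most recently added element
    }

    int size() {
        return deque.size();
    }

    @Override
    public Iterator<T> iterator() {
        return deque.iterator();
    }
}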