
Example 11 with SuppressWarnings

Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project SearchServices by Alfresco.

The class Solr4QueryParser, method getFuzzyQuery:

@SuppressWarnings("deprecation")
@Override
public Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
    if (field.equals(FIELD_PATH)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_PATH);
    } else if (field.equals(FIELD_PATHWITHREPEATS)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_PATHWITHREPEATS);
    } else if (field.equals(FIELD_TEXT)) {
        return createDefaultTextQuery(textField -> getFuzzyQuery(textField, termStr, minSimilarity));
    } else if (field.equals(FIELD_ID) || field.equals(FIELD_DBID) || field.equals(FIELD_ISROOT) || field.equals(FIELD_ISCONTAINER) || field.equals(FIELD_ISNODE) || field.equals(FIELD_TX) || field.equals(FIELD_PARENT) || field.equals(FIELD_PRIMARYPARENT) || field.equals(FIELD_QNAME) || field.equals(FIELD_PRIMARYASSOCTYPEQNAME) || field.equals(FIELD_ASSOCTYPEQNAME)) {
        boolean lowercaseExpandedTerms = getLowercaseExpandedTerms();
        try {
            setLowercaseExpandedTerms(false);
            return super.getFuzzyQuery(field, termStr, minSimilarity);
        } finally {
            setLowercaseExpandedTerms(lowercaseExpandedTerms);
        }
    } else if (field.equals(FIELD_CLASS)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_CLASS);
    } else if (field.equals(FIELD_TYPE)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_TYPE);
    } else if (field.equals(FIELD_EXACTTYPE)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_EXACTTYPE);
    } else if (field.equals(FIELD_ASPECT)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_ASPECT);
    } else if (field.equals(FIELD_EXACTASPECT)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_EXACTASPECT);
    } else if (isPropertyField(field)) {
        return attributeQueryBuilder(field, termStr, new FuzzyQuery(minSimilarity), AnalysisMode.FUZZY, LuceneFunction.FIELD);
    } else if (field.equals(FIELD_ALL)) {
        Set<String> all = searchParameters.getAllAttributes();
        if ((all == null) || (all.size() == 0)) {
            Collection<QName> contentAttributes = dictionaryService.getAllProperties(null);
            BooleanQuery.Builder query = new BooleanQuery.Builder();
            for (QName qname : contentAttributes) {
                // The super implementation will create phrase queries etc
                // if required
                Query part = getFuzzyQuery(PROPERTY_FIELD_PREFIX + qname.toString(), termStr, minSimilarity);
                if (part != null) {
                    query.add(part, Occur.SHOULD);
                } else {
                    query.add(createNoMatchQuery(), Occur.SHOULD);
                }
            }
            return query.build();
        } else {
            BooleanQuery.Builder query = new BooleanQuery.Builder();
            for (String fieldName : all) {
                Query part = getFuzzyQuery(fieldName, termStr, minSimilarity);
                if (part != null) {
                    query.add(part, Occur.SHOULD);
                } else {
                    query.add(createNoMatchQuery(), Occur.SHOULD);
                }
            }
            return query.build();
        }
    } else if (field.equals(FIELD_ISUNSET)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_ISUNSET);
    } else if (field.equals(FIELD_ISNULL)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_ISNULL);
    } else if (field.equals(FIELD_ISNOTNULL)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_ISNOTNULL);
    } else if (field.equals(FIELD_EXISTS)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_EXISTS);
    } else if (QueryParserUtils.matchDataTypeDefinition(searchParameters.getNamespace(), namespacePrefixResolver, dictionaryService, field) != null) {
        Collection<QName> contentAttributes = dictionaryService.getAllProperties(QueryParserUtils.matchDataTypeDefinition(searchParameters.getNamespace(), namespacePrefixResolver, dictionaryService, field).getName());
        BooleanQuery.Builder query = new BooleanQuery.Builder();
        for (QName qname : contentAttributes) {
            // The super implementation will create phrase queries etc if
            // required
            Query part = getFuzzyQuery(PROPERTY_FIELD_PREFIX + qname.toString(), termStr, minSimilarity);
            if (part != null) {
                query.add(part, Occur.SHOULD);
            } else {
                query.add(createNoMatchQuery(), Occur.SHOULD);
            }
        }
        return query.build();
    } else if (field.equals(FIELD_FTSSTATUS)) {
        throw new UnsupportedOperationException("Fuzzy Queries are not supported for " + FIELD_FTSSTATUS);
    } else if (field.equals(FIELD_TAG)) {
        return super.getFuzzyQuery(field, termStr, minSimilarity);
    } else if (field.equals(FIELD_SITE)) {
        return super.getFuzzyQuery(field, termStr, minSimilarity);
    } else if (field.equals(FIELD_PNAME)) {
        return super.getFuzzyQuery(field, termStr, minSimilarity);
    } else if (field.equals(FIELD_NPATH)) {
        return super.getFuzzyQuery(field, termStr, minSimilarity);
    } else {
        return super.getFuzzyQuery(field, termStr, minSimilarity);
    }
}
Also used: BooleanQuery (org.apache.lucene.search.BooleanQuery) Query (org.apache.lucene.search.Query) RegexpQuery (org.apache.lucene.search.RegexpQuery) LegacyNumericRangeQuery (org.apache.lucene.search.LegacyNumericRangeQuery) MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery) ConstantScoreQuery (org.apache.lucene.search.ConstantScoreQuery) SpanNearQuery (org.apache.lucene.search.spans.SpanNearQuery) SpanOrQuery (org.apache.lucene.search.spans.SpanOrQuery) MultiTermQuery (org.apache.lucene.search.MultiTermQuery) SpanTermQuery (org.apache.lucene.search.spans.SpanTermQuery) SpanQuery (org.apache.lucene.search.spans.SpanQuery) TermQuery (org.apache.lucene.search.TermQuery) TermRangeQuery (org.apache.lucene.search.TermRangeQuery) QName (org.alfresco.service.namespace.QName) Builder (org.apache.lucene.search.BooleanQuery.Builder) Collection (java.util.Collection) SuppressWarnings (edu.umd.cs.findbugs.annotations.SuppressWarnings)
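
Note: unlike java.lang.SuppressWarnings (source retention, read by the compiler), the FindBugs annotation imported here is retained in class files so that bytecode analyzers can see it. A minimal sketch showing the two side by side (hypothetical class, not part of the project):

import edu.umd.cs.findbugs.annotations.SuppressWarnings;

public class SuppressionSketch {
    // The imported FindBugs annotation is read by FindBugs/SpotBugs from the
    // compiled bytecode; the fully qualified java.lang twin silences javac.
    @SuppressWarnings("deprecation")
    @java.lang.SuppressWarnings("deprecation")
    public void callDeprecatedApi() {
        // intentionally empty
    }
}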

Example 12 with SuppressWarnings

Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project SearchServices by Alfresco.

The class TempFileWarningLogger, method checkFiles:

// Avoid FindBugs false positive (https://github.com/spotbugs/spotbugs/issues/756)
@SuppressWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
public boolean checkFiles() {
    if (log.isDebugEnabled()) {
        log.debug("Looking for temp files matching " + glob + " in directory " + dir);
    }
    try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir, glob)) {
        for (Path file : stream) {
            if (log.isDebugEnabled()) {
                log.debug("Solr suggester temp file found matching file pattern: " + glob + ", path: " + file);
                log.debug("Removing suggester temp files.");
            }
            return true;
        }
        return false;
    } catch (IOException e) {
        throw new RuntimeException("Unable to create directory stream", e);
    }
}
Also used: Path (java.nio.file.Path) IOException (java.io.IOException) SuppressWarnings (edu.umd.cs.findbugs.annotations.SuppressWarnings)
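
The suppressed RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE warning stems from the synthetic null check javac generates when closing a try-with-resources resource. A minimal sketch (hypothetical class name) that exercises the same pattern and suppression:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import edu.umd.cs.findbugs.annotations.SuppressWarnings;

public class DirectoryScanSketch {
    // javac's generated close path includes a null check on the resource;
    // SpotBugs may flag it as a redundant null check (spotbugs issue #756).
    @SuppressWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
    public boolean hasEntries(Path dir) throws IOException {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            return stream.iterator().hasNext();
        }
    }
}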

Example 13 with SuppressWarnings

Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project st-js by st-js.

The class NodeJSExecutor, method run:

/**
 * <p>run.</p>
 *
 * @param srcFile a {@link java.io.File} object.
 * @return a {@link org.stjs.generator.executor.ExecutionResult} object.
 */
@SuppressWarnings(value = "REC_CATCH_EXCEPTION")
public ExecutionResult run(File srcFile) {
    try {
        Process p = Runtime.getRuntime().exec(new String[] { NODE_JS, srcFile.getAbsolutePath() });
        int exitValue = p.waitFor();
        return new ExecutionResult(null, readStream(p.getInputStream()), readStream(p.getErrorStream()), exitValue);
    } catch (IOException e) {
        // TODO: this message check is not going to work on all OSes!
        if (e.getMessage().contains("Cannot run program")) {
            String errMsg = "Please install node.js to use this feature https://github.com/joyent/node/wiki/Installation";
            throw new STJSRuntimeException(errMsg, e);
        }
        throw new STJSRuntimeException(e);
    } catch (InterruptedException e) {
        throw new STJSRuntimeException(e);
    }
}
Also used: STJSRuntimeException (org.stjs.generator.STJSRuntimeException) IOException (java.io.IOException) SuppressWarnings (edu.umd.cs.findbugs.annotations.SuppressWarnings)
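
readStream is a helper defined elsewhere in NodeJSExecutor and not shown here; a plausible minimal version (an assumption, not the project's actual implementation) simply drains a process stream into a String:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class ReadStreamSketch {
    // Hypothetical stand-in for the readStream helper used above:
    // collect the stream's lines into a single newline-separated String.
    static String readStream(InputStream in) throws IOException {
        StringBuilder out = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                out.append(line).append('\n');
            }
        }
        return out.toString();
    }
}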

Example 14 with SuppressWarnings

Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project phoenix by apache.

The class UpgradeUtil, method copyTable:

@SuppressWarnings("deprecation")
private static void copyTable(PhoenixConnection conn, byte[] sourceName, byte[] targetName) throws SQLException {
    // 100K chunks
    int batchSizeBytes = 100 * 1024;
    int sizeBytes = 0;
    List<Mutation> mutations = Lists.newArrayListWithExpectedSize(10000);
    Scan scan = new Scan();
    scan.setRaw(true);
    scan.setMaxVersions();
    ResultScanner scanner = null;
    Table source = null;
    Table target = null;
    try {
        source = conn.getQueryServices().getTable(sourceName);
        target = conn.getQueryServices().getTable(targetName);
        scanner = source.getScanner(scan);
        Result result;
        while ((result = scanner.next()) != null) {
            for (Cell keyValue : result.rawCells()) {
                sizeBytes += CellUtil.estimatedSerializedSizeOf(keyValue);
                if (KeyValue.Type.codeToType(keyValue.getTypeByte()) == KeyValue.Type.Put) {
                    // Put new value
                    Put put = new Put(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength());
                    put.add(keyValue);
                    mutations.add(put);
                } else if (KeyValue.Type.codeToType(keyValue.getTypeByte()) == KeyValue.Type.Delete) {
                    // Copy delete marker using new key so that it continues
                    // to delete the key value preceding it that will be updated
                    // as well.
                    Delete delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength());
                    delete.addDeleteMarker(keyValue);
                    mutations.add(delete);
                }
            }
            if (sizeBytes >= batchSizeBytes) {
                LOGGER.info("Committing bactch of temp rows");
                target.batch(mutations, null);
                mutations.clear();
                sizeBytes = 0;
            }
        }
        if (!mutations.isEmpty()) {
            LOGGER.info("Committing last bactch of temp rows");
            target.batch(mutations, null);
        }
        LOGGER.info("Successfully completed copy");
    } catch (SQLException e) {
        throw e;
    } catch (Exception e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        try {
            if (scanner != null)
                scanner.close();
        } finally {
            try {
                if (source != null)
                    source.close();
            } catch (IOException e) {
                LOGGER.warn("Exception during close of source table", e);
            } finally {
                try {
                    if (target != null)
                        target.close();
                } catch (IOException e) {
                    LOGGER.warn("Exception during close of target table", e);
                }
            }
        }
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete) ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) PTable (org.apache.phoenix.schema.PTable) Table (org.apache.hadoop.hbase.client.Table) SQLException (java.sql.SQLException) IOException (java.io.IOException) Put (org.apache.hadoop.hbase.client.Put) SnapshotCreationException (org.apache.hadoop.hbase.snapshot.SnapshotCreationException) TableNotFoundException (org.apache.phoenix.schema.TableNotFoundException) SequenceAlreadyExistsException (org.apache.phoenix.schema.SequenceAlreadyExistsException) TimeoutException (java.util.concurrent.TimeoutException) MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result (org.apache.hadoop.hbase.client.Result) Scan (org.apache.hadoop.hbase.client.Scan) Mutation (org.apache.hadoop.hbase.client.Mutation) Cell (org.apache.hadoop.hbase.Cell) SuppressWarnings (edu.umd.cs.findbugs.annotations.SuppressWarnings)
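
The copy loop above follows a common HBase idiom: accumulate mutations and flush whenever the estimated batch size crosses a threshold, then flush the remainder. A stripped-down sketch of just that batching logic (hypothetical names; uses heapSize() as a rough size estimate instead of the serialized-size calculation above):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;

public class BatchWriterSketch {
    private static final int BATCH_SIZE_BYTES = 100 * 1024; // 100K chunks

    // Send mutations in size-bounded batches instead of one call per row.
    static void writeBatched(Table target, List<Mutation> pending)
            throws IOException, InterruptedException {
        List<Mutation> batch = new ArrayList<>();
        long sizeBytes = 0;
        for (Mutation m : pending) {
            batch.add(m);
            sizeBytes += m.heapSize();
            if (sizeBytes >= BATCH_SIZE_BYTES) {
                target.batch(batch, null);
                batch.clear();
                sizeBytes = 0;
            }
        }
        if (!batch.isEmpty()) {
            target.batch(batch, null); // flush the remainder
        }
    }
}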

Example 15 with SuppressWarnings

Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project phoenix by apache.

The class UpgradeUtil, method upgradeSequenceTable:

@SuppressWarnings("deprecation")
public static boolean upgradeSequenceTable(PhoenixConnection conn, int nSaltBuckets, PTable oldTable) throws SQLException {
    LOGGER.info("Upgrading SYSTEM.SEQUENCE table");
    byte[] seqTableKey = SchemaUtil.getTableKey(null, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_TABLE);
    Table sysTable = conn.getQueryServices().getTable(SYSTEM_CATALOG_NAME_BYTES);
    try {
        LOGGER.info("Setting SALT_BUCKETS property of SYSTEM.SEQUENCE to " + SaltingUtil.MAX_BUCKET_NUM);
        Cell saltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, PInteger.INSTANCE.toBytes(nSaltBuckets));
        Put saltPut = new Put(seqTableKey);
        saltPut.add(saltKV);
        // Prevent multiple clients from doing this upgrade
        if (!sysTable.checkAndPut(seqTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, null, saltPut)) {
            if (oldTable == null) {
                // Unexpected, but to be safe just run pre-split code
                preSplitSequenceTable(conn, nSaltBuckets);
                return true;
            }
            // This is needed as a fix for https://issues.apache.org/jira/browse/PHOENIX-1401
            if (oldTable.getTimeStamp() == MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_2_0) {
                byte[] oldSeqNum = PLong.INSTANCE.toBytes(oldTable.getSequenceNumber());
                Cell seqNumKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, PLong.INSTANCE.toBytes(MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP));
                Put seqNumPut = new Put(seqTableKey);
                seqNumPut.add(seqNumKV);
                // If no other client has bumped the sequence number yet, pre-split the sequence table.
                if (sysTable.checkAndPut(seqTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, oldSeqNum, seqNumPut)) {
                    preSplitSequenceTable(conn, nSaltBuckets);
                    return true;
                }
            }
            LOGGER.info("SYSTEM.SEQUENCE table has already been upgraded");
            return false;
        }
        // If the table timestamp predates 4.1.0, salt the SYSTEM.SEQUENCE table and pre-split it.
        if (oldTable.getTimeStamp() <= MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0) {
            // 100K chunks
            int batchSizeBytes = 100 * 1024;
            int sizeBytes = 0;
            List<Mutation> mutations = Lists.newArrayListWithExpectedSize(10000);
            boolean success = false;
            Scan scan = new Scan();
            scan.setRaw(true);
            scan.setMaxVersions();
            Table seqTable = conn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES);
            try {
                boolean committed = false;
                LOGGER.info("Adding salt byte to all SYSTEM.SEQUENCE rows");
                ResultScanner scanner = seqTable.getScanner(scan);
                try {
                    Result result;
                    while ((result = scanner.next()) != null) {
                        for (Cell keyValue : result.rawCells()) {
                            KeyValue newKeyValue = addSaltByte(keyValue, nSaltBuckets);
                            if (newKeyValue != null) {
                                sizeBytes += newKeyValue.getLength();
                                if (KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Put) {
                                    // Delete old value
                                    byte[] buf = keyValue.getRowArray();
                                    Delete delete = new Delete(keyValue.getRowArray(), keyValue.getRowOffset(), keyValue.getRowLength());
                                    KeyValue deleteKeyValue = new KeyValue(buf, keyValue.getRowOffset(), keyValue.getRowLength(), buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(), buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(), keyValue.getTimestamp(), KeyValue.Type.Delete, ByteUtil.EMPTY_BYTE_ARRAY, 0, 0);
                                    delete.addDeleteMarker(deleteKeyValue);
                                    mutations.add(delete);
                                    sizeBytes += deleteKeyValue.getLength();
                                    // Put new value
                                    Put put = new Put(newKeyValue.getRowArray(), newKeyValue.getRowOffset(), newKeyValue.getRowLength());
                                    put.add(newKeyValue);
                                    mutations.add(put);
                                } else if (KeyValue.Type.codeToType(newKeyValue.getTypeByte()) == KeyValue.Type.Delete) {
                                    // Copy delete marker using new key so that it continues
                                    // to delete the key value preceding it that will be updated
                                    // as well.
                                    Delete delete = new Delete(newKeyValue.getRowArray(), newKeyValue.getRowOffset(), newKeyValue.getRowLength());
                                    delete.addDeleteMarker(newKeyValue);
                                    mutations.add(delete);
                                }
                            }
                            if (sizeBytes >= batchSizeBytes) {
                                LOGGER.info("Committing bactch of SYSTEM.SEQUENCE rows");
                                seqTable.batch(mutations, null);
                                mutations.clear();
                                sizeBytes = 0;
                                committed = true;
                            }
                        }
                    }
                    if (!mutations.isEmpty()) {
                        LOGGER.info("Committing last bactch of SYSTEM.SEQUENCE rows");
                        seqTable.batch(mutations, null);
                    }
                    preSplitSequenceTable(conn, nSaltBuckets);
                    LOGGER.info("Successfully completed upgrade of SYSTEM.SEQUENCE");
                    success = true;
                    return true;
                } catch (InterruptedException e) {
                    throw ServerUtil.parseServerException(e);
                } finally {
                    try {
                        scanner.close();
                    } finally {
                        if (!success) {
                            if (!committed) {
                                // Try to recover by setting salting back to off, as we haven't successfully committed anything
                                // Don't use Delete here as we'd never be able to change it again at this timestamp.
                                Cell unsaltKV = PhoenixKeyValueUtil.newKeyValue(seqTableKey, PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.SALT_BUCKETS_BYTES, MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP, PInteger.INSTANCE.toBytes(0));
                                Put unsaltPut = new Put(seqTableKey);
                                unsaltPut.add(unsaltKV);
                                try {
                                    sysTable.put(unsaltPut);
                                    success = true;
                                } finally {
                                    if (!success)
                                        LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                                }
                            } else {
                                // We're screwed b/c we've already committed some salted sequences...
                                LOGGER.error("SYSTEM.SEQUENCE TABLE LEFT IN CORRUPT STATE");
                            }
                        }
                    }
                }
            } catch (IOException e) {
                throw ServerUtil.parseServerException(e);
            } finally {
                try {
                    seqTable.close();
                } catch (IOException e) {
                    LOGGER.warn("Exception during close", e);
                }
            }
        }
        return false;
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        try {
            sysTable.close();
        } catch (IOException e) {
            LOGGER.warn("Exception during close", e);
        }
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete) PTable (org.apache.phoenix.schema.PTable) Table (org.apache.hadoop.hbase.client.Table) ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) KeyValue (org.apache.hadoop.hbase.KeyValue) IOException (java.io.IOException) Put (org.apache.hadoop.hbase.client.Put) MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result (org.apache.hadoop.hbase.client.Result) Scan (org.apache.hadoop.hbase.client.Scan) Mutation (org.apache.hadoop.hbase.client.Mutation) Cell (org.apache.hadoop.hbase.Cell) SuppressWarnings (edu.umd.cs.findbugs.annotations.SuppressWarnings)
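
The addSaltByte call above implements Phoenix's row-key salting: each key gains a leading bucket byte derived from a hash of the key, so monotonically increasing sequence keys spread across pre-split regions. A toy illustration of the idea (hypothetical helper; Phoenix's actual hashing lives in SaltingUtil):

public final class SaltSketch {
    // Prepend a bucket byte computed from a hash of the key, so sequential
    // row keys land in different salt buckets (and thus different regions).
    static byte[] saltRowKey(byte[] rowKey, int nSaltBuckets) {
        int hash = 17;
        for (byte b : rowKey) {
            hash = 31 * hash + b;
        }
        int bucket = (hash & 0x7fffffff) % nSaltBuckets;
        byte[] salted = new byte[rowKey.length + 1];
        salted[0] = (byte) bucket;
        System.arraycopy(rowKey, 0, salted, 1, rowKey.length);
        return salted;
    }
}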

Aggregations

SuppressWarnings (edu.umd.cs.findbugs.annotations.SuppressWarnings): 24 uses
IOException (java.io.IOException): 11 uses
BooleanQuery (org.apache.lucene.search.BooleanQuery): 4 uses
Builder (org.apache.lucene.search.BooleanQuery.Builder): 4 uses
ConstantScoreQuery (org.apache.lucene.search.ConstantScoreQuery): 4 uses
LegacyNumericRangeQuery (org.apache.lucene.search.LegacyNumericRangeQuery): 4 uses
MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery): 4 uses
MultiTermQuery (org.apache.lucene.search.MultiTermQuery): 4 uses
Query (org.apache.lucene.search.Query): 4 uses
RegexpQuery (org.apache.lucene.search.RegexpQuery): 4 uses
TermQuery (org.apache.lucene.search.TermQuery): 4 uses
TermRangeQuery (org.apache.lucene.search.TermRangeQuery): 4 uses
SpanNearQuery (org.apache.lucene.search.spans.SpanNearQuery): 4 uses
SpanOrQuery (org.apache.lucene.search.spans.SpanOrQuery): 4 uses
SpanQuery (org.apache.lucene.search.spans.SpanQuery): 4 uses
SpanTermQuery (org.apache.lucene.search.spans.SpanTermQuery): 4 uses
Collection (java.util.Collection): 3 uses
QName (org.alfresco.service.namespace.QName): 3 uses
OrderedHashSet (org.antlr.misc.OrderedHashSet): 3 uses
Optional (com.google.common.base.Optional): 2 uses