Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project phoenix by apache.
The class UpgradeUtil, method addSaltByte.
@SuppressWarnings("deprecation")
private static KeyValue addSaltByte(Cell keyValue, int nSaltBuckets) {
byte[] buf = keyValue.getRowArray();
int length = keyValue.getRowLength();
int offset = keyValue.getRowOffset();
boolean isViewSeq = length > SEQ_PREFIX_BYTES.length && Bytes.compareTo(SEQ_PREFIX_BYTES, 0, SEQ_PREFIX_BYTES.length, buf, offset, SEQ_PREFIX_BYTES.length) == 0;
if (!isViewSeq && nSaltBuckets == 0) {
return null;
}
byte[] newBuf;
if (isViewSeq) {
// We messed up the name for the sequences for view indexes so we'll take this opportunity to fix it
if (buf[length - 1] == 0) {
// Global indexes on views have trailing null byte
length--;
}
byte[][] rowKeyMetaData = new byte[3][];
SchemaUtil.getVarChars(buf, offset, length, 0, rowKeyMetaData);
byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
byte[] unprefixedSchemaName = new byte[schemaName.length - MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length];
System.arraycopy(schemaName, MetaDataUtil.VIEW_INDEX_SEQUENCE_PREFIX_BYTES.length, unprefixedSchemaName, 0, unprefixedSchemaName.length);
byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
PName physicalName = PNameFactory.newName(unprefixedSchemaName);
// Reformulate key based on correct data
newBuf = MetaDataUtil.getViewIndexSequenceKey(tableName == null ? null : Bytes.toString(tableName), physicalName, nSaltBuckets, false).getKey();
} else {
newBuf = new byte[length + 1];
System.arraycopy(buf, offset, newBuf, SaltingUtil.NUM_SALTING_BYTES, length);
newBuf[0] = SaltingUtil.getSaltingByte(newBuf, SaltingUtil.NUM_SALTING_BYTES, length, nSaltBuckets);
}
return new KeyValue(newBuf, 0, newBuf.length, buf, keyValue.getFamilyOffset(), keyValue.getFamilyLength(), buf, keyValue.getQualifierOffset(), keyValue.getQualifierLength(), keyValue.getTimestamp(), KeyValue.Type.codeToType(keyValue.getTypeByte()), buf, keyValue.getValueOffset(), keyValue.getValueLength());
}
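The interesting work is in the else branch: the original row key is shifted right by one position (the System.arraycopy into index 1) and a salt byte computed over the shifted key is written at index 0. A minimal standalone sketch of that pattern follows; the rolling hash is a hypothetical stand-in, since Phoenix's SaltingUtil.getSaltingByte uses its own hash function:

import java.util.Arrays;

public class SaltSketch {
    // Hypothetical salt computation: hash the key bytes and reduce modulo the
    // bucket count. Phoenix's real implementation differs in its hash choice.
    static byte saltByte(byte[] key, int offset, int length, int nSaltBuckets) {
        int hash = 1;
        for (int i = offset; i < offset + length; i++) {
            hash = 31 * hash + key[i];
        }
        return (byte) ((hash & 0x7fffffff) % nSaltBuckets);
    }

    public static void main(String[] args) {
        byte[] rowKey = "row-0001".getBytes();
        // Shift the original key right by one byte, then write the salt at
        // index 0, mirroring the arraycopy pattern in addSaltByte above.
        byte[] salted = new byte[rowKey.length + 1];
        System.arraycopy(rowKey, 0, salted, 1, rowKey.length);
        salted[0] = saltByte(salted, 1, rowKey.length, 8);
        System.out.println(Arrays.toString(salted));
    }
}

Because the salt is a deterministic function of the key bytes, readers can recompute it for point lookups and fan scans out across all nSaltBuckets prefixes.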
Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project phoenix by apache.
The class MultiHfileOutputFormat, method writePartitions.
@SuppressWarnings(value = "EC_ARRAY_AND_NONARRAY", justification = "ImmutableBytesWritable DOES implement equals(byte])")
private static void writePartitions(Configuration conf, Path partitionsPath, Set<TableRowkeyPair> tablesStartKeys) throws IOException {
LOGGER.info("Writing partition information to " + partitionsPath);
if (tablesStartKeys.isEmpty()) {
throw new IllegalArgumentException("No regions passed");
}
// We're generating a list of split points, and we don't ever
// have keys < the first region (which has an empty start key)
// so we need to remove it. Otherwise we would end up with an
// empty reducer with index 0
TreeSet<TableRowkeyPair> sorted = new TreeSet<TableRowkeyPair>(tablesStartKeys);
TableRowkeyPair first = sorted.first();
if (!first.getRowkey().equals(HConstants.EMPTY_BYTE_ARRAY)) {
throw new IllegalArgumentException("First region of table should have empty start key. Instead has: " + Bytes.toStringBinary(first.getRowkey().get()));
}
sorted.remove(first);
// Write the actual file
FileSystem fs = partitionsPath.getFileSystem(conf);
SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, TableRowkeyPair.class, NullWritable.class);
try {
for (TableRowkeyPair startKey : sorted) {
writer.append(startKey, NullWritable.get());
}
} finally {
writer.close();
}
}
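The method is a thin wrapper around a Hadoop SequenceFile of sorted split points, the format typically consumed by a TotalOrderPartitioner during bulk loads. A standalone sketch of the same write pattern, using Text as a stand-in key type for TableRowkeyPair (an assumption for illustration only):

import java.io.IOException;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class PartitionsFileSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path partitionsPath = new Path("/tmp/partitions.seq");
        // Split points must be sorted, and the empty start key of the first
        // region is excluded, mirroring the sorted.remove(first) step above.
        TreeSet<Text> splitPoints = new TreeSet<>();
        splitPoints.add(new Text("row-100"));
        splitPoints.add(new Text("row-200"));
        FileSystem fs = partitionsPath.getFileSystem(conf);
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, partitionsPath, Text.class, NullWritable.class);
        try {
            for (Text splitPoint : splitPoints) {
                writer.append(splitPoint, NullWritable.get());
            }
        } finally {
            writer.close();
        }
    }
}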
Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project SearchServices by Alfresco.
The class Solr4QueryParser, method getToken.
// Avoid FindBugs false positive (https://github.com/spotbugs/spotbugs/issues/756)
@SuppressWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
protected String getToken(String field, String value, AnalysisMode analysisMode) throws ParseException {
    try (TokenStream source = getAnalyzer().tokenStream(field, new StringReader(value))) {
        String tokenised = null;
        while (source.incrementToken()) {
            CharTermAttribute cta = source.getAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAtt = source.getAttribute(OffsetAttribute.class);
            TypeAttribute typeAtt = null;
            if (source.hasAttribute(TypeAttribute.class)) {
                typeAtt = source.getAttribute(TypeAttribute.class);
            }
            PositionIncrementAttribute posIncAtt = null;
            if (source.hasAttribute(PositionIncrementAttribute.class)) {
                posIncAtt = source.getAttribute(PositionIncrementAttribute.class);
            }
            PackedTokenAttributeImpl token = new PackedTokenAttributeImpl();
            token.setEmpty().copyBuffer(cta.buffer(), 0, cta.length());
            token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
            if (typeAtt != null) {
                token.setType(typeAtt.type());
            }
            if (posIncAtt != null) {
                token.setPositionIncrement(posIncAtt.getPositionIncrement());
            }
            tokenised = token.toString();
        }
        return tokenised;
    } catch (IOException e) {
        throw new ParseException("IO" + e.getMessage());
    }
}
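The suppressed warning comes from the try-with-resources on the TokenStream: SpotBugs flags the compiler-generated null check on source as redundant (the false positive tracked in the linked issue). A self-contained sketch of the same consume-a-token-stream pattern, using StandardAnalyzer as an assumed stand-in for the field-specific analyzer the parser actually resolves:

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenSketch {
    public static void main(String[] args) throws IOException {
        try (StandardAnalyzer analyzer = new StandardAnalyzer();
             TokenStream source = analyzer.tokenStream("field", new StringReader("Hello World"))) {
            CharTermAttribute cta = source.addAttribute(CharTermAttribute.class);
            source.reset(); // required before the first incrementToken()
            while (source.incrementToken()) {
                System.out.println(cta.toString()); // prints "hello", then "world"
            }
            source.end();
        } // try-with-resources closes the stream; this is what SpotBugs misreads
    }
}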
Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project SearchServices by Alfresco.
The class Solr4QueryParser, method analyzeMultitermTerm.
// Avoid FindBugs false positive (https://github.com/spotbugs/spotbugs/issues/756)
@SuppressWarnings("RCN_REDUNDANT_NULLCHECK_WOULD_HAVE_BEEN_A_NPE")
protected BytesRef analyzeMultitermTerm(String field, String part, Analyzer analyzerIn) {
    if (analyzerIn == null)
        analyzerIn = getAnalyzer();
    try (TokenStream source = analyzerIn.tokenStream(field, part)) {
        source.reset();
        TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
        if (!source.incrementToken())
            throw new IllegalArgumentException("analyzer returned no terms for multiTerm term: " + part);
        BytesRef bytes = BytesRef.deepCopyOf(termAtt.getBytesRef());
        if (source.incrementToken())
            throw new IllegalArgumentException("analyzer returned too many terms for multiTerm term: " + part);
        source.end();
        return bytes;
    } catch (IOException e) {
        throw new RuntimeException("Error analyzing multiTerm term: " + part, e);
    }
}
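Note the BytesRef.deepCopyOf call: the BytesRef exposed by TermToBytesRefAttribute is reused as the stream advances, so the bytes must be copied before the next incrementToken invalidates them. A standalone sketch of that detail, again with StandardAnalyzer as an assumed analyzer:

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.util.BytesRef;

public class DeepCopySketch {
    public static void main(String[] args) throws IOException {
        try (StandardAnalyzer analyzer = new StandardAnalyzer();
             TokenStream source = analyzer.tokenStream("field", "Quick")) {
            source.reset();
            TermToBytesRefAttribute termAtt = source.getAttribute(TermToBytesRefAttribute.class);
            if (!source.incrementToken()) {
                throw new IllegalArgumentException("analyzer returned no terms");
            }
            // Copy now: the attribute's BytesRef is owned by the stream.
            BytesRef copy = BytesRef.deepCopyOf(termAtt.getBytesRef());
            source.end();
            System.out.println(copy.utf8ToString()); // "quick"
        }
    }
}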
Use of edu.umd.cs.findbugs.annotations.SuppressWarnings in project incubator-gobblin by apache.
The class Fork, method consumeRecordStream.
@SuppressWarnings(value = "RV_RETURN_VALUE_IGNORED", justification = "We actually don't care about the return value of subscribe.")
public void consumeRecordStream(RecordStreamWithMetadata<D, S> stream) throws RecordStreamProcessor.StreamProcessingException {
if (this.converter instanceof MultiConverter) {
// if multiconverter, unpack it
for (Converter cverter : ((MultiConverter) this.converter).getConverters()) {
stream = cverter.processStream(stream, this.taskState);
}
} else {
stream = this.converter.processStream(stream, this.taskState);
}
stream = this.rowLevelPolicyChecker.processStream(stream, this.taskState);
stream = stream.mapStream(s -> s.map(r -> {
onEachRecord();
return r;
}));
stream = stream.mapStream(s -> s.doOnSubscribe(subscription -> onStart()));
stream = stream.mapStream(s -> s.doOnComplete(() -> verifyAndSetForkState(ForkState.RUNNING, ForkState.SUCCEEDED)));
stream = stream.mapStream(s -> s.doOnCancel(() -> {
// Errors don't propagate up from below the fork, but cancel the stream, so use the failed state to indicate that
// the fork failed to complete, which will then fail the task.
verifyAndSetForkState(ForkState.RUNNING, ForkState.FAILED);
}));
stream = stream.mapStream(s -> s.doOnError(exc -> {
verifyAndSetForkState(ForkState.RUNNING, ForkState.FAILED);
this.logger.error(String.format("Fork %d of task %s failed to process data records", this.index, this.taskId), exc);
}));
stream = stream.mapStream(s -> s.doFinally(this::cleanup));
stream.getRecordStream().subscribe(r -> {
if (r instanceof RecordEnvelope) {
this.writer.get().writeEnvelope((RecordEnvelope) r);
} else if (r instanceof ControlMessage) {
// This is to avoid missing an ack/nack in the error path.
try {
this.writer.get().getMessageHandler().handleMessage((ControlMessage) r);
} catch (Throwable error) {
r.nack(error);
throw error;
}
r.ack();
}
}, e -> {
// Handle writer close in error case since onComplete will not call when exception happens
if (this.writer.isPresent()) {
this.writer.get().close();
}
logger.error("Failed to process record.", e);
verifyAndSetForkState(ForkState.RUNNING, ForkState.FAILED);
}, () -> {
if (this.writer.isPresent()) {
this.writer.get().close();
}
});
}
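Underneath mapStream, the record stream here is an io.reactivex Flowable, so the lifecycle hooks above are plain RxJava operators. A minimal standalone sketch of the same composition, and of the ignored Disposable returned by subscribe that the RV_RETURN_VALUE_IGNORED suppression covers (strings stand in for records):

import io.reactivex.Flowable;

public class StreamHooksSketch {
    public static void main(String[] args) {
        Flowable<String> stream = Flowable.just("r1", "r2", "r3")
                .doOnSubscribe(subscription -> System.out.println("fork started"))
                .doOnComplete(() -> System.out.println("fork succeeded"))
                .doOnError(exc -> System.out.println("fork failed: " + exc))
                .doFinally(() -> System.out.println("cleanup"));
        // subscribe returns a Disposable that is intentionally ignored here,
        // which is exactly what the suppression above is about.
        stream.subscribe(
                record -> System.out.println("write " + record),
                error -> System.out.println("close writer on error"),
                () -> System.out.println("close writer on completion"));
    }
}

Chaining the error and completion handlers into subscribe, rather than relying only on doOnError/doOnComplete, is what lets Fork close its writer on both paths.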