Use of com.ms.silverking.cloud.dht.ValueCreator in project SilverKing by Morgan-Stanley: the class SimpleValueCreator, method equals().
@Override
public boolean equals(Object o) {
    ValueCreator oVC;

    // equality is defined solely by the creator's byte representation
    oVC = (ValueCreator) o;
    return areEqual(bytes, oVC.getBytes());
}
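The equals() above reduces creator equality to a byte-wise comparison, the same check that WritableSegmentBase._put performs directly through the static helper (see below). A minimal sketch of that semantics, using only SimpleValueCreator.areEqual and plain byte arrays; the array contents and lengths here are arbitrary illustration values, not real creator IDs:

// Sketch only: creator equality is simply byte-array equality.
boolean same = SimpleValueCreator.areEqual(new byte[] { 1, 2, 3, 4 }, new byte[] { 1, 2, 3, 4 }); // true: identical bytes
boolean diff = SimpleValueCreator.areEqual(new byte[] { 1, 2, 3, 4 }, new byte[] { 9, 9, 9, 9 }); // false: differing bytes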
Use of com.ms.silverking.cloud.dht.ValueCreator in project SilverKing by Morgan-Stanley: the class ActiveProxyRetrieval, method createValueMessageForSecondaryReplicas().
// ///////////////////////
private ProtoValueMessageGroup createValueMessageForSecondaryReplicas(RetrievalResult result) {
    ProtoValueMessageGroup pmg;
    ByteBuffer buf;
    ValueCreator creator;
    int valueBytes;
    ByteBuffer value;
    int valueLength;

    // read the creator and length metadata out of the retrieved value buffer
    buf = result.getValue();
    creator = MetaDataUtil.getCreator(buf, 0);
    valueBytes = MetaDataUtil.getStoredLength(buf, 0);
    valueLength = MetaDataUtil.getCompressedLength(buf, 0);
    value = (ByteBuffer) buf.duplicate().flip();
    if (debug) {
        System.out.printf("buf %s\n", buf);
        System.out.printf("value %s\n", value);
        System.out.printf("valueBytes %d valueLength %d\n", valueBytes, valueLength);
    }
    // repackage the value into a message group destined for the secondary replicas
    pmg = new ProtoValueMessageGroup(new UUIDBase(), message.getContext(), 1, valueBytes, creator.getBytes(),
            DHTConstants.defaultSecondaryReplicaUpdateTimeoutMillis);
    pmg.addValue(result.getKey(), value, valueLength, true);
    return pmg;
}
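The method above pulls the creator and length metadata out of a stored-value buffer with MetaDataUtil and then repackages the value for the secondary replicas. A minimal sketch of just the metadata-extraction step, assuming a ByteBuffer already laid out in the stored-value format (for example, the result of RetrievalResult.getValue() as above); dumpValueMetaData is a name invented for this sketch, not SilverKing API:

// Sketch only: read creator and length metadata from a stored-value buffer.
static void dumpValueMetaData(ByteBuffer storedValue) {
    ValueCreator creator = MetaDataUtil.getCreator(storedValue, 0);
    int storedLength = MetaDataUtil.getStoredLength(storedValue, 0);
    int compressedLength = MetaDataUtil.getCompressedLength(storedValue, 0);
    System.out.printf("creator %s storedLength %d compressedLength %d%n",
            StringUtil.byteArrayToHexString(creator.getBytes()), storedLength, compressedLength);
}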
Use of com.ms.silverking.cloud.dht.ValueCreator in project SilverKing by Morgan-Stanley: the class WritableSegmentBase, method _put().
public SegmentStorageResult _put(DHTKey key, int offset, long version, byte[] valueCreator, NamespaceOptions nsOptions) {
    OffsetList offsetList;
    int existingOffset;

    existingOffset = keyToOffset.get(key);
    if (debugPut) {
        Log.warning("segmentNumber: ", segmentNumber);
        Log.warning("existingOffset: ", existingOffset);
    }
    if (existingOffset == CuckooBase.keyNotFound) {
        // no offset for the key; add the mapping
        if (nsOptions.getVersionMode() == NamespaceVersionMode.SINGLE_VERSION || nsOptions.getStorageType() == StorageType.RAM) {
            if (debugPut) {
                Log.warning("initial mapping: ", KeyUtil.keyToString(key));
            }
            try {
                keyToOffset.put(key, offset);
                if (debugPut) {
                    if (keyToOffset.get(key) != offset) {
                        Log.warning("sanity check failed: " + keyToOffset.get(key) + " " + offset);
                    }
                }
            } catch (TableFullException tfe) {
                Log.warning("Segment pkc full. Creating new table");
                keyToOffset = IntArrayCuckoo.rehashAndAdd((IntArrayCuckoo) keyToOffset, key, offset);
            }
        } else {
            long creationTime;

            // Recovery takes too long if we need to look all over for the version
            // For disk-based ns, we currently always create an offset list
            // Similar logic in NamespaceStore.putSegmentNumberAndVersion()
            offsetList = offsetListStore.newOffsetList();
            if (nsOptions.getRevisionMode() == RevisionMode.UNRESTRICTED_REVISIONS) {
                creationTime = getCreationTime(offset);
            } else {
                creationTime = 0;
            }
            offsetList.putOffset(version, offset, creationTime);
            try {
                keyToOffset.put(key, -((RAMOffsetList) offsetList).getIndex());
            } catch (TableFullException tfe) {
                Log.warning("Segment pkc full. Creating new table");
                keyToOffset = IntArrayCuckoo.rehashAndAdd((IntArrayCuckoo) keyToOffset, key, -((RAMOffsetList) offsetList).getIndex());
            }
        }
    } else {
        // the key already has either a single value or an offset list associated with it
        if (nsOptions.getVersionMode() == NamespaceVersionMode.SINGLE_VERSION) {
            // long existingVersion;
            // existingVersion = getVersion(existingOffset);
            // if (version != existingVersion) {
            // return SegmentStorageResult.invalidVersion;
            // } else {
            byte[] existingChecksum;
            byte[] newChecksum;

            existingChecksum = getChecksum(existingOffset);
            newChecksum = getChecksum(offset);
            if (ArrayUtil.compare(existingChecksum, newChecksum, ArrayUtil.MismatchedLengthMode.Ignore) == 0) {
                return SegmentStorageResult.stored;
            } else {
                if (debugPut) {
                    Log.warning(String.format("Checksums failed to compare: %s %s", StringUtil.byteArrayToHexString(existingChecksum), StringUtil.byteArrayToHexString(newChecksum)));
                }
                return SegmentStorageResult.mutation;
            }
            // }
        } else {
            if (existingOffset >= 0) {
                long existingVersion;
                long existingCreationTime;
                long creationTime;

                if (debugPut) {
                    Log.warning("single key associated: ", KeyUtil.keyToString(key));
                }
                // single key is associated, create an offset list
                offsetList = offsetListStore.newOffsetList();
                existingVersion = getVersion(existingOffset);
                if (nsOptions.getRevisionMode() == RevisionMode.UNRESTRICTED_REVISIONS) {
                    existingCreationTime = getCreationTime(existingOffset);
                    creationTime = getCreationTime(offset);
                } else {
                    existingCreationTime = 0;
                    creationTime = 0;
                }
                if (nsOptions.getRevisionMode() == RevisionMode.UNRESTRICTED_REVISIONS || version > existingVersion) {
                    offsetList.putOffset(existingVersion, existingOffset, existingCreationTime);
                    offsetList.putOffset(version, offset, creationTime);
                    if (debugPut) {
                        Log.warning("removing existing mapping: ", KeyUtil.keyToString(key));
                    }
                    boolean removed;
                    removed = keyToOffset.remove(key);
                    if (debugPut || Log.levelMet(Level.FINE)) {
                        Log.warning("removed: ", removed);
                        Log.warning("pkc.get: ", keyToOffset.get(key));
                        Log.warning("putting new mapping: ", KeyUtil.keyToString(key) + " " + -((RAMOffsetList) offsetList).getIndex());
                    }
                    try {
                        keyToOffset.put(key, -((RAMOffsetList) offsetList).getIndex());
                    } catch (TableFullException tfe) {
                        Log.warning("Segment pkc full. Creating new table");
                        keyToOffset = IntArrayCuckoo.rehashAndAdd((IntArrayCuckoo) keyToOffset, key, -((RAMOffsetList) offsetList).getIndex());
                    }
                } else {
                    ValueCreator creator;

                    // FUTURE - Think about this. Important currently to allow for retries to succeed cleanly.
                    creator = getCreator(offset);
                    if (SimpleValueCreator.areEqual(creator.getBytes(), valueCreator)) {
                        byte[] existingChecksum;
                        byte[] newChecksum;

                        existingChecksum = getChecksum(existingOffset);
                        newChecksum = getChecksum(offset);
                        if (ArrayUtil.compare(existingChecksum, newChecksum) == 0) {
                            // Log.warningf("%s %d %d", key, existingVersion, version);
                            return SegmentStorageResult.duplicateStore;
                        } else {
                            if (debugPut) {
                                Log.warning(String.format("Duplicate existingVersion %d version %d, eo %d o %d, but checksums failed to compare: %s %s", existingVersion, version, existingOffset, offset, StringUtil.byteArrayToHexString(existingChecksum), StringUtil.byteArrayToHexString(newChecksum)));
                            }
                            return SegmentStorageResult.invalidVersion;
                        }
                    } else {
                        // FUTURE: Consider: allow puts of incomplete stores to continue?
                        if (debugPut) {
                            Log.warning("WritableSegmentBase._put detected invalid version b");
                            Log.warning(nsOptions);
                        }
                        return SegmentStorageResult.invalidVersion;
                    }
                }
            } else {
                if (debugPut) {
                    Log.warning("list associated: ", KeyUtil.keyToString(key));
                }
                // offset list is associated, use the existing offset list
                offsetList = offsetListStore.getOffsetList(-existingOffset);
                if (nsOptions.getRevisionMode() == RevisionMode.UNRESTRICTED_REVISIONS || version >= offsetList.getLatestVersion()) {
                    // note: > since we want new versions to be added
                    // == since we might need to store over an incomplete store
                    // so >= to cover both
                    long creationTime;

                    if (debugPut || Log.levelMet(Level.FINE)) {
                        Log.warning("adding offset: ", KeyUtil.keyToString(key) + " " + offset);
                    }
                    if (nsOptions.getRevisionMode() == RevisionMode.UNRESTRICTED_REVISIONS) {
                        creationTime = getCreationTime(offset);
                    } else {
                        creationTime = 0;
                    }
                    offsetList.putOffset(version, offset, creationTime);
                } else {
                    return SegmentStorageResult.invalidVersion;
                }
            }
        }
    }
    // }
    return SegmentStorageResult.stored;
}
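In _put, ValueCreator is what distinguishes a clean retry from a conflicting write: when the incoming version is not newer than the stored one, the store is reported as duplicateStore only if the stored creator bytes match the incoming valueCreator and the checksums agree; otherwise it is rejected as invalidVersion. A minimal sketch of that decision in isolation (classifyRePut is a hypothetical helper written for this sketch, not part of WritableSegmentBase):

// Sketch only: the retry check from _put, factored out for clarity.
static SegmentStorageResult classifyRePut(ValueCreator storedCreator, byte[] incomingCreator,
                                          byte[] storedChecksum, byte[] incomingChecksum) {
    if (SimpleValueCreator.areEqual(storedCreator.getBytes(), incomingCreator)
            && ArrayUtil.compare(storedChecksum, incomingChecksum) == 0) {
        // same writer re-sending the same value: treat as a harmless duplicate
        return SegmentStorageResult.duplicateStore;
    } else {
        // different writer, or same writer with different content: reject
        return SegmentStorageResult.invalidVersion;
    }
}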
Use of com.ms.silverking.cloud.dht.ValueCreator in project SilverKing by Morgan-Stanley: the class DataSegmentWalker, method next().
/**
 * Positions the walker at the next entry and returns it.
 *
 * @return the next entry, or null if there is no additional entry
 */
@Override
public DataSegmentWalkEntry next() {
    long msl;
    long lsl;
    int storedLength;
    int uncompressedLength;
    int compressedLength;
    long nextMSL;
    long nextLSL;

    if (!hasNext) {
        return null;
    } else {
        DHTKey curKey;
        int curOffset;
        long version;
        long creationTime;
        // double keyEntropy;
        byte storageState;
        ValueCreator creator;
        ByteBuffer entry;
        int nextEntryPostKeyPosition;

        if (debug) {
            System.out.println("position: " + position);
        }
        msl = dataSegment.getLong(position);
        lsl = dataSegment.getLong(position + NumConversion.BYTES_PER_LONG);
        curOffset = position;
        position += DHTKey.BYTES_PER_KEY;
        curKey = new SimpleKey(msl, lsl);
        // FUTURE - probably delete the below code, but could consider some
        // additional sanity checking
        /*
        keyEntropy = KeyUtil.keyEntropy(curKey);
        if (keyEntropy < minValidKeyEntropy) {
            boolean sane;

            Log.warning("Invalid key: ", curKey + " " + position);
            sane = false;
            // FUTURE - need more sophisticated validation of entry
            // FUTURE - we need to decode the entry
            while (!sane) {
                if (position > dataSegment.limit() - DHTKey.BYTES_PER_KEY) { // FUTURE - crude limit; refine
                    Log.warning("Ran off the end of the segment searching for a valid key");
                    return null;
                }
                position++;
                msl = dataSegment.getLong(position);
                lsl = dataSegment.getLong(position + NumConversion.BYTES_PER_LONG);
                curKey = new SimpleKey(msl, lsl);
                keyEntropy = KeyUtil.keyEntropy(curKey);
                if (keyEntropy >= minValidKeyEntropy) {
                    try {
                        storedLength = MetaDataUtil.getStoredLength(dataSegment, position);
                        if (storedLength > 0) {
                            uncompressedLength = MetaDataUtil.getUncompressedLength(dataSegment, position);
                            if (uncompressedLength >= 0) {
                                compressedLength = MetaDataUtil.getCompressedLength(dataSegment, position);
                                if (compressedLength >= 0) {
                                    if (position + storedLength < dataSegment.limit()
                                            && uncompressedLength < compressedLength) {
                                        sane = true;
                                    }
                                }
                            }
                        }
                    } catch (Exception e) {
                        // couldn't decode entry, move to next
                    }
                }
            }
        }
        if (debug) {
            System.out.printf("%x:%x %3.2f\n", msl, lsl, keyEntropy);
        }
        */
        storedLength = MetaDataUtil.getStoredLength(dataSegment, position);
        uncompressedLength = MetaDataUtil.getUncompressedLength(dataSegment, position);
        compressedLength = MetaDataUtil.getCompressedLength(dataSegment, position);
        version = MetaDataUtil.getVersion(dataSegment, position);
        creationTime = MetaDataUtil.getCreationTime(dataSegment, position);
        storageState = MetaDataUtil.getStorageState(dataSegment, position);
        creator = MetaDataUtil.getCreator(dataSegment, position);
        if (debug) {
            System.out.println(storedLength);
        }
        nextEntryPostKeyPosition = position + storedLength + DHTKey.BYTES_PER_KEY;
        // Check to see if it's possible that there is another entry
        if (nextEntryPostKeyPosition + MetaDataUtil.getMinimumEntrySize() < dataSegment.limit()) {
            int nextEntryStoredLength;

            // Check to see if the potential next entry actually fits in this segment
            nextEntryStoredLength = MetaDataUtil.getStoredLength(dataSegment, nextEntryPostKeyPosition);
            if (debug) {
                System.out.printf("nextEntryPostKeyPosition %d nextEntryStoredLength %d dataSegment.limit() %d\n", nextEntryPostKeyPosition, nextEntryStoredLength, dataSegment.limit());
            }
            if (nextEntryPostKeyPosition + nextEntryStoredLength < dataSegment.limit()) {
                nextMSL = dataSegment.getLong(nextEntryPostKeyPosition);
                nextLSL = dataSegment.getLong(nextEntryPostKeyPosition + NumConversion.BYTES_PER_LONG);
                hasNext = !(nextMSL == 0 && nextLSL == 0);
                if (debug) {
                    System.out.printf("a: hasNext %s\n", hasNext);
                }
            } else {
                hasNext = false;
                if (debug) {
                    System.out.printf("b: hasNext %s\n", hasNext);
                }
            }
        } else {
            // No room for valid next entry
            hasNext = false;
            if (debug) {
                System.out.printf("c: hasNext %s\n", hasNext);
            }
        }
        entry = (ByteBuffer) ((ByteBuffer) dataSegment.duplicate().position(position)).slice().limit(storedLength);
        position += storedLength;
        return new DataSegmentWalkEntry(curKey, version, curOffset, storedLength, uncompressedLength, compressedLength, DHTKey.BYTES_PER_KEY, entry, creationTime, creator, storageState);
    }
}
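Since next() returns null once the walker has run off the end of the segment, the entries of a data segment (each carrying its ValueCreator) can be consumed with a simple null-terminated loop. A minimal sketch, assuming 'walker' is an already-constructed DataSegmentWalker; its constructor and the DataSegmentWalkEntry accessors are not shown in the excerpt above, so only next() and toString() are relied on here:

// Sketch only: drain a DataSegmentWalker until next() reports exhaustion.
DataSegmentWalkEntry walkEntry;
while ((walkEntry = walker.next()) != null) {
    System.out.println(walkEntry); // toString only; specific getters are not shown above
}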