use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class CuratedLogIndexState method forceAddPutEntry.
/**
* Forcibly adds an existing PUT {@link IndexEntry} to the index to create duplicate PUTs in the index.
* @param id The {@link MockId} of this duplicate put.
* @param value The {@link IndexValue} of this duplicate put.
* @param bytes The content of this duplicate put.
* @throws StoreException
*/
void forceAddPutEntry(MockId id, IndexValue value, byte[] bytes) throws StoreException {
  if (!value.isPut()) {
    throw new IllegalArgumentException("Value has to be a put: " + value);
  }
  Offset endOffsetOfPrevMsg = index.getCurrentEndOffset();
  ByteBuffer buffer = ByteBuffer.wrap(bytes);
  ReadableByteChannel channel = Channels.newChannel(new ByteBufferInputStream(buffer));
  log.appendFrom(channel, buffer.capacity());
  FileSpan fileSpan = log.getFileSpanForMessage(endOffsetOfPrevMsg, bytes.length);
  Offset indexSegmentStartOffset = generateReferenceIndexSegmentStartOffset(fileSpan.getStartOffset());
  if (!referenceIndex.containsKey(indexSegmentStartOffset)) {
    // rollover will occur
    advanceTime(DELAY_BETWEEN_LAST_MODIFIED_TIMES_MS);
    referenceIndex.put(indexSegmentStartOffset, new TreeMap<>());
  }
  IndexValue newValue = new IndexValue(value);
  newValue.setNewOffset(fileSpan.getStartOffset());
  IndexEntry entry = new IndexEntry(id, newValue);
  logOrder.put(fileSpan.getStartOffset(), new Pair<>(id, new LogEntry(bytes, value)));
  allKeys.computeIfAbsent(id, k -> new TreeSet<>()).add(value);
  referenceIndex.get(indexSegmentStartOffset).computeIfAbsent(id, k -> new TreeSet<>()).add(value);
  long expiresAtMs = value.getExpiresAtMs();
  if (expiresAtMs != Utils.Infinite_Time && expiresAtMs < time.milliseconds()) {
    expiredKeys.add(id);
  } else {
    liveKeys.add(id);
  }
  index.addToIndex(Collections.singletonList(entry), fileSpan);
  lastModifiedTimesInSecs.put(indexSegmentStartOffset, value.getOperationTimeInMs() / Time.MsPerSec);
}
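The recurring idiom in these snippets is wrapping a byte array in a ByteBuffer, exposing it as an InputStream via ByteBufferInputStream, and adapting that to a ReadableByteChannel for log.appendFrom. A minimal sketch of the idiom in isolation, assuming only com.github.ambry.utils.ByteBufferInputStream and the standard java.nio utilities:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import com.github.ambry.utils.ByteBufferInputStream;

public class ChannelWrapSketch {
  public static void main(String[] args) throws IOException {
    byte[] bytes = {1, 2, 3, 4};
    // Wrap the array, expose it as an InputStream, then adapt it to a channel,
    // mirroring the setup that feeds log.appendFrom(channel, buffer.capacity()).
    ByteBuffer buffer = ByteBuffer.wrap(bytes);
    ReadableByteChannel channel = Channels.newChannel(new ByteBufferInputStream(buffer));
    // A consumer can now pull exactly buffer.capacity() bytes from the channel.
    ByteBuffer sink = ByteBuffer.allocate(bytes.length);
    while (sink.hasRemaining() && channel.read(sink) != -1) {
      // keep reading until the sink is full or the stream is drained
    }
    System.out.println("read " + sink.position() + " bytes");
  }
}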
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class CuratedLogIndexState method appendToLog.
/**
* Appends random data of size {@code size} to the {@link #log}.
* @param size the size of data that needs to be appended.
* @return the data that was appended.
* @throws StoreException
*/
byte[] appendToLog(long size) throws StoreException {
  byte[] bytes = TestUtils.getRandomBytes((int) size);
  if (size > CuratedLogIndexState.HARD_DELETE_START_OFFSET) {
    // ensure at least one byte is set to 1 for hard delete verification purposes
    int randomByte = (int) (CuratedLogIndexState.HARD_DELETE_START_OFFSET + TestUtils.RANDOM.nextInt(
        (int) (size - CuratedLogIndexState.HARD_DELETE_START_OFFSET - CuratedLogIndexState.HARD_DELETE_LAST_PART_SIZE)));
    bytes[randomByte] = 1;
  }
  ByteBuffer buffer = ByteBuffer.wrap(bytes);
  ReadableByteChannel channel = Channels.newChannel(new ByteBufferInputStream(buffer));
  log.appendFrom(channel, buffer.capacity());
  return bytes;
}
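The index for the sentinel 1 byte is drawn from the window [HARD_DELETE_START_OFFSET, size - HARD_DELETE_LAST_PART_SIZE), so verification code that scans the hard-delete region is guaranteed to see a non-zero byte. A standalone sketch of that bound computation, using hypothetical constant values in place of the real ones defined in CuratedLogIndexState:

import java.util.Random;

public class SentinelIndexSketch {
  // Hypothetical values; the real constants live in CuratedLogIndexState.
  private static final long HARD_DELETE_START_OFFSET = 11;
  private static final long HARD_DELETE_LAST_PART_SIZE = 7;

  public static void main(String[] args) {
    Random random = new Random();
    long size = 100;
    byte[] bytes = new byte[(int) size];
    if (size > HARD_DELETE_START_OFFSET) {
      // Pick an index strictly inside the hard-delete window and set it to 1.
      int index = (int) (HARD_DELETE_START_OFFSET
          + random.nextInt((int) (size - HARD_DELETE_START_OFFSET - HARD_DELETE_LAST_PART_SIZE)));
      bytes[index] = 1;
      System.out.println("sentinel byte placed at index " + index);
    }
  }
}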
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class LogSegmentTest method writeFromTest.
/**
* Tests {@link LogSegment#writeFrom(ReadableByteChannel, long, long)} for various cases.
* @throws IOException
* @throws StoreException
*/
@Test
public void writeFromTest() throws IOException, StoreException {
  LogSegmentName currSegmentName = LogSegmentName.generateFirstSegmentName(false);
  LogSegment segment = getSegment(currSegmentName, STANDARD_SEGMENT_SIZE, true);
  try {
    long writeStartOffset = segment.getStartOffset();
    byte[] bufOne = TestUtils.getRandomBytes(STANDARD_SEGMENT_SIZE / 3);
    byte[] bufTwo = TestUtils.getRandomBytes(STANDARD_SEGMENT_SIZE / 2);
    segment.writeFrom(Channels.newChannel(new ByteBufferInputStream(ByteBuffer.wrap(bufOne))), writeStartOffset,
        bufOne.length);
    assertEquals("End offset is not as expected", writeStartOffset + bufOne.length, segment.getEndOffset());
    readAndEnsureMatch(segment, writeStartOffset, bufOne);
    // overwrite using bufTwo
    segment.writeFrom(Channels.newChannel(new ByteBufferInputStream(ByteBuffer.wrap(bufTwo))), writeStartOffset,
        bufTwo.length);
    assertEquals("End offset is not as expected", writeStartOffset + bufTwo.length, segment.getEndOffset());
    readAndEnsureMatch(segment, writeStartOffset, bufTwo);
    // overwrite using bufOne
    segment.writeFrom(Channels.newChannel(new ByteBufferInputStream(ByteBuffer.wrap(bufOne))), writeStartOffset,
        bufOne.length);
    // end offset should not have changed
    assertEquals("End offset is not as expected", writeStartOffset + bufTwo.length, segment.getEndOffset());
    readAndEnsureMatch(segment, writeStartOffset, bufOne);
    readAndEnsureMatch(segment, writeStartOffset + bufOne.length, Arrays.copyOfRange(bufTwo, bufOne.length, bufTwo.length));
    // write at random locations
    for (int i = 0; i < 10; i++) {
      long offset = writeStartOffset + Utils.getRandomLong(TestUtils.RANDOM,
          segment.sizeInBytes() - bufOne.length - writeStartOffset);
      segment.writeFrom(Channels.newChannel(new ByteBufferInputStream(ByteBuffer.wrap(bufOne))), offset, bufOne.length);
      readAndEnsureMatch(segment, offset, bufOne);
    }
    // try to overwrite using a channel that won't fit
    ByteBuffer failBuf = ByteBuffer.wrap(TestUtils.getRandomBytes((int) (STANDARD_SEGMENT_SIZE - writeStartOffset + 1)));
    long writeOverFlowCount = metrics.overflowWriteError.getCount();
    try {
      segment.writeFrom(Channels.newChannel(new ByteBufferInputStream(failBuf)), writeStartOffset, failBuf.remaining());
      fail("WriteFrom should have failed because data won't fit");
    } catch (IndexOutOfBoundsException e) {
      assertEquals("Write overflow should have been reported", writeOverFlowCount + 1,
          metrics.overflowWriteError.getCount());
      assertEquals("Position of buffer has changed", 0, failBuf.position());
    }
    // data cannot be written at invalid offsets.
    long[] invalidOffsets = {writeStartOffset - 1, STANDARD_SEGMENT_SIZE, STANDARD_SEGMENT_SIZE + 1};
    ByteBuffer buffer = ByteBuffer.wrap(TestUtils.getRandomBytes(1));
    for (long invalidOffset : invalidOffsets) {
      try {
        segment.writeFrom(Channels.newChannel(new ByteBufferInputStream(buffer)), invalidOffset, buffer.remaining());
        fail("WriteFrom should have failed because offset provided for write is invalid");
      } catch (IndexOutOfBoundsException e) {
        assertEquals("Position of buffer has changed", 0, buffer.position());
      }
    }
    segment.close(false);
    // ensure that writeFrom fails on the closed segment.
    try {
      segment.writeFrom(Channels.newChannel(new ByteBufferInputStream(buffer)), writeStartOffset, buffer.remaining());
      fail("WriteFrom should have failed because segments are closed");
    } catch (ClosedChannelException e) {
      assertEquals("Position of buffer has changed", 0, buffer.position());
    }
  } finally {
    closeSegmentAndDeleteFile(segment);
  }
}
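One detail worth noting: each successful writeFrom call above wraps its source array in a fresh ByteBufferInputStream, because reading drains the backing buffer and a drained channel only reports end-of-stream on reuse. A minimal sketch of that behavior, assuming Ambry's ByteBufferInputStream and standard InputStream end-of-stream semantics:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import com.github.ambry.utils.ByteBufferInputStream;

public class ChannelReuseSketch {
  public static void main(String[] args) throws IOException {
    ReadableByteChannel channel =
        Channels.newChannel(new ByteBufferInputStream(ByteBuffer.wrap(new byte[8])));
    ByteBuffer sink = ByteBuffer.allocate(8);
    while (sink.hasRemaining() && channel.read(sink) > 0) {
      // drain the 8-byte backing buffer completely
    }
    sink.clear();
    int n = channel.read(sink); // a second pass sees end-of-stream
    System.out.println("second read returned " + n); // expected: -1
  }
}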
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class OffsetTest method offsetBadInputTest.
/**
* Tests the constructor and {@link Offset#fromBytes(DataInputStream)} function with bad input.
* @throws IOException
*/
@Test
public void offsetBadInputTest() throws IOException {
  doBadOffsetInputTest(null, 10);
  doBadOffsetInputTest(LogSegmentName.fromString("1_11"), -1);
  Offset offset = new Offset(LogSegmentName.fromString("1_11"), 10);
  byte[] serialized = offset.toBytes();
  // mess with a version byte
  serialized[0] = serialized[0] == (byte) 1 ? (byte) 2 : (byte) 1;
  try {
    Offset.fromBytes(new DataInputStream(new ByteBufferInputStream(ByteBuffer.wrap(serialized))));
    fail("Version check should have failed");
  } catch (IllegalArgumentException e) {
    // expected.
  }
}
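The flip of serialized[0] relies on the serialized form leading with a version field, so corrupting it must trip the deserializer's version check. A hypothetical version-checked reader illustrating the pattern the test exercises (the version constant and layout here are illustrative, not Offset's actual wire format):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class VersionCheckSketch {
  // Hypothetical version constant; Offset defines its own supported versions.
  private static final short CURRENT_VERSION = 1;

  static void readVersioned(byte[] serialized) throws IOException {
    DataInputStream stream = new DataInputStream(new ByteArrayInputStream(serialized));
    short version = stream.readShort();
    if (version != CURRENT_VERSION) {
      // Mirrors the IllegalArgumentException the test expects from Offset.fromBytes().
      throw new IllegalArgumentException("Unrecognized version: " + version);
    }
    // ... read the remaining fields for this version ...
  }

  public static void main(String[] args) throws IOException {
    byte[] serialized = {0, 1, 42}; // version 1 (two bytes, big-endian), then one payload byte
    readVersioned(serialized);      // passes the version check
    serialized[0] = 1;              // corrupt a version byte, as the test does
    try {
      readVersioned(serialized);
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}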
use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.
the class OffsetTest method offsetSerDeTest.
/**
* Tests serialization and deserialization of an {@link Offset} class.
* @throws IOException
*/
@Test
public void offsetSerDeTest() throws IOException {
  long pos = Utils.getRandomLong(TestUtils.RANDOM, 1000);
  long gen = Utils.getRandomLong(TestUtils.RANDOM, 1000);
  LogSegmentName name = LogSegmentName.fromPositionAndGeneration(pos, gen);
  long offset = Utils.getRandomLong(new Random(), Long.MAX_VALUE);
  Offset logOffset = new Offset(name, offset);
  byte[] serialized = logOffset.toBytes();
  Offset deserializedOffset =
      Offset.fromBytes(new DataInputStream(new ByteBufferInputStream(ByteBuffer.wrap(serialized))));
  assertEquals("Original offset 'name' does not match with the deserialized offset", name, deserializedOffset.getName());
  assertEquals("Original offset 'offset' does not match with the deserialized offset", offset,
      deserializedOffset.getOffset());
  // equals test
  assertEquals("Original offset does not match with the deserialized offset", logOffset, deserializedOffset);
  // hashcode test
  assertEquals("Hashcode doesn't match", logOffset.hashCode(), deserializedOffset.hashCode());
}
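The round-trip pattern above generalizes to any type with a toBytes/fromBytes pair. A small reusable helper, sketched under the assumption that the deserializer accepts a DataInputStream the way Offset.fromBytes(DataInputStream) does (the helper interface is hypothetical, not part of Ambry):

import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import com.github.ambry.utils.ByteBufferInputStream;

public class SerDeRoundTrip {
  /** Functional shape of a fromBytes-style deserializer (hypothetical). */
  interface Deserializer<T> {
    T fromBytes(DataInputStream stream) throws IOException;
  }

  /** Replays serialized bytes through the given deserializer. */
  static <T> T roundTrip(byte[] serialized, Deserializer<T> deserializer) throws IOException {
    return deserializer.fromBytes(
        new DataInputStream(new ByteBufferInputStream(ByteBuffer.wrap(serialized))));
  }
}

With such a helper, the body of the test would reduce to Offset deserialized = roundTrip(logOffset.toBytes(), Offset::fromBytes) followed by the equals and hashCode assertions.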