Use of io.pravega.common.Timer in project pravega (by pravega): class EventStreamReaderImpl, method readNextEvent.
/**
 * Blocks for up to {@code timeout} ms waiting for the next event from any segment assigned
 * to this reader. Returns an empty event when a checkpoint is pending or when the timeout
 * elapses with no data available.
 *
 * @param timeout maximum time (ms) to wait for an event.
 * @throws ReinitializationRequiredException if the reader group state requires reinitialization.
 * @throws TruncatedDataException if the data to be read has been truncated.
 */
@Override
public EventRead<Type> readNextEvent(long timeout) throws ReinitializationRequiredException, TruncatedDataException {
synchronized (readers) {
Preconditions.checkState(!closed, "Reader is closed");
// Cap each per-iteration wait so the group-state check below still runs periodically.
// NOTE(review): the final iteration may wait up to waitTime past the overall timeout.
long waitTime = Math.min(timeout, ReaderGroupStateManager.TIME_UNIT.toMillis());
Timer timer = new Timer();
Segment segment = null;
long offset = -1;
ByteBuffer buffer;
do {
// A non-null result means a checkpoint is pending; surface it as an empty event.
String checkpoint = updateGroupStateIfNeeded();
if (checkpoint != null) {
return createEmptyEvent(checkpoint);
}
SegmentInputStream segmentReader = orderer.nextSegment(readers);
if (segmentReader == null) {
// No segment currently assigned: back off, then re-check group state on the next pass.
Exceptions.handleInterrupted(() -> Thread.sleep(waitTime));
buffer = null;
} else {
// Capture position BEFORE reading so lastRead/EventPointer refer to the event's start.
segment = segmentReader.getSegmentId();
offset = segmentReader.getOffset();
try {
buffer = segmentReader.read(waitTime);
} catch (EndOfSegmentException e) {
// Segment exhausted; hand it back and retry on another segment.
handleEndOfSegment(segmentReader);
buffer = null;
} catch (SegmentTruncatedException e) {
handleSegmentTruncated(segmentReader);
buffer = null;
}
}
} while (buffer == null && timer.getElapsedMillis() < timeout);
if (buffer == null) {
// Timed out with no data: empty event, no checkpoint.
return createEmptyEvent(null);
}
// buffer != null implies the read branch ran, so segment/offset are set.
lastRead = Sequence.create(segment.getSegmentNumber(), offset);
// Stored event length includes the wire-protocol type+length header.
int length = buffer.remaining() + WireCommands.TYPE_PLUS_LENGTH_SIZE;
return new EventReadImpl<>(lastRead, deserializer.deserialize(buffer), getPosition(), new EventPointerImpl(segment, offset, length), null);
}
}
Use of io.pravega.common.Timer in project pravega (by pravega): class BookKeeperLog, method append.
/**
 * Queues the given data for asynchronous appending to BookKeeper.
 * The returned future completes with the write's LogAddress once the write processor
 * persists it, or exceptionally if the write fails. Metrics and trace logging are
 * performed asynchronously after completion and are not awaited by the caller.
 *
 * @param data    the payload to append.
 * @param timeout timeout for the operation.
 * @return a CompletableFuture with the address of the appended data.
 */
@Override
public CompletableFuture<LogAddress> append(ArrayView data, Duration timeout) {
    ensurePreconditions();
    long trace = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "append", data.getLength());
    if (data.getLength() > getMaxAppendLength()) {
        // Reject over-sized payloads up front, before touching the queue.
        return Futures.failedFuture(new WriteTooLongException(data.getLength(), getMaxAppendLength()));
    }

    Timer latency = new Timer();

    // Enqueue the write and nudge the Write Processor to pick it up.
    CompletableFuture<LogAddress> appendResult = new CompletableFuture<>();
    this.writes.add(new Write(data, getWriteLedger(), appendResult));
    this.writeProcessor.runAsync();

    // Post-completion bookkeeping (metrics, tracing, failure handling) runs off the
    // caller's path on our executor.
    appendResult.whenCompleteAsync((address, ex) -> {
        if (ex == null) {
            this.metrics.writeCompleted(latency.getElapsed());
            LoggerHelpers.traceLeave(log, this.traceObjectId, "append", trace, data.getLength(), address);
        } else {
            handleWriteException(ex);
        }
    }, this.executorService);
    return appendResult;
}
Use of io.pravega.common.Timer in project pravega (by pravega): class FileSystemStorage, method doWrite.
/**
 * Writes {@code length} bytes from {@code data} into the segment's backing file at
 * {@code offset}, reporting latency/byte metrics on success.
 *
 * @param handle handle for the target segment; must not be read-only.
 * @param offset file offset to write at; must equal the current file size.
 * @param data   source of the bytes to write; not closed by this method so it can be reused.
 * @param length number of bytes to transfer from {@code data}.
 * @return null (Void), for use as a Callable-style lambda target.
 * @throws IllegalArgumentException        if the handle is read-only.
 * @throws StreamSegmentSealedException    if the backing file is not writable (sealed).
 * @throws BadOffsetException              if {@code offset} is beyond the current file size.
 * @throws IOException                     if the source is exhausted before {@code length} bytes
 *                                         are transferred, or on any other I/O failure.
 */
private Void doWrite(SegmentHandle handle, long offset, InputStream data, int length) throws Exception {
    long traceId = LoggerHelpers.traceEnter(log, "write", handle.getSegmentName(), offset, length);
    Timer timer = new Timer();
    if (handle.isReadOnly()) {
        throw new IllegalArgumentException("Write called on a readonly handle of segment " + handle.getSegmentName());
    }
    Path path = Paths.get(config.getRoot(), handle.getSegmentName());
    // FileChannel.open succeeds even on read-only files, so writability (our "sealed"
    // marker) must be checked explicitly.
    if (!isWritableFile(path)) {
        throw new StreamSegmentSealedException(handle.getSegmentName());
    }
    long fileSize = path.toFile().length();
    if (fileSize < offset) {
        throw new BadOffsetException(handle.getSegmentName(), fileSize, offset);
    } else {
        long totalBytesWritten = 0;
        try (FileChannel channel = FileChannel.open(path, StandardOpenOption.WRITE)) {
            // Wrap the input data into a ReadableByteChannel, but do not close it. Doing so will
            // result in closing the underlying InputStream, which is not desirable if it is to be reused.
            ReadableByteChannel sourceChannel = Channels.newChannel(data);
            while (length != 0) {
                long bytesWritten = channel.transferFrom(sourceChannel, offset, length);
                if (bytesWritten <= 0) {
                    // transferFrom returns 0 when the source stream is exhausted. The previous
                    // `assert` here is disabled in production (-da), which turned a short source
                    // into an infinite loop. Fail explicitly instead.
                    throw new IOException("Unable to make progress writing to segment '"
                            + handle.getSegmentName() + "' at offset " + offset
                            + "; source exhausted with " + length + " bytes remaining.");
                }
                offset += bytesWritten;
                totalBytesWritten += bytesWritten;
                length -= bytesWritten;
            }
        }
        FileSystemMetrics.WRITE_LATENCY.reportSuccessEvent(timer.getElapsed());
        FileSystemMetrics.WRITE_BYTES.add(totalBytesWritten);
        LoggerHelpers.traceLeave(log, "write", traceId);
        return null;
    }
}
Use of io.pravega.common.Timer in project pravega (by pravega): class FileSystemStorage, method doRead.
/**
 * Reads {@code length} bytes from the segment's backing file starting at {@code offset}
 * into {@code buffer} at {@code bufferOffset}, reporting latency/byte metrics on success.
 *
 * @param handle       handle for the segment to read from.
 * @param offset       file offset to start reading at.
 * @param buffer       destination buffer.
 * @param bufferOffset position in {@code buffer} to start writing at.
 * @param length       number of bytes to read.
 * @return the total number of bytes read (always {@code length} on success).
 * @throws IllegalArgumentException if {@code offset} is beyond the current segment size.
 * @throws IOException              if end-of-file is reached before {@code length} bytes are
 *                                  read (e.g. concurrent truncation), or on any other I/O failure.
 */
private int doRead(SegmentHandle handle, long offset, byte[] buffer, int bufferOffset, int length) throws IOException {
    long traceId = LoggerHelpers.traceEnter(log, "read", handle.getSegmentName(), offset, bufferOffset, length);
    Timer timer = new Timer();
    Path path = Paths.get(config.getRoot(), handle.getSegmentName());
    long fileSize = Files.size(path);
    if (fileSize < offset) {
        throw new IllegalArgumentException(String.format("Reading at offset (%d) which is beyond the " + "current size of segment (%d).", offset, fileSize));
    }
    try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) {
        int totalBytesRead = 0;
        long readOffset = offset;
        do {
            ByteBuffer readBuffer = ByteBuffer.wrap(buffer, bufferOffset, length);
            int bytesRead = channel.read(readBuffer, readOffset);
            if (bytesRead < 0) {
                // channel.read returns -1 at EOF. The previous code applied -1 to the
                // counters, growing `length` and walking `bufferOffset` backwards — an
                // infinite loop. Fail explicitly instead.
                throw new IOException(String.format("Reached end of segment '%s' at offset %d with %d bytes still to read.",
                        handle.getSegmentName(), readOffset, length));
            }
            // Advance the FILE offset too. Previously only the buffer position advanced,
            // so a short read re-read and duplicated the same file bytes.
            readOffset += bytesRead;
            bufferOffset += bytesRead;
            totalBytesRead += bytesRead;
            length -= bytesRead;
        } while (length != 0);
        FileSystemMetrics.READ_LATENCY.reportSuccessEvent(timer.getElapsed());
        FileSystemMetrics.READ_BYTES.add(totalBytesRead);
        LoggerHelpers.traceLeave(log, "read", traceId, totalBytesRead);
        return totalBytesRead;
    }
}
Use of io.pravega.common.Timer in project pravega (by pravega): class AppendProcessor, method performNextWrite.
/**
 * If there isn't already an append outstanding against the store, write a new one.
 * Appends are opportunistically batched here, i.e. if many are waiting they are combined
 * into a single append and that is written. Latency is reported as success or failure,
 * and the append's data is released once the store call completes either way.
 */
private void performNextWrite() {
    Append toWrite = getNextAppend();
    if (toWrite == null) {
        // Nothing pending (or an append is already outstanding).
        return;
    }
    long trace = LoggerHelpers.traceEnter(log, "storeAppend", toWrite);
    Timer latency = new Timer();
    storeAppend(toWrite)
            .whenComplete((v, e) -> {
                handleAppendResult(toWrite, e);
                LoggerHelpers.traceLeave(log, "storeAppend", trace, v, e);
                if (e != null) {
                    WRITE_STREAM_SEGMENT.reportFailEvent(latency.getElapsed());
                } else {
                    WRITE_STREAM_SEGMENT.reportSuccessEvent(latency.getElapsed());
                }
            })
            .whenComplete((v, e) -> toWrite.getData().release());
}
Aggregations