Use of org.apache.cassandra.utils.concurrent.SharedCloseableImpl in project cassandra by apache.
Class BigTableWriter, method openFinal:
@SuppressWarnings("resource")
private SSTableReader openFinal(SSTableReader.OpenReason openReason)
{
    if (maxDataAge < 0)
        maxDataAge = currentTimeMillis();

    IndexSummary indexSummary = null;
    FileHandle ifile = null;
    FileHandle dfile = null;
    SSTableReader sstable = null;
    try
    {
        StatsMetadata stats = statsMetadata();
        // finalize in-memory state for the reader
        indexSummary = iwriter.summary.build(metadata().partitioner);
        long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
        // size the read buffers from the estimated partition sizes and the index density
        int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
        int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
        ifile = iwriter.builder.bufferSize(indexBufferSize).complete();
        if (compression)
            dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(0));
        dfile = dbuilder.bufferSize(dataBufferSize).complete();
        invalidateCacheAtBoundary(dfile);
        sstable = SSTableReader.internalOpen(descriptor, components, metadata, ifile, dfile, indexSummary,
                                             iwriter.bf.sharedCopy(), maxDataAge, stats, openReason, header);
        sstable.first = getMinimalKey(first);
        sstable.last = getMinimalKey(last);
        return sstable;
    }
    catch (Throwable t)
    {
        JVMStabilityInspector.inspectThrowable(t);
        // If we successfully created our sstable, we can rely on its InstanceTidier to clean things up for us
        if (sstable != null)
            sstable.selfRef().release();
        else
            Stream.of(indexSummary, ifile, dfile).filter(Objects::nonNull).forEach(SharedCloseableImpl::close);
        throw t;
    }
}
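The error path above leans on the reference-counting contract that SharedCloseableImpl provides: every sharedCopy() of a resource bumps a shared count, and the underlying resource is tidied exactly once, when the last copy is closed. The following toy sketch is not Cassandra's implementation; the class name RefCountedHandle and its members are invented purely to illustrate that contract.

import java.util.concurrent.atomic.AtomicInteger;

// Toy stand-in for the SharedCloseableImpl idea: copies share one AtomicInteger
// ref count, and the real cleanup runs exactly once, when the count hits zero.
final class RefCountedHandle implements AutoCloseable
{
    private final AtomicInteger refs; // shared by every copy of this handle
    private final Runnable tidy;      // the actual cleanup, e.g. unmapping a file

    RefCountedHandle(Runnable tidy)
    {
        this(new AtomicInteger(1), tidy);
    }

    private RefCountedHandle(AtomicInteger refs, Runnable tidy)
    {
        this.refs = refs;
        this.tidy = tidy;
    }

    // Plays the role of SharedCloseable.sharedCopy(): a new handle, same resource.
    RefCountedHandle sharedCopy()
    {
        refs.incrementAndGet();
        return new RefCountedHandle(refs, tidy);
    }

    @Override
    public void close()
    {
        if (refs.decrementAndGet() == 0)
            tidy.run(); // last reference released: free the underlying resource
    }

    public static void main(String[] args)
    {
        RefCountedHandle writerSide = new RefCountedHandle(() -> System.out.println("resource released"));
        RefCountedHandle readerSide = writerSide.sharedCopy(); // e.g. handed to a new reader
        writerSide.close(); // the writer's handle is gone, but the resource stays live
        readerSide.close(); // prints "resource released"
    }
}

This is why openFinal can pass iwriter.bf.sharedCopy() to the reader: the writer and the reader each hold an independent handle on the same Bloom filter, and whichever side closes last releases it.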
Use of org.apache.cassandra.utils.concurrent.SharedCloseableImpl in project cassandra by apache.
Class BigTableWriter, method openEarly:
@SuppressWarnings("resource")
public SSTableReader openEarly()
{
    // find the max (exclusive) readable key
    IndexSummaryBuilder.ReadableBoundary boundary = iwriter.getMaxReadable();
    if (boundary == null)
        return null;

    IndexSummary indexSummary = null;
    FileHandle ifile = null;
    FileHandle dfile = null;
    SSTableReader sstable = null;
    try
    {
        StatsMetadata stats = statsMetadata();
        assert boundary.indexLength > 0 && boundary.dataLength > 0;
        // open the reader early, truncated at the boundary known to be fully written
        indexSummary = iwriter.summary.build(metadata().partitioner, boundary);
        long indexFileLength = new File(descriptor.filenameFor(Component.PRIMARY_INDEX)).length();
        int indexBufferSize = optimizationStrategy.bufferSize(indexFileLength / indexSummary.size());
        ifile = iwriter.builder.bufferSize(indexBufferSize).complete(boundary.indexLength);
        if (compression)
            dbuilder.withCompressionMetadata(((CompressedSequentialWriter) dataFile).open(boundary.dataLength));
        int dataBufferSize = optimizationStrategy.bufferSize(stats.estimatedPartitionSize.percentile(DatabaseDescriptor.getDiskOptimizationEstimatePercentile()));
        dfile = dbuilder.bufferSize(dataBufferSize).complete(boundary.dataLength);
        invalidateCacheAtBoundary(dfile);
        sstable = SSTableReader.internalOpen(descriptor, components, metadata, ifile, dfile, indexSummary,
                                             iwriter.bf.sharedCopy(), maxDataAge, stats, SSTableReader.OpenReason.EARLY, header);
        // now it's open, record the ACTUAL last readable key (i.e. one for which the data file has also been flushed)
        sstable.first = getMinimalKey(first);
        sstable.last = getMinimalKey(boundary.lastKey);
        return sstable;
    }
    catch (Throwable t)
    {
        JVMStabilityInspector.inspectThrowable(t);
        // If we successfully created our sstable, we can rely on its InstanceTidier to clean things up for us
        if (sstable != null)
            sstable.selfRef().release();
        else
            Stream.of(indexSummary, ifile, dfile).filter(Objects::nonNull).forEach(SharedCloseableImpl::close);
        throw t;
    }
}
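Both methods use the same hand-off idiom in their catch blocks: once SSTableReader.internalOpen succeeds, the reader owns the components, and releasing its self-ref lets the InstanceTidier close them; before that point, the writer must itself close whatever it has built so far. Below is a hedged, generic sketch of that idiom; Owner and Part are invented names, not Cassandra types.

import java.util.Objects;
import java.util.stream.Stream;

// Generic sketch of the "transfer ownership or clean up" idiom used above:
// build several closeable parts, then either hand them all to an owning object
// or, if construction fails partway, close only the parts already built.
final class OwnershipTransferSketch
{
    interface Part extends AutoCloseable
    {
        @Override
        void close(); // narrowed to throw nothing, so lambdas stay tidy
    }

    static final class Owner implements AutoCloseable
    {
        private final Part[] parts;
        Owner(Part... parts) { this.parts = parts; }

        @Override
        public void close() // plays the role of the reader's InstanceTidier
        {
            for (Part p : parts)
                p.close();
        }
    }

    static Owner open(boolean failMidway)
    {
        Part a = null, b = null;
        Owner owner = null;
        try
        {
            a = () -> System.out.println("closed a");
            if (failMidway)
                throw new IllegalStateException("boom");
            b = () -> System.out.println("closed b");
            owner = new Owner(a, b); // ownership transfers here, as with internalOpen
            return owner;
        }
        catch (Throwable t)
        {
            if (owner != null)
                owner.close(); // the owner tidies everything it was handed
            else
                Stream.of(a, b).filter(Objects::nonNull).forEach(Part::close);
            throw t;
        }
    }

    public static void main(String[] args)
    {
        try
        {
            open(true);
        }
        catch (IllegalStateException expected)
        {
            // prints "closed a": only the part built before the failure is closed
        }
    }
}

In the real methods the parts are indexSummary, ifile, and dfile, and the failure can occur at any of the intermediate build steps, which is why each is null-checked before being closed.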