Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
The class ColumnFamilyStore, method writeSnapshotManifest:
private void writeSnapshotManifest(final JSONArray filesJSONArr, final String snapshotName)
{
    final File manifestFile = getDirectories().getSnapshotManifestFile(snapshotName);
    try
    {
        if (!manifestFile.getParentFile().exists())
            manifestFile.getParentFile().mkdirs();

        // record the list of sstable files included in the snapshot
        try (PrintStream out = new PrintStream(manifestFile))
        {
            final JSONObject manifestJSON = new JSONObject();
            manifestJSON.put("files", filesJSONArr);
            out.println(manifestJSON.toJSONString());
        }
    }
    catch (IOException e)
    {
        // wrap the failure together with the manifest path that could not be written
        throw new FSWriteError(e, manifestFile);
    }
}
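Cassandra's convention for blocking file writes is visible here: the checked IOException is wrapped in an FSWriteError together with the path that failed, so the error can be handled according to the node's disk failure policy. Below is a minimal, self-contained sketch of the same wrap-and-rethrow pattern, assuming the java.io.File overload of FSWriteError used above; the ManifestWriteSketch class and writeText helper are illustrative names, not part of Cassandra:

import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.cassandra.io.FSWriteError;

final class ManifestWriteSketch
{
    // Write a small text file, translating any IOException into an
    // FSWriteError that carries the path that could not be written.
    static void writeText(File target, String contents)
    {
        try
        {
            if (!target.getParentFile().exists())
                target.getParentFile().mkdirs();

            try (PrintStream out = new PrintStream(target))
            {
                out.println(contents);
            }
        }
        catch (IOException e)
        {
            throw new FSWriteError(e, target);
        }
    }
}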
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
The class OnDiskIndexBuilder, method finish:
@SuppressWarnings("resource")
protected void finish(Descriptor descriptor, Pair<ByteBuffer, ByteBuffer> range, File file, TermIterator terms) {
SequentialWriter out = null;
try {
out = new SequentialWriter(file, WRITER_OPTION);
out.writeUTF(descriptor.version.toString());
out.writeShort(termSize.size);
// min, max term (useful to find initial scan range from search expressions)
ByteBufferUtil.writeWithShortLength(terms.minTerm(), out);
ByteBufferUtil.writeWithShortLength(terms.maxTerm(), out);
// min, max keys covered by index (useful when searching across multiple indexes)
ByteBufferUtil.writeWithShortLength(range.left, out);
ByteBufferUtil.writeWithShortLength(range.right, out);
out.writeUTF(mode.toString());
out.writeBoolean(marksPartials);
out.skipBytes((int) (BLOCK_SIZE - out.position()));
dataLevel = mode == Mode.SPARSE ? new DataBuilderLevel(out, new MutableDataBlock(termComparator, mode)) : new MutableLevel<>(out, new MutableDataBlock(termComparator, mode));
while (terms.hasNext()) {
Pair<IndexedTerm, TokenTreeBuilder> term = terms.next();
addTerm(new InMemoryDataTerm(term.left, term.right), out);
}
dataLevel.finalFlush();
for (MutableLevel l : levels) // flush all of the buffers
l.flush();
// and finally write levels index
final long levelIndexPosition = out.position();
out.writeInt(levels.size());
for (int i = levels.size() - 1; i >= 0; i--) levels.get(i).flushMetadata();
dataLevel.flushMetadata();
out.writeLong(levelIndexPosition);
// sync contents of the output and disk,
// since it's not done implicitly on close
out.sync();
} catch (IOException e) {
throw new FSWriteError(e, file);
} finally {
FileUtils.closeQuietly(out);
}
}
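The same wrapping applies when the output goes through Cassandra's SequentialWriter: any IOException raised while writing or syncing is converted to an FSWriteError carrying the index file, and the writer is always released via FileUtils.closeQuietly in the finally block. Below is a condensed sketch of that open / write / sync / closeQuietly shape; it assumes SequentialWriterOption.DEFAULT as the writer options in place of the snippet's WRITER_OPTION constant, and IndexHeaderSketch / writeHeader are illustrative names only:

import java.io.IOException;
import org.apache.cassandra.io.FSWriteError;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.io.util.SequentialWriter;
import org.apache.cassandra.io.util.SequentialWriterOption;

final class IndexHeaderSketch
{
    // Write a single header field, then sync and close the writer with the
    // same structure as finish() above.
    static void writeHeader(java.io.File file, String version)
    {
        SequentialWriter out = null;
        try
        {
            out = new SequentialWriter(file, SequentialWriterOption.DEFAULT);
            out.writeUTF(version);

            // sync explicitly: contents are not flushed to disk implicitly on close
            out.sync();
        }
        catch (IOException e)
        {
            throw new FSWriteError(e, file);
        }
        finally
        {
            FileUtils.closeQuietly(out);
        }
    }
}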
Use of org.apache.cassandra.io.FSWriteError in project cassandra by apache.
The class HintsWriteExecutor, method flushInternal:
// writer not closed here
@SuppressWarnings("resource")
private void flushInternal(Iterator<ByteBuffer> iterator, HintsStore store)
{
    long maxHintsFileSize = DatabaseDescriptor.getMaxHintsFileSize();

    HintsWriter writer = store.getOrOpenWriter();

    try (HintsWriter.Session session = writer.newSession(writeBuffer))
    {
        while (iterator.hasNext())
        {
            session.append(iterator.next());
            if (session.position() >= maxHintsFileSize)
                break;
        }
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, writer.descriptor().fileName());
    }
}
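This example uses the String-based FSWriteError constructor, passing the hints file name from the writer's descriptor. The sketch below reproduces the size-bounded append loop in isolation; since HintsWriter.Session is internal to the hints subsystem, the Appender interface, BoundedAppend class, and appendUpTo method are assumptions introduced purely for illustration:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import org.apache.cassandra.io.FSWriteError;

// Hypothetical stand-in for HintsWriter.Session: appends serialized hints
// to a file and tracks the current write position.
interface Appender
{
    void append(ByteBuffer hint) throws IOException;
    long position();
    String fileName();
}

final class BoundedAppend
{
    // Append hints until the file reaches maxFileSize; the caller owns the
    // session's lifecycle and opens a new file for any remaining hints.
    static void appendUpTo(Iterator<ByteBuffer> hints, Appender session, long maxFileSize)
    {
        try
        {
            while (hints.hasNext())
            {
                session.append(hints.next());
                if (session.position() >= maxFileSize)
                    break;
            }
        }
        catch (IOException e)
        {
            // the String overload of FSWriteError records which hints file failed
            throw new FSWriteError(e, session.fileName());
        }
    }
}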