Use of org.iq80.leveldb.WriteBatch in project cdap by caskdata: class LevelDBMessageTable, method persist.
@Override
protected void persist(Iterator<RawMessageTableEntry> entries) throws IOException {
    try (WriteBatch writeBatch = levelDB.createWriteBatch()) {
        while (entries.hasNext()) {
            RawMessageTableEntry entry = entries.next();
            byte[] rowKey = entry.getKey();
            // LevelDB doesn't make copies, and since we reuse RawMessageTableEntry object, we need to create copies.
            writeBatch.put(Arrays.copyOf(rowKey, rowKey.length), encodeValue(entry.getTxPtr(), entry.getPayload()));
        }
        levelDB.write(writeBatch, WRITE_OPTIONS);
    } catch (DBException ex) {
        throw new IOException(ex);
    }
}
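The method above stages every entry in a single WriteBatch so all rows become visible atomically, and try-with-resources releases the batch afterwards. The following is a minimal self-contained sketch of that pattern, not CDAP code: the temporary directory, the sample keys, and the sync(false) options are assumptions, and it opens the database through the pure-Java Iq80DBFactory.

import java.io.File;
import java.io.IOException;
import java.util.Arrays;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.Options;
import org.iq80.leveldb.WriteBatch;
import org.iq80.leveldb.WriteOptions;
import org.iq80.leveldb.impl.Iq80DBFactory;

public class BatchPutSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical location; the real tables configure their own LevelDB directory.
        File dir = new File("/tmp/leveldb-batch-sketch");
        try (DB db = Iq80DBFactory.factory.open(dir, new Options().createIfMissing(true))) {
            byte[] reusedKey = new byte[] { 0, 0, 1 };
            // WriteBatch is Closeable, so try-with-resources releases it, as in the snippet above.
            try (WriteBatch batch = db.createWriteBatch()) {
                // LevelDB keeps a reference to the passed arrays, so copy buffers that will be reused.
                batch.put(Arrays.copyOf(reusedKey, reusedKey.length), Iq80DBFactory.bytes("payload-1"));
                reusedKey[2] = 2; // the caller reuses the same buffer for the next row key
                batch.put(Arrays.copyOf(reusedKey, reusedKey.length), Iq80DBFactory.bytes("payload-2"));
                // Both puts are applied in one atomic write.
                db.write(batch, new WriteOptions().sync(false));
            }
        }
    }
}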
Use of org.iq80.leveldb.WriteBatch in project cdap by caskdata: class LevelDBPayloadTable, method persist.
@Override
public void persist(Iterator<RawPayloadTableEntry> entries) throws IOException {
    try (WriteBatch writeBatch = levelDB.createWriteBatch()) {
        while (entries.hasNext()) {
            RawPayloadTableEntry entry = entries.next();
            byte[] key = entry.getKey();
            byte[] value = entry.getValue();
            // LevelDB doesn't make copies, and since we reuse RawPayloadTableEntry object, we need to create copies.
            writeBatch.put(Arrays.copyOf(key, key.length), Arrays.copyOf(value, value.length));
        }
        levelDB.write(writeBatch, WRITE_OPTIONS);
    } catch (DBException ex) {
        throw new IOException(ex);
    }
}
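This version copies both the key and the value, since RawPayloadTableEntry reuses both buffers; in the message table above only the key is copied, presumably because encodeValue builds a fresh array. Both snippets also pass a shared WRITE_OPTIONS constant that is not part of the excerpt; the definition below is only a guess at what it might look like, not the actual CDAP field.

import org.iq80.leveldb.WriteOptions;

public class WriteOptionsSketch {
    // Hypothetical reconstruction of the WRITE_OPTIONS constant used above; the real field may differ.
    // sync(false) acknowledges the batch before it is flushed to disk, trading crash durability
    // for lower write latency; sync(true) would force a flush to disk on every batch.
    static final WriteOptions WRITE_OPTIONS = new WriteOptions().sync(false);
}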
Use of org.iq80.leveldb.WriteBatch in project jesos by groupon: class JLevelDBState, method store.
@Override
public Future<Variable> store(final Variable variable) {
    checkNotNull(variable, "variable is null");
    checkState(!closed.get(), "already closed");
    checkState(variable instanceof JVariable, "can not process native variable, use JVariable");
    final JVariable v = (JVariable) variable;
    return executor.submit(new Callable<Variable>() {

        @Override
        public Variable call() throws Exception {
            final WriteOptions writeOptions = new WriteOptions();
            writeOptions.sync(true);
            final String internedName = v.getName().intern();
            synchronized (internedName) {
                final JVariable current = load(internedName);
                if (current == null || current.getUuid().equals(v.getUuid())) {
                    final JVariable update = new JVariable(internedName, v.value());
                    final WriteBatch writeBatch = db.createWriteBatch();
                    writeBatch.delete(bytes(internedName));
                    writeBatch.put(bytes(internedName), update.getEntry().toByteArray());
                    db.write(writeBatch, writeOptions);
                    return update;
                } else {
                    return null;
                }
            }
        }
    });
}
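Here a delete and a put for the same key are paired in one batch, and WriteOptions.sync(true) blocks until the write reaches disk, which is what makes the stored variable durable. Below is a stripped-down sketch of that replace-and-sync pattern under the assumption of an already opened org.iq80.leveldb.DB; the UUID check, the JVariable wrapper, and the executor are left out, and unlike the snippet it closes the batch via try-with-resources.

import static org.iq80.leveldb.impl.Iq80DBFactory.bytes;

import java.io.IOException;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.WriteBatch;
import org.iq80.leveldb.WriteOptions;

public class SyncReplaceSketch {

    // Atomically replaces the value stored under name and blocks until the batch is synced to disk.
    static void replaceDurably(DB db, String name, byte[] newValue) throws IOException {
        WriteOptions writeOptions = new WriteOptions().sync(true);
        try (WriteBatch batch = db.createWriteBatch()) {
            batch.delete(bytes(name));        // drop any previous entry for the key
            batch.put(bytes(name), newValue); // and install the new value in the same atomic write
            db.write(batch, writeOptions);    // sync(true) forces the write to disk before returning
        }
    }
}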
Use of org.iq80.leveldb.WriteBatch in project camel by apache: class LevelDBAggregationRepository, method remove.
public void remove(final CamelContext camelContext, final String key, final Exchange exchange) {
    LOG.debug("Removing key [{}]", key);
    try {
        byte[] lDbKey = keyBuilder(repositoryName, key);
        final String exchangeId = exchange.getExchangeId();
        final Buffer exchangeBuffer = codec.marshallExchange(camelContext, exchange, allowSerializedHeaders);
        // remove the exchange
        byte[] rc = levelDBFile.getDb().get(lDbKey);
        if (rc != null) {
            WriteBatch batch = levelDBFile.getDb().createWriteBatch();
            try {
                batch.delete(lDbKey);
                LOG.trace("Removed key index {} -> {}", key, new Buffer(rc));
                // add exchange to confirmed index
                byte[] confirmedLDBKey = keyBuilder(getRepositoryNameCompleted(), exchangeId);
                batch.put(confirmedLDBKey, exchangeBuffer.toByteArray());
                LOG.trace("Added confirm index {} for repository {}", exchangeId, getRepositoryNameCompleted());
                levelDBFile.getDb().write(batch, levelDBFile.getWriteOptions());
            } finally {
                batch.close();
            }
        }
    } catch (IOException e) {
        throw new RuntimeException("Error removing key " + key + " from repository " + repositoryName, e);
    }
}
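The notable part here is that one batch moves the exchange between two key spaces: the delete from the active repository and the put into the completed repository are committed together, so the exchange cannot end up removed from the active index without also appearing in the completed index. A condensed sketch of that move pattern, assuming an open org.iq80.leveldb.DB and plain string keys instead of Camel's keyBuilder:

import static org.iq80.leveldb.impl.Iq80DBFactory.bytes;

import java.io.IOException;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.WriteBatch;
import org.iq80.leveldb.WriteOptions;

public class AtomicMoveSketch {

    // Moves the value stored under fromKey to toKey in a single atomic write.
    static boolean move(DB db, String fromKey, String toKey, WriteOptions options) throws IOException {
        byte[] value = db.get(bytes(fromKey));
        if (value == null) {
            return false; // nothing to move, mirroring the null check above
        }
        WriteBatch batch = db.createWriteBatch();
        try {
            batch.delete(bytes(fromKey));   // remove from the "active" key space
            batch.put(bytes(toKey), value); // record it under the "completed" key space
            db.write(batch, options);       // both changes are committed together
        } finally {
            batch.close(); // same close-in-finally discipline as the Camel code
        }
        return true;
    }
}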
Use of org.iq80.leveldb.WriteBatch in project hadoop by apache: class LeveldbTimelineStateStore, method storeToken.
@Override
public void storeToken(TimelineDelegationTokenIdentifier tokenId, Long renewDate) throws IOException {
    DataOutputStream ds = null;
    WriteBatch batch = null;
    try {
        byte[] k = createTokenEntryKey(tokenId.getSequenceNumber());
        if (db.get(k) != null) {
            throw new IOException(tokenId + " already exists");
        }
        byte[] v = buildTokenData(tokenId, renewDate);
        ByteArrayOutputStream bs = new ByteArrayOutputStream();
        ds = new DataOutputStream(bs);
        ds.writeInt(tokenId.getSequenceNumber());
        batch = db.createWriteBatch();
        batch.put(k, v);
        batch.put(LATEST_SEQUENCE_NUMBER_KEY, bs.toByteArray());
        db.write(batch);
    } catch (DBException e) {
        throw new IOException(e);
    } finally {
        IOUtils.cleanup(LOG, ds);
        IOUtils.cleanup(LOG, batch);
    }
}
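The state store pairs the token record with a metadata key holding the latest sequence number, so both are updated in the same atomic batch, and a preceding get rejects duplicate tokens; db.write(batch) uses LevelDB's default write options. A condensed sketch of that check-then-batch pattern, assuming an open org.iq80.leveldb.DB and a made-up metadata key name (the real constants and serialization live in LeveldbTimelineStateStore):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.iq80.leveldb.DB;
import org.iq80.leveldb.DBException;
import org.iq80.leveldb.WriteBatch;

public class TokenStoreSketch {

    // Hypothetical metadata key; the real constant is defined in LeveldbTimelineStateStore.
    static final byte[] LATEST_SEQUENCE_NUMBER_KEY =
            "latest-sequence-number".getBytes(StandardCharsets.UTF_8);

    // Stores a token record and the latest sequence number in one atomic batch.
    static void storeToken(DB db, byte[] tokenKey, byte[] tokenData, int sequenceNumber) throws IOException {
        if (db.get(tokenKey) != null) {
            throw new IOException("token already exists"); // reject duplicates before writing
        }
        ByteArrayOutputStream bs = new ByteArrayOutputStream();
        try (DataOutputStream ds = new DataOutputStream(bs)) {
            ds.writeInt(sequenceNumber); // serialize the sequence number as a 4-byte int
        }
        try (WriteBatch batch = db.createWriteBatch()) {
            batch.put(tokenKey, tokenData);
            batch.put(LATEST_SEQUENCE_NUMBER_KEY, bs.toByteArray());
            db.write(batch); // default WriteOptions, as in the Hadoop code
        } catch (DBException e) {
            throw new IOException(e); // translate LevelDB's unchecked errors, as above
        }
    }
}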