Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class BookKeeperDataStorageManager, method writeTransactionsAtCheckpoint.
@Override
public Collection<PostCheckpointAction> writeTransactionsAtCheckpoint(String tableSpace, LogSequenceNumber sequenceNumber, Collection<Transaction> transactions) throws DataStorageManagerException {
    if (sequenceNumber.isStartOfTime() && !transactions.isEmpty()) {
        throw new DataStorageManagerException("impossible to write a non empty transactions list at start-of-time");
    }
    String checkPointFile = getTablespaceTransactionsFile(tableSpace, sequenceNumber);
    LOGGER.log(Level.FINE, "writeTransactionsAtCheckpoint for tableSpace {0} sequenceNumber {1} to {2}, active transactions {3}", new Object[]{tableSpace, sequenceNumber, checkPointFile, transactions.size()});
    try (VisibleByteArrayOutputStream buffer = new VisibleByteArrayOutputStream();
            ExtendedDataOutputStream dout = new ExtendedDataOutputStream(buffer)) {
        // version
        dout.writeVLong(1);
        // flags for future implementations
        dout.writeVLong(0);
        dout.writeUTF(tableSpace);
        dout.writeZLong(sequenceNumber.ledgerId);
        dout.writeZLong(sequenceNumber.offset);
        dout.writeInt(transactions.size());
        for (Transaction t : transactions) {
            t.serialize(dout);
        }
        dout.flush();
        writeZNodeEnforceOwnership(tableSpace, checkPointFile, buffer.toByteArray(), null);
    } catch (IOException err) {
        throw new DataStorageManagerException(err);
    }
    Collection<PostCheckpointAction> result = new ArrayList<>();
    String tableSpaceDirectory = getTableSpaceZNode(tableSpace);
    List<String> stream = zkGetChildren(tableSpaceDirectory);
    for (String p : stream) {
        if (isTransactionsFile(p)) {
            try {
                // read each candidate transactions file, not the checkpoint file just written
                byte[] content = readZNode(p, new Stat());
                if (content != null) {
                    LogSequenceNumber logPositionInFile = readLogSequenceNumberFromTransactionsFile(tableSpace, content, p);
                    if (sequenceNumber.after(logPositionInFile)) {
                        LOGGER.log(Level.FINEST, "transactions metadata file " + p + ". will be deleted after checkpoint end");
                        result.add(new DeleteZNodeAction(tableSpace, "transactions", "delete transactions file " + p, p));
                    }
                }
            } catch (DataStorageManagerException ignore) {
                LOGGER.log(Level.SEVERE, "Unparsable transactions file " + p, ignore);
                result.add(new DeleteZNodeAction(tableSpace, "transactions", "delete unparsable transactions file " + p, p));
            }
        }
    }
    return result;
}
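For reference, the header written above can be decoded with the mirror-image herddb.utils.ExtendedDataInputStream. The helper below is a hypothetical sketch, assuming the usual read counterparts (readVLong, readUTF, readZLong, readInt) and herddb.utils.SimpleByteArrayInputStream; decoding of the serialized transactions themselves is elided.

// Hypothetical helper: decodes the header layout produced by
// writeTransactionsAtCheckpoint and returns the stored checkpoint position.
static LogSequenceNumber readTransactionsFileHeader(byte[] content) throws IOException {
    try (ExtendedDataInputStream din = new ExtendedDataInputStream(new SimpleByteArrayInputStream(content))) {
        long version = din.readVLong();   // written as 1 above
        long flags = din.readVLong();     // reserved, written as 0 above
        if (version != 1 || flags != 0) {
            throw new IOException("corrupted transactions file");
        }
        String tableSpace = din.readUTF();
        long ledgerId = din.readZLong();
        long offset = din.readZLong();
        int numTransactions = din.readInt(); // the serialized transactions follow
        return new LogSequenceNumber(ledgerId, offset);
    }
}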
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class BookKeeperDataStorageManager, method writePage.
/**
 * Write a record page to the given buffer and append an XXHash64 footer.
 *
 * @param newPage data to write
 * @param oo destination buffer for the serialized page
 * @return the total size in bytes of the serialized page, footer included
 * @throws IOException
 */
private static long writePage(Collection<Record> newPage, VisibleByteArrayOutputStream oo) throws IOException {
    try (ExtendedDataOutputStream dataOutput = new ExtendedDataOutputStream(oo)) {
        // version
        dataOutput.writeVLong(1);
        // flags for future implementations
        dataOutput.writeVLong(0);
        dataOutput.writeInt(newPage.size());
        for (Record record : newPage) {
            dataOutput.writeArray(record.key);
            dataOutput.writeArray(record.value);
        }
        dataOutput.flush();
        // hash the page content accumulated so far, then append the hash as a footer
        long hash = XXHash64Utils.hash(oo.getBuffer(), 0, oo.size());
        dataOutput.writeLong(hash);
        dataOutput.flush();
        return oo.size();
    }
}
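Because the XXHash64 footer is computed over everything that precedes it, a reader can validate a page before parsing it. A minimal sketch (verifyPageFooter is a hypothetical helper; it assumes writeLong emits the usual big-endian DataOutputStream layout):

// Recompute the hash of the payload and compare it with the
// trailing 8-byte footer appended by writePage.
static boolean verifyPageFooter(byte[] page) {
    int dataLen = page.length - Long.BYTES; // payload length without the footer
    long expected = XXHash64Utils.hash(page, 0, dataLen);
    long stored = java.nio.ByteBuffer.wrap(page, dataLen, Long.BYTES).getLong();
    return expected == stored;
}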
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class RecordSerializer, method serializeIndexKey.
public static Bytes serializeIndexKey(DataAccessor record, ColumnsList index, String[] columns) {
    String[] indexedColumnsList = index.getPrimaryKey();
    if (indexedColumnsList.length == 1) {
        String pkColumn = indexedColumnsList[0];
        if (columns.length != 1 || !columns[0].equals(pkColumn)) {
throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(pkColumn));
}
Column c = index.getColumn(pkColumn);
Object v = record.get(c.name);
if (v == null) {
if (index.allowNullsForIndexedValues()) {
return null;
}
throw new IllegalArgumentException("key field " + pkColumn + " cannot be null. Record data: " + record);
}
byte[] fieldValue = serialize(v, c.type);
return Bytes.from_array(fieldValue);
} else {
VisibleByteArrayOutputStream key = new VisibleByteArrayOutputStream(columns.length * Long.BYTES);
// beware that sometime we serialize only a part of the PK, for instance of a prefix index scan
try (ExtendedDataOutputStream doo_key = new ExtendedDataOutputStream(key)) {
int i = 0;
for (String indexedColumn : columns) {
if (!indexedColumn.equals(indexedColumnsList[i])) {
throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(indexedColumnsList));
}
Column c = index.getColumn(indexedColumn);
Object v = record.get(c.name);
if (v == null) {
if (!index.allowNullsForIndexedValues()) {
throw new IllegalArgumentException("key field " + indexedColumn + " cannot be null. Record data: " + record);
}
if (i == 0) {
// if the first column is null than we do not index the record at all
return null;
} else {
// we stop serializing the value at the first null
return Bytes.from_array(key.getBuffer(), 0, key.size());
}
}
serializeTo(v, c.type, doo_key);
i++;
}
} catch (IOException err) {
throw new RuntimeException(err);
}
return Bytes.from_array(key.getBuffer(), 0, key.size());
}
}
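The early return at the first null column relies on a prefix property: serializing a prefix of the indexed columns yields a byte prefix of the full composite key, so a truncated key is still usable for prefix index scans. An illustrative sketch (key is a hypothetical helper; it assumes string columns are written as length-prefixed arrays, as writePage does above):

// Illustration only: key("a") is a byte prefix of key("a", "b").
static byte[] key(String... columns) throws IOException {
    try (VisibleByteArrayOutputStream buf = new VisibleByteArrayOutputStream();
            ExtendedDataOutputStream out = new ExtendedDataOutputStream(buf)) {
        for (String column : columns) {
            out.writeArray(column.getBytes(java.nio.charset.StandardCharsets.UTF_8));
        }
        out.flush();
        return buf.toByteArray();
    }
}
// Arrays.equals(key("a"), Arrays.copyOf(key("a", "b"), key("a").length)) == true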
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class RecordSerializer, method serializeValueRaw.
public static byte[] serializeValueRaw(Map<String, Object> record, Table table, int expectedSize) {
    VisibleByteArrayOutputStream value = new VisibleByteArrayOutputStream(expectedSize <= 0 ? INITIAL_BUFFER_SIZE : expectedSize);
    try (ExtendedDataOutputStream doo = new ExtendedDataOutputStream(value)) {
        for (Column c : table.columns) {
            Object v = record.get(c.name);
            if (v != null && !table.isPrimaryKeyColumn(c.name)) {
                doo.writeVInt(c.serialPosition);
                serializeTypeAndValue(v, c.type, doo);
            }
        }
    } catch (IOException err) {
        throw new RuntimeException(err);
    }
    return value.toByteArrayNoCopy();
}
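A hypothetical usage sketch, assuming the Table.builder() fluent API used throughout HerdDB's tests. Note that primary key columns are skipped: only non-PK columns end up in the value, each tagged with its serialPosition so columns can be added later without rewriting old records.

Table table = Table.builder()
        .name("customers")
        .column("id", ColumnTypes.INTEGER)
        .column("name", ColumnTypes.STRING)
        .primaryKey("id")
        .build();
Map<String, Object> record = new HashMap<>();
record.put("id", 1);
record.put("name", "diennea");
// expectedSize <= 0 falls back to INITIAL_BUFFER_SIZE
byte[] value = RecordSerializer.serializeValueRaw(record, table, 0);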
Use of herddb.utils.ExtendedDataOutputStream in project herddb by diennea.
The class RecordSerializer, method serializePrimaryKeyRaw.
public static byte[] serializePrimaryKeyRaw(Map<String, Object> record, ColumnsList table, String[] columns) {
    String[] primaryKey = table.getPrimaryKey();
    if (primaryKey.length == 1) {
        String pkColumn = primaryKey[0];
        if (columns.length != 1 || !columns[0].equals(pkColumn)) {
            throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(pkColumn));
        }
        Column c = table.getColumn(pkColumn);
        Object v = record.get(c.name);
        if (v == null) {
            throw new IllegalArgumentException("key field " + pkColumn + " cannot be null. Record data: " + record);
        }
        return serialize(v, c.type);
    } else {
        VisibleByteArrayOutputStream key = new VisibleByteArrayOutputStream(columns.length * Long.BYTES);
        // beware that we may serialize only a part of the PK, for instance for a prefix index scan
        try (ExtendedDataOutputStream doo_key = new ExtendedDataOutputStream(key)) {
            int i = 0;
            for (String pkColumn : columns) {
                if (!pkColumn.equals(primaryKey[i])) {
                    throw new IllegalArgumentException("SQLTranslator error, " + Arrays.toString(columns) + " != " + Arrays.asList(primaryKey));
                }
                Column c = table.getColumn(pkColumn);
                Object v = record.get(c.name);
                if (v == null) {
                    throw new IllegalArgumentException("key field " + pkColumn + " cannot be null. Record data: " + record);
                }
                serializeTo(v, c.type, doo_key);
                i++;
            }
        } catch (IOException err) {
            throw new RuntimeException(err);
        }
        return key.toByteArrayNoCopy();
    }
}
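With the same hypothetical table and record as in the previous sketch, the single-column fast path applies: the raw serialization of the "id" value is returned directly, with no length prefix or column count, which keeps single-column keys compact.

// single-column PK: the serialized column value is the key itself
byte[] key = RecordSerializer.serializePrimaryKeyRaw(record, table, new String[]{"id"});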