Use of com.orientechnologies.orient.core.storage.impl.local.paginated.OPaginatedCluster in project orientdb by orientechnologies.
In the class OConsoleDatabaseApp, method displayRawRecord:
@ConsoleCommand(description = "Display a record as raw bytes", onlineHelp = "Console-Command-Display-Raw-Record")
public void displayRawRecord(@ConsoleParameter(name = "rid", description = "The record id to display") final String iRecordId) throws IOException {
  checkForDatabase();
  ORecordId rid;
  if (iRecordId.indexOf(':') > -1)
    rid = new ORecordId(iRecordId);
  else {
    OIdentifiable rec = setCurrentRecord(Integer.parseInt(iRecordId));
    if (rec != null)
      rid = (ORecordId) rec.getIdentity();
    else
      return;
  }
  ORawBuffer record;
  ORecordId id = new ORecordId(rid);
  if (!(currentDatabase.getStorage() instanceof OLocalPaginatedStorage)) {
    record = currentDatabase.getStorage().readRecord(rid, null, false, false, null).getResult();
    if (record != null) {
      String content;
      final int maxBinaryDisplay = Integer.parseInt(properties.get("maxBinaryDisplay"));
      if (maxBinaryDisplay < record.buffer.length)
        content = new String(Arrays.copyOf(record.buffer, maxBinaryDisplay));
      else
        content = new String(record.buffer);
      out.println("\nRaw record content. The size is " + record.buffer.length + " bytes, while settings force to print first " + content.length() + " bytes:\n\n" + content);
    }
  } else {
    final OLocalPaginatedStorage storage = (OLocalPaginatedStorage) currentDatabase.getStorage();
    final OPaginatedCluster cluster = (OPaginatedCluster) storage.getClusterById(id.getClusterId());
    if (cluster == null) {
      message("\n cluster with id %d does not exist", id.getClusterId());
      return;
    }
    message("\n\nLOW LEVEL CLUSTER INFO");
    final OPaginatedCluster.RECORD_STATUS status = cluster.getRecordStatus(id.getClusterPosition());
    message("\n status: %s", status);
    final OPaginatedClusterDebug debugInfo = cluster.readDebug(id.getClusterPosition());
    message("\n cluster fileId: %d", debugInfo.fileId);
    message("\n cluster name: %s", cluster.getName());
    message("\n in cluster position: %d", debugInfo.clusterPosition);
    message("\n empty: %b", debugInfo.empty);
    message("\n contentSize: %d", debugInfo.contentSize);
    message("\n n-pages: %d", debugInfo.pages.size());
    message("\n\n +----------PAGE_ID---------------+------IN_PAGE_POSITION----------+---------IN_PAGE_SIZE-----------+----PAGE_CONTENT---->> ");
    for (OClusterPageDebug page : debugInfo.pages) {
      message("\n |%30d ", page.pageIndex);
      message(" |%30d ", page.inPagePosition);
      message(" |%30d ", page.inPageSize);
      message(" |%s", OBase64Utils.encodeBytes(page.content));
    }
    record = cluster.readRecord(id.getClusterPosition(), false);
  }
  if (record == null)
    throw new OSystemException("The record has been deleted");
  if ("ORecordSerializerBinary".equals(currentDatabase.getSerializer().toString())) {
    byte[] buff = record.getBuffer();
    ORecordSerializerBinaryDebug debugger = new ORecordSerializerBinaryDebug();
    ORecordSerializationDebug deserializeDebug = debugger.deserializeDebug(buff, currentDatabase);
    message("\n\nRECORD CONTENT INFO");
    message("\n class name: %s", deserializeDebug.className);
    message("\n fail on Reading: %b", deserializeDebug.readingFailure);
    message("\n fail position: %d", deserializeDebug.failPosition);
    if (deserializeDebug.readingException != null) {
      StringWriter writer = new StringWriter();
      deserializeDebug.readingException.printStackTrace(new PrintWriter(writer));
      message("\n Exception On Reading: %s", writer.getBuffer().toString());
    }
    message("\n number of properties : %d", deserializeDebug.properties.size());
    message("\n\n PROPERTIES");
    for (ORecordSerializationDebugProperty prop : deserializeDebug.properties) {
      message("\n property name: %s", prop.name);
      message("\n property type: %s", prop.type.name());
      message("\n property globalId: %d", prop.globalId);
      message("\n fail on reading: %b", prop.faildToRead);
      if (prop.faildToRead) {
        message("\n failed on reading position: %d", prop.failPosition);
        StringWriter writer = new StringWriter();
        prop.readingException.printStackTrace(new PrintWriter(writer));
        message("\n Exception on reading: %s", writer.getBuffer().toString());
      } else {
        if (prop.value instanceof ORidBag) {
          message("\n property value: ORidBag ");
          ((ORidBag) prop.value).debugPrint(System.out);
        } else
          message("\n property value: %s", prop.value != null ? prop.value.toString() : "null");
      }
      message("\n");
    }
  }
}
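
The console path above only works against a plocal database. Below is a minimal standalone sketch of the same low-level probe, built from the getRecordStatus and readRecord calls shown in the snippet; the RawRecordProbe class itself is a hypothetical helper, not part of OrientDB:

import com.orientechnologies.orient.core.id.ORecordId;
import com.orientechnologies.orient.core.storage.ORawBuffer;
import com.orientechnologies.orient.core.storage.impl.local.paginated.OLocalPaginatedStorage;
import com.orientechnologies.orient.core.storage.impl.local.paginated.OPaginatedCluster;

public final class RawRecordProbe {
  // Returns the raw buffer of a record, or null when the cluster is unknown
  // or the record is not PRESENT (absent, allocated-only, or removed).
  public static ORawBuffer probe(final OLocalPaginatedStorage storage, final ORecordId rid) throws java.io.IOException {
    final OPaginatedCluster cluster = (OPaginatedCluster) storage.getClusterById(rid.getClusterId());
    if (cluster == null)
      return null;
    final OPaginatedCluster.RECORD_STATUS status = cluster.getRecordStatus(rid.getClusterPosition());
    if (status != OPaginatedCluster.RECORD_STATUS.PRESENT)
      return null;
    return cluster.readRecord(rid.getClusterPosition(), false);
  }
}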
Use of com.orientechnologies.orient.core.storage.impl.local.paginated.OPaginatedCluster in project orientdb by orientechnologies.
In the class OIncrementalServerSync, method importDelta:
/**
 * Deleted records are written to the output stream first, then created/updated records. All records are sorted by record id.
 * <p>
 * Each record in the output stream is written using the following format:
 * <ol>
 * <li>Record's cluster id - 4 bytes</li>
 * <li>Record's cluster position - 8 bytes</li>
 * <li>Delete flag, 1 if the record is deleted - 1 byte</li>
 * <li>Record version, only if the record is not deleted - 4 bytes</li>
 * <li>Record type, only if the record is not deleted - 1 byte</li>
 * <li>Length of the binary presentation of the record, only if the record is not deleted - 4 bytes</li>
 * <li>Binary presentation of the record, only if the record is not deleted - the length of the content is given by the previous entry</li>
 * </ol>
 */
public void importDelta(final OServer serverInstance, final ODatabaseDocumentInternal db, final FileInputStream in, final String iNode) throws IOException {
  final String nodeName = serverInstance.getDistributedManager().getLocalNodeName();
  try {
    serverInstance.openDatabase(db);
    OScenarioThreadLocal.executeAsDistributed(new Callable<Object>() {
      @Override
      public Object call() throws Exception {
        db.activateOnCurrentThread();
        long totalRecords = 0;
        long totalCreated = 0;
        long totalUpdated = 0;
        long totalDeleted = 0;
        long totalHoles = 0;
        long totalSkipped = 0;
        ODistributedServerLog.info(this, nodeName, iNode, DIRECTION.IN, "Started import of delta for database '" + db.getName() + "'");
        long lastLap = System.currentTimeMillis();
        // final GZIPInputStream gzipInput = new GZIPInputStream(in);
        try {
          final DataInputStream input = new DataInputStream(in);
          try {
            final long records = input.readLong();
            for (long i = 0; i < records; ++i) {
              final int clusterId = input.readInt();
              final long clusterPos = input.readLong();
              final boolean deleted = input.readBoolean();
              final ORecordId rid = new ORecordId(clusterId, clusterPos);
              totalRecords++;
              final OPaginatedCluster cluster = (OPaginatedCluster) db.getStorage().getUnderlying().getClusterById(rid.getClusterId());
              final OPaginatedCluster.RECORD_STATUS recordStatus = cluster.getRecordStatus(rid.getClusterPosition());
              ORecord newRecord = null;
              if (deleted) {
                ODistributedServerLog.debug(this, nodeName, iNode, DIRECTION.IN, "DELTA <- deleting %s", rid);
                switch (recordStatus) {
                  case REMOVED:
                    // SKIP IT
                    totalSkipped++;
                    continue;
                  case ALLOCATED:
                  case PRESENT:
                    // DELETE IT
                    db.delete(rid);
                    break;
                  case NOT_EXISTENT:
                    totalSkipped++;
                    break;
                }
                totalDeleted++;
              } else {
                final int recordVersion = input.readInt();
                final int recordType = input.readByte();
                final int recordSize = input.readInt();
                final byte[] recordContent = new byte[recordSize];
                // READ THE FULL CONTENT: A PLAIN read() MAY RETURN FEWER BYTES THAN REQUESTED
                input.readFully(recordContent);
                switch (recordStatus) {
                  case REMOVED:
                    // SKIP IT
                    totalSkipped++;
                    continue;
                  case ALLOCATED:
                  case PRESENT:
                    // UPDATE IT
                    newRecord = Orient.instance().getRecordFactoryManager().newInstance((byte) recordType);
                    ORecordInternal.fill(newRecord, rid, ORecordVersionHelper.setRollbackMode(recordVersion), recordContent, true);
                    final ORecord loadedRecord = rid.getRecord();
                    if (loadedRecord instanceof ODocument) {
                      // APPLY CHANGES FIELD BY FIELD TO MARK DIRTY FIELDS FOR INDEXES/HOOKS
                      ODocument loadedDocument = (ODocument) loadedRecord;
                      loadedDocument.merge((ODocument) newRecord, false, false);
                      ORecordInternal.setVersion(loadedRecord, ORecordVersionHelper.setRollbackMode(recordVersion));
                      loadedDocument.setDirty();
                      newRecord = loadedDocument;
                    }
                    // SAVE THE UPDATED RECORD
                    newRecord.save();
                    ODistributedServerLog.debug(this, nodeName, iNode, DIRECTION.IN, "DELTA <- updating rid=%s type=%d size=%d v=%d content=%s", rid, recordType, recordSize, recordVersion, newRecord);
                    totalUpdated++;
                    break;
                  case NOT_EXISTENT:
                    // CREATE AND DELETE RECORDS IF NEEDED
                    do {
                      newRecord = Orient.instance().getRecordFactoryManager().newInstance((byte) recordType);
                      ORecordInternal.fill(newRecord, new ORecordId(rid.getClusterId(), -1), recordVersion, recordContent, true);
                      try {
                        newRecord.save();
                      } catch (ORecordNotFoundException e) {
                        ODistributedServerLog.info(this, nodeName, iNode, DIRECTION.IN, "DELTA <- error on saving record (not found) rid=%s type=%d size=%d v=%d content=%s", rid, recordType, recordSize, recordVersion, newRecord);
                      } catch (ORecordDuplicatedException e) {
                        ODistributedServerLog.info(this, nodeName, iNode, DIRECTION.IN, "DELTA <- error on saving record (duplicated %s) rid=%s type=%d size=%d v=%d content=%s", e.getRid(), rid, recordType, recordSize, recordVersion, newRecord);
                        // throw OException.wrapException(
                        // new ODistributedDatabaseDeltaSyncException("Error on delta sync: found duplicated record " + rid), e);
                        final ORecord duplicatedRecord = db.load(e.getRid(), null, true);
                        if (duplicatedRecord == null) {
                          // RECORD REMOVED: THE INDEX IS DIRTY, FIX THE DIRTY INDEX
                          final ODocument doc = (ODocument) newRecord;
                          final OIndex<?> index = db.getMetadata().getIndexManager().getIndex(e.getIndexName());
                          final List<String> fields = index.getDefinition().getFields();
                          final List<Object> values = new ArrayList<Object>(fields.size());
                          for (String f : fields) {
                            values.add(doc.field(f));
                          }
                          final Object keyValue = index.getDefinition().createValue(values);
                          index.remove(keyValue, e.getRid());
                          // RESAVE THE RECORD
                          newRecord.save();
                        } else
                          break;
                      }
                      if (newRecord.getIdentity().getClusterPosition() < clusterPos) {
                        // DELETE THE RECORD TO CREATE A HOLE
                        ODistributedServerLog.debug(this, nodeName, iNode, DIRECTION.IN, "DELTA <- creating hole rid=%s", newRecord.getIdentity());
                        newRecord.delete();
                        totalHoles++;
                      }
                    } while (newRecord.getIdentity().getClusterPosition() < clusterPos);
                    ODistributedServerLog.debug(this, nodeName, iNode, DIRECTION.IN, "DELTA <- creating rid=%s type=%d size=%d v=%d content=%s", rid, recordType, recordSize, recordVersion, newRecord);
                    totalCreated++;
                    break;
                }
                if (newRecord.getIdentity().isPersistent() && !newRecord.getIdentity().equals(rid))
                  throw new ODistributedDatabaseDeltaSyncException("Error on synchronization of records, rids are different: saved " + newRecord.getIdentity() + ", but it should be " + rid);
              }
              final long now = System.currentTimeMillis();
              if (now - lastLap > 2000) {
                // DUMP STATS EVERY 2 SECONDS
                ODistributedServerLog.info(this, nodeName, iNode, DIRECTION.IN, "- %,d total entries: %,d created, %,d updated, %,d deleted, %,d holes, %,d skipped...", totalRecords, totalCreated, totalUpdated, totalDeleted, totalHoles, totalSkipped);
                lastLap = now;
              }
            }
            db.getMetadata().reload();
          } finally {
            input.close();
          }
        } catch (Exception e) {
          ODistributedServerLog.error(this, nodeName, iNode, DIRECTION.IN, "Error on installing database delta '%s' on local server", e, db.getName());
          throw OException.wrapException(new ODistributedException("Error on installing database delta '" + db.getName() + "' on local server"), e);
        } finally {
          // gzipInput.close();
        }
        ODistributedServerLog.info(this, nodeName, iNode, DIRECTION.IN, "Installed database delta for '%s'. %,d total entries: %,d created, %,d updated, %,d deleted, %,d holes, %,d skipped", db.getName(), totalRecords, totalCreated, totalUpdated, totalDeleted, totalHoles, totalSkipped);
        return null;
      }
    });
    db.activateOnCurrentThread();
  } catch (Exception e) {
    // FORCE FULL DATABASE SYNC
    ODistributedServerLog.error(this, nodeName, iNode, DIRECTION.IN, "Error while applying changes of database delta sync on '%s': forcing full database sync...", e, db.getName());
    throw OException.wrapException(new ODistributedDatabaseDeltaSyncException("Error while applying changes of database delta sync on '" + db.getName() + "': forcing full database sync..."), e);
  }
}
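
For reference, here is a minimal sketch of the producer side of the format described in the Javadoc above: one entry per record, written with a DataOutputStream so each write mirrors the DataInputStream reads in importDelta. The DeltaEntryWriter helper is an assumption for illustration, not OrientDB API:

import java.io.DataOutputStream;
import java.io.IOException;

final class DeltaEntryWriter {
  // Writes one record entry in the delta-stream format (hypothetical helper).
  static void writeEntry(DataOutputStream out, int clusterId, long clusterPos,
      boolean deleted, int version, byte recordType, byte[] content) throws IOException {
    out.writeInt(clusterId);        // record's cluster id - 4 bytes
    out.writeLong(clusterPos);      // record's cluster position - 8 bytes
    out.writeBoolean(deleted);      // delete flag - 1 byte
    if (!deleted) {
      out.writeInt(version);        // record version - 4 bytes
      out.writeByte(recordType);    // record type - 1 byte
      out.writeInt(content.length); // length of the binary presentation - 4 bytes
      out.write(content);           // binary presentation of the record
    }
  }
}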
Use of com.orientechnologies.orient.core.storage.impl.local.paginated.OPaginatedCluster in project orientdb by orientechnologies.
In the class OCommandExecutorSQLHASyncCluster, method replaceCluster:
public static Object replaceCluster(final ODistributedAbstractPlugin dManager, final OServer serverInstance, final String databaseName, final String clusterName) {
  final ODistributedConfiguration cfg = dManager.getDatabaseConfiguration(databaseName);
  final String dbPath = serverInstance.getDatabaseDirectory() + databaseName;
  final String nodeName = dManager.getLocalNodeName();
  final List<String> nodesWhereClusterIsCfg = cfg.getServers(clusterName, null);
  nodesWhereClusterIsCfg.remove(nodeName);
  if (nodesWhereClusterIsCfg.isEmpty())
    throw new OCommandExecutionException("Cannot synchronize cluster '" + clusterName + "' because it is not configured on any running node");
  final OSyncClusterTask task = new OSyncClusterTask(clusterName);
  final ODistributedResponse response = dManager.sendRequest(databaseName, null, nodesWhereClusterIsCfg, task, dManager.getNextMessageIdCounter(), ODistributedRequest.EXECUTION_MODE.RESPONSE, null, null, null);
  final Map<String, Object> results = (Map<String, Object>) response.getPayload();
  File tempFile = null;
  FileOutputStream out = null;
  try {
    tempFile = new File(Orient.getTempPath() + "/backup_" + databaseName + "_" + clusterName + "_server" + dManager.getLocalNodeId() + "_toInstall.zip");
    if (tempFile.exists())
      tempFile.delete();
    else
      tempFile.getParentFile().mkdirs();
    tempFile.createNewFile();
    long fileSize = 0;
    out = new FileOutputStream(tempFile, false);
    for (Map.Entry<String, Object> r : results.entrySet()) {
      final Object value = r.getValue();
      if (value instanceof Boolean) {
        continue;
      } else if (value instanceof Throwable) {
        ODistributedServerLog.error(null, nodeName, r.getKey(), ODistributedServerLog.DIRECTION.IN, "error on installing cluster %s in %s", (Exception) value, databaseName, dbPath);
      } else if (value instanceof ODistributedDatabaseChunk) {
        ODistributedDatabaseChunk chunk = (ODistributedDatabaseChunk) value;
        // DELETE ANY PREVIOUS .COMPLETED FILE
        final File completedFile = new File(tempFile.getAbsolutePath() + ".completed");
        if (completedFile.exists())
          completedFile.delete();
        fileSize = writeDatabaseChunk(nodeName, 1, chunk, out);
        for (int chunkNum = 2; !chunk.last; chunkNum++) {
          final Object result = dManager.sendRequest(databaseName, null, OMultiValue.getSingletonList(r.getKey()), new OCopyDatabaseChunkTask(chunk.filePath, chunkNum, chunk.offset + chunk.buffer.length, false), dManager.getNextMessageIdCounter(), ODistributedRequest.EXECUTION_MODE.RESPONSE, null, null, null);
          if (result instanceof Boolean)
            continue;
          else if (result instanceof Exception) {
            ODistributedServerLog.error(null, nodeName, r.getKey(), ODistributedServerLog.DIRECTION.IN, "error on installing database %s in %s (chunk #%d)", (Exception) result, databaseName, dbPath, chunkNum);
          } else if (result instanceof ODistributedDatabaseChunk) {
            chunk = (ODistributedDatabaseChunk) result;
            fileSize += writeDatabaseChunk(nodeName, chunkNum, chunk, out);
          }
        }
        out.flush();
        // CREATE THE .COMPLETED FILE TO SIGNAL EOF
        new File(tempFile.getAbsolutePath() + ".completed").createNewFile();
      }
    }
    final String tempDirectoryPath = Orient.getTempPath() + "/backup_" + databaseName + "_" + clusterName + "_toInstall";
    final File tempDirectory = new File(tempDirectoryPath);
    tempDirectory.mkdirs();
    OZIPCompressionUtil.uncompressDirectory(new FileInputStream(tempFile), tempDirectory.getAbsolutePath(), null);
    ODatabaseDocumentInternal db = ODatabaseRecordThreadLocal.INSTANCE.getIfDefined();
    final boolean openDatabaseHere = db == null;
    if (db == null)
      db = serverInstance.openDatabase("plocal:" + dbPath, "", "", null, true);
    try {
      final OAbstractPaginatedStorage stg = (OAbstractPaginatedStorage) db.getStorage().getUnderlying();
      // TODO: THE FREEZE MIGHT NOT BE NEEDED
      stg.freeze(false);
      try {
        final OPaginatedCluster cluster = (OPaginatedCluster) stg.getClusterByName(clusterName);
        final File tempClusterFile = new File(tempDirectoryPath + "/" + clusterName + OPaginatedCluster.DEF_EXTENSION);
        cluster.replaceFile(tempClusterFile);
      } finally {
        stg.release();
      }
      db.getLocalCache().invalidate();
    } finally {
      if (openDatabaseHere)
        db.close();
    }
    return String.format("Cluster correctly replaced, transferred %d bytes", fileSize);
  } catch (Exception e) {
    ODistributedServerLog.error(null, nodeName, null, ODistributedServerLog.DIRECTION.NONE, "error on transferring database '%s' to '%s'", e, databaseName, tempFile);
    throw OException.wrapException(new ODistributedException("Error on transferring database"), e);
  } finally {
    try {
      if (out != null) {
        out.flush();
        out.close();
      }
    } catch (IOException e) {
      // IGNORE: BEST-EFFORT CLOSE OF THE TEMPORARY FILE
    }
  }
}
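
The transfer above relies on a simple convention: a sibling "<file>.completed" marker is deleted before writing and re-created once the last chunk has been flushed, so a reader can distinguish a fully written file from one still being streamed. A minimal sketch of that convention, with TransferMarker as a hypothetical helper:

import java.io.File;
import java.io.IOException;

final class TransferMarker {
  // Creates the ".completed" marker next to the payload to signal EOF.
  static void markCompleted(File payload) throws IOException {
    new File(payload.getAbsolutePath() + ".completed").createNewFile();
  }

  // Returns true once the payload has been fully written by the producer.
  static boolean isCompleted(File payload) {
    return new File(payload.getAbsolutePath() + ".completed").exists();
  }
}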
Use of com.orientechnologies.orient.core.storage.impl.local.paginated.OPaginatedCluster in project orientdb by orientechnologies.
In the class OCreateRecordTask, method executeRecordTask:
@Override
public Object executeRecordTask(final ODistributedRequestId requestId, final OServer iServer, final ODistributedServerManager iManager, final ODatabaseDocumentInternal database) throws Exception {
  if (ODistributedServerLog.isDebugEnabled())
    ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.IN, "Creating record %s/%s v.%d reqId=%s...", database.getName(), rid.toString(), version, requestId);
  if (!rid.isPersistent())
    throw new ODistributedException("Record " + rid + " has not been saved on owner node first (temporary rid)");
  final OPaginatedCluster cluster = (OPaginatedCluster) ODatabaseRecordThreadLocal.INSTANCE.get().getStorage().getClusterById(rid.getClusterId());
  final OPaginatedCluster.RECORD_STATUS recordStatus = cluster.getRecordStatus(rid.getClusterPosition());
  if (ODistributedServerLog.isDebugEnabled())
    ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.IN, "Found record %s/%s status=%s reqId=%s...", database.getName(), rid.toString(), recordStatus, requestId);
  switch (recordStatus) {
    case REMOVED: {
      // RECYCLE THE RID AND OVERWRITE IT WITH THE NEW CONTENT, THEN FALL THROUGH TO SAVE IT
      getRecord();
      ODatabaseRecordThreadLocal.INSTANCE.get().recycle(record);
    }
    case ALLOCATED:
      getRecord();
      // FORCE CREATION
      if (record.getVersion() < 0)
        // INCREMENT THE VERSION IN CASE OF ROLLBACK
        ORecordInternal.setVersion(record, record.getVersion() + 1);
      record.save();
      break;
    case PRESENT: {
      getRecord();
      record.save();
      break;
    }
    case NOT_EXISTENT: {
      ORecordId newRid;
      do {
        getRecord();
        if (clusterId > -1)
          record.save(database.getClusterNameById(clusterId), true);
        else if (rid.getClusterId() != -1)
          record.save(database.getClusterNameById(rid.getClusterId()), true);
        else
          record.save();
        newRid = (ORecordId) record.getIdentity();
        if (newRid.getClusterPosition() >= rid.getClusterPosition())
          break;
        // CREATE A HOLE
        record.delete();
        record = null;
      } while (newRid.getClusterPosition() < rid.getClusterPosition());
      if (!rid.equals(newRid)) {
        ODistributedServerLog.warn(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.IN, "Record %s has been saved with the RID %s instead of the expected %s reqId=%s", record, newRid, rid, requestId);
        // DELETE THE INVALID RECORD FIRST
        record.delete();
        throw new ODistributedException("Record " + rid + " has been saved with the different RID " + newRid + " on server " + iManager.getLocalNodeName());
      }
      ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.IN, "+-> assigning new rid %s/%s v.%d reqId=%s", database.getName(), rid.toString(), record.getVersion(), requestId);
    }
  }
  // IMPROVED TRANSPORT BY AVOIDING THE RECORD CONTENT, BUT JUST RID + VERSION
  return new OPlaceholder(record);
}
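
The NOT_EXISTENT branch keeps saving and deleting throwaway records until the cluster's next position reaches the expected RID, leaving holes behind. A minimal sketch of that alignment loop in isolation; RidAligner is a hypothetical helper, while save, delete, and getClusterNameById are the same calls used above:

import com.orientechnologies.orient.core.db.document.ODatabaseDocument;
import com.orientechnologies.orient.core.id.ORecordId;
import com.orientechnologies.orient.core.record.impl.ODocument;

final class RidAligner {
  // Saves documents into the target cluster until one lands at (or beyond)
  // the expected cluster position, deleting the intermediate ones as holes.
  static ODocument saveAtOrBeyond(final ODatabaseDocument db, final ORecordId expected) {
    while (true) {
      final ODocument doc = new ODocument();
      db.save(doc, db.getClusterNameById(expected.getClusterId()));
      if (doc.getIdentity().getClusterPosition() >= expected.getClusterPosition())
        return doc; // reached (or passed) the expected position
      doc.delete();  // leave a hole and try again
    }
  }
}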
Use of com.orientechnologies.orient.core.storage.impl.local.paginated.OPaginatedCluster in project orientdb by orientechnologies.
In the class OSyncClusterTask, method execute:
@Override
public Object execute(ODistributedRequestId requestId, final OServer iServer, final ODistributedServerManager iManager, final ODatabaseDocumentInternal database) throws Exception {
  if (getNodeSource() == null || !getNodeSource().equals(iManager.getLocalNodeName())) {
    if (database == null)
      throw new ODistributedException("Database instance is null");
    final String databaseName = database.getName();
    try {
      final Long lastDeployment = (Long) iManager.getConfigurationMap().get(DEPLOYCLUSTER + databaseName + "." + clusterName);
      if (lastDeployment != null && lastDeployment.longValue() == random) {
        // SKIP IT
        ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.NONE, "Skip deploying cluster '%s' because already executed", clusterName);
        return Boolean.FALSE;
      }
      iManager.getConfigurationMap().put(DEPLOYCLUSTER + databaseName + "." + clusterName, random);
      ODistributedServerLog.info(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.OUT, "deploying cluster %s...", databaseName);
      final File backupFile = new File(Orient.getTempPath() + "/backup_" + databaseName + "_" + clusterName + "_server" + iManager.getLocalNodeId() + ".zip");
      if (backupFile.exists())
        backupFile.delete();
      else
        backupFile.getParentFile().mkdirs();
      backupFile.createNewFile();
      ODistributedServerLog.info(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.OUT, "Creating backup of cluster '%s' in directory: %s...", databaseName, backupFile.getAbsolutePath());
      final OPaginatedCluster cluster = (OPaginatedCluster) database.getStorage().getClusterByName(clusterName);
      switch (mode) {
        case MERGE:
          throw new IllegalArgumentException("Merge mode not supported");
        case FULL_REPLACE:
          final FileOutputStream fileOutputStream = new FileOutputStream(backupFile);
          final File completedFile = new File(backupFile.getAbsolutePath() + ".completed");
          if (completedFile.exists())
            completedFile.delete();
          new Thread(new Runnable() {
            @Override
            public void run() {
              Thread.currentThread().setName("OrientDB SyncCluster node=" + iManager.getLocalNodeName() + " db=" + databaseName + " cluster=" + clusterName);
              try {
                database.activateOnCurrentThread();
                database.freeze();
                try {
                  final String fileName = cluster.getFileName();
                  final String dbPath = iServer.getDatabaseDirectory() + databaseName;
                  final ArrayList<String> fileNames = new ArrayList<String>();
                  // COPY THE PCL AND CPM FILES
                  fileNames.add(fileName);
                  fileNames.add(fileName.substring(0, fileName.length() - 4) + OClusterPositionMap.DEF_EXTENSION);
                  final OClass clazz = database.getMetadata().getSchema().getClassByClusterId(cluster.getId());
                  if (clazz != null) {
                    // CHECK FOR AUTO-SHARDED INDEXES
                    final OIndex<?> asIndex = clazz.getAutoShardingIndex();
                    if (asIndex != null) {
                      final int partition = OCollections.indexOf(clazz.getClusterIds(), cluster.getId());
                      final String indexName = asIndex.getName();
                      fileNames.add(indexName + "_" + partition + OAutoShardingIndexEngine.SUBINDEX_METADATA_FILE_EXTENSION);
                      fileNames.add(indexName + "_" + partition + OAutoShardingIndexEngine.SUBINDEX_TREE_FILE_EXTENSION);
                      fileNames.add(indexName + "_" + partition + OAutoShardingIndexEngine.SUBINDEX_BUCKET_FILE_EXTENSION);
                      fileNames.add(indexName + "_" + partition + OAutoShardingIndexEngine.SUBINDEX_NULL_BUCKET_FILE_EXTENSION);
                    }
                  }
                  OZIPCompressionUtil.compressFiles(dbPath, fileNames.toArray(new String[fileNames.size()]), fileOutputStream, null, OGlobalConfiguration.DISTRIBUTED_DEPLOYDB_TASK_COMPRESSION.getValueAsInteger());
                } catch (IOException e) {
                  OLogManager.instance().error(this, "Cannot execute backup of cluster '%s.%s' for deploy cluster", e, databaseName, clusterName);
                } finally {
                  database.release();
                }
              } finally {
                try {
                  fileOutputStream.close();
                } catch (IOException e) {
                  // IGNORE: BEST-EFFORT CLOSE
                }
                try {
                  completedFile.createNewFile();
                } catch (IOException e) {
                  OLogManager.instance().error(this, "Cannot create file of backup completed: %s", e, completedFile);
                }
              }
            }
          }).start();
          // TODO: SUPPORT BACKUP ON CLUSTER
          final long fileSize = backupFile.length();
          ODistributedServerLog.info(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.OUT, "Sending the compressed cluster '%s.%s' over the NETWORK to node '%s', size=%s...", databaseName, clusterName, getNodeSource(), OFileUtils.getSizeAsString(fileSize));
          final ODistributedDatabaseChunk chunk = new ODistributedDatabaseChunk(backupFile, 0, CHUNK_MAX_SIZE, null, false);
          ODistributedServerLog.info(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.OUT, "- transferring chunk #%d offset=%d size=%s...", 1, 0, OFileUtils.getSizeAsNumber(chunk.buffer.length));
          return chunk;
      }
    } catch (OLockException e) {
      ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.NONE, "Skip deploying cluster %s.%s because another node is doing it", databaseName, clusterName);
    } finally {
      ODistributedServerLog.info(this, iManager.getLocalNodeName(), getNodeSource(), ODistributedServerLog.DIRECTION.OUT, "Deploy cluster %s task completed", clusterName);
    }
  } else
    ODistributedServerLog.debug(this, iManager.getLocalNodeName(), getNodeSource(), DIRECTION.NONE, "Skip deploying cluster %s.%s from the same node", database.getName(), clusterName);
  return Boolean.FALSE;
}
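
The backup thread derives the companion cluster-position-map file name from the cluster's data file by swapping extensions (the .pcl data file always has a .cpm sibling). A minimal sketch of that derivation, assuming OPaginatedCluster.DEF_EXTENSION is the 4-character ".pcl" suffix stripped by the substring call above and that OClusterPositionMap lives in the same paginated package, as the snippet's usage suggests:

import com.orientechnologies.orient.core.storage.impl.local.paginated.OClusterPositionMap;
import com.orientechnologies.orient.core.storage.impl.local.paginated.OPaginatedCluster;

final class ClusterFiles {
  // e.g. "mycluster.pcl" -> "mycluster.cpm" (hypothetical helper)
  static String positionMapFileName(final String clusterFileName) {
    return clusterFileName.substring(0, clusterFileName.length() - OPaginatedCluster.DEF_EXTENSION.length())
        + OClusterPositionMap.DEF_EXTENSION;
  }
}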