Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class MysqlSessions, method dropDatabase.
public void dropDatabase() {
    LOG.debug("Drop database: {}", this.database());
    String sql = this.buildDropDatabase(this.database());
    try (Connection conn = this.openWithoutDB(DROP_DB_TIMEOUT)) {
        conn.createStatement().execute(sql);
    } catch (SQLException e) {
        if (e.getCause() instanceof SocketTimeoutException) {
            LOG.warn("Drop database '{}' timeout", this.database());
        } else {
            throw new BackendException("Failed to drop database '%s'", e, this.database());
        }
    }
}
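BackendException here takes a format string, the cause, and the format arguments, in that order. As a minimal illustration of how a caller might wrap this, assuming an already-configured MysqlSessions instance named sessions (the variable name and the surrounding flow are hypothetical, not from the project):

    // Illustrative caller; `sessions` is an assumed MysqlSessions instance.
    try {
        sessions.dropDatabase();
    } catch (BackendException e) {
        // BackendException is unchecked, so the caller can simply log and rethrow
        LOG.error("Unable to drop database before re-initializing the store", e);
        throw e;
    }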
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class MysqlSessions, method connect.
private Connection connect(String url) throws SQLException {
    LOG.info("Connect to the jdbc url: '{}'", url);
    String driverName = this.config.get(MysqlOptions.JDBC_DRIVER);
    String username = this.config.get(MysqlOptions.JDBC_USERNAME);
    String password = this.config.get(MysqlOptions.JDBC_PASSWORD);
    try {
        // Register JDBC driver
        Class.forName(driverName);
    } catch (ClassNotFoundException e) {
        throw new BackendException("Invalid driver class '%s'", driverName);
    }
    return DriverManager.getConnection(url, username, password);
}
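For comparison, a self-contained sketch of the same register-then-connect pattern; the driver class, URL, and credentials below are placeholders rather than values read from MysqlOptions:

    // Standalone sketch of the pattern above; all literals are placeholders.
    private static Connection openConnection() throws SQLException {
        String driverName = "com.mysql.cj.jdbc.Driver";
        String url = "jdbc:mysql://127.0.0.1:3306/hugegraph?useSSL=false";
        try {
            // Explicit registration only matters for pre-JDBC-4 drivers, but it
            // turns a missing driver jar into a clear BackendException up front
            Class.forName(driverName);
        } catch (ClassNotFoundException e) {
            throw new BackendException("Invalid driver class '%s'", driverName);
        }
        return DriverManager.getConnection(url, "placeholder-user", "placeholder-password");
    }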
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class MysqlEntryIterator, method fetch.
@Override
protected final boolean fetch() {
    assert this.current == null;
    if (this.next != null) {
        this.current = this.next;
        this.next = null;
    }
    try {
        while (this.results.next()) {
            MysqlBackendEntry entry = this.row2Entry(this.results.resultSet());
            this.lastest = entry;
            BackendEntry merged = this.merger.apply(this.current, entry);
            if (this.current == null) {
                // The first entry read
                this.current = merged;
            } else if (merged == this.current) {
                // The next row belongs to the current entry
                assert merged != null;
            } else {
                // A new entry
                assert this.next == null;
                this.next = merged;
                break;
            }
            // Stop fetching when the limit is exceeded
            if (this.reachLimit(this.fetched() - 1)) {
                this.exceedLimit = true;
                // Remove the last record because limit + 1 records were fetched
                this.removeLastRecord();
                this.results.close();
                break;
            }
        }
    } catch (SQLException e) {
        throw new BackendException("Fetch next error", e);
    }
    return this.current != null;
}
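fetch() reads one row ahead so that rows belonging to the same backend entry can be merged before the entry is handed out; the extra row is parked in this.next for the following call. A hedged sketch of how such an iterator is consumed, assuming the base iterator invokes fetch() from hasNext() when nothing is buffered and that iter is a MysqlEntryIterator obtained from a table query (both assumptions, not shown in the snippet):

    // Illustrative consumption; `iter` is an assumed MysqlEntryIterator.
    try {
        while (iter.hasNext()) {
            BackendEntry entry = iter.next();  // each entry is already merged
            // process the entry ...
        }
    } finally {
        iter.close();  // releases the wrapped ResultSet
    }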
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class HbaseStore, method clear.
@Override
public void clear(boolean clearSpace) {
    this.checkConnectionOpened();
    // Return if the namespace does not exist
    try {
        if (!this.sessions.existsNamespace()) {
            return;
        }
    } catch (IOException e) {
        throw new BackendException("Exception when checking for the existence of '%s'", e, this.namespace);
    }
    if (!clearSpace) {
        // Drop tables
        for (String table : this.tableNames()) {
            try {
                this.sessions.dropTable(table);
            } catch (TableNotFoundException e) {
                LOG.warn("The table '{}' of '{}' store does not exist when trying to drop", table, this.store);
            } catch (IOException e) {
                throw new BackendException("Failed to drop table '%s' of '%s' store", e, table, this.store);
            }
        }
    } else {
        // Drop namespace
        try {
            this.sessions.dropNamespace();
        } catch (IOException e) {
            String notEmpty = "Only empty namespaces can be removed";
            if (e.getCause().getMessage().contains(notEmpty)) {
                LOG.debug("Can't drop namespace '{}': {}", this.namespace, e);
            } else {
                throw new BackendException("Failed to drop namespace '%s' of '%s' store", e, this.namespace, this.store);
            }
        }
    }
    LOG.debug("Store cleared: {}", this.store);
}
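The clearSpace flag selects the scope: false drops only this store's tables, while true drops the whole namespace, which HBase rejects while other tables still live in it. A minimal illustrative sketch, assuming an opened HbaseStore instance named store:

    // Illustrative only; `store` is an assumed, already-opened HbaseStore.
    store.clear(false);  // drop this store's tables but keep the shared namespace
    store.clear(true);   // drop the namespace; logged and skipped if it is not empty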
Use of com.baidu.hugegraph.backend.BackendException in project incubator-hugegraph by apache.
Class LZ4Util, method decompress.
public static BytesBuffer decompress(byte[] bytes, int blockSize, float bufferRatio) {
    float ratio = bufferRatio <= 0.0F ? DEFAULT_BUFFER_RATIO : bufferRatio;
    LZ4Factory factory = LZ4Factory.fastestInstance();
    LZ4FastDecompressor decompressor = factory.fastDecompressor();
    ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
    int initBufferSize = Math.min(Math.round(bytes.length * ratio), BytesBuffer.MAX_BUFFER_CAPACITY);
    BytesBuffer buf = new BytesBuffer(initBufferSize);
    LZ4BlockInputStream lzInput = new LZ4BlockInputStream(bais, decompressor);
    int count;
    byte[] buffer = new byte[blockSize];
    try {
        while ((count = lzInput.read(buffer)) != -1) {
            buf.write(buffer, 0, count);
        }
        lzInput.close();
    } catch (IOException e) {
        throw new BackendException("Failed to decompress", e);
    }
    /*
     * If reading is needed outside this method,
     * remember to call forReadWritten()
     */
    return buf;
}
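For reference, a round-trip sketch using the same lz4-java block streams; the helper method, block size, and data are illustrative, and this is not the project's own compress helper:

    // Illustrative LZ4 block round trip (not HugeGraph's own compress helper).
    static BytesBuffer roundTrip(byte[] data, int blockSize) throws IOException {
        LZ4Factory factory = LZ4Factory.fastestInstance();
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (LZ4BlockOutputStream lzOutput = new LZ4BlockOutputStream(
                baos, blockSize, factory.fastCompressor())) {
            lzOutput.write(data);
        }
        // Pass 0 as bufferRatio to fall back to DEFAULT_BUFFER_RATIO
        return LZ4Util.decompress(baos.toByteArray(), blockSize, 0.0F);
    }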