Use of org.apache.ignite.internal.util.typedef.internal.SB in project ignite by apache:
the class GridH2CollocationModel, method toString().
/** {@inheritDoc} */
@Override
public String toString() {
    calculate();

    SB b = new SB();

    for (int lvl = 0; lvl < 20; lvl++) {
        if (!toString(b, lvl))
            break;

        b.a('\n');
    }

    return b.toString();
}
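SB is a typedef for Ignite's internal GridStringBuilder, a thin wrapper around StringBuilder whose a(...) overloads append a value and return the builder, so appends can be chained. A minimal sketch of the pattern, assuming only ignite-core on the classpath (the SbDemo class and its values are illustrative, not Ignite API):

import org.apache.ignite.internal.util.typedef.internal.SB;

public class SbDemo {
    public static void main(String[] args) {
        // a(...) appends and returns the builder, so calls can be chained.
        SB b = new SB("Levels [");

        for (int lvl = 0; lvl < 3; lvl++) {
            if (lvl > 0)
                b.a(", ");

            b.a("lvl=").a(lvl);
        }

        b.a(']');

        System.out.println(b.toString()); // Levels [lvl=0, lvl=1, lvl=2]
    }
}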
Use of org.apache.ignite.internal.util.typedef.internal.SB in project ignite by apache:
the class GridH2TreeIndex, method toString().
/** {@inheritDoc} */
@Override
public String toString() {
    SB sb = new SB((indexType.isUnique() ? "Unique index '" : "Index '") + getName() + "' [");

    boolean first = true;

    for (IndexColumn col : getIndexColumns()) {
        if (first)
            first = false;
        else
            sb.a(", ");

        sb.a(col.getSQL());
    }

    sb.a(" ]");

    return sb.toString();
}
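The first flag above is the classic manual comma-join idiom: emit the separator before every element except the first. On Java 8+ the same output shape can be produced with java.util.StringJoiner; a hedged alternative sketch, where "IDX", "ID" and "NAME" are hypothetical identifiers rather than anything from the Ignite code:

import java.util.StringJoiner;

public class JoinDemo {
    public static void main(String[] args) {
        // Prefix and suffix reproduce the "Index '...' [" and " ]" framing above.
        StringJoiner j = new StringJoiner(", ", "Index 'IDX' [", " ]");

        for (String col : new String[] {"ID", "NAME"})
            j.add(col);

        System.out.println(j); // Index 'IDX' [ID, NAME ]
    }
}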
Use of org.apache.ignite.internal.util.typedef.internal.SB in project ignite by apache:
the class GridSqlAlias, method getSQL().
/** {@inheritDoc} */
@Override
public String getSQL() {
    SB b = new SB();

    GridSqlAst child = child(0);

    boolean tbl = child instanceof GridSqlTable;

    b.a(tbl ? ((GridSqlTable) child).getBeforeAliasSql() : child.getSQL());

    b.a(useAs ? " AS " : " ");
    b.a(Parser.quoteIdentifier(alias));

    if (tbl)
        b.a(((GridSqlTable) child).getAfterAliasSQL());

    return b.toString();
}
Use of org.apache.ignite.internal.util.typedef.internal.SB in project ignite by apache:
the class GridSqlTable, method getAfterAliasSQL().
/**
 * @return SQL for the table after alias.
 */
public String getAfterAliasSQL() {
    if (useIndexes == null)
        return "";

    SB b = new SB();

    b.a(" USE INDEX (");

    boolean first = true;

    for (String idx : useIndexes) {
        if (first)
            first = false;
        else
            b.a(", ");

        b.a(Parser.quoteIdentifier(idx));
    }

    b.a(')');

    return b.toString();
}
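Putting the two methods together: for a table child, GridSqlAlias.getSQL() emits the before-alias SQL, the (quoted) alias, and then the USE INDEX hint. A hand-assembled sketch of the resulting shape, since building the real AST requires parser internals; "T", "A" and "IDX" are hypothetical identifiers:

public class AliasSqlShape {
    public static void main(String[] args) {
        String beforeAlias = "\"T\"";                // what getBeforeAliasSql() would yield
        String afterAlias = " USE INDEX (\"IDX\")";  // what getAfterAliasSQL() would yield

        // GridSqlAlias.getSQL() with useAs == true concatenates the pieces:
        String sql = beforeAlias + " AS " + "\"A\"" + afterAlias;

        System.out.println(sql); // "T" AS "A" USE INDEX ("IDX")
    }
}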
Use of org.apache.ignite.internal.util.typedef.internal.SB in project ignite by apache:
the class HadoopIgfsWrapper, method delegate().
/**
 * Gets the delegate, creating it if needed.
 *
 * @return Delegate.
 * @throws HadoopIgfsCommunicationException On error.
 */
private Delegate delegate() throws HadoopIgfsCommunicationException {
    // These fields will contain possible exceptions from shmem and TCP endpoints.
    Exception errShmem = null;
    Exception errTcp = null;
    Exception errClient = null;

    // 1. If delegate is set, return it immediately.
    Delegate curDelegate = delegateRef.get();

    if (curDelegate != null)
        return curDelegate;

    // 2. Guess that we are in the same VM.
    boolean skipInProc = parameter(conf, PARAM_IGFS_ENDPOINT_NO_EMBED, authority, false);

    if (!skipInProc) {
        HadoopIgfsInProc hadoop = HadoopIgfsInProc.create(endpoint.igfs(), log, userName);

        if (hadoop != null)
            curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
    }

    // 3. Try Ignite client.
    String igniteCliCfgPath = parameter(conf, PARAM_IGFS_ENDPOINT_IGNITE_CFG_PATH, authority, null);

    if (curDelegate == null && !F.isEmpty(igniteCliCfgPath)) {
        HadoopIgfsInProc hadoop = null;

        try {
            hadoop = HadoopIgfsInProc.create(igniteCliCfgPath, endpoint.igfs(), log, userName);

            if (hadoop != null)
                curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
        }
        catch (Exception e) {
            if (hadoop != null)
                hadoop.close(true);

            if (log.isDebugEnabled())
                log.debug("Failed to connect to IGFS using Ignite client [host=" + endpoint.host() +
                    ", port=" + endpoint.port() + ", igniteCfg=" + igniteCliCfgPath + ']', e);

            errClient = e;
        }
    }

    // 4. Try connecting using shmem.
    boolean skipLocShmem = parameter(conf, PARAM_IGFS_ENDPOINT_NO_LOCAL_SHMEM, authority, false);

    if (curDelegate == null && !skipLocShmem && !U.isWindows()) {
        HadoopIgfsEx hadoop = null;

        try {
            hadoop = new HadoopIgfsOutProc(endpoint.port(), endpoint.igfs(), log, userName);

            curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
        }
        catch (IOException | IgniteCheckedException e) {
            if (e instanceof HadoopIgfsCommunicationException)
                hadoop.close(true);

            if (log.isDebugEnabled())
                log.debug("Failed to connect to IGFS using shared memory [port=" + endpoint.port() + ']', e);

            errShmem = e;
        }
    }

    // 5. Try local TCP connection.
    boolean skipLocTcp = parameter(conf, PARAM_IGFS_ENDPOINT_NO_LOCAL_TCP, authority, false);

    if (curDelegate == null && !skipLocTcp) {
        HadoopIgfsEx hadoop = null;

        try {
            hadoop = new HadoopIgfsOutProc(LOCALHOST, endpoint.port(), endpoint.igfs(), log, userName);

            curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
        }
        catch (IOException | IgniteCheckedException e) {
            if (e instanceof HadoopIgfsCommunicationException)
                hadoop.close(true);

            if (log.isDebugEnabled())
                log.debug("Failed to connect to IGFS using TCP [host=" + endpoint.host() +
                    ", port=" + endpoint.port() + ']', e);

            errTcp = e;
        }
    }

    // 6. Try remote TCP connection.
    boolean skipRmtTcp = parameter(conf, PARAM_IGFS_ENDPOINT_NO_REMOTE_TCP, authority, false);

    if (curDelegate == null && !skipRmtTcp && (skipLocTcp || !F.eq(LOCALHOST, endpoint.host()))) {
        HadoopIgfsEx hadoop = null;

        try {
            hadoop = new HadoopIgfsOutProc(endpoint.host(), endpoint.port(), endpoint.igfs(), log, userName);

            curDelegate = new Delegate(hadoop, hadoop.handshake(logDir));
        }
        catch (IOException | IgniteCheckedException e) {
            if (e instanceof HadoopIgfsCommunicationException)
                hadoop.close(true);

            if (log.isDebugEnabled())
                log.debug("Failed to connect to IGFS using TCP [host=" + endpoint.host() +
                    ", port=" + endpoint.port() + ']', e);

            errTcp = e;
        }
    }

    if (curDelegate != null) {
        // Publish the new delegate; if another thread won the race, mark ours
        // as doomed so that it is closed instead of leaking.
        if (!delegateRef.compareAndSet(null, curDelegate))
            curDelegate.doomed = true;

        return curDelegate;
    }
    else {
        SB errMsg = new SB("Failed to connect to IGFS [endpoint=igfs://" + authority + ", attempts=[");

        if (errShmem != null)
            errMsg.a("[type=SHMEM, port=" + endpoint.port() + ", err=" + errShmem + "], ");

        if (errTcp != null)
            errMsg.a("[type=TCP, host=" + endpoint.host() + ", port=" + endpoint.port() + ", err=" + errTcp + "]] ");

        if (errClient != null)
            errMsg.a("[type=CLIENT, cfg=" + igniteCliCfgPath + ", err=" + errClient + "]] ");

        errMsg.a("(ensure that IGFS is running and have IPC endpoint enabled; ensure that " +
            "ignite-shmem-1.0.0.jar is in Hadoop classpath if you use shared memory endpoint).");

        throw new HadoopIgfsCommunicationException(errMsg.toString());
    }
}
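The method above is an instance of a common failover pattern: try endpoints in a fixed preference order, remember each failure for the final error message, publish the first success with compareAndSet, and handle losing the race to a concurrent caller (Ignite marks the extra delegate as doomed and returns it anyway so it is cleaned up later). A generic sketch of that pattern; FailoverRef and its members are illustrative names, not Ignite API:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

// Illustrative failover helper: tries factories in order, records failures,
// and publishes the first success atomically.
final class FailoverRef<T> {
    private final AtomicReference<T> ref = new AtomicReference<>();

    @SafeVarargs
    final T acquire(Supplier<T>... factories) {
        T cur = ref.get();

        if (cur != null)
            return cur; // Fast path: already connected.

        List<Exception> errs = new ArrayList<>();

        for (Supplier<T> factory : factories) {
            try {
                cur = factory.get(); // Attempt the next endpoint.

                if (cur != null)
                    break;
            }
            catch (RuntimeException e) {
                errs.add(e); // Remember the failure; fall through to the next endpoint.
            }
        }

        if (cur == null)
            throw new IllegalStateException("All endpoints failed: " + errs);

        // Publish atomically. Unlike the Ignite code above, which returns the
        // loser marked as doomed, this sketch simply returns the winner.
        if (!ref.compareAndSet(null, cur))
            cur = ref.get();

        return cur;
    }
}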