Use of org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters in project ignite by apache.
Class DmlStatementsProcessor, method processBulkLoadCommand:
/**
 * Process bulk load COPY command.
 *
 * @param cmd The command.
 * @return The context (which is the result of the first request/response).
 * @throws IgniteCheckedException If something failed.
 */
public FieldsQueryCursor<List<?>> processBulkLoadCommand(SqlBulkLoadCommand cmd) throws IgniteCheckedException {
    if (cmd.packetSize() == null)
        cmd.packetSize(BulkLoadAckClientParameters.DFLT_PACKET_SIZE);

    GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());

    if (tbl == null) {
        idx.kernalContext().cache().createMissingQueryCaches();

        tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
    }

    if (tbl == null) {
        throw new IgniteSQLException("Table does not exist: " + cmd.tableName(),
            IgniteQueryErrorCode.TABLE_NOT_FOUND);
    }

    UpdatePlan plan = UpdatePlanBuilder.planForBulkLoad(cmd, tbl);

    IgniteClosureX<List<?>, IgniteBiTuple<?, ?>> dataConverter = new BulkLoadDataConverter(plan);

    GridCacheContext cache = tbl.cache();

    IgniteDataStreamer<Object, Object> streamer = cache.grid().dataStreamer(cache.name());

    BulkLoadCacheWriter outputWriter = new BulkLoadStreamerWriter(streamer);

    BulkLoadParser inputParser = BulkLoadParser.createParser(cmd.inputFormat());

    BulkLoadProcessor processor = new BulkLoadProcessor(inputParser, dataConverter, outputWriter);

    BulkLoadAckClientParameters params = new BulkLoadAckClientParameters(cmd.localFileName(), cmd.packetSize());

    return new BulkLoadContextCursor(processor, params);
}
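In the snippet above, the acknowledgement parameters are built only after a missing COPY packet size has been replaced with BulkLoadAckClientParameters.DFLT_PACKET_SIZE. The short, hypothetical sketch below isolates that validate-or-default pattern; the requested size, the file name and the example class are illustrative, and only BulkLoadAckClientParameters members that appear in the snippets on this page (DFLT_PACKET_SIZE, isValidPacketSize, the (String, int) constructor) are assumed.

import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters;

/** Illustrative sketch only: validate a requested packet size or fall back to the default. */
public class PacketSizeFallbackExample {
    public static void main(String[] args) {
        int requested = 4 * 1024 * 1024; // hypothetical caller-requested packet size

        // Mirror of the fallback in processBulkLoadCommand: use the default whenever
        // the requested size is absent or outside the accepted range.
        int packetSize = BulkLoadAckClientParameters.isValidPacketSize(requested)
            ? requested
            : BulkLoadAckClientParameters.DFLT_PACKET_SIZE;

        // "/tmp/people.csv" is a placeholder local file name for the COPY command.
        BulkLoadAckClientParameters params = new BulkLoadAckClientParameters("/tmp/people.csv", packetSize);

        System.out.println("Acknowledgement will carry packet size " + packetSize);
    }
}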
Use of org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters in project ignite by apache.
Class JdbcBulkLoadAckResult, method readBinary:
/**
 * {@inheritDoc}
 */
@Override
public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
    super.readBinary(reader);

    qryId = reader.readLong();

    String locFileName = reader.readString();
    int batchSize = reader.readInt();

    if (!BulkLoadAckClientParameters.isValidPacketSize(batchSize))
        throw new BinaryObjectException(BulkLoadAckClientParameters.packetSizeErrorMesssage(batchSize));

    params = new BulkLoadAckClientParameters(locFileName, batchSize);
}
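readBinary above rejects an out-of-range packet size before reconstructing the parameters on the receiving side. For orientation, here is a plausible mirror-image write side; it assumes symmetric raw write methods on BinaryWriterExImpl and localFileName()/packetSize() accessors on BulkLoadAckClientParameters, none of which appear in the snippets on this page, so treat it as a sketch rather than the actual JdbcBulkLoadAckResult implementation.

/**
 * {@inheritDoc}
 */
@Override
public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
    super.writeBinary(writer);

    writer.writeLong(qryId);

    // Assumed accessors on BulkLoadAckClientParameters; the read side above restores
    // these two values in the same order.
    writer.writeString(params.localFileName());
    writer.writeInt(params.packetSize());
}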
Use of org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters in project ignite by apache.
Class JdbcRequestHandler, method executeQuery:
/**
 * {@link JdbcQueryExecuteRequest} command handler.
 *
 * @param req Execute query request.
 * @return Response.
 */
@SuppressWarnings("unchecked")
private JdbcResponse executeQuery(JdbcQueryExecuteRequest req) {
    int cursorCnt = qryCursors.size();

    if (maxCursors > 0 && cursorCnt >= maxCursors)
        return new JdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Too many open cursors (either close other " +
            "open cursors or increase the limit through " +
            "ClientConnectorConfiguration.maxOpenCursorsPerConnection) [maximum=" + maxCursors +
            ", current=" + cursorCnt + ']');

    long qryId = QRY_ID_GEN.getAndIncrement();

    assert !cliCtx.isStream();

    try {
        String sql = req.sqlQuery();

        SqlFieldsQuery qry;

        switch (req.expectedStatementType()) {
            case ANY_STATEMENT_TYPE:
                qry = new SqlFieldsQuery(sql);
                break;

            case SELECT_STATEMENT_TYPE:
                qry = new SqlFieldsQueryEx(sql, true);
                break;

            default:
                assert req.expectedStatementType() == JdbcStatementType.UPDATE_STMT_TYPE;

                qry = new SqlFieldsQueryEx(sql, false);

                if (cliCtx.isSkipReducerOnUpdate())
                    ((SqlFieldsQueryEx)qry).setSkipReducerOnUpdate(true);
        }

        qry.setArgs(req.arguments());
        qry.setDistributedJoins(cliCtx.isDistributedJoins());
        qry.setEnforceJoinOrder(cliCtx.isEnforceJoinOrder());
        qry.setCollocated(cliCtx.isCollocated());
        qry.setReplicatedOnly(cliCtx.isReplicatedOnly());
        qry.setLazy(cliCtx.isLazy());

        if (req.pageSize() <= 0)
            return new JdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Invalid fetch size: " + req.pageSize());

        qry.setPageSize(req.pageSize());

        String schemaName = req.schemaName();

        if (F.isEmpty(schemaName))
            schemaName = QueryUtils.DFLT_SCHEMA;

        qry.setSchema(schemaName);

        List<FieldsQueryCursor<List<?>>> results = ctx.query().querySqlFields(null, qry, cliCtx, true,
            protocolVer.compareTo(VER_2_3_0) < 0);

        FieldsQueryCursor<List<?>> fieldsCur = results.get(0);

        if (fieldsCur instanceof BulkLoadContextCursor) {
            BulkLoadContextCursor blCur = (BulkLoadContextCursor)fieldsCur;

            BulkLoadProcessor blProcessor = blCur.bulkLoadProcessor();
            BulkLoadAckClientParameters clientParams = blCur.clientParams();

            bulkLoadRequests.put(qryId, new JdbcBulkLoadProcessor(blProcessor));

            return new JdbcResponse(new JdbcBulkLoadAckResult(qryId, clientParams));
        }

        if (results.size() == 1) {
            JdbcQueryCursor cur = new JdbcQueryCursor(qryId, req.pageSize(), req.maxRows(),
                (QueryCursorImpl)fieldsCur);

            JdbcQueryExecuteResult res;

            if (cur.isQuery())
                res = new JdbcQueryExecuteResult(qryId, cur.fetchRows(), !cur.hasNext());
            else {
                List<List<Object>> items = cur.fetchRows();

                assert items != null && items.size() == 1 && items.get(0).size() == 1 &&
                    items.get(0).get(0) instanceof Long : "Invalid result set for not-SELECT query. [qry=" + sql +
                    ", res=" + S.toString(List.class, items) + ']';

                res = new JdbcQueryExecuteResult(qryId, (Long)items.get(0).get(0));
            }

            if (res.last() && (!res.isQuery() || autoCloseCursors))
                cur.close();
            else
                qryCursors.put(qryId, cur);

            return new JdbcResponse(res);
        }
        else {
            List<JdbcResultInfo> jdbcResults = new ArrayList<>(results.size());
            List<List<Object>> items = null;

            boolean last = true;

            for (FieldsQueryCursor<List<?>> c : results) {
                QueryCursorImpl qryCur = (QueryCursorImpl)c;

                JdbcResultInfo jdbcRes;

                if (qryCur.isQuery()) {
                    jdbcRes = new JdbcResultInfo(true, -1, qryId);

                    JdbcQueryCursor cur = new JdbcQueryCursor(qryId, req.pageSize(), req.maxRows(), qryCur);

                    qryCursors.put(qryId, cur);

                    qryId = QRY_ID_GEN.getAndIncrement();

                    if (items == null) {
                        items = cur.fetchRows();
                        last = cur.hasNext();
                    }
                }
                else
                    jdbcRes = new JdbcResultInfo(false, (Long)((List<?>)qryCur.getAll().get(0)).get(0), -1);

                jdbcResults.add(jdbcRes);
            }

            return new JdbcResponse(new JdbcQueryExecuteMultipleStatementsResult(jdbcResults, items, last));
        }
    }
    catch (Exception e) {
        qryCursors.remove(qryId);

        U.error(log, "Failed to execute SQL query [reqId=" + req.requestId() + ", req=" + req + ']', e);

        return exceptionToResult(e);
    }
}
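Once executeQuery recognizes a BulkLoadContextCursor, it registers the bulk load processor under qryId and answers with a JdbcBulkLoadAckResult carrying the client parameters instead of a normal result set. A rough, hypothetical sketch of what a client could do with that acknowledgement follows: read the named local file in chunks of the advertised packet size and ship each chunk tagged with the query id. The streamFile/sendBatch names are placeholders invented for this illustration and are not part of the Ignite thin-client API.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

/** Hypothetical client-side reaction to a bulk load acknowledgement (illustration only). */
public class BulkLoadClientSketch {
    /**
     * Streams a local file in packet-sized chunks, as suggested by the acknowledgement parameters.
     *
     * @param qryId Query id returned with the acknowledgement.
     * @param locFileName Local file name from the acknowledgement parameters.
     * @param packetSize Packet size from the acknowledgement parameters.
     */
    public static void streamFile(long qryId, String locFileName, int packetSize) throws IOException {
        try (InputStream in = new FileInputStream(locFileName)) {
            byte[] buf = new byte[packetSize];

            int read;
            while ((read = in.read(buf)) > 0) {
                // Placeholder for the protocol message that carries file data for qryId;
                // the real thin client uses its own batch request type here.
                sendBatch(qryId, Arrays.copyOf(buf, read));
            }
        }
    }

    /** Placeholder transport call; not part of the Ignite API. */
    private static void sendBatch(long qryId, byte[] data) {
        System.out.println("Would send " + data.length + " bytes for query " + qryId);
    }
}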