Use of org.talend.daikon.exception.error.DefaultErrorCode in the project "components" by Talend.
Example from the class SalesforceWriter, method delete.
/**
 * Buffers the given record for deletion and flushes the pending batch once the
 * configured commit level is reached.
 *
 * @param input record whose "Id" field identifies the Salesforce row to delete
 * @return the results of the flushed delete call, or {@code null} when nothing was flushed
 * @throws IOException if the underlying delete call fails
 */
private DeleteResult[] delete(IndexedRecord input) throws IOException {
    // Resolve and cache the position of the "Id" field on first use.
    if (deleteFieldId == -1) {
        final String idFieldName = "Id";
        Schema.Field idField = input.getSchema().getField(idFieldName);
        if (idField == null) {
            throw new ComponentException(
                    new DefaultErrorCode(HttpServletResponse.SC_BAD_REQUEST, "message"),
                    ExceptionContext.build().put("message", idFieldName + " not found"));
        }
        deleteFieldId = idField.pos();
    }
    String rowId = (String) input.get(deleteFieldId);
    // Records without an id value are skipped silently.
    if (rowId == null) {
        return null;
    }
    deleteItems.add(input);
    return deleteItems.size() >= commitLevel ? doDelete() : null;
}
Use of org.talend.daikon.exception.error.DefaultErrorCode in the project "components" by Talend.
Example from the class SalesforceBulkRuntime, method doBulkQuery.
/**
 * Creates and executes job for bulk query. Job must be finished in 2 minutes on Salesforce side.<br/>
 * From Salesforce documentation two scenarios are possible here:
 * <ul>
 * <li>simple bulk query. It should have status - {@link BatchStateEnum#Completed}.</li>
 * <li>primary key chunking bulk query. It should return first batch info with status - {@link BatchStateEnum#NotProcessed}.<br/>
 * Other batch info's should have status - {@link BatchStateEnum#Completed}</li>
 * </ul>
 *
 * @param moduleName - input module name.
 * @param queryStatement - to be executed.
 * @throws AsyncApiException if the Bulk API reports an error while creating or polling the job
 * @throws InterruptedException if the polling sleep is interrupted
 * @throws ConnectionException if the Salesforce connection fails
 */
public void doBulkQuery(String moduleName, String queryStatement)
        throws AsyncApiException, InterruptedException, ConnectionException {
    job = new JobInfo();
    job.setObject(moduleName);
    job.setOperation(OperationEnum.query);
    if (concurrencyMode != null) {
        job.setConcurrencyMode(concurrencyMode);
    }
    job.setContentType(ContentType.CSV);
    job = createJob(job);
    if (job.getId() == null) {
        // job creation failed
        throw new ComponentException(
                new DefaultErrorCode(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, "failedBatch"),
                ExceptionContext.build().put("failedBatch", job));
    }
    // Fix: use an explicit charset. The no-arg getBytes() uses the platform default
    // encoding, which can silently corrupt non-ASCII characters in the SOQL statement.
    ByteArrayInputStream bout =
            new ByteArrayInputStream(queryStatement.getBytes(java.nio.charset.StandardCharsets.UTF_8));
    BatchInfo info = createBatchFromStream(job, bout);
    int secToWait = 1;
    int tryCount = 0;
    while (true) {
        LOGGER.debug("Awaiting " + secToWait + " seconds for results ...\n" + info);
        Thread.sleep(secToWait * 1000);
        info = getBatchInfo(job.getId(), info.getId());
        // Completed, or the first batch of a PK-chunking query (NotProcessed is expected then).
        if (info.getState() == BatchStateEnum.Completed
                || (BatchStateEnum.NotProcessed == info.getState() && 0 < chunkSize)) {
            break;
        } else if (info.getState() == BatchStateEnum.Failed) {
            throw new ComponentException(
                    new DefaultErrorCode(HttpServletResponse.SC_BAD_REQUEST, "failedBatch"),
                    ExceptionContext.build().put("failedBatch", info));
        }
        tryCount++;
        // After every 3 unsuccessful polls, double the wait time; the < 120 guard caps the
        // sleep so a single wait never exceeds ~2 minutes.
        if (tryCount % 3 == 0 && secToWait < 120) {
            secToWait = secToWait * 2;
        }
        // https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch/asynch_api_concepts_limits.htm
        if (jobTimeOut > 0) {
            // if 0, timeout is disabled
            long processingTime = System.currentTimeMillis() - job.getCreatedDate().getTimeInMillis();
            if (processingTime > jobTimeOut) {
                throw new ComponentException(
                        new DefaultErrorCode(HttpServletResponse.SC_REQUEST_TIMEOUT, "failedBatch"),
                        ExceptionContext.build().put("failedBatch", info));
            }
        }
    }
    retrieveResultsOfQuery(info);
}
Use of org.talend.daikon.exception.error.DefaultErrorCode in the project "components" by Talend.
Example from the class SnowflakeRowWriter, method validateResultSet.
/**
 * Checks that the query result set columns map onto the fields of the main schema.
 *
 * @return {@code true} when the result set matches the schema; {@code false} for
 *         CUD-style result sets (row-count columns) that carry no data to validate
 * @throws SQLException if the result set metadata cannot be read
 * @throws ComponentException if the column count or column names do not match the schema
 */
private boolean validateResultSet() throws SQLException {
    List<Field> fields = mainSchema.getFields();
    ResultSetMetaData rsMetadata = rs.getMetaData();
    // CUD statements (insert/update/delete) return a count column instead of data rows.
    if (CUD_RESULT_SET_COLUMN_NAMES.contains(rsMetadata.getColumnName(1))) {
        return false;
    }
    int columnCount = rsMetadata.getColumnCount();
    if (fields.size() != columnCount) {
        throw resultSetMappingError();
    }
    // Every result set column must have a schema field with the same (case-insensitive) name.
    // JDBC column indexes are 1-based.
    int matchedColumns = 0;
    for (int i = 1; i <= columnCount; i++) {
        String rsColumnName = rsMetadata.getColumnName(i);
        for (Field field : fields) {
            if (rsColumnName.equalsIgnoreCase(field.name())) {
                matchedColumns++;
            }
        }
    }
    if (matchedColumns != columnCount) {
        throw resultSetMappingError();
    }
    resultSetValidation = true;
    return true;
}

/** Builds the exception thrown when the result set does not map onto the schema (DRY helper). */
private ComponentException resultSetMappingError() {
    return new ComponentException(new DefaultErrorCode(400, "errorMessage"),
            new ExceptionContextBuilder()
                    .put("errorMessage", I18N_MESSAGES.getMessage("error.resultSetMapping")).build());
}
Aggregations