Example use of org.apache.hop.core.RowMetaAndData in the Apache Hop project.
From class DatabaseMeta, method getFeatureSummary:
/**
* @return a feature list for the chosen database type.
*/
public List<RowMetaAndData> getFeatureSummary(IVariables variables) {
List<RowMetaAndData> list = new ArrayList<>();
RowMetaAndData r = null;
final String par = "Parameter";
final String val = "Value";
IValueMeta testValue = new ValueMetaString("FIELD");
testValue.setLength(30);
if (iDatabase != null) {
// Type of database
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Database type");
r.addValue(val, IValueMeta.TYPE_STRING, getPluginId());
list.add(r);
// Type of access
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Access type");
r.addValue(val, IValueMeta.TYPE_STRING, getAccessTypeDesc());
list.add(r);
// Name of database
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Database name");
r.addValue(val, IValueMeta.TYPE_STRING, getDatabaseName());
list.add(r);
// server host name
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Server hostname");
r.addValue(val, IValueMeta.TYPE_STRING, getHostname());
list.add(r);
// Port number
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Service port");
r.addValue(val, IValueMeta.TYPE_STRING, getPort());
list.add(r);
// Username
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Username");
r.addValue(val, IValueMeta.TYPE_STRING, getUsername());
list.add(r);
// Informix server
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Informix server name");
r.addValue(val, IValueMeta.TYPE_STRING, getServername());
list.add(r);
// Other properties...
for (String key : getAttributes().keySet()) {
String value = getAttributes().get(key);
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Extra attribute [" + key + "]");
r.addValue(val, IValueMeta.TYPE_STRING, value);
list.add(r);
}
// driver class
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Driver class");
r.addValue(val, IValueMeta.TYPE_STRING, getDriverClass(variables));
list.add(r);
// URL
String pwd = getPassword();
// Don't give away the password in the URL!
setPassword("password");
String url = "";
try {
url = getURL(variables);
} catch (Exception e) {
url = "";
}
// SAP etc.
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "URL");
r.addValue(val, IValueMeta.TYPE_STRING, url);
list.add(r);
setPassword(pwd);
// SQL: Next sequence value
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "SQL: next sequence value");
r.addValue(val, IValueMeta.TYPE_STRING, getSeqNextvalSql("SEQUENCE"));
list.add(r);
// is set fetch size supported
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "supported: set fetch size");
r.addValue(val, IValueMeta.TYPE_STRING, isFetchSizeSupported() ? "Y" : "N");
list.add(r);
// needs place holder for auto increment
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "auto increment field needs placeholder");
r.addValue(val, IValueMeta.TYPE_STRING, needsPlaceHolder() ? "Y" : "N");
list.add(r);
// Sum function
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "SUM aggregate function");
r.addValue(val, IValueMeta.TYPE_STRING, getFunctionSum());
list.add(r);
// Avg function
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "AVG aggregate function");
r.addValue(val, IValueMeta.TYPE_STRING, getFunctionAverage());
list.add(r);
// Minimum function
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "MIN aggregate function");
r.addValue(val, IValueMeta.TYPE_STRING, getFunctionMinimum());
list.add(r);
// Maximum function
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "MAX aggregate function");
r.addValue(val, IValueMeta.TYPE_STRING, getFunctionMaximum());
list.add(r);
// Count function
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "COUNT aggregate function");
r.addValue(val, IValueMeta.TYPE_STRING, getFunctionCount());
list.add(r);
// Schema-table combination
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Schema / Table combination");
r.addValue(val, IValueMeta.TYPE_STRING, getQuotedSchemaTableCombination(variables, "SCHEMA", "TABLE"));
list.add(r);
// Limit clause
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "LIMIT clause for 100 rows");
r.addValue(val, IValueMeta.TYPE_STRING, getLimitClause(100));
list.add(r);
// add column statement
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Add column statement");
r.addValue(val, IValueMeta.TYPE_STRING, getAddColumnStatement("TABLE", testValue, null, false, null, false));
list.add(r);
// drop column statement
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Drop column statement");
r.addValue(val, IValueMeta.TYPE_STRING, getDropColumnStatement("TABLE", testValue, null, false, null, false));
list.add(r);
// Modify column statement
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Modify column statement");
r.addValue(val, IValueMeta.TYPE_STRING, getModifyColumnStatement("TABLE", testValue, null, false, null, false));
list.add(r);
// List of reserved words
String reserved = "";
if (getReservedWords() != null) {
for (int i = 0; i < getReservedWords().length; i++) {
reserved += (i > 0 ? ", " : "") + getReservedWords()[i];
}
}
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "List of reserved words");
r.addValue(val, IValueMeta.TYPE_STRING, reserved);
list.add(r);
// Quote reserved words?
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Quote reserved words?");
r.addValue(val, IValueMeta.TYPE_STRING, quoteReservedWords() ? "Y" : "N");
list.add(r);
// Start Quote
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "Start quote for reserved words");
r.addValue(val, IValueMeta.TYPE_STRING, getStartQuote());
list.add(r);
// End Quote
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "End quote for reserved words");
r.addValue(val, IValueMeta.TYPE_STRING, getEndQuote());
list.add(r);
// List of table types
String types = "";
String[] slist = getTableTypes();
if (slist != null) {
for (int i = 0; i < slist.length; i++) {
types += (i > 0 ? ", " : "") + slist[i];
}
}
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "List of JDBC table types");
r.addValue(val, IValueMeta.TYPE_STRING, types);
list.add(r);
// List of view types
types = "";
slist = getViewTypes();
if (slist != null) {
for (int i = 0; i < slist.length; i++) {
types += (i > 0 ? ", " : "") + slist[i];
}
}
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "List of JDBC view types");
r.addValue(val, IValueMeta.TYPE_STRING, types);
list.add(r);
// List of synonym types
types = "";
slist = getSynonymTypes();
if (slist != null) {
for (int i = 0; i < slist.length; i++) {
types += (i > 0 ? ", " : "") + slist[i];
}
}
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "List of JDBC synonym types");
r.addValue(val, IValueMeta.TYPE_STRING, types);
list.add(r);
// Use schema-name to get list of tables?
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "use schema name to get table list?");
r.addValue(val, IValueMeta.TYPE_STRING, useSchemaNameForTableList() ? "Y" : "N");
list.add(r);
// supports view?
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "supports views?");
r.addValue(val, IValueMeta.TYPE_STRING, supportsViews() ? "Y" : "N");
list.add(r);
// supports synonyms?
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "supports synonyms?");
r.addValue(val, IValueMeta.TYPE_STRING, supportsSynonyms() ? "Y" : "N");
list.add(r);
// SQL: get list of procedures?
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "SQL: list of procedures");
r.addValue(val, IValueMeta.TYPE_STRING, getSqlListOfProcedures());
list.add(r);
// SQL: get truncate table statement?
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "SQL: truncate table");
String truncateStatement = getTruncateTableStatement(variables, "SCHEMA", "TABLE");
r.addValue(val, IValueMeta.TYPE_STRING, truncateStatement != null ? truncateStatement : "Not supported by this database type");
list.add(r);
// supports float rounding on update?
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "supports floating point rounding on update/insert");
r.addValue(val, IValueMeta.TYPE_STRING, supportsFloatRoundingOnUpdate() ? "Y" : "N");
list.add(r);
// supports time stamp to date conversion
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "supports timestamp-date conversion");
r.addValue(val, IValueMeta.TYPE_STRING, supportsTimeStampToDateConversion() ? "Y" : "N");
list.add(r);
// supports batch updates
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "supports batch updates");
r.addValue(val, IValueMeta.TYPE_STRING, supportsBatchUpdates() ? "Y" : "N");
list.add(r);
// supports boolean values
r = new RowMetaAndData();
r.addValue(par, IValueMeta.TYPE_STRING, "supports boolean data type");
r.addValue(val, IValueMeta.TYPE_STRING, supportsBooleanDataType() ? "Y" : "N");
list.add(r);
}
return list;
}
Example use of org.apache.hop.core.RowMetaAndData in the Apache Hop project.
From class DatabaseConnectionPoolParameter, method getRowList:
/**
 * Converts an array of connection-pool parameters into displayable rows with three string
 * columns: parameter name, default value, and description.
 *
 * @param poolParameters the pool parameters to convert
 * @param titleParameter column title for the parameter name
 * @param titleDefaultValue column title for the default value
 * @param titleDescription column title for the description
 * @return one RowMetaAndData per pool parameter, all sharing the same row metadata
 */
public static final List<RowMetaAndData> getRowList(DatabaseConnectionPoolParameter[] poolParameters, String titleParameter, String titleDefaultValue, String titleDescription) {
  IRowMeta rowMeta = new RowMeta();
  rowMeta.addValueMeta(new ValueMetaString(titleParameter));
  rowMeta.addValueMeta(new ValueMetaString(titleDefaultValue));
  rowMeta.addValueMeta(new ValueMetaString(titleDescription));
  // Presize: we add exactly one row per parameter.
  List<RowMetaAndData> list = new ArrayList<>(poolParameters.length);
  for (DatabaseConnectionPoolParameter p : poolParameters) {
    Object[] row = {p.getParameter(), p.getDefaultValue(), p.getDescription()};
    list.add(new RowMetaAndData(rowMeta, row));
  }
  return list;
}
Example use of org.apache.hop.core.RowMetaAndData in the Apache Hop project.
From class ActionFtpDelete, method execute:
/**
 * Deletes files on a remote FTP/SFTP server. File names come either from the previous
 * result rows (when {@code copyPrevious} is set) or from a regular-expression wildcard.
 *
 * <p>Needed for the Vector coming from sshclient.ls()
 *
 * @param previousResult the result of the previous workflow action (source of input rows)
 * @param nr the action number
 * @return the updated Result: success flag, number of files deleted, error count
 */
@Override
@SuppressWarnings("unchecked")
public Result execute(Result previousResult, int nr) {
  log.logBasic(BaseMessages.getString(PKG, "ActionFTPDelete.Started", serverName));
  RowMetaAndData resultRow = null;
  Result result = previousResult;
  List<RowMetaAndData> rows = result.getRows();
  result.setResult(false);
  nrErrors = 0;
  nrFilesDeleted = 0;
  successConditionBroken = false;
  HashSet<String> listPreviousFiles = new HashSet<>();
  // Resolve the connection settings before starting the work.
  // (Proxy/key settings previously resolved here were never used in this method.)
  String realServerName = resolve(serverName);
  String realFtpDirectory = resolve(remoteDirectory);
  int realServerPort = Const.toInt(resolve(serverPort), 0);
  String realUsername = resolve(userName);
  String realPassword = Utils.resolvePassword(this, password);
  if (isDetailed()) {
    logDetailed(BaseMessages.getString(PKG, "ActionFTPDelete.Start"));
  }
  if (copyPrevious && rows.size() == 0) {
    // File names were expected from the previous action but none arrived: nothing to do.
    if (isDetailed()) {
      logDetailed(BaseMessages.getString(PKG, "ActionFTPDelete.ArgsFromPreviousNothing"));
    }
    result.setResult(true);
    return result;
  }
  try {
    // Get all the files in the current directory...
    String[] filelist = null;
    if (protocol.equals(PROTOCOL_FTP)) {
      // establish the connection
      ftpConnect(realFtpDirectory);
      filelist = ftpclient.listNames();
      // Some servers return a single error-message entry instead of a real listing;
      // a lone entry starting with the wildcard text is treated as a server error.
      // Guard against null: listNames() may return null on failure.
      if (filelist != null && filelist.length == 1) {
        String translatedWildcard = resolve(wildcard);
        if (!Utils.isEmpty(translatedWildcard) && filelist[0].startsWith(translatedWildcard)) {
          throw new HopException(filelist[0]);
        }
      }
    } else if (protocol.equals(PROTOCOL_SFTP)) {
      // establish the secure connection
      sftpConnect(realServerName, realUsername, realServerPort, realPassword, realFtpDirectory);
      // Get all the files in the current directory...
      filelist = sftpclient.dir();
    }
    // Compute the count BEFORE dereferencing filelist: it can legitimately be null here.
    int found = filelist == null ? 0 : filelist.length;
    if (isDetailed()) {
      // NOTE(review): message key is logged without BaseMessages.getString — confirm intended.
      logDetailed("ActionFTPDelete.FoundNFiles", String.valueOf(found));
    }
    if (found == 0) {
      result.setResult(true);
      return result;
    }
    Pattern pattern = null;
    if (copyPrevious) {
      // Copy the input rows' first field (file name) into the lookup set.
      for (int iteration = 0; iteration < rows.size(); iteration++) {
        resultRow = rows.get(iteration);
        String filePrevious = resultRow.getString(0, null);
        if (!Utils.isEmpty(filePrevious)) {
          listPreviousFiles.add(filePrevious);
        }
      }
    } else {
      if (!Utils.isEmpty(wildcard)) {
        String realWildcard = resolve(wildcard);
        pattern = Pattern.compile(realWildcard);
      }
    }
    if (!getSuccessCondition().equals(SUCCESS_IF_ALL_FILES_DOWNLOADED)) {
      limitFiles = Const.toInt(resolve(getLimitSuccess()), 10);
    }
    // Walk the remote listing and delete every matching file...
    for (int i = 0; i < filelist.length && !parentWorkflow.isStopped(); i++) {
      if (successConditionBroken) {
        throw new Exception(BaseMessages.getString(PKG, "ActionFTPDelete.SuccesConditionBroken"));
      }
      boolean getIt = false;
      if (isDebug()) {
        logDebug(BaseMessages.getString(PKG, "ActionFTPDelete.AnalysingFile", filelist[i]));
      }
      try {
        // First see if the file matches the previous-rows set or the regular expression.
        if (copyPrevious) {
          if (listPreviousFiles.contains(filelist[i])) {
            getIt = true;
          }
        } else {
          if (pattern != null) {
            Matcher matcher = pattern.matcher(filelist[i]);
            getIt = matcher.matches();
          }
        }
        if (getIt) {
          // Delete file over the active protocol.
          if (protocol.equals(PROTOCOL_FTP)) {
            ftpclient.deleteFile(filelist[i]);
          } else if (protocol.equals(PROTOCOL_SFTP)) {
            sftpclient.delete(filelist[i]);
          }
          if (isDetailed()) {
            logDetailed("ActionFTPDelete.RemotefileDeleted", filelist[i]);
          }
          updateDeletedFiles();
        }
      } catch (Exception e) {
        // Update errors number; abort only when the success condition is broken.
        updateErrors();
        logError(BaseMessages.getString(PKG, "ActionFtp.UnexpectedError", e.getMessage()));
        if (successConditionBroken) {
          throw new Exception(BaseMessages.getString(PKG, "ActionFTPDelete.SuccesConditionBroken"));
        }
      }
    }
    // end for
  } catch (Exception e) {
    updateErrors();
    logError(BaseMessages.getString(PKG, "ActionFTPDelete.ErrorGetting", e.getMessage()));
    logError(Const.getStackTracker(e));
  } finally {
    // Always release the connections, whichever protocol was used.
    if (ftpclient != null && ftpclient.isConnected()) {
      try {
        ftpclient.quit();
        ftpclient = null;
      } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "ActionFTPDelete.ErrorQuitting", e.getMessage()));
      }
    }
    if (sftpclient != null) {
      try {
        sftpclient.disconnect();
        sftpclient = null;
      } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "ActionFTPDelete.ErrorQuitting", e.getMessage()));
      }
    }
    FtpClientUtil.clearSocksJvmSettings();
  }
  result.setResult(!successConditionBroken);
  result.setNrFilesRetrieved(nrFilesDeleted);
  result.setNrErrors(nrErrors);
  return result;
}
Example use of org.apache.hop.core.RowMetaAndData in the Apache Hop project.
From class DatabaseImpact, method getRow:
/**
 * Builds a single display row describing this database impact: type, pipeline, transform,
 * database, table, field, value, value origin, SQL, and remarks — each under its
 * localized column header.
 *
 * @return the populated row
 */
public RowMetaAndData getRow() {
  RowMetaAndData row = new RowMetaAndData();
  // Column label suffixes, in display order; values are paired by index.
  String[] labelKeys = {
    "Type", "Pipeline", "Transform", "Database", "Table",
    "Field", "Value", "ValueOrigin", "SQL", "Remarks"
  };
  String[] cellValues = {
    getTypeDesc(), getPipelineName(), getTransformName(), getDatabaseName(), getTable(),
    getField(), getValue(), getValueOrigin(), getSql(), getRemark()
  };
  for (int idx = 0; idx < labelKeys.length; idx++) {
    String header = BaseMessages.getString(PKG, "DatabaseImpact.RowDesc.Label." + labelKeys[idx]);
    row.addValue(new ValueMetaString(header), cellValues[idx]);
  }
  return row;
}
Example use of org.apache.hop.core.RowMetaAndData in the Apache Hop project.
From class ActionHttp_PDI208_Test, method testHttpResultDefaultRows:
/**
 * Verifies that ActionHttp, running once per input row with the default column layout
 * (URL, UPLOAD, DESTINATION), uploads the local file and downloads an identical copy.
 */
@Test
public void testHttpResultDefaultRows() throws IOException {
  File uploadFile = getInputFile("existingFile1", ".tmp");
  File downloadFile = File.createTempFile("downloadedFile1", ".tmp");
  uploadFile.deleteOnExit();
  downloadFile.deleteOnExit();
  // Default row layout expected by the action.
  RowMeta defaultMeta = new RowMeta();
  for (String field : new String[] {"URL", "UPLOAD", "DESTINATION"}) {
    defaultMeta.addValueMeta(new ValueMetaString(field));
  }
  Object[] rowData = new Object[] {
    HTTP_SERVER_BASEURL + "/uploadFile",
    uploadFile.getCanonicalPath(),
    downloadFile.getCanonicalPath()
  };
  List<RowMetaAndData> inputRows = new ArrayList<>();
  inputRows.add(new RowMetaAndData(defaultMeta, rowData));
  Result previousResult = new Result();
  previousResult.setRows(inputRows);
  ActionHttp action = new ActionHttp();
  action.setParentWorkflow(new LocalWorkflowEngine());
  action.setRunForEveryRow(true);
  action.setAddFilenameToResult(false);
  action.execute(previousResult, 0);
  // The round-tripped file must be byte-identical to the original.
  assertTrue(FileUtils.contentEquals(uploadFile, downloadFile));
}
End of aggregated usage examples.