Use of org.pentaho.di.core.exception.KettleFileException in the pentaho-kettle project by Pentaho.
From the class CubeInput, method init:
public boolean init(StepMetaInterface smi, StepDataInterface sdi) {
meta = (CubeInputMeta) smi;
data = (CubeInputData) sdi;
if (!super.init(smi, sdi)) {
return false;
}
try {
String filename = environmentSubstitute(meta.getFilename());
// Register the input file in the transformation's result files when requested.
if (meta.isAddResultFile()) {
ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(filename, getTransMeta()), getTransMeta().getName(), toString());
resultFile.setComment("File was read by a Cube Input step");
addResultFile(resultFile);
}
// Open the cube file: raw stream -> gzip decompression -> data stream.
data.fis = KettleVFS.getInputStream(filename, this);
data.zip = new GZIPInputStream(data.fis);
data.dis = new DataInputStream(data.zip);
// The cube file starts with serialized row metadata; read it up front.
try {
data.meta = new RowMeta(data.dis);
return true;
} catch (KettleFileException kfe) {
logError(BaseMessages.getString(PKG, "CubeInput.Log.UnableToReadMetadata"), kfe);
return false;
}
} catch (Exception e) {
// Any other failure (VFS lookup, I/O) is logged and init reports failure.
logError(BaseMessages.getString(PKG, "CubeInput.Log.ErrorReadingFromDataCube"), e);
}
return false;
}
Use of org.pentaho.di.core.exception.KettleFileException in the pentaho-kettle project by Pentaho.
From the class CubeOutput, method prepareFile:
/**
 * Opens the output cube file and wires up the gzip-compressed data stream,
 * optionally registering the file in the transformation's result files.
 *
 * @throws KettleFileException if the file cannot be resolved or opened
 */
private void prepareFile() throws KettleFileException {
try {
String fileName = environmentSubstitute(meta.getFilename());
// Optionally record the created file in the transformation result.
if (meta.isAddToResultFiles()) {
ResultFile cubeFile = new ResultFile(ResultFile.FILE_TYPE_GENERAL, KettleVFS.getFileObject(fileName, getTransMeta()), getTransMeta().getName(), getStepname());
cubeFile.setComment("This file was created with a cube file output step");
addResultFile(cubeFile);
}
// Build the output chain: file stream -> gzip compression -> data stream.
data.fos = KettleVFS.getOutputStream(fileName, getTransMeta(), false);
data.zip = new GZIPOutputStream(data.fos);
data.dos = new DataOutputStream(data.zip);
} catch (Exception e) {
// Wrap any failure in the checked Kettle file exception for the caller.
throw new KettleFileException(e);
}
}
Use of org.pentaho.di.core.exception.KettleFileException in the pentaho-kettle project by Pentaho.
From the class ValueMetaBase, method writeMeta:
@Override
/**
 * Serializes this value's metadata (type, storage type, index values for
 * indexed storage, name, length/precision, formatting and locale settings)
 * to the given stream, in the order expected by the matching read method.
 *
 * @param outputStream the stream to write the metadata to
 * @throws KettleFileException if an I/O error occurs while writing
 */
@Override
public void writeMeta(DataOutputStream outputStream) throws KettleFileException {
try {
int type = getType();
// Handle type
outputStream.writeInt(type);
// Handle storage type
outputStream.writeInt(storageType);
switch(storageType) {
case STORAGE_TYPE_INDEXED:
// Save the indexed strings...
if (index == null) {
// -1 marks a null index array for the reader.
outputStream.writeInt(-1);
} else {
outputStream.writeInt(index.length);
for (int i = 0; i < index.length; i++) {
try {
// Each index entry is written with the writer matching this value's type.
switch(type) {
case TYPE_STRING:
writeString(outputStream, (String) index[i]);
break;
case TYPE_NUMBER:
writeNumber(outputStream, (Double) index[i]);
break;
case TYPE_INTEGER:
writeInteger(outputStream, (Long) index[i]);
break;
case TYPE_DATE:
writeDate(outputStream, (Date) index[i]);
break;
case TYPE_BIGNUMBER:
writeBigNumber(outputStream, (BigDecimal) index[i]);
break;
case TYPE_BOOLEAN:
writeBoolean(outputStream, (Boolean) index[i]);
break;
case TYPE_BINARY:
writeBinary(outputStream, (byte[]) index[i]);
break;
default:
// FIX: message previously read "indexe storage type".
throw new KettleFileException(toString() + " : Unable to serialize indexed storage type for data type " + getType());
}
} catch (ClassCastException e) {
// FIX: chain the ClassCastException as the cause so the original
// stack trace is not lost.
throw new RuntimeException(toString() + " : There was a data type error: the data type of " + index[i].getClass().getName() + " object [" + index[i] + "] does not correspond to value meta [" + toStringMeta() + "]", e);
}
}
}
break;
case STORAGE_TYPE_BINARY_STRING:
// Save the storage meta data...
//
outputStream.writeBoolean(storageMetadata != null);
if (storageMetadata != null) {
storageMetadata.writeMeta(outputStream);
}
break;
default:
// STORAGE_TYPE_NORMAL needs no extra storage information.
break;
}
// Handle name-length
writeString(outputStream, name);
// length & precision
outputStream.writeInt(getLength());
outputStream.writeInt(getPrecision());
// Origin
writeString(outputStream, origin);
// Comments
writeString(outputStream, comments);
// formatting Mask, decimal, grouping, currency
writeString(outputStream, conversionMask);
writeString(outputStream, decimalSymbol);
writeString(outputStream, groupingSymbol);
writeString(outputStream, currencySymbol);
outputStream.writeInt(trimType);
// Case sensitivity of compare
outputStream.writeBoolean(caseInsensitive);
// Collator Locale
writeString(outputStream, collatorLocale.toLanguageTag());
// Collator Disabled of compare
outputStream.writeBoolean(collatorDisabled);
// Collator strength of compare
outputStream.writeInt(collatorStrength);
// Sorting information
outputStream.writeBoolean(sortedDescending);
// Padding information
outputStream.writeBoolean(outputPaddingEnabled);
// date format lenient?
outputStream.writeBoolean(dateFormatLenient);
// date format locale?
writeString(outputStream, dateFormatLocale != null ? dateFormatLocale.toString() : null);
// date time zone?
writeString(outputStream, dateFormatTimeZone != null ? dateFormatTimeZone.getID() : null);
// string to number conversion lenient?
outputStream.writeBoolean(lenientStringToNumber);
} catch (IOException e) {
throw new KettleFileException(toString() + " : Unable to write value metadata to output stream", e);
}
}
Use of org.pentaho.di.core.exception.KettleFileException in the pentaho-kettle project by Pentaho.
From the class ValueMetaTimestamp, method writeData:
@Override
public void writeData(DataOutputStream outputStream, Object object) throws KettleFileException {
try {
// Is the value NULL?
outputStream.writeBoolean(object == null);
if (object != null) {
switch(storageType) {
case STORAGE_TYPE_NORMAL:
// Handle Content -- only when not NULL
Timestamp timestamp = convertDateToTimestamp((Date) object);
outputStream.writeLong(timestamp.getTime());
outputStream.writeInt(timestamp.getNanos());
break;
case STORAGE_TYPE_BINARY_STRING:
// Handle binary string content -- only when not NULL
// In this case, we opt not to convert anything at all for speed.
// That way, we can save on CPU power.
// Since the streams can be compressed, volume shouldn't be an issue
// at all.
//
writeBinaryString(outputStream, (byte[]) object);
break;
case STORAGE_TYPE_INDEXED:
// just an index
writeInteger(outputStream, (Integer) object);
break;
default:
throw new KettleFileException(toString() + " : Unknown storage type " + getStorageType());
}
}
} catch (ClassCastException e) {
throw new RuntimeException(toString() + " : There was a data type error: the data type of " + object.getClass().getName() + " object [" + object + "] does not correspond to value meta [" + toStringMeta() + "]");
} catch (IOException e) {
throw new KettleFileException(toString() + " : Unable to write value timestamp data to output stream", e);
} catch (KettleValueException e) {
throw new RuntimeException(toString() + " : There was a data type error: the data type of " + object.getClass().getName() + " object [" + object + "] does not correspond to value meta [" + toStringMeta() + "]");
}
}
Use of org.pentaho.di.core.exception.KettleFileException in the pentaho-kettle project by Pentaho.
From the class KettleVFS, method createTempFile:
/**
 * Creates a unique temporary file name in the given directory and returns
 * the (not yet created) file object for it.
 *
 * @param prefix file name prefix
 * @param suffix file name suffix (e.g. an extension)
 * @param directory directory in which the temp file will live
 * @param space variable space used to resolve the file object
 * @return a file object for a name that does not yet exist
 * @throws KettleFileException if the file object cannot be resolved
 */
public static FileObject createTempFile(String prefix, String suffix, String directory, VariableSpace space) throws KettleFileException {
try {
FileObject candidate;
do {
// Build the candidate name with a UUID so that multiple JVMs on
// multiple nodes never collide. The old counter-based mechanism
// produced duplicate temp file names (breaking Sort Rows, for
// example, on clustered runs).
String tempName = directory + '/' + prefix + '_' + UUIDUtil.getUUIDAsString() + suffix;
candidate = getFileObject(tempName, space);
} while (candidate.exists());
return candidate;
} catch (IOException e) {
throw new KettleFileException(e);
}
}
Aggregations