use of org.eclipse.jst.jsp.core.internal.contenttype.JSPResourceEncodingDetector in project webtools.sourceediting by eclipse.
Shown below: the handleStandardCalculations method of the ContentDescriberForJSPedCSS class.
/**
 * Copies the encoding results gathered by the detector into the given
 * content description: the byte order mark, the charset, and — when the
 * detected charset is invalid — the unsupported-charset and
 * appropriate-default properties.
 *
 * @param description the content description to populate
 * @param detector the detector that has already examined the input; must
 *            be a JSPResourceEncodingDetector (unchecked cast below)
 * @throws IOException declared for API symmetry; may be thrown by the
 *             detector while computing its memento
 */
private void handleStandardCalculations(IContentDescription description, IResourceCharsetDetector detector) throws IOException {
// note: if we're asked for one, we set them all. I need to be sure if
// called
// multiple times (one for each, say) that we don't waste time
// processing same
// content again.
EncodingMemento encodingMemento = ((JSPResourceEncodingDetector) detector).getEncodingMemento();
// TODO: I need to verify to see if this BOM work is always done
// by text type.
Object detectedByteOrderMark = encodingMemento.getUnicodeBOM();
if (detectedByteOrderMark != null) {
Object existingByteOrderMark = description.getProperty(IContentDescription.BYTE_ORDER_MARK);
// need to "push" up into base.
// only overwrite when the detected BOM differs from what is
// already recorded, to avoid redundant property churn
if (!detectedByteOrderMark.equals(existingByteOrderMark))
description.setProperty(IContentDescription.BYTE_ORDER_MARK, detectedByteOrderMark);
}
if (!encodingMemento.isValid()) {
// note: after setting here, its the mere presence of
// IContentDescriptionExtended.UNSUPPORTED_CHARSET
// in the resource's description that can be used to determine if
// invalid
// in those cases, the "detected" property contains an
// "appropriate default" to use.
description.setProperty(IContentDescriptionExtended.UNSUPPORTED_CHARSET, encodingMemento.getInvalidEncoding());
description.setProperty(IContentDescriptionExtended.APPROPRIATE_DEFAULT, encodingMemento.getAppropriateDefault());
}
Object detectedCharset = encodingMemento.getDetectedCharsetName();
Object javaCharset = encodingMemento.getJavaCharsetName();
// we always include detected, if its different than java
handleDetectedSpecialCase(description, detectedCharset, javaCharset);
if (javaCharset != null) {
Object existingCharset = description.getProperty(IContentDescription.CHARSET);
if (javaCharset.equals(existingCharset)) {
// NOTE(review): this repeats the unconditional call a few lines
// above with identical arguments — presumably idempotent, but
// confirm against handleDetectedSpecialCase before removing.
handleDetectedSpecialCase(description, detectedCharset, javaCharset);
} else {
// we may need to add what we found, but only need to add
// if different from the default.
Object defaultCharset = detector.getSpecDefaultEncoding();
if (defaultCharset != null) {
if (!defaultCharset.equals(javaCharset)) {
description.setProperty(IContentDescription.CHARSET, javaCharset);
}
} else {
// assuming if there is no spec default, we always need to
// add, I'm assuming
description.setProperty(IContentDescription.CHARSET, javaCharset);
}
}
}
}
use of org.eclipse.jst.jsp.core.internal.contenttype.JSPResourceEncodingDetector in project webtools.sourceediting by eclipse.
Shown below: the testXMLNormalNonDefault method of the JSPEncodingTests class.
/**
 * Verifies that a JSP file declaring the non-default encoding ISO-8859-8
 * is detected with exactly that charset.
 */
public void testXMLNormalNonDefault() throws IOException {
    doTestFileStream(fileLocation + "NormalNonDefault.jsp", "ISO-8859-8", new JSPResourceEncodingDetector());
}
use of org.eclipse.jst.jsp.core.internal.contenttype.JSPResourceEncodingDetector in project webtools.sourceediting by eclipse.
Shown below: the testXMLNoEncoding method of the JSPEncodingTests class.
/**
 * Verifies that a JSP file with no explicit encoding declaration falls
 * back to the spec default, ISO-8859-1.
 */
public void testXMLNoEncoding() throws IOException {
    String testFile = fileLocation + "noEncoding.jsp";
    doTestFileStream(testFile, "ISO-8859-1", new JSPResourceEncodingDetector());
}
use of org.eclipse.jst.jsp.core.internal.contenttype.JSPResourceEncodingDetector in project webtools.sourceediting by eclipse.
Shown below: the doTestFileStream helper method of the JSPEncodingTests class.
/**
 * Runs the given detector over the named test file and asserts that the
 * detected Java charset name matches the expected IANA encoding
 * (compared case-insensitively), then performs a simple read check.
 *
 * @param filename path of the test file, resolved via JSPEncodingTestsPlugin
 * @param expectedIANAEncoding charset name the detector is expected to report
 * @param detector the detector under test; must be a JSPResourceEncodingDetector
 * @throws IOException if the file cannot be read
 * @throws IllegalArgumentException if the test file does not exist
 */
private void doTestFileStream(String filename, String expectedIANAEncoding, IResourceCharsetDetector detector) throws IOException {
    File file = JSPEncodingTestsPlugin.getTestFile(filename);
    if (!file.exists())
        throw new IllegalArgumentException(filename + " was not found");
    // try-with-resources closes both streams even when wrapping or
    // detection throws; the original opened inputStream before its
    // try block, so a failure in getMarkSupportedStream leaked it.
    try (InputStream inputStream = new FileInputStream(file);
            InputStream istream = getMarkSupportedStream(inputStream)) {
        detector.set(istream);
        EncodingMemento encodingMemento = ((JSPResourceEncodingDetector) detector).getEncodingMemento();
        String foundIANAEncoding = encodingMemento.getJavaCharsetName();
        // equalsIgnoreCase is deliberate: some charset names differ only
        // in case (e.g. SHIFT_JIS vs. Shift_JIS) and we want to be
        // tolerant on input.
        boolean expectedIANAResult = expectedIANAEncoding.equalsIgnoreCase(foundIANAEncoding);
        assertTrue("encoding test file " + filename + " expected: " + expectedIANAEncoding + " found: " + foundIANAEncoding, expectedIANAResult);
        // a very simple read test ... will cause JUnit error (not fail)
        // if throws exception.
        ensureCanRead(filename, foundIANAEncoding, istream);
    }
}
use of org.eclipse.jst.jsp.core.internal.contenttype.JSPResourceEncodingDetector in project webtools.sourceediting by eclipse.
Shown below: the testUtf16UnicodeStreamWithNoEncodingInHeader method of the JSPEncodingTests class.
/**
 * Verifies that a UTF-16 byte order mark in the stream takes priority
 * over settings and defaults when no encoding is declared in the header.
 */
public void testUtf16UnicodeStreamWithNoEncodingInHeader() throws IOException {
    String testFile = fileLocation + "utf16UnicodeStreamWithNoEncodingInHeader2.jsp";
    doTestFileStream(testFile, "UTF-16", new JSPResourceEncodingDetector());
}
Aggregations