Use of java.nio.charset.Charset in project camel by apache.
The class Mina2Consumer, method configureDefaultCodecFactory.
protected void configureDefaultCodecFactory(String type, IoService service, Mina2Configuration configuration) {
    if (configuration.isTextline()) {
        Charset charset = getEncodingParameter(type, configuration);
        LineDelimiter delimiter = getLineDelimiterParameter(configuration.getTextlineDelimiter());
        Mina2TextLineCodecFactory codecFactory = new Mina2TextLineCodecFactory(charset, delimiter);
        if (configuration.getEncoderMaxLineLength() > 0) {
            codecFactory.setEncoderMaxLineLength(configuration.getEncoderMaxLineLength());
        }
        if (configuration.getDecoderMaxLineLength() > 0) {
            codecFactory.setDecoderMaxLineLength(configuration.getDecoderMaxLineLength());
        }
        addCodecFactory(service, codecFactory);
        if (LOG.isDebugEnabled()) {
            LOG.debug("{}: Using TextLineCodecFactory: {} using encoding: {} line delimiter: {}({})",
                    new Object[] { type, codecFactory, charset, configuration.getTextlineDelimiter(), delimiter });
            LOG.debug("Encoder maximum line length: {}. Decoder maximum line length: {}",
                    codecFactory.getEncoderMaxLineLength(), codecFactory.getDecoderMaxLineLength());
        }
    } else {
        ObjectSerializationCodecFactory codecFactory = new ObjectSerializationCodecFactory();
        addCodecFactory(service, codecFactory);
        LOG.debug("{}: Using ObjectSerializationCodecFactory: {}", type, codecFactory);
    }
}
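The charset above comes from a getEncodingParameter helper that is not shown here. A minimal sketch of what such a lookup could do, assuming it only maps a configured encoding name to a Charset with a platform-default fallback (the helper below is illustrative, not the actual Camel code):

import java.nio.charset.Charset;

final class EncodingResolver {
    // Hypothetical helper: resolve a configured encoding name to a Charset,
    // falling back to the JVM default when nothing is configured.
    static Charset resolve(String configuredEncoding) {
        if (configuredEncoding == null || configuredEncoding.isEmpty()) {
            return Charset.defaultCharset();
        }
        // Throws IllegalCharsetNameException or UnsupportedCharsetException for bad names.
        return Charset.forName(configuredEncoding);
    }
}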
Use of java.nio.charset.Charset in project sonarqube by SonarSource.
The class IssueExclusionsLoader, method execute.
/**
 * {@inheritDoc}
 */
public void execute() {
    Charset sourcesEncoding = fileSystem.encoding();
    for (InputFile inputFile : fileSystem.inputFiles(fileSystem.predicates().all())) {
        try {
            String componentEffectiveKey = ((DefaultInputFile) inputFile).key();
            if (componentEffectiveKey != null) {
                String path = inputFile.relativePath();
                inclusionPatternInitializer.initializePatternsForPath(path, componentEffectiveKey);
                exclusionPatternInitializer.initializePatternsForPath(path, componentEffectiveKey);
                if (exclusionPatternInitializer.hasFileContentPattern()) {
                    regexpScanner.scan(componentEffectiveKey, inputFile.file(), sourcesEncoding);
                }
            }
        } catch (Exception e) {
            throw new IllegalStateException("Unable to read the source file : '" + inputFile.absolutePath()
                    + "' with the charset : '" + sourcesEncoding.name() + "'.", e);
        }
    }
}
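The reading itself is delegated to regexpScanner.scan, which consumes the file with the project's source encoding. A minimal sketch of that reading pattern with plain java.io, assuming a line-by-line scan (this is not the SonarQube RegexpScanner):

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;

final class LineScanner {
    // Read the file line by line using the given Charset, as a scanner over
    // source content would need to do before matching exclusion patterns.
    static void scan(File file, Charset encoding) throws IOException {
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(file), encoding))) {
            String line;
            while ((line = reader.readLine()) != null) {
                // match the issue-exclusion regular expressions against 'line' here
            }
        }
    }
}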
Use of java.nio.charset.Charset in project sonarqube by SonarSource.
The class MetadataGenerator, method setMetadata.
/**
 * Sets all metadata in the file, including charset and status.
 * It is an expensive computation, reading the entire file.
 */
public void setMetadata(final DefaultInputFile inputFile, Charset defaultEncoding) {
    try {
        Charset charset = detectCharset(inputFile.path(), defaultEncoding);
        inputFile.setCharset(charset);
        Metadata metadata = fileMetadata.readMetadata(inputFile.file(), charset);
        inputFile.setMetadata(metadata);
        inputFile.setStatus(statusDetection.status(inputModule.definition().getKeyWithBranch(),
                inputFile.relativePath(), metadata.hash()));
        LOG.debug("'{}' generated metadata {} with charset '{}'",
                inputFile.relativePath(), inputFile.type() == Type.TEST ? "as test " : "", charset);
    } catch (Exception e) {
        throw new IllegalStateException(e);
    }
}
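The detectCharset helper is not shown above. One common approach is to sniff a Unicode byte order mark and otherwise fall back to the configured default; the class below is a hypothetical illustration of that idea, not SonarQube's actual detector:

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

final class BomCharsetSniffer {
    // Inspect the first bytes of the file for a BOM; otherwise use the default encoding.
    static Charset sniff(Path path, Charset defaultEncoding) throws IOException {
        try (InputStream in = Files.newInputStream(path)) {
            byte[] bom = new byte[3];
            int read = in.read(bom);
            if (read >= 3 && (bom[0] & 0xFF) == 0xEF && (bom[1] & 0xFF) == 0xBB && (bom[2] & 0xFF) == 0xBF) {
                return StandardCharsets.UTF_8;
            }
            if (read >= 2 && (bom[0] & 0xFF) == 0xFE && (bom[1] & 0xFF) == 0xFF) {
                return StandardCharsets.UTF_16BE;
            }
            if (read >= 2 && (bom[0] & 0xFF) == 0xFF && (bom[1] & 0xFF) == 0xFE) {
                return StandardCharsets.UTF_16LE;
            }
            return defaultEncoding;
        }
    }
}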
Use of java.nio.charset.Charset in project che by eclipse.
The class FileStoreTextFileBuffer, method commitFileBufferContent.
/*
 * @see org.eclipse.core.internal.filebuffers.FileBuffer#commitFileBufferContent(org.eclipse.core.runtime.IProgressMonitor, boolean)
 */
protected void commitFileBufferContent(IProgressMonitor monitor, boolean overwrite) throws CoreException {
    // if (!isSynchronized() && !overwrite)
    //     throw new CoreException(new Status(IStatus.WARNING, FileBuffersPlugin.PLUGIN_ID, IResourceStatus.OUT_OF_SYNC_LOCAL, FileBuffersMessages.FileBuffer_error_outOfSync, null));
    String encoding = computeEncoding();
    Charset charset;
    try {
        charset = Charset.forName(encoding);
    } catch (UnsupportedCharsetException ex) {
        String message = NLSUtility.format(FileBuffersMessages.ResourceTextFileBuffer_error_unsupported_encoding_message_arg, encoding);
        IStatus s = new Status(IStatus.ERROR, FileBuffersPlugin.PLUGIN_ID, IStatus.OK, message, ex);
        throw new CoreException(s);
    } catch (IllegalCharsetNameException ex) {
        String message = NLSUtility.format(FileBuffersMessages.ResourceTextFileBuffer_error_illegal_encoding_message_arg, encoding);
        IStatus s = new Status(IStatus.ERROR, FileBuffersPlugin.PLUGIN_ID, IStatus.OK, message, ex);
        throw new CoreException(s);
    }
    CharsetEncoder encoder = charset.newEncoder();
    encoder.onMalformedInput(CodingErrorAction.REPLACE);
    encoder.onUnmappableCharacter(CodingErrorAction.REPORT);
    byte[] bytes;
    int bytesLength;
    try {
        ByteBuffer byteBuffer = encoder.encode(CharBuffer.wrap(fDocument.get()));
        bytesLength = byteBuffer.limit();
        if (byteBuffer.hasArray())
            bytes = byteBuffer.array();
        else {
            bytes = new byte[bytesLength];
            byteBuffer.get(bytes);
        }
    } catch (CharacterCodingException ex) {
        Assert.isTrue(ex instanceof UnmappableCharacterException);
        String message = NLSUtility.format(FileBuffersMessages.ResourceTextFileBuffer_error_charset_mapping_failed_message_arg, encoding);
        IStatus s = new Status(IStatus.ERROR, FileBuffersPlugin.PLUGIN_ID, IFileBufferStatusCodes.CHARSET_MAPPING_FAILED, message, null);
        throw new CoreException(s);
    }
    IFileInfo fileInfo = fFileStore.fetchInfo();
    if (fileInfo != null && fileInfo.exists()) {
        if (!overwrite)
            checkSynchronizationState();
        InputStream stream = new ByteArrayInputStream(bytes, 0, bytesLength);
        /*
         * XXX:
         * This is a workaround for a corresponding bug in Java readers and writer,
         * see http://developer.java.sun.com/developer/bugParade/bugs/4508058.html
         */
        if (fHasBOM && CHARSET_UTF_8.equals(encoding))
            stream = new SequenceInputStream(new ByteArrayInputStream(IContentDescription.BOM_UTF_8), stream);
        // here the file synchronizer should actually be removed and afterwards added again. However,
        // we are already inside an operation, so the delta is sent AFTER we have added the listener
        setFileContents(stream, monitor);
        // set synchronization stamp to know whether the file synchronizer must become active
        fSynchronizationStamp = fFileStore.fetchInfo().getLastModified();
        // if (fAnnotationModel instanceof IPersistableAnnotationModel) {
        //     IPersistableAnnotationModel persistableModel = (IPersistableAnnotationModel) fAnnotationModel;
        //     persistableModel.commit(fDocument);
        // }
    } else {
        fFileStore.getParent().mkdir(EFS.NONE, null);
        OutputStream out = fFileStore.openOutputStream(EFS.NONE, null);
        try {
            /*
             * XXX:
             * This is a workaround for a corresponding bug in Java readers and writer,
             * see http://developer.java.sun.com/developer/bugParade/bugs/4508058.html
             */
            if (fHasBOM && CHARSET_UTF_8.equals(encoding))
                out.write(IContentDescription.BOM_UTF_8);
            out.write(bytes, 0, bytesLength);
            out.flush();
            out.close();
        } catch (IOException x) {
            IStatus s = new Status(IStatus.ERROR, FileBuffersPlugin.PLUGIN_ID, IStatus.OK, x.getLocalizedMessage(), x);
            throw new CoreException(s);
        } finally {
            try {
                out.close();
            } catch (IOException x) {
            }
        }
        // set synchronization stamp to know whether the file synchronizer must become active
        fSynchronizationStamp = fFileStore.fetchInfo().getLastModified();
    }
}
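The CharsetEncoder configuration above (REPLACE for malformed input, REPORT for unmappable characters) is standard java.nio.charset API and can be exercised on its own. A self-contained sketch of the same pattern:

import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetEncoder;
import java.nio.charset.CodingErrorAction;

final class EncodeWithReport {
    // Encode text with the named charset: silently replace malformed input, but
    // report characters the target charset cannot represent so callers can fail clearly.
    static byte[] encode(String text, String encodingName) throws CharacterCodingException {
        CharsetEncoder encoder = Charset.forName(encodingName).newEncoder();
        encoder.onMalformedInput(CodingErrorAction.REPLACE);
        encoder.onUnmappableCharacter(CodingErrorAction.REPORT);
        // Throws UnmappableCharacterException (a CharacterCodingException) on unmappable input.
        ByteBuffer buffer = encoder.encode(CharBuffer.wrap(text));
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes);
        return bytes;
    }
}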
Use of java.nio.charset.Charset in project neo4j by neo4j.
The class ImportToolTest, method shouldImportFromInputDataEncodedWithSpecificCharset.
@Test
public void shouldImportFromInputDataEncodedWithSpecificCharset() throws Exception {
    // GIVEN
    List<String> nodeIds = nodeIds();
    Configuration config = Configuration.COMMAS;
    Charset charset = Charset.forName("UTF-16");
    // WHEN
    importTool("--into", dbRule.getStoreDirAbsolutePath(),
            "--input-encoding", charset.name(),
            "--nodes", nodeData(true, config, nodeIds, TRUE, charset).getAbsolutePath(),
            "--relationships", relationshipData(true, config, nodeIds, TRUE, true, charset).getAbsolutePath());
    // THEN
    verifyData();
}
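The nodeData and relationshipData helpers write CSV fixtures in the requested encoding before the import runs. A hedged sketch of writing such a fixture in UTF-16 with plain java.io (the file name and columns are illustrative, not the Neo4j test helpers):

import java.io.File;
import java.io.PrintStream;
import java.nio.charset.Charset;

final class Utf16Fixture {
    public static void main(String[] args) throws Exception {
        Charset charset = Charset.forName("UTF-16");
        File csv = new File("nodes.csv");
        // PrintStream accepts a charset name, so every line is written as UTF-16.
        try (PrintStream out = new PrintStream(csv, charset.name())) {
            out.println("id:ID,name");
            out.println("0,first");
            out.println("1,second");
        }
    }
}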