I'm trying to read CSV files using Java. Some of the files may have a byte order mark at the beginning, but not all of them. When a BOM is present, it gets read in along with the rest of the first line, which breaks my string comparisons.
Is there an easy way to skip the byte order mark when it exists?
Thanks!
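For illustration, a minimal sketch of the symptom (the file name "data.csv" and the expected header "id,name" are just placeholders for this example):
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

public class BomSymptom {
    public static void main(String[] args) throws Exception {
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream("data.csv"), StandardCharsets.UTF_8))) {
            String firstLine = reader.readLine();
            // If the file starts with a UTF-8 BOM, firstLine begins with '\uFEFF',
            // so the comparison against the expected header prints false.
            System.out.println("id,name".equals(firstLine));
        }
    }
}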
Answer 0 (score: 110)
EDIT: I posted the corrected version on GitHub: https://github.com/gpakosz/UnicodeBOMInputStream
Here is a class I wrote some time ago; I just edited the package name before pasting it. Nothing fancy, it is quite similar to the solutions posted in Sun's bug database. Incorporate it into your code and you're fine.
/* ____________________________________________________________________________
*
* File: UnicodeBOMInputStream.java
* Author: Gregory Pakosz.
* Date: 02 - November - 2005
* ____________________________________________________________________________
*/
package com.stackoverflow.answer;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;
/**
* The <code>UnicodeBOMInputStream</code> class wraps any
* <code>InputStream</code> and detects the presence of any Unicode BOM
* (Byte Order Mark) at its beginning, as defined by
* <a href="http://www.faqs.org/rfcs/rfc3629.html">RFC 3629 - UTF-8, a transformation format of ISO 10646</a>
*
* <p>The
* <a href="http://www.unicode.org/unicode/faq/utf_bom.html">Unicode FAQ</a>
* defines 5 types of BOMs:<ul>
* <li><pre>00 00 FE FF = UTF-32, big-endian</pre></li>
* <li><pre>FF FE 00 00 = UTF-32, little-endian</pre></li>
* <li><pre>FE FF = UTF-16, big-endian</pre></li>
* <li><pre>FF FE = UTF-16, little-endian</pre></li>
* <li><pre>EF BB BF = UTF-8</pre></li>
* </ul></p>
*
* <p>Use the {@link #getBOM()} method to know whether a BOM has been detected
* or not.
* </p>
* <p>Use the {@link #skipBOM()} method to remove the detected BOM from the
* wrapped <code>InputStream</code> object.</p>
*/
public class UnicodeBOMInputStream extends InputStream
{
/**
* Type safe enumeration class that describes the different types of Unicode
* BOMs.
*/
public static final class BOM
{
/**
* NONE.
*/
public static final BOM NONE = new BOM(new byte[]{},"NONE");
/**
* UTF-8 BOM (EF BB BF).
*/
public static final BOM UTF_8 = new BOM(new byte[]{(byte)0xEF,
(byte)0xBB,
(byte)0xBF},
"UTF-8");
/**
* UTF-16, little-endian (FF FE).
*/
public static final BOM UTF_16_LE = new BOM(new byte[]{ (byte)0xFF,
(byte)0xFE},
"UTF-16 little-endian");
/**
* UTF-16, big-endian (FE FF).
*/
public static final BOM UTF_16_BE = new BOM(new byte[]{ (byte)0xFE,
(byte)0xFF},
"UTF-16 big-endian");
/**
* UTF-32, little-endian (FF FE 00 00).
*/
public static final BOM UTF_32_LE = new BOM(new byte[]{ (byte)0xFF,
(byte)0xFE,
(byte)0x00,
(byte)0x00},
"UTF-32 little-endian");
/**
* UTF-32, big-endian (00 00 FE FF).
*/
public static final BOM UTF_32_BE = new BOM(new byte[]{ (byte)0x00,
(byte)0x00,
(byte)0xFE,
(byte)0xFF},
"UTF-32 big-endian");
/**
* Returns a <code>String</code> representation of this <code>BOM</code>
* value.
*/
public final String toString()
{
return description;
}
/**
* Returns the bytes corresponding to this <code>BOM</code> value.
*/
public final byte[] getBytes()
{
final int length = bytes.length;
final byte[] result = new byte[length];
// Make a defensive copy
System.arraycopy(bytes,0,result,0,length);
return result;
}
private BOM(final byte bom[], final String description)
{
assert(bom != null) : "invalid BOM: null is not allowed";
assert(description != null) : "invalid description: null is not allowed";
assert(description.length() != 0) : "invalid description: empty string is not allowed";
this.bytes = bom;
this.description = description;
}
final byte bytes[];
private final String description;
} // BOM
/**
* Constructs a new <code>UnicodeBOMInputStream</code> that wraps the
* specified <code>InputStream</code>.
*
* @param inputStream an <code>InputStream</code>.
*
* @throws NullPointerException when <code>inputStream</code> is
* <code>null</code>.
* @throws IOException on reading from the specified <code>InputStream</code>
* when trying to detect the Unicode BOM.
*/
public UnicodeBOMInputStream(final InputStream inputStream) throws NullPointerException,
IOException
{
if (inputStream == null)
throw new NullPointerException("invalid input stream: null is not allowed");
in = new PushbackInputStream(inputStream,4);
final byte bom[] = new byte[4];
final int read = in.read(bom);
switch(read)
{
case 4:
if ((bom[0] == (byte)0xFF) &&
(bom[1] == (byte)0xFE) &&
(bom[2] == (byte)0x00) &&
(bom[3] == (byte)0x00))
{
this.bom = BOM.UTF_32_LE;
break;
}
else
if ((bom[0] == (byte)0x00) &&
(bom[1] == (byte)0x00) &&
(bom[2] == (byte)0xFE) &&
(bom[3] == (byte)0xFF))
{
this.bom = BOM.UTF_32_BE;
break;
}
case 3:
if ((bom[0] == (byte)0xEF) &&
(bom[1] == (byte)0xBB) &&
(bom[2] == (byte)0xBF))
{
this.bom = BOM.UTF_8;
break;
}
case 2:
if ((bom[0] == (byte)0xFF) &&
(bom[1] == (byte)0xFE))
{
this.bom = BOM.UTF_16_LE;
break;
}
else
if ((bom[0] == (byte)0xFE) &&
(bom[1] == (byte)0xFF))
{
this.bom = BOM.UTF_16_BE;
break;
}
default:
this.bom = BOM.NONE;
break;
}
if (read > 0)
in.unread(bom,0,read);
}
/**
* Returns the <code>BOM</code> that was detected in the wrapped
* <code>InputStream</code> object.
*
* @return a <code>BOM</code> value.
*/
public final BOM getBOM()
{
// BOM type is immutable.
return bom;
}
/**
* Skips the <code>BOM</code> that was found in the wrapped
* <code>InputStream</code> object.
*
* @return this <code>UnicodeBOMInputStream</code>.
*
* @throws IOException when trying to skip the BOM from the wrapped
* <code>InputStream</code> object.
*/
public final synchronized UnicodeBOMInputStream skipBOM() throws IOException
{
if (!skipped)
{
in.skip(bom.bytes.length);
skipped = true;
}
return this;
}
/**
* {@inheritDoc}
*/
public int read() throws IOException
{
return in.read();
}
/**
* {@inheritDoc}
*/
public int read(final byte b[]) throws IOException,
NullPointerException
{
return in.read(b,0,b.length);
}
/**
* {@inheritDoc}
*/
public int read(final byte b[],
final int off,
final int len) throws IOException,
NullPointerException
{
return in.read(b,off,len);
}
/**
* {@inheritDoc}
*/
public long skip(final long n) throws IOException
{
return in.skip(n);
}
/**
* {@inheritDoc}
*/
public int available() throws IOException
{
return in.available();
}
/**
* {@inheritDoc}
*/
public void close() throws IOException
{
in.close();
}
/**
* {@inheritDoc}
*/
public synchronized void mark(final int readlimit)
{
in.mark(readlimit);
}
/**
* {@inheritDoc}
*/
public synchronized void reset() throws IOException
{
in.reset();
}
/**
* {@inheritDoc}
*/
public boolean markSupported()
{
return in.markSupported();
}
private final PushbackInputStream in;
private final BOM bom;
private boolean skipped = false;
} // UnicodeBOMInputStream
And here is how you would use it:
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
public final class UnicodeBOMInputStreamUsage
{
public static void main(final String[] args) throws Exception
{
FileInputStream fis = new FileInputStream("test/offending_bom.txt");
UnicodeBOMInputStream ubis = new UnicodeBOMInputStream(fis);
System.out.println("detected BOM: " + ubis.getBOM());
System.out.print("Reading the content of the file without skipping the BOM: ");
InputStreamReader isr = new InputStreamReader(ubis);
BufferedReader br = new BufferedReader(isr);
System.out.println(br.readLine());
br.close();
isr.close();
ubis.close();
fis.close();
fis = new FileInputStream("test/offending_bom.txt");
ubis = new UnicodeBOMInputStream(fis);
isr = new InputStreamReader(ubis);
br = new BufferedReader(isr);
ubis.skipBOM();
System.out.print("Reading the content of the file after skipping the BOM: ");
System.out.println(br.readLine());
br.close();
isr.close();
ubis.close();
fis.close();
}
} // UnicodeBOMInputStreamUsage
Answer 1 (score: 86)
The Apache Commons IO library has an InputStream that can detect and discard byte order marks: BOMInputStream (javadoc):
BOMInputStream bomIn = new BOMInputStream(in);
int firstNonBOMByte = bomIn.read(); // Skips BOM
if (bomIn.hasBOM()) {
// has a UTF-8 BOM
}
It can also distinguish among the various different byte-order marks if you need to detect different encodings as well, e.g. UTF-8 vs. UTF-16 big- and little-endian; details are at the doc link above. You can then use the detected ByteOrderMark to choose a Charset for decoding the stream. (There is probably a more streamlined way to do this if you need all of that functionality; perhaps the UnicodeReader in BalusC's answer?) Note that, in general, there is no very good way to detect what encoding some bytes are in, but if the stream starts with a BOM, apparently that can help.
Edit: if you also need to detect the BOM for UTF-16, UTF-32, etc., the constructor should be:
new BOMInputStream(is, ByteOrderMark.UTF_8, ByteOrderMark.UTF_16BE,
ByteOrderMark.UTF_16LE, ByteOrderMark.UTF_32BE, ByteOrderMark.UTF_32LE)
Upvote to @martin-charlesworth's comment :)
Answer 2 (score: 29)
A simpler solution:
import java.io.IOException;
import java.io.Reader;

public class BOMSkipper
{
public static void skip(Reader reader) throws IOException
{
reader.mark(1);
char[] possibleBOM = new char[1];
reader.read(possibleBOM);
if (possibleBOM[0] != '\ufeff')
{
reader.reset();
}
}
}
Usage example:
BufferedReader input = new BufferedReader(new InputStreamReader(new FileInputStream(file), fileExpectedCharset));
BOMSkipper.skip(input);
//Now UTF prefix not present:
input.readLine();
...
Works with all 5 UTF encodings! As long as the Reader was created with the matching charset, the BOM bytes decode to the single character '\ufeff', which is exactly what the skipper checks for and discards.
Answer 3 (score: 24)
The Google Data API has a UnicodeReader which automagically detects the encoding. You can use it instead of InputStreamReader. Here is a slightly compacted excerpt of its source, which is quite straightforward:
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PushbackInputStream;
import java.io.Reader;

public class UnicodeReader extends Reader {
private static final int BOM_SIZE = 4;
private final InputStreamReader reader;
/**
* Construct UnicodeReader
* @param in Input stream.
* @param defaultEncoding Default encoding to be used if BOM is not found,
* or <code>null</code> to use system default encoding.
* @throws IOException If an I/O error occurs.
*/
public UnicodeReader(InputStream in, String defaultEncoding) throws IOException {
byte bom[] = new byte[BOM_SIZE];
String encoding;
int unread;
PushbackInputStream pushbackStream = new PushbackInputStream(in, BOM_SIZE);
int n = pushbackStream.read(bom, 0, bom.length);
// Read ahead four bytes and check for BOM marks.
if ((bom[0] == (byte) 0xEF) && (bom[1] == (byte) 0xBB) && (bom[2] == (byte) 0xBF)) {
encoding = "UTF-8";
unread = n - 3;
} else if ((bom[0] == (byte) 0xFE) && (bom[1] == (byte) 0xFF)) {
encoding = "UTF-16BE";
unread = n - 2;
} else if ((bom[0] == (byte) 0xFF) && (bom[1] == (byte) 0xFE)) {
encoding = "UTF-16LE";
unread = n - 2;
} else if ((bom[0] == (byte) 0x00) && (bom[1] == (byte) 0x00) && (bom[2] == (byte) 0xFE) && (bom[3] == (byte) 0xFF)) {
encoding = "UTF-32BE";
unread = n - 4;
} else if ((bom[0] == (byte) 0xFF) && (bom[1] == (byte) 0xFE) && (bom[2] == (byte) 0x00) && (bom[3] == (byte) 0x00)) {
encoding = "UTF-32LE";
unread = n - 4;
} else {
encoding = defaultEncoding;
unread = n;
}
// Unread bytes if necessary and skip BOM marks.
if (unread > 0) {
pushbackStream.unread(bom, (n - unread), unread);
} else if (unread < -1) {
pushbackStream.unread(bom, 0, 0);
}
// Use given encoding.
if (encoding == null) {
reader = new InputStreamReader(pushbackStream);
} else {
reader = new InputStreamReader(pushbackStream, encoding);
}
}
public String getEncoding() {
return reader.getEncoding();
}
public int read(char[] cbuf, int off, int len) throws IOException {
return reader.read(cbuf, off, len);
}
public void close() throws IOException {
reader.close();
}
}
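A minimal usage sketch (not part of the original excerpt); the UTF-8 fallback and the file name are assumptions:
import java.io.BufferedReader;
import java.io.FileInputStream;

public class UnicodeReaderUsage {
    public static void main(String[] args) throws Exception {
        // Fall back to UTF-8 when no BOM is present (an assumption for this sketch).
        UnicodeReader unicodeReader = new UnicodeReader(new FileInputStream("data.csv"), "UTF-8");
        System.out.println("detected encoding: " + unicodeReader.getEncoding());
        try (BufferedReader reader = new BufferedReader(unicodeReader)) {
            System.out.println(reader.readLine());
        }
    }
}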
Answer 4 (score: 13)
The Apache Commons IO library's BOMInputStream has already been mentioned by @rescdsk, but I did not see how to use it to get an InputStream without the BOM.
Here is how I did it in Scala:
import java.io._
import org.apache.commons.io.input.BOMInputStream
val file = new File(path_to_xml_file_with_BOM)
val fileInpStream = new FileInputStream(file)
val bomIn = new BOMInputStream(fileInpStream,
false); // false means don't include BOM
Answer 5 (score: 4)
To simply remove the BOM characters from your file, I recommend using Apache Commons IO's BOMInputStream:
public BOMInputStream(InputStream delegate,
boolean include)
Constructs a new BOM InputStream that detects a ByteOrderMark.UTF_8 and optionally includes it.
Parameters:
delegate - the InputStream to delegate to
include - true to include the UTF-8 BOM or false to exclude it
Setting include to false will exclude your BOM characters.
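A minimal usage sketch of that constructor (not from the quoted javadoc; it assumes commons-io is on the classpath and the file name is a placeholder):
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.input.BOMInputStream;

public class BomExcludeExample {
    public static void main(String[] args) throws Exception {
        // include = false: a leading UTF-8 BOM, if present, is not returned by the stream.
        try (BOMInputStream bomIn = new BOMInputStream(new FileInputStream("data.csv"), false);
             BufferedReader reader = new BufferedReader(
                     new InputStreamReader(bomIn, StandardCharsets.UTF_8))) {
            System.out.println(reader.readLine());
        }
    }
}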
Answer 6 (score: 2)
Unfortunately not. You'll have to identify and skip it yourself. This page details what you have to watch for. Also see this SO question for more details.
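For the UTF-8 case, a rough sketch of doing it by hand (not taken from the linked pages; the file name is a placeholder): it reads the first three bytes and pushes them back when they are not a BOM.
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;

public class ManualBomSkip {
    // Consumes a leading UTF-8 BOM (EF BB BF) if present; otherwise pushes the bytes back.
    public static InputStream skipUtf8Bom(InputStream in) throws IOException {
        PushbackInputStream pushback = new PushbackInputStream(in, 3);
        byte[] head = new byte[3];
        int read = pushback.read(head);
        boolean isBom = read == 3
                && head[0] == (byte) 0xEF && head[1] == (byte) 0xBB && head[2] == (byte) 0xBF;
        if (!isBom && read > 0) {
            pushback.unread(head, 0, read); // not a BOM, so put everything back
        }
        return pushback;
    }

    public static void main(String[] args) throws Exception {
        try (InputStream in = skipUtf8Bom(new FileInputStream("data.csv"))) {
            System.out.println(in.read()); // first byte after any BOM
        }
    }
}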
Answer 7 (score: 0)
I had the same problem, and because I wasn't reading in a bunch of files, I went with a simpler solution. I figured my encoding was UTF-8 because, when I printed out the offending character with the help of this page (Get unicode value of a character), I found it was \ufeff. I used the code System.out.println( "\\u" + Integer.toHexString(str.charAt(0) | 0x10000).substring(1) ); to print out the offending Unicode value.
Once I had the offending Unicode value, I replaced it in the first line of my file before continuing to read. The business logic of that section:
String str = reader.readLine().trim();
str = str.replace("\ufeff", "");
This solved the problem, and I was able to go on processing the file without any issues. I added trim() just in case of leading or trailing whitespace; you can do that or not, based on your specific needs.
Answer 8 (score: 0)
Notepad++ is a good tool for converting UTF-8 encoding to UTF-8 (BOM) encoding.
https://notepad-plus-plus.org/downloads/
UTF8BOMTester.java
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;

public class UTF8BOMTester {
public static void main(String[] args) throws FileNotFoundException, IOException {
File file = new File("test.txt");
boolean same = UTF8BOMInputStream.isSameEncodingType(file);
System.out.println(same);
if (same) {
UTF8BOMInputStream is = new UTF8BOMInputStream(file);
BufferedReader br = new BufferedReader(new InputStreamReader(is, "UTF-8"));
System.out.println(br.readLine());
}
}
static void bytesPrint(byte[] b) {
for (byte a : b)
System.out.printf("%x ", a);
}}
UTF8BOMInputStream.java
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class UTF8BOMInputStream extends InputStream {
byte[] SYMBLE_BOM = { (byte) 0xEF, (byte) 0xBB, (byte) 0xBF };
FileInputStream fis;
final boolean isSameEncodingType;
public UTF8BOMInputStream(File file) throws IOException {
FileInputStream fis=new FileInputStream(file);
byte[] symble=new byte[3];
fis.read(symble);
bytesPrint(symble);
isSameEncodingType=isSameEncodingType(symble);
if(isSameEncodingType)
this.fis=fis;
else
this.fis=null;
}
@Override
public int read() throws IOException {
return fis.read();
}
void bytesPrint(byte[] b) {
for (byte a : b)
System.out.printf("%x ", a);
}
boolean bytesCompare(byte[] a, byte[] b) {
if (a.length != b.length)
return false;
for (int i = 0; i < a.length; i++) {
if (a[i] != b[i])
return false;
}
return true;
}
boolean isSameEncodingType(byte[] symble) {
return bytesCompare(symble,SYMBLE_BOM);
}
public static boolean isSameEncodingType(File file) throws IOException {
return (new UTF8BOMInputStream(file)).isSameEncodingType;
}
}
Answer 9 (score: 0)
Here is my code for reading CSV files in most character sets. It should cover 99% of the cases.
try(InputStream inputStream = new FileInputStream(csvFile);){
BOMInputStream bomInputStream = new BOMInputStream(inputStream ,ByteOrderMark.UTF_8, ByteOrderMark.UTF_16LE, ByteOrderMark.UTF_16BE, ByteOrderMark.UTF_32LE, ByteOrderMark.UTF_32BE);
Charset charset;
if(!bomInputStream.hasBOM()) charset = StandardCharsets.UTF_8;
else if(bomInputStream.hasBOM(ByteOrderMark.UTF_8)) charset = StandardCharsets.UTF_8;
else if(bomInputStream.hasBOM(ByteOrderMark.UTF_16LE)) charset = StandardCharsets.UTF_16LE;
else if(bomInputStream.hasBOM(ByteOrderMark.UTF_16BE)) charset = StandardCharsets.UTF_16BE;
else { throw new Exception("The charset of the file " + csvFile + " is not supported.");}
try(Reader streamReader = new InputStreamReader(bomInputStream, charset);
BufferedReader bufferedReader = new BufferedReader(streamReader);) {
for(String line; (line = bufferedReader.readLine()) != null; ) {
String[] columns = line.split(",");
//read csv columns
}
}
}