I am using Java to download Japanese search-result pages from Yahoo. I have already set the encoding to UTF-8, but the page string I download differs from what I get in a browser: all the Japanese characters have turned into spaces. Where is the problem?
Here is the code I use to download the page:

public static void main(String[] args) throws UnsupportedEncodingException {
String strEncoding = "UTF-8";
System.out.println(strEncoding);
String strText = getHtmlText("http://search.yahoo.com/search?ei=UTF-8&&fr=yfp-t-501&fp_ip=CN&vm=p&b=1&n=10&va_vt=any&vo_vt=any&ve_vt=any&vp_vt=any&vd=m3&vf=pdf&fl=1&vl=lang_ja&vs=&p=123+"
, 30 * 1000, strEncoding, null, null);
System.out.println(strText);
}

public static String getHtmlText(String strUrl, int timeout, String strEnCoding, String cookies, Proxy proxy) {
if (strUrl == null || strUrl.length() == 0) {
return null;
}
StringBuffer strHtml = null;
String strLine = "";
HttpURLConnection httpConnection = null; // can be declared as HttpURLConnection here
InputStream urlStream = null;
BufferedInputStream buff = null;
BufferedReader br = null;
boolean isError = false;
try {
// connect to the network and fetch the page source
URL url = new URL(strUrl);
if (proxy != null) {
httpConnection = (HttpURLConnection) url.openConnection(proxy);
}
else {
httpConnection = (HttpURLConnection) url.openConnection();
}
httpConnection.addRequestProperty("User-Agent", "IcewolfHttp/1.0");
httpConnection.addRequestProperty("Accept",
"www/source; text/html; image/gif; */*");
httpConnection.addRequestProperty("Accept-Language", "");
if (cookies != null) {
httpConnection.setRequestProperty("Cookie", cookies);
}
httpConnection.setConnectTimeout(timeout);
httpConnection.setReadTimeout(timeout);
urlStream = httpConnection.getInputStream();
buff = new BufferedInputStream(urlStream);
Reader r = null;
if (strEnCoding == null || strEnCoding.compareTo("null") == 0) {
r = new InputStreamReader(buff);
} else {
try {
r = new InputStreamReader(buff, strEnCoding);
} catch (UnsupportedEncodingException e) {
r = new InputStreamReader(buff);
}
}
br = new BufferedReader(r);
strHtml = new StringBuffer("");
while ((strLine = br.readLine()) != null) {
strHtml.append(strLine + "\r\n");
}
}catch (java.lang.OutOfMemoryError out) {
System.out.println("内存占用:" + strHtml.capacity());
out.printStackTrace();
}
catch (Exception e) {
e.printStackTrace();
System.out.println(e.getClass() + "下载网页" + strUrl + "失败");
isError = true;
} finally{
try{
if (httpConnection != null)
httpConnection.disconnect();
if (br != null)
br.close();
if (buff != null)
buff.close();
if (urlStream != null)
urlStream.close();
}catch(Exception e){
System.out.println(e.getClass() + "下载网页" + strUrl + "连接关闭失败");
return null;
}
}
if (strHtml == null || isError)
return null;
return fromNCR(strHtml.toString());
}
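For this kind of encoding problem it can also help to download the raw bytes first and decode them in a separate step, so the same bytes can be re-decoded with different charsets while debugging. Below is a minimal sketch of that idea; the class and helper name (RawDownload, fetchBytes) and the shortened example URL are my own, not part of the code above:

import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class RawDownload {
    // Hypothetical helper: fetch the response body as raw bytes and leave the
    // charset decision to the caller.
    public static byte[] fetchBytes(String strUrl, int timeout) throws Exception {
        HttpURLConnection conn = (HttpURLConnection) new URL(strUrl).openConnection();
        conn.setConnectTimeout(timeout);
        conn.setReadTimeout(timeout);
        InputStream in = conn.getInputStream();
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] buf = new byte[4096];
        int n;
        while ((n = in.read(buf)) != -1) {
            out.write(buf, 0, n);
        }
        in.close();
        conn.disconnect();
        return out.toByteArray();
    }

    public static void main(String[] args) throws Exception {
        byte[] bytes = fetchBytes("http://search.yahoo.com/search?ei=UTF-8&p=123", 30 * 1000);
        // Try decoding the same bytes with different charsets to see which one looks right:
        // new String(bytes, "UTF-8"), new String(bytes, "Shift_JIS"), ...
        System.out.println("downloaded " + bytes.length + " bytes");
    }
}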
pageEncoding="Shift_JIS" %>
request.setCharacterEncoding("Shift_JIS");
I then tried the same thing with a Korean page:

public static void main(String[] args) throws UnsupportedEncodingException {
final String strUrl =
"http://www.google.cn/search?as_q=&hl=zh-CN&newwindow=1&rlz=1B3GGGL_zh-CNCN282CN282&num=10&btnG=Google+%E6%90%9C%E7%B4%A2&as_epq=Accept+Charset&as_oq=&as_eq=&lr=lang_ko&cr=&as_ft=i&as_filetype=&as_qdr=all&as_occt=any&as_dt=i&as_sitesearch=&as_rights=";
String strEncoding = /*JHtmlUpdateCheck.getEncoding(strUrl
, 30 * 1000);*/"EUC-KR";
System.out.println(strEncoding);
String strText = JQueryBase.getHtmlText(strUrl
, 30 * 1000, strEncoding, null, null);
System.out.println(strText);
}
This is a real headache of a problem.
I think if you want this to be generic, your only option is to get the character set from the response:
String contentType = httpConnection.getContentType();
strEnCoding=contentType.substring(contentType.lastIndexOf("charset=")+8);
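As a sketch of that idea, a slightly more defensive version could look like the following; the helper name detectCharset, the lower-casing and the fallback parameter are my own additions, not code from this thread:

import java.net.HttpURLConnection;

public class CharsetSniffer {
    // Hypothetical helper: extract the charset from a Content-Type header such as
    // "text/html; charset=EUC-KR"; fall back to a caller-supplied default if absent.
    public static String detectCharset(HttpURLConnection conn, String fallback) {
        String contentType = conn.getContentType();   // may be null
        if (contentType == null) {
            return fallback;
        }
        int idx = contentType.toLowerCase().indexOf("charset=");
        if (idx < 0) {
            return fallback;
        }
        String cs = contentType.substring(idx + "charset=".length()).trim();
        cs = cs.replace("\"", "");                     // strip quotes if present
        int semi = cs.indexOf(';');
        if (semi >= 0) {
            cs = cs.substring(0, semi).trim();
        }
        return cs;
    }
}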
new InputStreamReader(buff, "EUC-KR");
None of the Korean characters are recognized; they all turn into "?".
Save the string you receive as an HTML file and open it in a browser; it is not garbled.
The reason it is garbled when you run it directly is that your development tool does not support the EUC-KR character set.
I need to convert all downloaded content into UTF-8 encoded strings and then run some matching and information-extraction steps on them. If pages in different languages use different encodings, I cannot do that next step. Whether I work with byte arrays or with a Reader stream, an encoding conversion has to happen at some point.
Even though the development tool does not support EUC-KR, it surely supports UTF-8. Is there a way to convert the EUC-KR encoded byte array I download into a UTF-8 encoded byte array, and then output it as a string that displays correctly in the console?
I tried System.out.println(new String(str.getBytes("EUC-KR"), "UTF-8"));
It doesn't work. I don't know what to do now.
byte[] EUC_KR_byteArray = ...; // make sure this really is an EUC-KR encoded byte array
Then the code to convert it into a UTF-8 encoded byte array is:
byte[] UTF_8_byteArray= new String(EUC_KR_byteArray,"EUC-KR").getBytes("UTF-8");
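A small self-contained sketch of this round trip; the hard-coded sample bytes (assumed to be the EUC-KR encoding of the Korean word "한국") and the output file name are my own illustration, not from this thread:

import java.io.FileOutputStream;
import java.io.OutputStream;

public class EucKrToUtf8 {
    public static void main(String[] args) throws Exception {
        // Bytes assumed to be EUC-KR encoded (here the Korean word "한국").
        byte[] eucKrBytes = { (byte) 0xC7, (byte) 0xD1, (byte) 0xB1, (byte) 0xB9 };
        // Decode with the source charset, then re-encode as UTF-8.
        String text = new String(eucKrBytes, "EUC-KR");
        byte[] utf8Bytes = text.getBytes("UTF-8");
        // Opening this file in a UTF-8 aware viewer shows the original Korean text.
        OutputStream out = new FileOutputStream("euc_kr_sample.txt");
        out.write(utf8Bytes);
        out.close();
    }
}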
Answer: after debugging this myself, reading your Korean page with the "EUC-KR" encoding and then converting it to "UTF-8" works without any problem.
Reference code:

import java.io.*;
import java.net.*;
public class Encoding {
public static void main(String[] args) throws UnsupportedEncodingException, Exception {
String strEncoding = "EUC-KR";
System.out.println(strEncoding);
String strText = getHtmlText("http://www.google.cn/search?as_q=&hl=zh-CN&newwindow=1&rlz=1B3GGGL_zh-CNCN282CN282&num=10&btnG=Google+%E6%90%9C%E7%B4%A2&as_epq=Accept+Charset&as_oq=&as_eq=&lr=lang_ko&cr=&as_ft=i&as_filetype=&as_qdr=all&as_occt=any&as_dt=i&as_sitesearch=&as_rights="
, 30 * 1000, strEncoding, null, null);
// Convert the Korean page content to UTF-8 and save it to e:/utf8_hanwen.html; viewed in a browser under UTF-8, everything is fine.
PrintWriter pw=new PrintWriter(
new OutputStreamWriter(new FileOutputStream("e:/utf8_hanwen.html"),"UTF-8"));
pw.print(strText);
pw.flush();
pw.close();
System.out.println(strText); // Viewed under GBK on Windows, parts show up as ?? because Korean has no corresponding characters in GBK.
// Run on Linux (internal encoding UTF-8), this statement prints the Korean text correctly, without any ??.
}
public static String getHtmlText(String strUrl, int timeout, String strEnCoding, String cookies, Proxy proxy) {
if (strUrl == null || strUrl.length() == 0) {
return null;
}
StringBuffer strHtml = null;
String strLine = "";
HttpURLConnection httpConnection = null; // can be declared as HttpURLConnection here
InputStream urlStream = null;
BufferedInputStream buff = null;
BufferedReader br = null;
boolean isError = false;
try {
// connect to the network and fetch the page source
URL url = new URL(strUrl);
if (proxy != null) {
httpConnection = (HttpURLConnection) url.openConnection(proxy);
}
else {
httpConnection = (HttpURLConnection) url.openConnection();
}
httpConnection.addRequestProperty("User-Agent", "IcewolfHttp/1.0");
httpConnection.addRequestProperty("Accept",
"www/source; text/html; image/gif; */*");
httpConnection.addRequestProperty("Accept-Language", "");
if (cookies != null) {
httpConnection.setRequestProperty("Cookie", cookies);
}
httpConnection.setConnectTimeout(timeout);
httpConnection.setReadTimeout(timeout);
urlStream = httpConnection.getInputStream();
buff = new BufferedInputStream(urlStream);
Reader r = null;
if (strEnCoding == null || strEnCoding.compareTo("null") == 0) {
r = new InputStreamReader(buff);
} else {
try {
r = new InputStreamReader(buff, strEnCoding);
} catch (UnsupportedEncodingException e) {
r = new InputStreamReader(buff);
}
}
br = new BufferedReader(r);
strHtml = new StringBuffer("");
while ((strLine = br.readLine()) != null) {
strHtml.append(strLine + "\r\n");
}
}catch (java.lang.OutOfMemoryError out) {
System.out.println("内存占用:" + strHtml.capacity());
out.printStackTrace();
}
catch (Exception e) {
e.printStackTrace();
System.out.println(e.getClass() + "下载网页" + strUrl + "失败");
isError = true;
} finally{
try{
if (httpConnection != null)
httpConnection.disconnect();
if (br != null)
br.close();
if (buff != null)
buff.close();
if (urlStream != null)
urlStream.close();
}catch(Exception e){
System.out.println(e.getClass() + "下载网页" + strUrl + "连接关闭失败");
return null;
}
}
if (strHtml == null || isError)
return null;
return (strHtml.toString());
}
}

Analysis of the results:
1) Your code (the public static String getHtmlText(...) method) is fine; there is nothing wrong with it.
2) Run your program on Linux (default internal encoding UTF-8) and everything is normal: the page content printed by System.out.println(strText); including the Korean text, is correct, with no ?? or other abnormal characters.
3) Run the same program on Windows (default internal encoding GBK) and the page content printed by System.out.println(strText); including the Korean text, is garbled.
4) So on Windows, convert the page content (including the Korean text) to "UTF-8", save it to e:/utf8_hanwen.html, and open it in any UTF-8 capable browser (or an editor such as Word or MyEclipse); the page content, Korean included, displays correctly.
Conclusion: your program is fine. The garbling happens when the page content (including the Korean text) is printed with System.out.println(..): on Windows (default GBK encoding) it is garbled, on Linux (default UTF-8 encoding) it is correct. So on Windows, use approach 4) above to view it properly.
On Windows, System.out.println(...) always interprets the output as GBK internally, which is why, no matter what you do, output through System.out.println(...) ends up garbled (while on Linux, System.out.println(...) is fine).
In short: on Windows, System.out.println(...) is forced to output in GBK, and Korean has no corresponding characters in GBK, so it turns into ?? garbage.
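If console output is really needed on Windows, one workaround (not from this thread, and it only helps if the console itself can display the target charset, e.g. after running chcp 65001) is to wrap System.out in a PrintStream with an explicit encoding; a hedged sketch:

import java.io.PrintStream;

public class Utf8Console {
    public static void main(String[] args) throws Exception {
        // Assumption: the console can actually render UTF-8; otherwise the bytes
        // written are correct but the display is still wrong.
        PrintStream utf8Out = new PrintStream(System.out, true, "UTF-8");
        utf8Out.println("\uD55C\uAD6D");   // the Korean word "한국" as Unicode escapes
    }
}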
The A4 and A5 areas of GBK do indeed define Japanese characters. No wonder.