Fetching a Web Page's Source Code in C#

Method 1:

using System.Text;
using System.Net;

// Download the page as raw bytes, then decode them with the OS default code page.
private string getHtml(string url)
{
    WebClient myWebClient = new WebClient();
    byte[] myDataBuffer = myWebClient.DownloadData(url);
    return Encoding.Default.GetString(myDataBuffer);
}
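Note that Encoding.Default is the machine's ANSI code page, so the same bytes may decode differently on different systems. A minimal sketch of the same call with an explicit charset; the helper name and the gb2312 choice are my own illustration, not part of the original:

private string getHtmlWithEncoding(string url, string charset)
{
    WebClient myWebClient = new WebClient();
    byte[] myDataBuffer = myWebClient.DownloadData(url);
    // Decode with the page's declared charset (e.g. "gb2312") instead of the OS default.
    return Encoding.GetEncoding(charset).GetString(myDataBuffer);
}

For example: string html = getHtmlWithEncoding("http://example.com/", "gb2312"); where the URL is a placeholder.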

Method 2:

// Requires a COM reference to MSXML2 (Microsoft XML).
public string getHttp(string HttpUrl, string RefererUrl)
{
    string html = "";
    try
    {
        MSXML2.XMLHTTP Http = new MSXML2.XMLHTTPClass();
        Http.open("GET", HttpUrl, false, null, null);
        Http.setRequestHeader("Referer", RefererUrl);
        Http.setRequestHeader("Content-Type", "text/html;charset=gb2312");
        Http.send("");
        // responseBody is the raw byte array of the response.
        html = Encoding.Default.GetString((byte[])Http.responseBody);
        Http = null;
    }
    catch
    {
        // Errors are swallowed; an empty string is returned on failure.
    }
    return html;
}
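The same GET-with-Referer request can also be made in pure .NET, avoiding the MSXML2 COM dependency. A minimal sketch assuming the target page is gb2312-encoded; the method name is mine:

private string getHttpManaged(string httpUrl, string refererUrl)
{
    HttpWebRequest request = (HttpWebRequest)WebRequest.Create(httpUrl);
    request.Method = "GET";
    request.Referer = refererUrl;   // HttpWebRequest exposes the Referer header directly
    using (HttpWebResponse response = (HttpWebResponse)request.GetResponse())
    using (StreamReader reader = new StreamReader(response.GetResponseStream(), Encoding.GetEncoding("gb2312")))
    {
        return reader.ReadToEnd();
    }
}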

// Requires System.IO, System.Net, System.Text and System.Web (for HttpUtility).
public bool getweb(string strURL, out string buf)
{
    buf = "";
    try
    {
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(strURL);
        request.Method = "POST";                                    // POST request
        request.ContentType = "text/html;charset=gb2312";           // content type
        string paraUrlCoded = System.Web.HttpUtility.UrlEncode(""); // URL-encode the parameters (empty here)
        byte[] payload = System.Text.Encoding.GetEncoding("GB2312").GetBytes(paraUrlCoded); // encoded string to bytes
        request.ContentLength = payload.Length;                     // set the request's ContentLength
        Stream writer = request.GetRequestStream();                 // get the request stream
        writer.Write(payload, 0, payload.Length);                   // write the parameters into it
        writer.Close();                                             // close the request stream
        HttpWebResponse response = (HttpWebResponse)request.GetResponse(); // get the response
        Stream s = response.GetResponseStream();
        StreamReader objReader = new StreamReader(s, System.Text.Encoding.GetEncoding("GB2312"));
        string HTML = "";
        string sLine = "";
        while (sLine != null)
        {
            sLine = objReader.ReadLine();
            if (sLine != null)
                HTML += sLine;
        }
        //HTML = HTML.Replace("&lt;", "<");
        //HTML = HTML.Replace("&gt;", ">");
        buf = HTML;
        return true;
    }
    catch (Exception x)
    {
        buf = x.Message;
        return false;
    }
}
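A quick usage sketch for this overload; the URL is a placeholder:

string html;
if (getweb("http://example.com/", out html))
    Console.WriteLine(html);                       // page source on success
else
    Console.WriteLine("Request failed: " + html);  // exception message on failure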
With cookies: declare a shared CookieContainer so that cookies set by one response are sent with later requests. The overload below posts form data and threads the container through both the request and the response.

CookieContainer cc = new CookieContainer();
public bool getweb(string strURL, out string buf, string postData)
{
    buf = "";
    try
    {
        // Encode the form data (e.g. "user=a&pass=b") as ASCII bytes.
        ASCIIEncoding encoding = new ASCIIEncoding();
        byte[] data = encoding.GetBytes(postData);
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(strURL);
        request.Method = "POST";
        request.ContentType = "application/x-www-form-urlencoded";
        request.ContentLength = data.Length;
        Stream newStream = request.GetRequestStream();
        newStream.Write(data, 0, data.Length);
        newStream.Close();

        request.CookieContainer = cc;   // send any cookies collected so far

        HttpWebResponse response = (HttpWebResponse)request.GetResponse();
        cc.Add(response.Cookies);       // keep cookies set by this response
        Stream stream = response.GetResponseStream();
        string sHtml = new StreamReader(stream, System.Text.Encoding.Default).ReadToEnd();
        buf = sHtml;
        return true;
    }
    catch (Exception x)
    {
        buf = x.Message;
        return false;
    }
}
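A hypothetical login-then-fetch flow using this overload; the URLs and form fields are made up for illustration:

string page;
// The login request stores any session cookies in cc.
getweb("http://example.com/login", out page, "user=name&pass=word");
// A later request through the same container sends those cookies back.
getweb("http://example.com/member", out page, "");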
The following five variants all fetch a page and hand the source to a saveSourceCode helper (defined elsewhere in the original project).

Method 1
private string getWebresourceFile1(string url)
{
    WebClient myWebClient = new WebClient();
    byte[] myDataBuffer = myWebClient.DownloadData(url);
    string SourceCode = Encoding.Default.GetString(myDataBuffer);
    saveSourceCode(SourceCode);   // external helper that saves the source to disk
    return SourceCode;
}

Method 2
private string getWebresourceFile2(string url)
{
    HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url);
    request.Method = "GET";   // must be set before GetResponse is called
    HttpWebResponse response = (HttpWebResponse)request.GetResponse();
    Stream receiveStream = response.GetResponseStream();
    StreamReader readStream = new StreamReader(receiveStream, Encoding.Default);
    string SourceCode = readStream.ReadToEnd();
    saveSourceCode(SourceCode);
    readStream.Close();
    response.Close();
    return SourceCode;
}
Method 3
private string getWebresourceFile3(string url)
{
    WebClient wc = new WebClient();
    wc.Credentials = CredentialCache.DefaultCredentials;   // use the current Windows credentials
    byte[] pageData = wc.DownloadData(url);
    string SourceCode = Encoding.Default.GetString(pageData);
    saveSourceCode(SourceCode);
    wc.Dispose();
    return SourceCode;
}
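WebClient can also decode for you: DownloadString uses the WebClient.Encoding property. A short sketch, assuming a gb2312 page and a placeholder URL:

WebClient wc2 = new WebClient();
wc2.Encoding = System.Text.Encoding.GetEncoding("gb2312");
string source = wc2.DownloadString("http://example.com/");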

Method 4
private string getWebresourceFile4(string url)
{
    WebClient wc = new WebClient();
    wc.Credentials = CredentialCache.DefaultCredentials;
    Stream resStream = wc.OpenRead(url);   // stream the response instead of buffering it all
    StreamReader sr = new StreamReader(resStream, System.Text.Encoding.Default);
    string SourceCode = sr.ReadToEnd();
    saveSourceCode(SourceCode);
    sr.Close();
    resStream.Close();
    wc.Dispose();
    return SourceCode;
}
Method 5
private string getWebresourceFile5(string url)
{
    WebRequest request = WebRequest.Create(url);   // the base WebRequest suffices for a simple GET
    WebResponse response = request.GetResponse();
    Stream resStream = response.GetResponseStream();
    StreamReader sr = new StreamReader(resStream, System.Text.Encoding.Default);
    string SourceCode = sr.ReadToEnd();
    saveSourceCode(SourceCode);
    sr.Close();
    resStream.Close();
    return SourceCode;
}
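All five variants rely on the legacy WebClient/WebRequest APIs. On current .NET, HttpClient is the usual replacement; a minimal self-contained sketch with a placeholder URL:

using System;
using System.Net.Http;
using System.Threading.Tasks;

class FetchDemo
{
    static async Task Main()
    {
        using (HttpClient client = new HttpClient())
        {
            // GetStringAsync downloads the body and decodes it using the response's charset.
            string source = await client.GetStringAsync("http://example.com/");
            Console.WriteLine(source.Length);
        }
    }
}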