关闭

采集方法

296人阅读 评论(0) 收藏 举报

详细出处参考:http://www.jb51.net/article/21468.htm

最近做一个网站,该网站需要添加4000多条产品信息,如果用人工方法去别的网站复制,那至少要花费半个月时间才能完成,所以我想办法用C#做出来了一个网页数据采集软件

//提取产品列表页中产品最终页的网页
// Extract the final product-page links from a product list page.
// Reads the list URL from textBox1 and the site domain from textBox2;
// writes one absolute link per line into textBox5.
private void button1_Click(object sender, EventArgs e)
{
    if (textBox1.Text.Trim() == "" || textBox2.Text.Trim() == "")
    {
        MessageBox.Show("网址和域名不能为空!", "信息提示", MessageBoxButtons.OK, MessageBoxIcon.Information);
        return;
    }
    try
    {
        // Fetch the list page the user actually entered (the value we just
        // validated) rather than a hard-coded debugging URL.
        string Html = inc.GetHtml(textBox1.Text.Trim());
        //ArrayList al = inc.GetMatchesStr(Html, "<a[^>]*?>.*?</a>");
        // Capture the value of every href attribute (quoted with ' or ").
        ArrayList al = inc.GetMatchesStr(Html, @"href\s*=\s*(?:['""\s](?<1>[^'""]*)['""])");

        StringBuilder sb = new StringBuilder();
        foreach (object item in al)
        {
            // Strip surrounding quotes and the leading "href=" so only the URL remains.
            string a = item.ToString().Replace("\"", "").Replace("'", "");
            a = Regex.Replace(a, "href=", "", RegexOptions.IgnoreCase | RegexOptions.Multiline);
            if (a.StartsWith("/"))           // site-relative link: prefix the domain
                a = textBox2.Text.Trim() + a;
            if (!a.StartsWith("http://"))    // ensure an absolute URL
                a = "http://" + a;
            sb.Append(a + "\r\n");
        }
        textBox5.Text = sb.ToString();       // one extracted link per line

        MessageBox.Show("共提取" + al.Count.ToString() + "个链接", "信息提示", MessageBoxButtons.OK, MessageBoxIcon.Information);
    }
    catch (Exception err)
    {
        MessageBox.Show("提取出错!原因:" + err.Message, "信息提示", MessageBoxButtons.OK, MessageBoxIcon.Information);
    }
}


//把采集的产品页面html代码进行字符串处理,提取需要的代码,最后保存到本地一个access数据库中,同时提取产品图片地址并自动现在图片到本地images文件夹下
// Scrape each collected product page, extract the needed fields from its
// html, store the rows into the local Access table Tb_Product, and download
// each product image into the local Images folder. The string offsets used
// below are specific to the target site's markup.
private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
{
    // Rebuild the product table from scratch.
    Database.ExecuteNonQuery("delete from Tb_Product");
    DataTable dt2 = new DataTable();
    OleDbConnection conn = new OleDbConnection(Database.ConnectionStrings);
    OleDbDataAdapter da = new OleDbDataAdapter("select * from Tb_Product", conn);
    OleDbCommandBuilder cb = new OleDbCommandBuilder(da);
    da.Fill(dt2);
    dt2.Rows.Clear();
    BackgroundWorker worker = (BackgroundWorker)sender; // used to drive the progress bar
    string[] Urls = textBox5.Text.Trim().ToLower().Replace("\r\n", ",").Split(',');
    StringBuilder ErrorStr = new StringBuilder();
    string html = "", ImageDir = AppDomain.CurrentDomain.BaseDirectory + "Images\\";
    // Visit every collected URL in turn.
    for (int i = 0; i < Urls.Length; i++)
    {
        try
        {
            if (!worker.CancellationPending)
            {
                if (Urls[i] == "")
                    continue; // skip blank lines; aborting here would also skip da.Update
                html = inc.GetHtml(Urls[i]); // fetch the page html
                DataRow NewRow = dt2.NewRow();
                // Product name: contents of the <title> tag.
                string ProductName = html.Substring(html.IndexOf("<title>") + 7);
                NewRow["ProductName"] = ProductName.Remove(ProductName.IndexOf("</title>")).Trim();
                // Product model id: text following "Model:" inside the title.
                NewRow["ModelId"] = NewRow["ProductName"].ToString().Substring(NewRow["ProductName"].ToString().IndexOf("Model:") + 6).Trim();
                // Product description — offsets must be adjusted per target site.
                string Introduce = html.Substring(html.IndexOf("Product Details") + 26);
                Introduce = Introduce.Remove(Introduce.IndexOf("</table>") + 8).Trim();
                NewRow["Introduce"] = Introduce;

                // Download the product image referenced by the page.
                string ProductImage = html.Substring(html.IndexOf("align=center><img") + 17);
                ProductImage = textBox2.Text.Trim() + ProductImage.Substring(ProductImage.IndexOf("src=\"") + 5);
                ProductImage = ProductImage.Remove(ProductImage.IndexOf("\""));
                try
                {
                    inc.DownFile(ProductImage, ImageDir + ProductImage.Substring(ProductImage.LastIndexOf("/") + 1));
                }
                catch (Exception)
                {
                    // Best-effort: a failed image download is logged, not fatal.
                    ErrorStr.Append("下载图片失败,图片地址:" + ImageDir + ProductImage.Substring(ProductImage.LastIndexOf("/") + 1) + "\r\n");
                }

                dt2.Rows.Add(NewRow);
                worker.ReportProgress((i + 1) * 100 / Urls.Length, i);
                // NOTE(review): this touches a ToolStrip item from the worker
                // thread; safer to move it into the ProgressChanged handler — confirm.
                toolStripStatusLabel1.Text = "处理进度:" + (i + 1).ToString() + "/" + Urls.Length.ToString();
            }
        }
        catch (Exception err)
        {
            ErrorStr.Append("采集错误:" + err.Message + ";网址:" + Urls[i] + "\r\n");
        }
    }
    da.Update(dt2); // push the collected rows into the Access database
    DataBind(dt2);
    ShowError(ErrorStr.ToString());
}
/// <summary>
/// ASPX页面生成静态Html页面,作者:郑少群
/// </summary>
public static string GetHtml(string url)
{
StreamReader sr = null;
string str = null;
//读取远程路径
WebRequest request = WebRequest.Create(url);
HttpWebResponse response = (HttpWebResponse)request.GetResponse();
sr = new StreamReader(response.GetResponseStream(), Encoding.GetEncoding(response.CharacterSet));
str = sr.ReadToEnd();
sr.Close();
return str;
}

// 提取HTML代码中的网址
public static ArrayList GetMatchesStr(string htmlCode, string strRegex)
{
ArrayList al = new ArrayList();
Regex r = new Regex(strRegex, RegexOptions.IgnoreCase | RegexOptions.Multiline);
MatchCollection m = r.Matches(htmlCode);
for (int i = 0; i < m.Count; i++)
{
bool rep = false;
string strNew = m[i].ToString();
// 过滤重复的URL
foreach (string str in al)
{
if (strNew == str)
{
rep = true;
break;
}
}
if (!rep) al.Add(strNew);
}
al.Sort();
return al;
}
/// <summary>
/// Downloads the resource at <paramref name="Url"/> and saves it to the
/// local file <paramref name="Path"/>, overwriting any existing file.
/// </summary>
/// <param name="Url">Absolute url of the file to download.</param>
/// <param name="Path">Local destination path.</param>
public static void DownFile(string Url, string Path)
{
    HttpWebRequest request = (HttpWebRequest)WebRequest.Create(Url);
    // Dispose the response and stream deterministically (the original leaked both).
    using (HttpWebResponse response = (HttpWebResponse)request.GetResponse())
    using (Stream stream = response.GetResponseStream())
    // FileMode.Create truncates an existing file; OpenOrCreate would leave
    // stale trailing bytes when the new download is shorter than the old file.
    using (FileStream fs = new FileStream(Path, FileMode.Create, FileAccess.Write))
    {
        byte[] buffer = new byte[4096];
        int n;
        while ((n = stream.Read(buffer, 0, buffer.Length)) > 0)
        {
            fs.Write(buffer, 0, n);
        }
    }
}

 

正文开始:

        假如我们采集网址:http://info.laser.hc360.com/list/z_news_yw.shtml 上的新闻,要求采集标题、时间、内容、单篇文章如果有翻页则采集完全。

      这种类型的采集就是从指定网页获得新闻列表(即url),然后通过其url获得新闻详情,这是一种很常见的采集方式,有可能到很多页面上去采集,所以我们可以采用接口来构造基类。

     首先定义  IGatherInfo.cs

Code

         接口定义了三个成员:gatherTime采集时间,GatherUrlList()从指定网址抽取新闻Url,GatherNewsDetail()读取新闻详细内容。

      下面我们分析一下该程序中可能要用的公共方法,定义在 GatherInfoBase.cs

            1.时间转换函数string DateToString()

Code

            2.获取远程文件源代码 string GetRemoteHtmlCode(string url)

Code

            3.从HtmlCode截取字符串 string SniffwebCode(string code, string wordsBegin, string wordsEnd),用于抽取标题,时间,正文

Code

            4.替换HTML源代码 string RemoveHTML(string HtmlCode),用于将抽取到的正文内容去Html

Code

            5.更改文件名方法string changFileName(string filename, string addStr),利用其分页规律定义其增加的字符,

 

Code

     因为程序比较小,所以我采用access来存取数据,创建GatherInfo_laser_hc360.db,添加两个表

      GatherUrls 表:strUrl 备注,strGahterTime 文本

     GatherInfos 表:strUrl 备注,upTime 文本,title 文本,content 备注

用数据集实现数据连接,代码中可见。

最后我们来实现对所给网址的采集,直接给出代码

Code

Code

             6.获取页面连接

Code


接下来我们定义一个 NewsDetail.cs

0
0

查看评论
* 以上用户言论只代表其个人观点,不代表CSDN网站的观点或立场
    个人资料
    • 访问:304805次
    • 积分:6498
    • 等级:
    • 排名:第3957名
    • 原创:278篇
    • 转载:65篇
    • 译文:1篇
    • 评论:35条
    最新评论