ASP.NET采集系统万能正则表达式

2010-08-28 10:52:45 来源:西部e网

由于经常要写一些采集的程序,下面的三个函数是采集程序中很常用的函数,姑且叫作“采集系统万能正则表达式”吧。

第一个://获取页面的html源码
 public  string GetHtmlSource(string Url, string charset)
        
{
            
if (charset == "" || charset == null) charset = "gb2312";
            
string text1 = "";
            
try
            
{
                HttpWebRequest request1 
= (HttpWebRequest)WebRequest.Create(Url);
                HttpWebResponse response1 
= (HttpWebResponse)request1.GetResponse();
                Stream stream1 
= response1.GetResponseStream();
                StreamReader reader1 
= new StreamReader(stream1, Encoding.GetEncoding(charset));
                text1 
= reader1.ReadToEnd();
                stream1.Close();
                response1.Close();
            }

            
catch (Exception exception1)
            
{
            }

            
return text1;
        }

第二个:截取字符串

/// <summary>
/// Returns the text captured between wordsBegin and wordsEnd. Both markers are
/// inserted verbatim into the regex, so regex metacharacters in them are active.
/// When several matches exist the LAST one wins; "" is returned when none match.
/// </summary>
public string SniffwebCode(string code, string wordsBegin, string wordsEnd)
{
    string captured = "";
    Regex pattern = new Regex(wordsBegin + @"(?<title>[\s\S]+?)" + wordsEnd,
                              RegexOptions.Compiled | RegexOptions.IgnoreCase);
    Match m = pattern.Match(code);
    while (m.Success)
    {
        // Each iteration overwrites the previous capture, so the last match survives.
        captured = m.Groups["title"].ToString();
        m = m.NextMatch();
    }
    return captured;
}

第三个:截取网址

/// <summary>
/// Collects every fragment captured between wordsBegin and wordsEnd into an
/// ArrayList, in match order. Both markers are inserted verbatim into the
/// regex, so regex metacharacters in them are active. Empty list when no match.
/// </summary>
public ArrayList SniffwebCodeReturnList(string code, string wordsBegin, string wordsEnd)
{
    ArrayList results = new ArrayList();
    Regex pattern = new Regex(wordsBegin + @"(?<title>[\s\S]+?)" + wordsEnd,
                              RegexOptions.Compiled | RegexOptions.IgnoreCase);
    Match m = pattern.Match(code);
    while (m.Success)
    {
        results.Add(m.Groups["title"].ToString());
        m = m.NextMatch();
    }
    return results;
}

全部代码如下:

using System;
using System.Collections.Generic;
using System.Text;
using System.Data;
using System.Data.OleDb;
using System.IO;
using System.Text.RegularExpressions;
using System.Text;
using System.Collections;
using System.Net;
namespace getWeb
{
    public class DBconn
    {
      //   public string dbConnString = @"User ID=sa;Data Source=.;Password=sa;Initial Catalog=GetWeb;Provider=SQLOLEDB.1";
        public string dbConnString = @"provider=microsoft.jet.oledb.4.0;data source=Getweb.mdb";
        public static string GetSource(string Url, string charset)
        {
            if (charset == "" || charset == null) charset = "gb2312";
            string text1 = "";
            try
            {
                Stream stream1 = new WebClient().OpenRead(Url);
                text1 = new StreamReader(stream1, Encoding.GetEncoding(charset)).ReadToEnd();
                stream1.Close();
            }
            catch (Exception exception1)
            {
            }
            return text1;
        }

        public  string GetHtmlSource(string Url, string charset)
        {
            if (charset == "" || charset == null) charset = "gb2312";
            string text1 = "";
            try
            {
                HttpWebRequest request1 = (HttpWebRequest)WebRequest.Create(Url);
                HttpWebResponse response1 = (HttpWebResponse)request1.GetResponse();
                Stream stream1 = response1.GetResponseStream();
                StreamReader reader1 = new StreamReader(stream1, Encoding.GetEncoding(charset));
                text1 = reader1.ReadToEnd();
                stream1.Close();
                response1.Close();
            }
            catch (Exception exception1)
            {
            }
            return text1;
        }

        public string Get_Http(string a_strUrl, int timeout)
        {
            string strResult;

            try
            {
                HttpWebRequest myReq = (HttpWebRequest)HttpWebRequest.Create(a_strUrl);
                myReq.Timeout = timeout;
                HttpWebResponse HttpWResp = (HttpWebResponse)myReq.GetResponse();

                Stream myStream = HttpWResp.GetResponseStream();

                StreamReader sr = new StreamReader(myStream, Encoding.Default);
                StringBuilder strBuilder = new StringBuilder();
                while (-1 != sr.Peek())
                {
                    strBuilder.Append(sr.ReadLine() + "\r\n");
                }

                strResult = strBuilder.ToString();
            }
            catch (Exception exp)
            {
                strResult = "错误:" + exp.Message;
            }

            return strResult;

        }

        //获取页面内容后,分析页面中连接地址取到要抓取的url:
        //处理页面标题和链接
        public string SniffwebCode(string code, string wordsBegin, string wordsEnd)
        {
            string NewsTitle = "\";
            Regex regex1 = new Regex("" + wordsBegin + @"(?<title>[\s\S]+?)" + wordsEnd + "", RegexOptions.Compiled | RegexOptions.IgnoreCase);
            for (Match match1 = regex1.Match(code); match1.Success; match1 = match1.NextMatch())
            {
                NewsTitle = match1.Groups["title"].ToString();
            }
            return NewsTitle;

        }


        public ArrayList SniffwebCodeReturnList(string code, string wordsBegin, string wordsEnd)
        {
            ArrayList urlList = new ArrayList();
            //string NewsTitle = "\";
            Regex regex1 = new Regex("" + wordsBegin + @"(?<title>[\s\S]+?)" + wordsEnd + "", RegexOptions.Compiled | RegexOptions.IgnoreCase);
            for (Match match1 = regex1.Match(code); match1.Success; match1 = match1.NextMatch())
            {
                urlList.Add(match1.Groups["title"].ToString());
            }
            return urlList;

        }
    

    }
}

关键词:ASP.NET