A google-hack related Python script

# -*- coding: utf-8 -*-
import urllib2, urllib
import simplejson
import json
import random

# Google dork: find Struts-style .action pages
searchstr = 'inurl:.action+filetype:action+'


def random_useragent():
    """Pick a random User-Agent so requests don't all share one fingerprint."""
    USER_AGENTS = [
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
        "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
        "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    ]
    return random.choice(USER_AGENTS)


def get_result(url):
    """Query the Google AJAX Search API and return the result URLs."""
    headers = {
        'User-Agent': random_useragent(),
    }
    try:
        request = urllib2.Request(url, None, headers)
        response = urllib2.urlopen(request)
        results = simplejson.load(response)
        info = results['responseData']['results']
    except Exception, e:
        print e
        print url
        return []
    else:
        for aa in info:
            print aa['url']
        return [aa['url'] for aa in info]


if __name__ == "__main__":
    for x in range(10):
        print "page:%s" % (x + 1)
        page = x * 8  # start offset; the API caps rsz (page size) at 8

        url = ('https://ajax.googleapis.com/ajax/services/search/web'
               '?v=1.0&q=%s&rsz=8&start=%s') % (searchstr, page)

        result = get_result(url)
        print json.dumps(result)  # dump this page's URLs as a JSON array
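
A note on the query string: searchstr above hand-writes '+' separators into the dork. A safer pattern (and the one the longer script later in this post uses) is to keep the dork as plain text and let urllib.quote do the escaping. A minimal sketch; the dork itself is just the same example:

[python]
import urllib

dork = 'inurl:.action filetype:action'  # same Struts-hunting dork, unescaped
url = ('https://ajax.googleapis.com/ajax/services/search/web'
       '?v=1.0&rsz=8&start=0&q=' + urllib.quote(dork))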

    

Scraping Google search result pages with Python multithreading
2013-04-09, by cscmaker

(1) Scraping Google search links with urllib2 + BeautifulSoup

Recently, a project I was involved in needed to process Google search results, and I had previously studied Python's tools for handling web pages. In practice I used urllib2 and BeautifulSoup to fetch the pages, but when scraping Google search results I found that processing the raw source of the results page directly yields a lot of "dirty" links.

The figure below shows the results of a search for "titanic james":

[Figure: screenshot of the Google results page. Links marked in red are unwanted; links marked in blue are the ones to scrape.]
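
For reference, a minimal sketch of that first urllib2 + BeautifulSoup attempt, assuming the bs4 package is installed (the original doesn't show this code). It dumps every <a> href on the results page, which is exactly where the "dirty" links come from:

[python]
import urllib2
from bs4 import BeautifulSoup

url = 'http://www.google.com/search?q=titanic+james'
request = urllib2.Request(url, None, {'User-Agent': 'Mozilla/5.0'})
html = urllib2.urlopen(request).read()

soup = BeautifulSoup(html)
for a in soup.findAll('a'):
    href = a.get('href')
    if href:
        print href  # organic results are mixed in with navigation, cache and ad links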

These "dirty" links could of course be filtered out with hand-written rules, but that drives up the program's complexity. Just as I was glumly writing filtering rules, a classmate pointed out that Google probably provides an API for this, and it suddenly all made sense.
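
For illustration, the kind of rule filtering that was being written before switching to the API; the patterns here are hypothetical and far from complete, which is exactly the problem with this approach:

[python]
import re

# hypothetical rules: drop relative links and links into Google's own pages
DIRTY = re.compile(r'^(/|#)|google\.[a-z.]+/(search|preferences|advanced_search)')

def clean(links):
    return [u for u in links if not DIRTY.search(u)]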

(2) Google Web Search API + multithreading

The documentation gives an example of performing a search from Python:


[python]
import urllib2
import simplejson

# The request also includes the userip parameter which provides the end
# user's IP address. Doing so will help distinguish this legitimate
# server-side traffic from traffic which doesn't come from an end-user.
url = ('https://ajax.googleapis.com/ajax/services/search/web'
       '?v=1.0&q=Paris%20Hilton&userip=USERS-IP-ADDRESS')

request = urllib2.Request(
    url, None, {'Referer': 'http://example.com'})  # enter the URL of your site here
response = urllib2.urlopen(request)

# Process the JSON string.
results = simplejson.load(response)
# now have some fun with the results...
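
The part of the response this article cares about sits under responseData.results; each entry is a dict whose fields include url, titleNoFormatting and content. Continuing from the snippet above:

[python]
# print each hit's URL and its plain-text title
for item in results['responseData']['results']:
    print item['url'], '-', item['titleNoFormatting']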



In practice you may need to fetch many Google pages, so multithreading is also needed to spread the scraping work. For a detailed reference on the Google Web Search API, see the documentation (which describes the Standard URL Arguments). Also note in particular: the rsz parameter in the URL must be a value of 8 or less; anything greater than 8 returns an error!
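
A defensive way to catch that (and other API failures) is to check responseStatus before touching responseData. A sketch continuing from the documentation example; the exact responseDetails text is whatever the API returns, not guaranteed here:

[python]
results = simplejson.load(response)
if results.get('responseStatus') != 200:
    # e.g. rsz > 8 comes back as an error payload instead of result data
    print 'API error:', results.get('responseDetails')
else:
    for item in results['responseData']['results']:
        print item['url']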

(3) Code implementation

The implementation still has problems: it runs, but its robustness is poor and it needs further improvement. I'd be grateful if the experts out there could point out mistakes (I'm new to Python).


[python]
# -*- coding: utf-8 -*-
import urllib2, urllib
import simplejson
import os, time, threading
import shutil

# input the keywords
keywords = raw_input('Enter the keywords: ')

# define rnum_perpage (results per page, capped at 8 by the API) and pages
rnum_perpage = 8
pages = 8

# thread worker: fetch one page of API results, then download every hit
def thread_scratch(url, rnum_perpage, page):
    url_set = []
    try:
        request = urllib2.Request(url, None, {'Referer': 'http://www.sina.com'})
        response = urllib2.urlopen(request)
        # Process the JSON string.
        results = simplejson.load(response)
        info = results['responseData']['results']
    except Exception, e:
        print 'error occurred'
        print e
    else:
        for minfo in info:
            url_set.append(minfo['url'])
            print minfo['url']
    # process the links
    i = 0
    for u in url_set:
        try:
            request_url = urllib2.Request(u, None, {'Referer': 'http://www.sina.com'})
            request_url.add_header('User-agent', 'CSC')
            response_data = urllib2.urlopen(request_url).read()
            # strip the HTML down to text (needs the project-local html_filter module)
            #content_data = html_filter.filter_tags(response_data)
            # write to file; the page offset keeps filenames unique across threads
            filenum = i + page
            filename = dir_name + '/related_html_' + str(filenum)
            print '  write start: related_html_' + str(filenum)
            f = open(filename, 'w+', -1)
            f.write(response_data)
            #print content_data
            f.close()
            print '  write down: related_html_' + str(filenum)
        except Exception, e:
            print 'error occurred 2'
            print e
        i = i + 1
    return

# create the output directory, replacing any stale one
dir_name = 'related_html_' + urllib.quote(keywords)
if os.path.exists(dir_name):
    print 'directory exists, deleting it'
    # the original called a project-local common.delete_dir_or_file helper;
    # shutil.rmtree is the standard-library equivalent
    shutil.rmtree(dir_name)
os.makedirs(dir_name)

# fetch the pages, one thread per page of results
print 'start to scratch web pages:'
for x in range(pages):
    print "page:%s" % (x + 1)
    page = x * rnum_perpage
    url = ('https://ajax.googleapis.com/ajax/services/search/web'
           '?v=1.0&q=%s&rsz=%s&start=%s') % (urllib.quote(keywords), rnum_perpage, page)
    print url
    t = threading.Thread(target=thread_scratch, args=(url, rnum_perpage, page))
    t.start()

# the main thread waits for all worker threads to finish
main_thread = threading.currentThread()
for t in threading.enumerate():
    if t is main_thread:
        continue
    t.join()
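
One concrete robustness improvement worth making (a sketch, not part of the original code): give every download a timeout so a dead host can't hang its worker thread forever. urllib2.urlopen accepts a timeout argument from Python 2.6 on:

[python]
# same fetch as in thread_scratch, but bounded to 10 seconds (value is arbitrary)
response_data = urllib2.urlopen(request_url, timeout=10).read()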


       