Sina Weibo sphinx crawler
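A Python 2 script that logs in to the mobile Weibo site (weibo.cn) with urllib2 and cookielib, then walks users' follow lists and writes one host_uid:followed_uid:name line per followed user to a file named weibo.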

#coding=utf-8
import urllib2
import cookielib
from bs4 import BeautifulSoup
import sys
import urllib
import lxml.html as HTML
reload(sys)
sys.setdefaultencoding('utf8')

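# Output file: one 'hostuid:uid:name' line is written for every followed user found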
fl=open('weibo','w')
hosturl="http://login.weibo.cn/login/?ns=1&revalid=2&backURL=http%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt="

headers={'Host':'login.weibo.cn',
         'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0',
         'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
         'Accept-Language':'en-US,en;q=0.5',
         'Accept-Encoding':'gzip, deflate',
         'Cookie':'SUB=_2AkMiYG3bdcNhrAJXn_oWz2_lZI1H-jjGieLBAH-8JWlrHRqF_uLgtSWJEuPvAj30W5XwiIVLDA..; SUHB=0YhANvDiUi7Fdv; _T_WM=126711f664c5f3edec7f393f5c9aa798',
         'Connection':'keep-alive'}

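# Install a cookie-aware opener so the login session cookies are reused on all later requests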
cj=cookielib.MozillaCookieJar()
cookie_support=urllib2.HTTPCookieProcessor(cj)

opener=urllib2.build_opener(cookie_support)
urllib2.install_opener(opener)

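# Fetch the mobile login page once so the cookie jar picks up the session cookies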
request=urllib2.Request(hosturl)
request.add_header('User-Agent','Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0')
response=urllib2.urlopen(request).read()

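# Fetch the login form again to scrape the per-request tokens embedded in it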
header={'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0'}
urlrand="http://login.weibo.cn/login/?ns=1&revalid=2&backURL=http%3A%2F%2Fweibo.cn%2F&backTitle=%CE%A2%B2%A9&vt="
req=urllib2.Request(urlrand,"",{'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0'})
logininfo=urllib2.urlopen(req).read()
#bs=BeautifulSoup(str(logininfo))

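# The form action looks like <form action="?rand=XXXXXXXXX">; the password input name carries a
# random suffix and the hidden 'vk' value changes on every load, so all three are extracted with
# fixed string offsets (fragile, but it matched the page layout at the time)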
index=logininfo.index('form action="?rand')
rand=logininfo[index+19:index+28]
index2=logininfo.index('password_')

pwd=logininfo[index2:index2+13]
index3=logininfo.index('name="vk"')
vk=logininfo[index3+17:index3+37]

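# Submit the login form; the scraped password field name and vk token are sent back with the credentials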
data=urllib.urlencode({'mobile':'wangzhen8828@gmail.com',pwd:'1234asdf','remember':'on','backURL':'http://weibo.cn','backTitle':'微博','tryCount':'','vk':vk,'submit':'登录','encoding':'utf-8'})
url='http://login.weibo.cn/login/?rand='+rand
request=urllib2.Request(url,data,header)
text=urllib2.urlopen(request).read()


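# Crawl follow lists starting from one seed user; urlset is the frontier of follow-list URLs still to visit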
urlset=set()
url='http://weibo.cn/1672334780/follow?vt=4'
urlset.add(url)

while urlset:
        url=urlset.pop()
        hostuid=url[16:26]
        request=urllib2.Request(url,"",header)
        text=urllib2.urlopen(request).read()
        soup=BeautifulSoup(text,'lxml')
        sources=soup.find_all('table')
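        # Each followed user sits in its own <table>: the uid is pulled from the 'uid=' parameter
        # of the row's second link, the display name from the row's first link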
        for source in sources:
                try:
                        index=str(BeautifulSoup(str(source.tr.contents[1])).find_all('a')[1]).index('uid')
                        uid=str(BeautifulSoup(str(source.tr.contents[1])).find_all('a')[1])[index+4:index+14]
                        print uid
                        name= source.tr.contents[1].a.string
                        print name
                        fl.write(hostuid+":"+uid+":"+name+"\n")
                        urlset.add('http://weibo.cn/'+uid+'/follow?vt=4')
                except:
                        continue
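        # The page-jump form (div class 'pa') carries the total page count in one of its inputs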
        try:
                count=soup.find('div',class_='pa')

                num=int(count.contents[0].contents[0].contents[2]['value'])
        except:
                continue
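        # Fetch pages 2..num of the same follow list and extract users the same way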
        if num>1:
                for pagenum in range(2,num+1):
                        request=urllib2.Request(url+"&page="+str(pagenum),"",header)
                        text=urllib2.urlopen(request).read()
                        soup=BeautifulSoup(text,'lxml')
                        sources=soup.find_all('table')
                        for source in sources:
                                try:
                                        index=str(BeautifulSoup(str(source.tr.contents[1])).find_all('a')[1]).index('uid')
                                        uid=str(BeautifulSoup(str(source.tr.contents[1])).find_all('a')[1])[index+4:index+14]
                                        print uid
                                        name=source.tr.contents[1].a.string
                                        print name
                                        fl.write(hostuid+":"+uid+":"+name+"\n")
                                        urlset.add('http://weibo.cn/'+uid+'/follow?vt=4')
                                except:
                                        continue

                                             
