#!/usr/bin/env python
# -*- coding:utf-8 -*-
from time import ctime
import urllib2 # Python中的cURL库
import time # 时间函数库,包含休眠函数sleep()
import threading
# Target article URLs whose view counters this script inflates.
url = (
    'https://blog.csdn.net/zhuangmezhuang/article/details/82415522',
    'https://blog.csdn.net/zhuangmezhuang/article/details/82379849',
    'https://blog.csdn.net/zhuangmezhuang/article/details/82379542',
)
# Fake Referer values so each request looks like an organic click-through
# from a Baidu search results page or a jd.com landing page.
refererData = 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=84053098_3_dg&wd=%E4%BA%AC%E4%B8%9' \
              'C&oq=zhuangmezhuang&rsv_pq=cef42c6d000106b0&rsv_t=3944F%2FSUrb9ANKFFfjJaevuvmElUpvSXgs' \
              'Oyu4%2BSG8N7TLy3smIHFP31nemUoSZQSLbVXw&rqlang=cn&rsv_enter=1&rsv_sug3=25&rsv_sug1=13&rsv_s' \
              'ug7=101&rsv_sug2=0&inputT=5120&rsv_sug4=5727'
refererData2 = 'https://www.jd.com/?cu=true&utm_source=baidu-search&utm_medium=cpc&utm_campaign=t_262767352' \
               '_baidusearch&utm_term=69805951198_0_4ebf589c0df640df8a7b564203334975'
refererData3 = 'https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&tn=84053098_3_dg&wd=pachong&oq=%25E7%2588%2' \
               '5AC%25E8%2599%25AB&rsv_pq=fddc136f00035534&rsv_t=03c5tHg6CnTEDO8dj4xzDOrR%2FKdUZ8wtnujC3PQ' \
               'XSXJH%2FyRYso3wjPd%2BWuycvcsHO74%2BNQ&rqlang=cn&rsv_enter=0'
# One referer per URL, paired up positionally in main().
refererData_one = [refererData, refererData2, refererData3]
def bush_request(url, refererData):
    """Endlessly GET *url* with a spoofed Referer to bump its view counter.

    url -- the article URL to request.
    refererData -- value sent as the Referer header, faking a search click.
    This function never returns; it loops until its thread is killed.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
        ,
        'Cookie': '_message_m=23yegwleahbzf4fy5a05grgr; uuid=e7680a5d-2824-45d9-ac7a-06289c3d3cd8; avh=53945000%2c52282490; dc_tos=os5x0v; dc_session_id=1498493448566'
        ,
        'Referer': refererData
    }
    count = 0  # number of requests issued so far
    # BUG FIX: data must be None for a GET -- urllib2 turns the request into a
    # POST whenever data is not None, and the original passed '' (empty POST).
    request = urllib2.Request(url, None, headers)
    while True:  # deliberately endless: keep refreshing the page
        try:
            rec = urllib2.urlopen(request)  # fetch the article page
            rec.close()  # BUG FIX: release the socket; the body is never read
        except Exception as e:
            # A transient network error must not kill this worker thread;
            # report it and fall through to the normal sleep before retrying.
            print('request failed: %s' % e)
        count += 1
        print(count)  # show progress (parenthesized form works on Py2 and Py3)
        # Pacing: within each group of 6 requests, five gaps of 31s and one of
        # 61s, so the traffic looks less like a bot to the server.
        if count % 6:
            time.sleep(31)
        else:
            time.sleep(61)
def main():
    """Spawn one worker thread per article URL, each with its own Referer.

    Pairs url[i] with refererData_one[i]; every worker runs bush_request,
    which never returns, so the join() loop below blocks indefinitely.
    """
    print('starting at:', ctime())
    threads = []
    # BUG FIX: the original passed refererData[i], which indexes *characters*
    # of the single string refererData ('h', 't', 't'); refererData_one is
    # the intended per-URL list of referers.
    for article_url, referer in zip(url, refererData_one):
        t = threading.Thread(target=bush_request, args=(article_url, referer))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # blocks forever: bush_request loops endlessly
    print('continue...')
if __name__ == '__main__':
    main()
# Note on multithreaded programming: the URL/referer pairing here can be
# many-to-many or one-to-many.