# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from requests.exceptions import RequestException
import time
import random
import MySQLdb
import threading
import socket
import math
socket.setdefaulttimeout(60)  # Set a timeout on the whole socket layer; later socket use in this project inherits it
glock = threading.Lock()  # global lock shared by worker threads
CATEGORY_URL = ['http://www.we123.com/gzh/onclick/']  # region/category listing links
all_url = []  # intermediate list of collected links
ALL_URLS = []  # all detail-page links
proxy_list = []  # proxy IP pool
URL = 'http://www.we123.com'
PAGE_URL = []  # all pagination links
# Fetch the proxy IP pool
def get_ip():
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}
    url = 'http://http-webapi.zhimaruanjian.com'  # Zhima proxy API works well: stable and inexpensive
    resp = requests.get(url, headers=headers)
    obj = resp.json()  # JSON body, expected to be a list of {'ip': ..., 'port': ...} records
    for ip in obj:
        arr = 'http://' + str(ip['ip']) + ':' + str(ip['port'])
        proxy_list.append(arr)
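
# A minimal sketch (not part of the original listing) of how the pool above can be
# consumed safely once several crawler threads are running: access to the shared
# proxy_list goes through glock so a refill and a read cannot interleave. The
# helper name take_proxy() is hypothetical.
def take_proxy():
    with glock:  # glock guards the shared pool across threads
        if not proxy_list:
            get_ip()  # refill when the pool runs dry
        return random.choice(proxy_list) if proxy_list else None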
# Fetch the page source for a URL
def get_html(url):
    user_agent_list = [
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3538.400 QQBrowser/9.6.12501.400',
        'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'
    ]
    headers = {'User-Agent': random.choice(user_agent_list)}  # rotate the User-Agent per request
    # Proxies: a free proxy usually dies within minutes, so swap in your own, e.g.
    # proxy_list = ["http://27.192.185.62:3252"]
    # (The original listing breaks off here; the fetch below is a plausible completion.)
    proxy_ip = random.choice(proxy_list) if proxy_list else None
    proxies = {'http': proxy_ip} if proxy_ip else None
    try:
        resp = requests.get(url, headers=headers, proxies=proxies, timeout=30)
        resp.encoding = 'utf-8'
        if resp.status_code == 200:
            return resp.text
    except RequestException:
        pass  # dead proxy or network hiccup; the caller may retry
    return None
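
# Sketch (assumption: the '/gzh/' link filter and page structure are illustrative,
# the real markup may differ): a worker that fetches a category listing via
# get_html(), extracts detail-page links with BeautifulSoup, and appends them to
# the shared ALL_URLS under glock, which is why both globals exist above.
def collect_links(category_url):
    html = get_html(category_url)
    if html is None:
        return
    soup = BeautifulSoup(html, 'html.parser')
    links = [URL + a['href'] for a in soup.find_all('a', href=True)
             if a['href'].startswith('/gzh/')]  # hypothetical detail-page filter
    with glock:  # serialize writes to the shared list
        ALL_URLS.extend(links)

# Usage: one thread per category page.
# threads = [threading.Thread(target=collect_links, args=(u,)) for u in CATEGORY_URL]
# for t in threads: t.start()
# for t in threads: t.join()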