# Module-level accumulator for scraped rows; parse_detail() below declares
# ``global result`` and (presumably) appends parsed rows to it — confirm
# against the truncated tail of parse_detail.
result = pd.DataFrame()
# Scrape the page content (抓取网页内容)
def scrape_page(page, url):
    """Navigate *page* to *url* and block until network activity settles.

    Parameters
    ----------
    page : a Playwright-style Page object (duck-typed: needs ``goto`` and
        ``wait_for_load_state``) — assumed, TODO confirm against caller.
    url : str
        Absolute URL to load.

    Fixes vs. original: restored indentation and replaced the curly
    quotes (‘networkidle’ — a syntax error) with ASCII quotes.
    """
    page.goto(url)
    # 'networkidle' waits until there are no in-flight network requests,
    # so dynamically-loaded tables are present before parsing.
    page.wait_for_load_state('networkidle')
# Parse the index page and collect detail URLs (获取解析内容)
def parse_index(page):
    """Collect absolute detail-page URLs from the index table, then parse it.

    Reads anchors in the second cell of each table row, resolves each
    relative ``href`` against the appropriate base URL, appends the
    result to the module-level ``url_list``, and finally delegates to
    ``parse_detail(page)``.

    Fixes vs. original: restored indentation, replaced curly quotes
    (syntax errors) with ASCII quotes, translated comments to English,
    and collapsed the base-URL branch into a conditional expression.
    """
    # Anchors in the second <td> of each row hold the region links.
    elements = page.query_selector_all('tr td:nth-child(2) a')
    # A town-level table means hrefs are relative to BASE_URL_2;
    # otherwise fall back to BASE_URL_1 (both module-level constants,
    # defined outside this chunk — TODO confirm).
    base_url = BASE_URL_2 if page.query_selector('table.towntable') else BASE_URL_1
    for element in elements:
        part_of_url = element.get_attribute('href')
        # urljoin turns the relative href into an absolute URL.
        url_list.append(urljoin(base_url, part_of_url))
    parse_detail(page)
def parse_detail(page):
global result
global count
data = page.query_selector_all(‘.villagetable tbody tr, .countytable tbody tr, .towntable tbody tr’)
for i in range(1,len(data)):
acode = data[i].query_selector(‘td:nth-child(1)’).text_content()
region = data[i].query_selector(‘td:nth-child