import base64
import json
import os
import re
import sys
import time
import urllib
import urllib.request

from lxml import etree
import requests
def getPage(base_url):
    """Fetch *base_url*, strip <script>/<style> blocks, and append each
    top-nav link's href to /home/output/crawler_result.csv.

    Args:
        base_url: URL to download.

    Returns:
        False if anything fails; None on success (original contract).
    """
    try:
        # 'with' closes the HTTP response deterministically (original leaked it).
        with urllib.request.urlopen(base_url) as page:
            content = page.read().decode("utf-8", "ignore").lower()
        # Raw strings so the backslashes are regex escapes, not Python escapes.
        re_script = re.compile(r'<\s*script[\S\s]*<\s*/\s*script\s*>', re.I)  # strip scripts
        re_style = re.compile(r'<\s*style[^>]*>[^<]*<\s*/\s*style\s*>', re.I)  # strip styles
        content = re_script.sub('', content)
        content = re_style.sub('', content)
        selector = etree.HTML(content.encode("utf-8", 'ignore'))
        # XPath returns the href attribute values of the nav links directly.
        menu_items = selector.xpath("/html/body/header/div/ul[@id='head_nav_list']/li/a/@href")
        for item in menu_items:
            writefile("/home/output/crawler_result.csv", item)
    except Exception:
        print("Failed to read from %s." % base_url)
        print(sys.exc_info())
        return False
def writefile(filename, content):
    """Append *content* followed by a newline to *filename*.

    Args:
        filename: path of the file to append to (created if missing).
        content: text to write.

    Returns:
        False if the write fails with an I/O error; None on success
        (original contract — callers treat non-False as success).
    """
    try:
        # 'with' guarantees the handle is closed even if write() raises;
        # explicit encoding avoids platform-dependent defaults.
        with open(filename, 'a', encoding='utf-8') as fp:
            fp.write(content + "\n")
    except OSError:
        # Narrowed from the original bare 'except:' so programming errors
        # (e.g. passing a non-string) are no longer silently swallowed.
        return False
# Script entry: timestamp is captured once, then a single crawl is attempted.
now = time.strftime('%Y-%m-%d %X', time.localtime(time.time()))
try:
    # Hard-coded placeholder URL from the exercise; getPage handles its own
    # download failures and only raises on unexpected errors.
    url = '1'
    getPage(url)
except Exception as e:
    # Log the failure with a timestamp, then pause briefly before exiting.
    info = '%s\nError: %s' % (now, e)
    writefile('Error.log', info)
    print(info)
    time.sleep(1)
# test3
# First published 2022-07-01 09:49:44 (blog-paste footer, not part of the program)