文件 (demo1/spiders/demo.py):
import scrapy
from demo1.items import Demo1Item
import urllib
from scrapy import log
# BOSS直聘网站爬虫职位
class DemoSpider(scrapy.Spider):
    # Spider name; required argument when launching the crawl
    # (e.g. `scrapy crawl demo`).
    name = 'demo'
    # Allowed domain scope: the crawler only follows URLs under this
    # domain (optional).
    allowed_domains = ['zhipin.com']
    # Initial URL(s) the spider starts crawling from.
    start_urls = ['https://www.zhipin.com/c101280600/h_101280600/?query=测试']

    def parse(self, response):
        """Parse one job-listing page; build a Demo1Item per job node."""
        # Every job posting on the page is wrapped in a div.job-primary node.
        node_list = response.xpath("//div[@class='job-primary']")
        # Used to collect all the item fields
        # items = []
        for node in node_list:
            item = Demo1Item()
            # extract() converts the xpath selector results into a list of
            # Unicode strings (possibly empty if nothing matched).
            href = node.xpath("./div[@class='info-primary']//a/@href").extract()
            job_title = node.xpath("./div[@class='info-primary']//a/div[@class='job-title']/text()").extract()
            salary = node.xpath("./div[@class='info-primary']//a/span/text()").extract()
            working_place = node.xpath("./div[@class='info-primary']/p/text()").extract()
            company_name = node.xpath("./div[@class='info-company']//a/text()").extract()
            # NOTE(review): indexing with [0] raises IndexError when an xpath
            # matches nothing — consider `.extract_first()` / `.get()` instead.
            item['href'] = href[0]
            item['job_title'] = job_title[0]
            # NOTE(review): the posted source is truncated mid-statement here;
            # the remaining field assignments (salary, working_place,
            # company_name) and the `yield item` are missing from the paste.
            item['sa
报错:
C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\python.exe "C:\Users\xieqianyun\PyCharm Community Edition 2019.2.5\helpers\pydev\pydevconsole.py" --mode=client --port=55825
import sys; print('Python %s on %s' % (sys.version, sys.platform))
sys.path.extend(['C:\\Users\\xieqianyun\\demo1', 'C:/Users/xieqianyun/demo1'])
Python 3.6.5 (v3.6.5:f59c0932b4, Mar 28 2018, 17:00:18) [MSC v.1900 64 bit (AMD64)]
Type 'copyright', 'credits' or 'license' for more information
IPython 7.10.0 -- An enhanced Interactive Python. Type '?' for help.
PyDev console: using IPython 7.10.0
Python 3.6.5 (v3.6.5:f59c0932b4, Mar 28 2018, 17:00:18) [MSC v.1900 64 bit (AMD64)] on win32
runfile('C:/Users/xieqianyun/demo1/demo1/begin.py', wdir='C:/Users/xieqianyun/demo1/demo1')
Traceback (most recent call last):
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\IPython\core\interactiveshell.py", line 3319, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-2-fc5979762143>", line 1, in <module>
runfile('C:/Users/xieqianyun/demo1/demo1/begin.py', wdir='C:/Users/xieqianyun/demo1/demo1')
File "C:\Users\xieqianyun\PyCharm Community Edition 2019.2.5\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "C:\Users\xieqianyun\PyCharm Community Edition 2019.2.5\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "C:/Users/xieqianyun/demo1/demo1/begin.py", line 3, in <module>
cmdline.execute('scrapy crawl demo'.split())
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\cmdline.py", line 145, in execute
cmd.crawler_process = CrawlerProcess(settings)
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\crawler.py", line 267, in __init__
super(CrawlerProcess, self).__init__(settings)
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\crawler.py", line 145, in __init__
self.spider_loader = _get_spider_loader(settings)
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\crawler.py", line 347, in _get_spider_loader
return loader_cls.from_settings(settings.frozencopy())
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\spiderloader.py", line 61, in from_settings
return cls(settings)
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\spiderloader.py", line 25, in __init__
self._load_all_spiders()
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\spiderloader.py", line 47, in _load_all_spiders
for module in walk_modules(name):
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\site-packages\scrapy\utils\misc.py", line 73, in walk_modules
submod = import_module(fullpath)
File "C:\Users\xieqianyun\AppData\Local\Programs\Python\Python36\lib\importlib\__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "C:\Users\xieqianyun\demo1\demo1\spiders\demo.py", line 4, in <module>
from scrapy import log
ImportError: cannot import name 'log'