# ---- urllib: POST with urlencoded data and custom headers ----
import urllib
import urllib2  # needed below; the original used urllib2 before importing it

url = 'http://www.someserver.com/cgi-bin/register.cgi'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
values = {'name' : 'Michael Foord',
'location' : 'Northampton',
'language' : 'Python' }
headers = { 'User-Agent' : user_agent } # headers as a dict
data = urllib.urlencode(values) # returns an application/x-www-form-urlencoded string
# Supplying data makes this a POST request; the headers mimic a browser
req = urllib2.Request(url, data,headers)
response = urllib2.urlopen(req)
the_page = response.read()
#print the_page
# A Request object can also attach headers later via add_header(key, val)
req = urllib2.Request('http://www.example.com/')
req.add_header('Referer', 'http://www.python.org/')
r = urllib2.urlopen(req)
# An opener can carry default headers applied to every request it makes
opener=urllib2.build_opener()
opener.addheaders=[('User-agent','Mozilla/5.0')]
opener.open('http://www.example.com')
# ---- urllib2: basic GET requests ----
import urllib2

# urlopen() accepts a plain URL string...
response = urllib2.urlopen('https://mail.dlut.edu.cn/coremail/')
html = response.read() # read the HTML response body
#print html
# ...or a Request object
req=urllib2.Request('https://mail.dlut.edu.cn/coremail/')
response=urllib2.urlopen(req)
the_page=response.read()
#print the_page
# install_opener() makes the opener the default used by urllib2.urlopen()
req = urllib2.Request('http://www.python.org/')
opener=urllib2.build_opener()
urllib2.install_opener(opener)
f = opener.open(req)
url="https://mail.dlut.edu.cn/coremail/"
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64)\
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
proxy_handler=urllib2.ProxyHandler({
'http':'xxx.xxx.com:8080',
'http':'yyy.yyy.com:8080'
})
opener=urllib2.build_opener(proxy_handler)
request=urllib2.Request(url=url,headers=headers)
urllib2.install_opener(opener)
response=opener.open(request)
html=response.read()
#print html
try:
response=urllib2.urlopen(url)
print response
except urllib2.URLError as e:
print e.reason
try:
response=urllib2.urlopen(url)
except urllib2.HTTPError as e:
print('code:'+e.code+'\n')
print('reason:'+e.reason+'\n')
print('headers:'+e.headers+'\n')
requests
import requests
url="https://mail.dlut.edu.cn/coremail/"
headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64)\
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
param={'key1':'value1','key2':'value2'}
data={'key':'value'}
response=requests.request('GET',url,params=param,data=data,headers=headers,timeout=5)
#response属性
print response.status_code #HTTP响应报文中的状态码
print response.text #HTTP响应报文内容,用字符串表示
print response.encoding #HTTP响应报文头中的内容编码
print response.content #HTTP响应报文内容,用二进制表示
BeautifulSoup
from bs4 import BeautifulSoup
html="""
<html><head><title>Hello World!</title></head>
<body>
<p class="p_style1">paragraph1</p>
<p class="p_style2">
<a href="https://mail.dlut.edu.cn/coremail/">email</a>
</p></body></html>"""
soup=BeautifulSoup(html,'lxml')
print soup.prettify() #格式化输出soup对象中的内容
print soup.title #对象名.标签名形式获取相应对象
print type(soup.title)
print soup.p.name
print soup.p.attrs
print soup.p.attrs['class']
print soup.p['class']
print soup.p.has_attr('class')
print soup.p.string
print type(soup.p.string)
#过滤方法
print soup.find(name='p')
print soup.find_all(name='p')
print soup.select('.p_style2')
#常用属性
print soup.head.children
for c in soup.head.children: #对直接子节点进行迭代访问
print c
print soup.head.next_siblings
for s in soup.head.next_siblings: #向后迭代当前节点的兄弟节点
print s