1. 读取 xlsx 文件（openpyxl）
# Read every cell of the first worksheet in 2.xlsx and print the sheet as a
# tab-separated grid, one worksheet row per output line.
import openpyxl

wb = openpyxl.load_workbook('2.xlsx')
sheet = wb.worksheets[0]
print(sheet)

# iter_rows(values_only=True) walks the used range (rows 1..max_row,
# columns 1..max_column) without a per-cell sheet.cell(...) lookup;
# output is identical to the manual double-index loop.
for row in sheet.iter_rows(values_only=True):
    for value in row:
        print(str(value) + '\t', end="")
    print()
2. 读取 xls 文件（xlrd）
# Demo of reading an .xls workbook with xlrd: single cells, row/column
# slices, and a full cell-by-cell walk of the sheet.
import xlrd

workbook = xlrd.open_workbook(r'E:\python\haha.xls')  # path to the workbook
sheet = workbook.sheets()[0]                          # first worksheet, by position
print(sheet)
print(sheet.nrows)                # number of rows

single = sheet.cell(1, 0).value   # one cell: second row, first column
print(single)
print(sheet.row_values(1, 3))     # second row, fourth column onward
print(sheet.col_values(1))        # every value in the second column

row_count = sheet.nrows
col_count = sheet.ncols

# Walk every cell, echoing its coordinates, and collect all values
# (stringified) into one flat list.
collected = []
for r in range(row_count):
    for c in range(col_count):
        cell_text = str(sheet.row_values(r)[c])
        print(r, c, cell_text)
        collected.append(cell_text)
print(collected)

# Columns 2-5 of every row, skipping the header row.
for r in range(1, row_count):
    print(sheet.row_values(r)[1:5])
3. 用 urllib 发送 POST 请求
# POST a left-ticket query to the 12306 booking site, spoofing a desktop
# browser via Origin/User-Agent headers so the server does not reject the
# request as coming from a script.
from urllib.request import urlopen
from urllib.request import Request
from urllib import parse

QUERY_URL = 'https://kyfw.12306.cn/otn/leftTicket/init'

# Form fields copied from the browser: DevTools -> Network -> Doc ->
# pick the request -> Headers -> Form Data (choose the origin and
# destination stations on the page first, otherwise the entry is absent).
form_fields = [
    ("leftTicketDTO.from_station_name", "北京"),
    ("leftTicketDTO.to_station_name", "哈尔滨"),
    ("leftTicketDTO.from_station", "BJP"),
    ("leftTicketDTO.to_station", "HBB"),
    ("leftTicketDTO.train_date", "2018-04-10"),
    ("back_train_date", "2018-04-10"),
    ("flag", "dc"),
    ("purpose_code", "ADULT"),
    ("pre_step_flag", "index"),
]

req = Request(QUERY_URL)
req.add_header("Origin", "https://kyfw.12306.cn")
req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.3964.2 Safari/537.36")

# url-encode the pairs and send them as the (utf-8) request body.
body = parse.urlencode(form_fields).encode('utf-8')
resp = urlopen(req, data=body)
print(resp.read().decode("utf-8"))
4. 用 BeautifulSoup 解析 HTML 文件
# BeautifulSoup basics: parse a small HTML document and extract tags,
# elements-by-id, tag lists and nested text in the most common ways.
from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

soup = BeautifulSoup(html_doc, "html.parser")
# print(soup.prettify())
print(soup.title.string)       # text of the <title> tag
print(soup.a)                  # first <a> tag in the document
print(soup.find(id="link2"))   # element whose id is "link2"; append .string for its text

# find_all replaces the deprecated camelCase alias findAll; it returns a
# list, so .string cannot be used directly — iterate instead.
print(soup.find_all("a"))
for link in soup.find_all("a"):
    print(link.string)         # see the official docs for more usage

# Text under the matching <p>: when a tag contains child tags, .string
# returns None, so get_text() is needed to collect all nested text.
print(soup.find("p", {"class": "story"}).get_text())