python登录并爬取淘宝信息代码示例

本文主要分享关于python登录并爬取淘宝信息的相关代码,还是挺不错的,大家可以了解下。

#!/usr/bin/env python

# -*- coding:utf-8 -*-

from selenium import webdriver

import time

import datetime

import traceback

import logging

import os

from selenium.webdriver.common.action_chains import ActionChains

import codecs

#登录

def login(driver, site):
    """Log in to Taobao via the login form reachable from *site*.

    driver: a Selenium WebDriver instance (Firefox elsewhere in this file)
    site:   URL of the page exposing the "please log in" link

    The credentials below are hard-coded placeholders -- replace before use.
    The fixed sleeps are crude page-load waits; WebDriverWait would be more
    robust, but the original timing behaviour is kept.
    """
    driver.get(site)
    time.sleep(5)
    try:
        # Click the "please log in" link.
        driver.find_element_by_class_name("h").click()
        time.sleep(5)
        # Enter account name and password.
        driver.find_element_by_id("TPL_username_1").send_keys(u"yourusername")
        time.sleep(5)
        driver.find_element_by_id("TPL_password_1").send_keys(u"yourpsd")
        time.sleep(5)
        # Submit the login form and give the redirect time to complete.
        driver.find_element_by_id("J_SubmitStatic").click()
        time.sleep(30)
    except Exception:
        # Was a bare "except:" that silently printed "failure"; keep the
        # best-effort semantics but surface the actual error for debugging.
        print(u"failure")
        traceback.print_exc()

def crawlmarket(driver, filename, site):
    """Crawl item links (anchors with class J_ItemLink) from a Taobao
    "markets" page.

    Collected link texts are written to *filename* (UTF-8, one per line).
    If nothing was scraped, "filename,site" is appended to error.txt instead.

    NOTE(review): quits the driver at the end, so the same driver cannot be
    reused for a second call -- see jiexi(), which loops over many lines.
    """
    driver.get(site)
    driver.maximize_window()
    time.sleep(10)
    driver.refresh()
    time.sleep(10)
    items = driver.find_elements_by_xpath("//a[@class='J_ItemLink']")
    # No items usually means we were bounced to the login page.  Log in,
    # reload the target page and query again.  (The original logged in but
    # forgot to re-query, so it always landed in the error branch.)
    if not items:
        login(driver, site)
        time.sleep(30)
        driver.get(site)
        time.sleep(10)
        items = driver.find_elements_by_xpath("//a[@class='J_ItemLink']")
    collected = []
    for item in items:
        text = item.text.strip()
        if text:
            print(text)
            collected.append(text + '\n')
    result = "".join(collected)
    if result:
        out = codecs.open(filename, 'w', 'utf-8')
        try:
            out.write(result)
        finally:
            out.close()
    else:
        # Nothing scraped: record the failed (filename, site) pair.
        info = filename + "," + site
        print(info)
        ferror = codecs.open("error.txt", 'a', 'utf-8')
        try:
            # Newline added: the original fused consecutive error records.
            ferror.write(info + '\n')
        finally:
            ferror.close()
    driver.quit()

def crawltaobaosousuo(driver, filename, site):
    """Crawl result links (anchors with class J_ClickStat) from a Taobao
    search page.

    Same output contract as crawlmarket(): link texts go to *filename*
    (UTF-8, one per line); failures are appended to error.txt.

    NOTE(review): quits the driver at the end, so the same driver cannot be
    reused for a second call -- see jiexi().
    """
    driver.get(site)
    driver.maximize_window()
    time.sleep(10)
    # Load the page twice and refresh: a crude way to get past redirects
    # and lazy loading, kept from the original.
    driver.get(site)
    time.sleep(30)
    driver.refresh()
    anchors = driver.find_elements_by_xpath("//a[@class='J_ClickStat']")
    collected = []
    for anchor in anchors:
        text = anchor.text.strip()
        if text:
            print(text)
            collected.append(text + '\n')
    result = "".join(collected)
    if result:
        out = codecs.open(filename, 'w', 'utf-8')
        try:
            out.write(result)
        finally:
            out.close()
    else:
        # Nothing scraped: record the failed (filename, site) pair.
        info = filename + "," + site
        print(info)
        ferror = codecs.open("error.txt", 'a', 'utf-8')
        try:
            # Newline added: the original fused consecutive error records.
            ferror.write(info + '\n')
        finally:
            ferror.close()
    driver.quit()

def jiexi(driver):
    """Dispatch crawl jobs listed in 1.txt.

    Each line of 1.txt has the form "filename,url".  URLs containing
    "markets" are handled by crawlmarket(), everything else by
    crawltaobaosousuo().

    NOTE(review): both crawl functions call driver.quit(), so with a single
    driver only the first line can actually be processed -- confirm intent.
    """
    # codecs.open with utf-8 yields decoded text directly, replacing the
    # py2-only info[0].decode("utf-8") of the original.
    f = codecs.open("1.txt", "r", "utf-8")
    try:
        for line in f:
            line = line.strip()
            if not line:
                continue  # skip blank lines instead of crashing on split
            time.sleep(60)  # throttle between jobs to reduce risk control hits
            info = line.split(",")
            filename = info[0]
            href = info[1]  # strip() above removed the trailing newline the
                            # original passed straight into driver.get()
            print(filename)
            if "markets" in href:
                crawlmarket(driver, filename, href)
            else:
                crawltaobaosousuo(driver, filename, href)
    finally:
        f.close()

if __name__ == '__main__':
    # Entry point: start a Firefox session and work through the job list
    # in 1.txt (see jiexi()).
    browser = webdriver.Firefox()
    jiexi(browser)

小结

有改进策略一起探讨,可以抓取淘宝部分网页内容,根据自己的需求改改吧,会被风控。个人觉得不登录的效果更好。

以上就是本文关于python 登录并爬取淘宝信息代码示例的全部内容,希望对大家有所帮助。感兴趣的朋友可以继续参阅本站其他相关专题。如有不足之处,欢迎留言指出。感谢朋友们对本站的支持!

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值