python爬虫基础class 3(爬取京东商品名称和爬取股票信息)

 

# 京东笔记本
import requests
import re
import bs4

num = 0  # running index of accepted product names; shared by parsePage() and main()


def getHtmlText(url):
    """Fetch *url* and return the decoded page text, or None on request failure.

    Sends a minimal Mozilla user-agent so the site does not reject the
    default requests UA; decodes with the sniffed (apparent) encoding.
    """
    try:
        hd = {'user-agent': 'Mozilla/5.0'}
        r = requests.get(url, headers=hd, timeout=20)
        r.raise_for_status()  # turn HTTP 4xx/5xx into an exception
        r.encoding = r.apparent_encoding  # sniffed encoding beats the header here
        print(r.url)
        return r.text
    except requests.RequestException:  # narrowed from bare except: network/HTTP errors only
        print('getHtmlText error')
        return None


def parsePage(ilt, html):
    """Scan *html* for <em>…</em> runs and append (index, name) tuples to *ilt*.

    Product names and prices on the JD list page both live in <em> tags; an
    <em> whose text is longer than 15 characters is treated as a product name.
    Increments the module-level counter ``num`` for each accepted name.
    """
    try:
        global num
        plt_price = re.findall(r'<em>.*?</em>', html, flags=re.S)
        for i in plt_price:
            sun = i
            if re.search('</span>', sun):
                # drop the embedded <span>…</span> (price badge) before parsing
                sun = re.sub(r'<span.*</span>', '', sun)
            bs = bs4.BeautifulSoup(sun, 'html.parser')
            if bs.em.string is None:  # idiom: identity test against None
                continue
            if len(bs.em.string) > 15:  # heuristic: long <em> text is a product name
                s = str(bs.em.string).replace('\n', '')
                s = s.replace(' ', '')
                num += 1
                ilt.append((num, s))
    except Exception:  # narrowed from bare except; still best-effort per page
        print('parsePage error')


def printGoodsList(ilt):
    """Print each (index, name) tuple in *ilt* and mirror the lines to JD.txt.

    UTF-8 is forced so Chinese product names survive on platforms whose
    default locale encoding cannot represent them.
    """
    with open('JD.txt', 'wt', encoding='utf-8') as fp:
        for item in ilt:
            line = '{:<5} {}'.format(item[0], item[1])  # single source for both outputs
            print(line)
            fp.write(line + '\n')


def main():
    """Crawl 100 pages of the JD laptop listing and dump product names to JD.txt."""
    global num
    infoilt = list()
    url_B = 'https://list.jd.com/list.html?cat=670,671,672&page='
    url_E = '&sort=sort_totalsales15_desc&trans=1&JL=6_0_0#J_main'
    for i in range(100):  # crawl 100 listing pages
        try:
            html = getHtmlText(url_B + str(i + 1) + url_E)
            if html is None:
                # fetch failed: skip instead of feeding None to the parser
                continue
            parsePage(infoilt, html)
            print(num)
        except Exception:  # narrowed from bare except; keep crawling remaining pages
            print('run error {} page'.format(i + 1))
            continue
    printGoodsList(infoilt)


# Script entry for the JD crawler: report any unhandled failure instead of crashing.
try:
    main()
except:
    print('run error main')
# A股 证券之星 http://quote.stockstar.com
# -*- coding: utf-8 -*-
import re
import requests
import bs4
import time


def GetHtmlText(url):
    """Fetch *url* and return the decoded page text, or None on request failure."""
    try:
        hd = {'user-agent': 'Mozilla/5.0'}
        r = requests.get(url, headers=hd, timeout=30)
        print('status.code:{}'.format(r.status_code))
        r.raise_for_status()  # raise on HTTP 4xx/5xx
        r.encoding = r.apparent_encoding  # decode with the sniffed encoding
        print(r.url)
        return r.text
    except requests.RequestException:  # narrowed from bare except: network/HTTP only
        print('getHtmlText error')
        return None


def ParsePage(ilt, html):
    """Parse the stockstar ranking <tbody> in *html*, appending one tuple per stock.

    Each tuple is (code, name, col2, col3, col4, col5) taken from the row's
    <td> cells; spaces are stripped from the name so columns line up.
    """
    try:
        soup = bs4.BeautifulSoup(html, 'html.parser')
        for line in soup.find('tbody').children:
            if isinstance(line, bs4.element.Tag):  # skip whitespace NavigableStrings
                tds = line.find_all('td')
                s = str(tds[1].a.string).replace(' ', '')
                ilt.append(
                    (tds[0].a.string, s, tds[2].string, tds[3].string, tds[4].string, tds[5].string))
    except Exception:  # narrowed from bare except; skip a malformed page, keep crawling
        print('ParsePage error')


def WriteText(ilt):
    """Pretty-print the stock rows in *ilt* and write them to a timestamped file.

    Output file is '<MM_DD_HH_MM_>stock.txt'.  chr(12288) (CJK fullwidth space)
    is the fill character so the Chinese columns align.
    """
    try:
        ti = time.gmtime()
        # '_' separator: the original '|' is not a legal filename character on Windows
        s = time.strftime('%m_%d_%H_%M_', ti)
        header = '{1:{0}^8}\t{2:{0}^8}\t{3:^10}\t{4:^10}\t{5:^10}\t{6:^10}'.format(
            chr(12288), '代码', '简称', '流通市值(万元)', '总市值(万元)', '流通股本(万元)', '总股本(万元)')
        row_fmt = '{1:{0}^10}\t{2:{0}^10}\t{3:^18}\t{4:^18}\t{5:^18}\t{6:^18}'
        # utf-8 keeps the Chinese headers portable across platform locales
        with open(s + 'stock.txt', 'wt', encoding='utf-8') as fp:
            print(header)
            fp.write(header + '\n')
            for line in ilt:
                row = row_fmt.format(chr(12288), line[0], line[1],
                                     line[2], line[3], line[4], line[5])
                print(row)
                fp.write(row + '\n')
    except Exception:  # narrowed from bare except (I/O or malformed row)
        print('WriteText error')


def main():
    """Crawl 10 pages of the stockstar A-share ranking and write them to disk."""
    infoilt = list()
    url_start, url_end = 'http://quote.stockstar.com/stock/ranklist_a_3_1_', '.html'
    for i in range(10):
        try:
            html = GetHtmlText(url_start + str(i + 1) + url_end)
            if html is None:
                # fetch failed: nothing to parse for this page
                continue
            ParsePage(infoilt, html)
        except Exception:  # narrowed from bare except; continue with next page
            print('error in page {}'.format(i + 1))
    WriteText(infoilt)


# Script entry for the stockstar crawler: report (don't raise) any failure.
try:
    main()
except:
    print('main error')
# 股域网 https://hq.gucheng.com/gpdmylb.html
# -*- coding: utf-8 -*-
import requests
import re
import bs4
import time
import os
import traceback


def GetHtmlText(url):
    """Fetch *url* and return the raw page text, or None on request failure.

    No re-decoding is applied (the apparent_encoding line was deliberately
    disabled by the author for this site).
    """
    r = None  # BUG FIX: ensure r exists even when requests.get itself raises
    try:
        hd = {'user-agent': 'Mozilla/5.0'}
        r = requests.get(url, headers=hd, timeout=30)
        r.raise_for_status()
        #r.encoding = r.apparent_encoding
        print(r.url)
        return r.text
    except requests.RequestException:  # narrowed from bare except
        print('getHtmlText error')
        # original printed r.status_code unconditionally and crashed with
        # NameError when the request never completed; guard before printing
        if r is not None:
            print('status.code:{}'.format(r.status_code))
        return None


def ParsePage(html, ilt):
    """Extract one stock's quote details from a gucheng stock page.

    Appends [basic, price, HL, data] to *ilt* where
      basic = [name, code, trading state, update time]
      price = [current price, change, change percent]
      HL    = [day high, day low]
      data  = [open, prev close, limit up, limit down, turnover rate, amplitude,
               volume, turnover, inner vol, outer vol, bid ratio, P/E (dyn),
               P/B, circulating cap, total cap]
    On any parse failure the traceback is printed and nothing is appended.
    """
    try:
        plt_basic = re.search(
            r'<header class="stock_title">.*?<h1>(.*?)</h1>.*?<h2>(.*?)</h2>.*?<em>(.*?)</em>.*?<time>(.*?)</time>',
            html, flags=re.S)
        basic = list(plt_basic.groups())  # [name, code, state, updated]
        plt_price = re.search(
            r'<div class="s_price">.*?em class=".*?">(.*?)</em>.*?<em class=".*?">(.*?)</em>.*?<em class=".*?">(.*?)</em>.*?</div>',
            html, flags=re.S)
        price = list(plt_price.groups())  # [price, change, change percent]
        plt_HL = re.search(
            r'<dl class="s_height">.*?<dt>最高</dt>.*?<dd class=".*?">(.*?)</dd>.*?<dt>最低</dt>.*?<dd class=".*?">(.*?)</dd>.*?</dl>',
            html, flags=re.S)
        HL = list(plt_HL.groups())  # [high, low]
        plt_data = re.search(
            r'<div class="s_date">.*?<dt>今开</dt>.*?<dd class=".*?">(.*?)</dd>.*?<dt>昨收</dt>.*?<dd>(.*?)</dd>.*?</dl>'
            r'.*?<dl>.*?<dt>涨停</dt>.*?<dd class=".*?">(.*?)</dd>.*?<dt>跌停</dt>.*?<dd class=".*?">(.*?)</dd>.*?</dl>'
            r'.*?<dl>.*?<dt>换手率</dt>.*?<dd>(.*?)</dd>.*?<dt>振幅</dt>.*?<dd>(.*?)</dd>.*?</dl>'
            r'.*?<dl>.*?<dt>成交量</dt>.*?<dd>(.*?)</dd>.*?<dt>成交额</dt>.*?<dd>(.*?)</dd>.*?</dl>'
            r'.*?<dl>.*?<dt>内盘</dt>.*?<dd>(.*?)</dd>.*?<dt>外盘</dt>.*?<dd>(.*?)</dd>.*?</dl>'
            r'.*?<dl>.*?<dt>委比</dt>.*?<dd>(.*?)</dd>.*?</dl>'
            r'.*?<dl>.*?<dd>(.*?)</dd>.*?<dt>市净率</dt>.*?<dd>(.*?)</dd>.*?</dl>'
            r'.*?<dl>.*?<dt>流通市值</dt>.*?<dd>(.*?)</dd>.*?<dt>总市值</dt>.*?<dd>(.*?)</dd>.*?</div>'
            , html, flags=re.S)
        # idiom: Match.groups() replaces fifteen individual .group(i) calls
        data = list(plt_data.groups())
        ilt.append([basic, price, HL, data])
    except Exception:  # typically AttributeError when a regex found no match
        traceback.print_exc()


def PrintWrite(ilt):
    """Write every parsed stock record in *ilt* to stock/<MM_DD_HH_MM>.txt.

    Each record is a [basic, price, HL, data] list as produced by ParsePage;
    labels and values are interleaved, and the 15-field 'data' section is
    wrapped every five fields for readability.
    """
    labels = [['股票名称', '股票代码', '交易状态', '更新时间'],
              ['当前股价', '涨跌额', '涨跌幅'],
              ['最高价', '最低价'],
              ['今开', '昨收', '涨停', '跌停', '换手率', '振幅', '成交量', '成交额', '内盘', '外盘', '委比', '市盈率(动)', '市净率', '流通市值', '总市值']]
    root = 'stock/'
    path = root + time.strftime('%m_%d_%H_%M', time.gmtime()) + '.txt'
    os.makedirs(root, exist_ok=True)  # race-free replacement for exists()+mkdir()
    with open(path, 'wt', encoding='utf-8') as fp:  # utf-8: labels are Chinese
        for item in ilt:
            try:
                for i, (names, values) in enumerate(zip(labels, item)):
                    for j, (name, value) in enumerate(zip(names, values)):
                        if i == 3 and (j + 1) % 5 == 0:
                            # break the data section into rows of five fields
                            fp.write('{}:\t{}\n'.format(name, value))
                        else:
                            fp.write('{}:\t{}\t'.format(name, value))
                    fp.write('\n')
                fp.write('\n\n')
            except Exception:  # narrowed from bare except; skip a malformed record
                print('PrintWrite error')


def main():
    """Crawl every stock linked from the gucheng code list and dump quote details."""
    info = list()
    url_all = 'https://hq.gucheng.com/gpdmylb.html'
    try:
        html_all = GetHtmlText(url_all)
        # BUG FIX: this search previously ran OUTSIDE the try block, so a failed
        # index fetch (html_all is None) crashed with an uncaught TypeError
        match_all = re.search(r'<section class="stockTable">(.*)</section>', html_all, flags=re.S)
        soup = bs4.BeautifulSoup(match_all.group(1), 'html.parser')
        links = soup('a')
        total = len(links)
        for count, line in enumerate(links, start=1):
            html = GetHtmlText(line.attrs['href'])
            ParsePage(html, info)
            print('当前完成:{:.2f}%({}|{})'.format(count / total * 100, count, total))
        PrintWrite(info)
    except Exception:  # narrowed from bare except; dump the traceback for diagnosis
        print('error match')
        traceback.print_exc()


main()  # script entry for the gucheng crawler (errors are handled inside main)

 

 

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值