兄弟作业

本文通过几个实例演示如何用Python抓取豆瓣电影数据:使用requests和requests_html调用豆瓣搜索接口,用json与csv模块保存并转换抓取结果,并用urllib配合正则表达式下载Top250电影的封面图片。
摘要由CSDN通过智能技术生成
#-*-coding:utf-8-*-

import re
import json
import requests

# Scrape 10 pages (20 movies per page) of 2018 mainland-China films from
# douban's search API and dump each movie's name, cover URL and rating
# to a JSON file.
ll = []
for page_index in range(10):  # each request returns one page of 20 entries
    url = (r'https://movie.douban.com/j/new_search_subjects'
           r'?sort=S&range=0,10&tags=电影&start={}'
           r'&countries=中国大陆&year_range=2018,2018').format(page_index * 20)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
    page = requests.get(url=url, headers=headers).json()
    # 'data' holds the list of movie dicts returned by the API.  Slicing
    # to 20 (instead of indexing 0..19) avoids an IndexError when the
    # last page comes back short.  Note: the original code bound the
    # builtins `list` and `dict` as variable names; avoid shadowing them.
    for movie in page['data'][:20]:
        ll.append({
            'name': movie['title'],     # movie title
            'img_src': movie['cover'],  # poster image URL
            'score': movie['rate'],     # douban rating
        })
with open("D:/xuefeng/ans.json", 'w', encoding='utf-8') as f:
    json.dump(ll, f, ensure_ascii=False)

https://www.cnblogs.com/moonyue/p/11037205.html

import json
import csv

# Convert the list of movie dicts stored in ans.json into a CSV file
# (ans.csv): first record's keys become the header row, each record's
# values become one data row.

# Read the JSON payload; the context manager closes the file even if
# json.load raises.
with open('ans.json', 'r', encoding='UTF-8') as json_fp:
    data_list = json.load(json_fp)

# Header row comes from the first record's keys (assumes every record
# shares the same keys, in the same insertion order).
sheet_title = data_list[0].keys()
print(sheet_title)

# One row of values per record.
sheet_data = [data.values() for data in data_list]

# newline='' stops the csv module from emitting a blank line after every
# row on Windows; explicit utf-8 keeps non-ASCII titles intact regardless
# of the platform's default encoding.
with open('ans.csv', 'w', newline='', encoding='utf-8') as csv_fp:
    writer = csv.writer(csv_fp)
    writer.writerow(sheet_title)   # header
    writer.writerows(sheet_data)   # content

(此处原文为运行结果截图,图片未随文本导出)

from urllib.request import urlopen
import re
import urllib

headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
'Cookie':'bid=wjbgW95-3Po; douban-fav-remind=1; __gads=ID=f44317af32574b60:T=1563323120:S=ALNI_Mb4JL8QlSQPmt0MdlZqPmwzWxVvnw; __yadk_uid=hwbnNUvhSSk1g7uvfCrKmCPDbPTclx9b; ll="108288"; _vwo_uuid_v2=D5473510F988F78E248AD90E6B29E476A|f4279380144650467e3ec3c0f649921e; trc_cookie_storage=taboola%2520global%253Auser-id%3Dff1b4d9b-cc03-4cbd-bd8e-1f56bb076864-tuct427f071; viewed="26437066"; gr_user_id=7281cfee-c4d0-4c28-b233-5fc175fee92a; dbcl2="158217797:78albFFVRw4"; ck=4CNe; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1583798461%2C%22https%3A%2F%2Faccounts.douban.com%2Fpassport%2Flogin%3Fredir%3Dhttps%253A%252F%252Fmovie.douban.com%252Ftop250%22%5D; _pk_ses.100001.4cf6=*; __utma=30149280.1583974348.1563323123.1572242065.1583798461.8; __utmb=30149280.0.10.1583798461; __utmc=30149280; __utmz=30149280.1583798461.8.7.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/passport/login; __utma=223695111.424744929.1563344208.1572242065.1583798461.4; __utmb=223695111.0.10.1583798461; __utmc=223695111; __utmz=223695111.1583798461.4.4.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/passport/login; push_noty_num=0; push_doumail_num=0; _pk_id.100001.4cf6=06303e97d36c6c15.1563344208.4.1583798687.1572242284.'}
base_url = 'https://movie.douban.com/top250?start=%d&filter='


class spider_douban250(object):
    """Download cover images from douban Top-250 movie list pages.

    Walks the paged list from ``start`` up to (but not including)
    ``total`` in increments of ``step`` — douban shows 25 movies per
    page — and saves each movie's cover image to disk, named after
    the movie title.
    """

    def __init__(self, url=None, start=0, step=25, total=250):
        self.durl = url       # page URL template containing a %d placeholder
        self.dstart = start   # first page offset to request
        self.dstep = step     # offset increment per page
        self.dtotal = total   # stop once the offset reaches this value

    def start_download(self):
        """Iterate over every page offset and scrape each list page."""
        while self.dstart < self.dtotal:
            page_url = self.durl % self.dstart
            print(page_url)
            self.load_page(page_url)
            self.dstart += self.dstep

    def load_page(self, url):
        """Fetch one list page and download every movie cover found on it."""
        request = urllib.request.Request(url=url, headers=headers)
        response = urlopen(request)
        if response.code != 200:
            return
        content = response.read().decode('utf-8')
        entries = re.findall(r'<li>(.*?)</li>', content, re.S)
        if not entries:
            return
        # The first <li> is skipped — presumably page chrome rather than
        # a movie entry (TODO confirm against the live page markup).
        for entry in entries[1:]:
            img_tags = re.findall(r'<img.*?>', entry)
            if img_tags:
                # Tokens 2 and 3 of the <img> tag are expected to be
                # alt="<title>" and src="<cover url>"; split on '=' and
                # strip the surrounding quote characters.
                info = [attr.split('=')[1].strip()[1:-1]
                        for attr in img_tags[0].split(' ')[2:4]]
                self.load_img(info)

    def load_img(self, info):
        """Download one cover image; ``info`` is [title, image_url]."""
        print("callhere load img:", info)
        request = urllib.request.Request(url=info[1], headers=headers)
        img_bytes = urlopen(request).read()
        # Plain (non-raw) string with uniformly escaped backslashes; the
        # original mixed a raw prefix with doubled and single backslashes.
        path = 'D:\\xuefeng\\images\\' + info[0] + '.jpg'
        print('path:', path)
        # Context manager closes the handle even if the write fails
        # (the original leaked the file object on error).
        with open(path, 'wb') as img_file:
            img_file.write(img_bytes)

# Entry point: total=25 deliberately limits the crawl to the first page
# (25 movies) even though the base URL points at the full top-250 list.
spider = spider_douban250(base_url,start=0,step=25,total=25)
spider.start_download()

(此处原文为运行结果截图,图片未随文本导出)

from requests_html import HTMLSession


class Spider(object):
    """Scrape douban's movie search API and save results as JSON lines."""

    def __init__(self):
        self.session = HTMLSession()  # HTTP session reused for every request
        # Search endpoint; filters are sent as query-string parameters.
        self.api = 'https://movie.douban.com/j/new_search_subjects'

    def get_params(self):
        """Build the query-string filters for the search request.

        Example query: sort=S&range=0,10&tags=&start=0&year_range=2015,2019
        The genre could be read interactively (input()); it is hard-coded
        here.  The 'start' offset is filled in per page by the fetch loop.
        """
        self.params = {
            'sort': 'S',                # sort order
            'range': '0,10',            # rating range
            'year_range': '2018,2018',  # release year
            'genres': '动作'             # genre filter
        }

    def get_film_info_dic(self):
        """Fetch 10 pages (20 films each) and append them to movie_info.json.

        One film per line.  NOTE: mode 'at+' appends, so re-running the
        script accumulates duplicate lines in the output file.
        """
        # Open once for the whole run instead of once per page.
        with open('movie_info.json', mode='at+', encoding='utf-8') as f:
            for page in range(0, 10):
                self.params['start'] = page * 20
                page_info = self.session.get(url=self.api, params=self.params).json()
                for film in page_info['data']:
                    # json.dumps (not str) so each line is valid JSON —
                    # str() emits a Python repr with single quotes that
                    # no JSON parser can read back.
                    f.write(json.dumps(film, ensure_ascii=False) + '\r\n')

    def run(self):
        """Build the filter parameters, then run the fetch-and-save pass."""
        self.get_params()
        self.get_film_info_dic()


if __name__ == '__main__':
    # Entry point: build the spider and run the full scrape-and-save pass.
    douban = Spider()
    douban.run()
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值