```python

# -*- coding: utf-8 -*-
import datetime
import pandas as pd
import xlsxwriter as xlw
import requests
from bs4 import BeautifulSoup as bs
import re
import time


# Method 1: datetime. Convert the strings to datetime objects, then do date arithmetic.
def dateRange(start, end):  # start/end are 'YYYY-MM' strings, e.g. start='2014-09'
    strptime, strftime = datetime.datetime.strptime, datetime.datetime.strftime
    days = (strptime(end, "%Y-%m") - strptime(start, "%Y-%m")).days
    # format every day in [start, end) as 'YYYYMM', then dedupe down to month strings
    datelist1 = [strftime(strptime(start, "%Y-%m") + datetime.timedelta(i), "%Y%m") for i in range(days)]
    datelist = sorted(set(datelist1))
    return datelist

# Method 2: let pandas generate the date range
def dateRange1(start, end):
    datelist1 = [datetime.datetime.strftime(x, '%Y%m') for x in pd.date_range(start=start, end=end)]
    datelist = sorted(set(datelist1))
    return datelist

def list_to_excel(weather_result, filename):
    workbook = xlw.Workbook(r'E:\%s.xlsx' % filename)
    sheet = workbook.add_worksheet('weather_report')
    # column headers: date, high temp, low temp, weather, wind direction, wind force
    title = ['日期', '最高气温', '最低气温', '天气', '风向', '风力']
    bold = workbook.add_format({'bold': True})
    for i in range(len(title)):
        sheet.write_string(0, i, title[i], bold)  # write the header row in bold
    row, col = 1, 0
    for a in range(len(weather_result)):
        # each record is a multi-line string; write its data lines into consecutive cells
        for b in range(1, 6):
            sheet.write_string(row, col, weather_result[a].splitlines()[b])
            col += 1
        row += 1
        col = 0
    workbook.close()


def getCommentsById(city, start, end):  # city is a pinyin city name; start/end are 'YYYY-MM' strings
    weather_result = []
    datelist = dateRange(start, end)

    for i in datelist:
        url = 'http://lishi.tianqi.com/' + city + '/' + i + '.html'
        print(i)  # progress: which month is being fetched
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
        response = requests.get(url, headers=headers)

        soup = bs(response.text, 'html.parser')
        weather_m = soup.select('.thrui')  # the table of daily records
        uls = weather_m[0].find_all('li')

        for ul in uls:
            al = " ".join('%s' % id for id in ul)  # flatten the li's children into one string
            dr = re.compile(r'<[^>]+>', re.S)      # strip the remaining HTML tags
            dd = dr.sub('', al)
            weather_result.append(dd)
        # time.sleep(0.5)  # throttle requests so the server does not flag us as a crawler

    return weather_result


if __name__ == '__main__':
    data = getCommentsById('chongqing', '2010-01', '2021-01')  # city, start date, end date
    list_to_excel(data, '重庆python天气')  # name of the generated Excel file
```
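As a quick sanity check, `dateRange` yields one 'YYYYMM' string per month from `start` up to, but not including, `end`. A minimal sketch of the expected behavior:

```python
# dateRange walks day by day and dedupes, so the end month itself is excluded.
print(dateRange('2020-11', '2021-02'))
# expected output: ['202011', '202012', '202101']
```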




```python
import requests        # import the requests package
from bs4 import BeautifulSoup



url = 'http://www.cntour.cn/'
strhtml = requests.get(url)
soup = BeautifulSoup(strhtml.text, 'lxml')
data = soup.select('#main>div>div.mtop.firstMod.clearfix>div.centerBox>ul.newsList>li>a')
print(data)
print(soup.p.string)


headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
response = requests.get(url, headers=headers)  # repeat the request with a browser User-Agent

for item in data:
    result = {
        'title': item.get_text(),
        'link': item.get('href')
    }
    print(result)  # print inside the loop so every item shows, not just the last

import re
for item in data:
    result = {
        'title': item.get_text(),
        'link': item.get('href'),
        'ID': re.findall(r'\d+', item.get('href'))  # pull the numeric ID out of the link
    }
    print(result)
```
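The loop above prints each record as it is built; if you would rather have all of them in one place, a minimal variant over the same `data` list collects them first:

```python
# Same fields as the loop above, gathered into a single list of dicts.
results = [{'title': item.get_text(), 'link': item.get('href')} for item in data]
print(len(results), 'links scraped')
```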

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import requests        # import the requests package
import json
def get_translate_date(word=None):
    url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
    form_data = {'i': word,
                 'from': 'AUTO',
                 'to': 'AUTO',
                 'smartresult': 'dict',
                 'client': 'fanyideskweb',
                 'salt': '16147623573022',
                 'sign': '48d7cce186aa05fbe627ec746da39358',
                 'ts': '1614762357302',
                 'bv': '19413bb132e864b42a71e17c0a92015a',
                 'doctype': 'json',
                 'version': '2.1',
                 'keyfrom': 'fanyi.web',
                 'action': 'FY_BY_REALTIME',
                 'typoResult': 'false'}
    # POST the form data; salt/sign/ts/bv were captured from one browser session
    # and may go stale, so recapture them from DevTools if the request stops working
    response = requests.post(url, data=form_data)
    # parse the JSON string into a dict
    content = json.loads(response.text)
    print(content)
    # print just the translated text
    print(content['translateResult'][0][0]['tgt'])
if __name__ == '__main__':
    get_translate_date('我爱中国')  # translate "I love China"
```
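As a side note, `requests` can decode a JSON body itself, so the `json.loads(response.text)` step above has an equivalent one-liner (assuming the server really returns JSON):

```python
# Equivalent to json.loads(response.text): let requests parse the body.
content = response.json()
print(content['translateResult'][0][0]['tgt'])
```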

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import requests
url = 'https://lishi.tianqi.com/chongqing/201901.html'
strhtml = requests.get(url)
print(strhtml.text)

# save the page source to disk; a with-block closes the file automatically
with open(r'E:\c.txt', 'w', encoding='utf-8') as file:
    file.write(strhtml.text)
```
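Note that this last request sends no User-Agent, unlike the scraper at the top of the post; lishi.tianqi.com may answer header-less clients with an error page, so a safer variant of the same fetch is:

```python
# Same fetch, but with the browser User-Agent used earlier in this post.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                         '(KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
strhtml = requests.get(url, headers=headers)
print(strhtml.status_code)  # 200 means the page came back normally
```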