Python mini-games

Python holiday countdown
import datetime
import requests
from loguru import logger
 
# Holiday anchor dates
holiday_list = [
    {"平安夜": "2021-12-24"},
    {"圣诞节": "2021-12-25"},
    {"元旦假期": "2022-01-01"},
    {"春节假期": "2022-01-31"},
]
 
 
def get_holiday():
    """
    Read the holiday settings configured above.
    :return: str, one line per holiday in the form 距离<节日>还有<n>天
    """
    holiday_content = ''
    # Today's date, truncated to midnight so the day counts come out whole
    now_str = datetime.datetime.now().strftime('%Y-%m-%d')
    now = datetime.datetime.strptime(now_str, "%Y-%m-%d")
    for holiday_info in holiday_list:
        holiday_name = list(holiday_info.keys())[0]
        holiday_date = holiday_info[holiday_name]
        future = datetime.datetime.strptime(holiday_date, "%Y-%m-%d")
        days = (future - now).days
        holiday_content = holiday_content + '距离' + holiday_name + '还有' + str(days) + '天' + '\n'
    return holiday_content
 
 
def get_tg():
    """
    Fetch a random "diary" entry.
    :return: str on success, False on failure
    """
    url = "https://fabiaoqing.com/jichou/randomrj.html"
    try:
        res = requests.post(url=url, timeout=10).json()
        return res['tgrj'] + '\n'
    except Exception:
        return False
 
 
def get_weather():
    """
    Fetch the weather forecast from the Juhe simpleWeather API.
    :return: str on success, False on failure
    """
    url = "http://apis.juhe.cn/simpleWeather/query"
    params = {
        'city': '北京',
        'key': '7612ddda2313a41481327cbef5261b46',
    }
    try:
        res = requests.get(url=url, params=params, timeout=10).json()
        now_str = datetime.datetime.now().strftime('%Y-%m-%d')
        # weekday() is 0-based (Monday == 0), so +1 gives the usual 1-7 numbering
        weather_content = f"""【摸鱼办公室】\n今天是 {now_str} 星期 {datetime.datetime.now().weekday() + 1}\n{res['result']['city']} 当前天气 {res['result']['realtime']['info']} {res['result']['realtime']['temperature']}摄氏度\n早上好,摸鱼人!上班点快到了,收拾收拾,该吃饭吃饭,该溜达溜达,该上厕所上厕所。别闲着\n"""
        return weather_content
    except Exception:
        return False
 
 
if __name__ == '__main__':
    holiday_content = get_holiday()
    if not holiday_content:
        logger.error(f"节日为空。")
        holiday_content = ''
    else:
        logger.info(f"获取到节日:\n{holiday_content}")
    tg_content = get_tg()
    if not tg_content:
        logger.error(f"日记为空。")
        tg_content = ''
    else:
        logger.info(f"获取到日记:\n{tg_content}")
    weather_content = get_weather()
    if not weather_content:
        logger.error(f"天气为空。")
        weather_content = ''
    else:
        logger.info(f"获取到天气:\n{weather_content}")
    complete_content = weather_content + holiday_content + tg_content + '工作再累 一定不要忘记摸鱼哦!有事没事起身去茶水间去厕所去廊道走走,别老在工位上坐着钱是老板的,但命是自己的'
    logger.info(f"整合内容开始推送:\n{complete_content}")

Pexels image extraction
import csv
import random
import time

import grequests
from faker import Factory
from lxml import etree
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options

def get_url_file():
    opt = Options()
    # opt.add_argument('--headless')
    # Hide the usual Selenium automation fingerprints
    opt.add_experimental_option('useAutomationExtension', False)
    opt.add_experimental_option("excludeSwitches", ['enable-automation'])
    opt.add_argument('--disable-blink-features=AutomationControlled')
    # Spoof a random user agent
    opt.add_argument(f'--user-agent={Factory.create().user_agent()}')

    driver = Chrome(options=opt)

    url = 'https://www.pexels.com/zh-cn/search/%E6%8A%BD%E8%B1%A1/'
    driver.get(url=url)
    print('已请求链接,正在下拉')
    driver.maximize_window()
    time.sleep(2)
    # Scroll repeatedly so the infinite-scroll page keeps loading more photos
    for a in range(1, 301):
        js = f'window.scrollBy(0,{random.randint(2500, 3500)})'
        driver.execute_script(js)
        time.sleep(random.randint(1, 3))
        print(f'正在下拉第{a}下')

    html = driver.page_source
    # print(html)
    tree = etree.HTML(html)
    with open('url_file.csv','a',encoding='utf-8',newline='') as file:
        url_file_csv = csv.writer(file)
        # Each column div holds one photo; take the first URL in the <img> srcset
        div_list = tree.xpath('//div[@class="photos__column"]/div')
        i = 1
        for div in div_list:
            try:
                img_url = div.xpath('.//a[@class="js-photo-link photo-item__link"]/img/@srcset')[0].split(',')[0]
                print(img_url)
                url_file_csv.writerow([img_url])
                print(f'第{i}条url写入文件')
                i += 1
            except Exception as e:
                print(e)
    driver.quit()

def save_png():
    with open('./url_file.csv', 'r', encoding='utf-8') as file:
        url_list = list(csv.reader(file))
        headers = {
            'user-agent': Factory.create().user_agent()
        }
        req_list = []
        for url in url_list:
            req = grequests.get(url=url[0], headers=headers, timeout=20)
            req_list.append(req)
        # imap yields responses as they complete, so the numbering below follows
        # completion order rather than the CSV row order
        html_list = grequests.imap(req_list, size=100)
        i = 1
        for html in html_list:
            img_name = f'pexels_抽象_{i}.png'
            with open(f'./img/{img_name}', 'wb') as f:
                f.write(html.content)
                print(f'第{i}个图片储存完毕')
            i += 1

if __name__ == '__main__':
    # get_url_file()
    # Step 1: collect all image URLs into url_file.csv (run once, then comment out)
    save_png()
    # Step 2: download every collected URL
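grequests relies on gevent monkey-patching, which can conflict with other libraries in the same process. As a point of comparison, here is a sketch of the same download step using only requests plus the standard library's concurrent.futures; download_one and save_png_with_threads are names of my own, while the CSV path and ./img output directory are taken from the code above:

import csv
from concurrent.futures import ThreadPoolExecutor

import requests
from faker import Factory

def download_one(numbered_row):
    # numbered_row: (CSV row number, image URL), so filenames follow CSV order
    i, url = numbered_row
    headers = {'user-agent': Factory.create().user_agent()}
    try:
        resp = requests.get(url, headers=headers, timeout=20)
        with open(f'./img/pexels_抽象_{i}.png', 'wb') as f:
            f.write(resp.content)
        print(f'第{i}个图片储存完毕')
    except Exception as e:
        print(f'第{i}个图片下载失败: {e}')

def save_png_with_threads():
    with open('./url_file.csv', 'r', encoding='utf-8') as file:
        rows = [(i, row[0]) for i, row in enumerate(csv.reader(file), start=1) if row]
    # 20 worker threads is a conservative default compared to grequests' size=100
    with ThreadPoolExecutor(max_workers=20) as pool:
        pool.map(download_one, rows)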
Python phone dialing

JDK download and installation link
https://blog.csdn.net/weixin_37601546/article/details/88623530

Taobao auctions
import csv
import json
import re
import time

import requests
from faker import Factory

def get_first_into(writer):
    url = 'https://sf.taobao.com/list/0_____%C9%EE%DB%DA.htm?spm=a213w.7398504.pagination.8.749c2468MMGtxr&auction_source=0&st_param=-1&auction_start_seg=-1&page=1'
    headers = {
        'cookie':'thw=cn; t=f76a6d415fff51c3cdc3ee41c607675a; cna=W9okGechEgMCASdtfEr2YVQW; lgc=%5Cu6881%5Cu5C71%5Cu7684%5Cu5C0F%5Cu77F3%5Cu5934; tracknick=%5Cu6881%5Cu5C71%5Cu7684%5Cu5C0F%5Cu77F3%5Cu5934; enc=N2rwnR%2BxvMYMseUz41NZyNs6Vpja9rBT%2BGOui0uayAqLcCVFaAfAuEZO6Lb9NBHk3xC%2BQQVjxdx2B9Jf20O0JQ%3D%3D; hng=CN%7Czh-CN%7CCNY%7C156; xlly_s=1; _m_h5_tk=a712b3f4b3018f01a124675aa8146f2a_1633006447995; _m_h5_tk_enc=21776bf336eca6de44a63f1341f8886c; _tb_token_=eee7ebe33e76e; cookie2=113f39951e3aec481a6234ee19902e8f; _samesite_flag_=true; sgcookie=E100yn8QMe%2BNCKmX%2BMXdloTF1TEv20VjKPP8ENYQYGbvcAXqQd2LZE0bp8xMkxCs3Kq0puw4Gr6UIiga3AFOUu1LSNAZixkOSCzpfiRWBrqO3OE%3D; unb=3400637622; uc3=nk2=okgS%2BYgJcRVn8WDP&id2=UNQ3HxCU%2FfLX4A%3D%3D&vt3=F8dCujaOW90OgAyK4GU%3D&lg2=VFC%2FuZ9ayeYq2g%3D%3D; csg=d3e60103; cancelledSubSites=empty; cookie17=UNQ3HxCU%2FfLX4A%3D%3D; dnk=%5Cu6881%5Cu5C71%5Cu7684%5Cu5C0F%5Cu77F3%5Cu5934; skt=918895384256aadf; existShop=MTYzMjk5ODgyNg%3D%3D; uc4=id4=0%40UgP8IrSpIxOc2X6zDVXMcto8we1s&nk4=0%40oEryIG%2BDPNs%2F9Bq1Qvb5fswbI1yKbNo%3D; _cc_=VT5L2FSpdA%3D%3D; _l_g_=Ug%3D%3D; sg=%E5%A4%B42d; _nk_=%5Cu6881%5Cu5C71%5Cu7684%5Cu5C0F%5Cu77F3%5Cu5934; cookie1=VAdfNmtE6KFzM8%2Fn8eWCjVZGNwzjccmHmHvTwzUkH9E%3D; mt=ci=0_1; uc1=existShop=false&cookie15=VT5L2FSpMGV7TQ%3D%3D&cookie16=UtASsssmPlP%2Ff1IHDsDaPRu%2BPw%3D%3D&cookie21=Vq8l%2BKCLjA%2Bl&pas=0&cookie14=Uoe3dYiYFoEFew%3D%3D; isg=BBMTRrnldm_EMDuBblhdQGFMopc9yKeK0Y8GJMUwbzJpRDPmTZg32nGWerQqf_-C; l=eBMzQci7jt0unm1sBOfanurza77OSIRYYuPzaNbMiOCPO2CB5WIlW6eWMqT6C3GVh6y9R3RILX7BBeYBqQAonxvTkZm8e8Hmn; tfstk=cF15BNY250m53D2EaaaqYeQqPQddw736R8tlP9gabu8eah1DPf-6y_Wnvjp9l',
        # 'user-agent':Factory.create().user_agent(),
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
    }
    html = requests.get(url=url, headers=headers, timeout=20)
    # The listing data is embedded as JSON inside a sf-item-list-data <script> tag
    data_str = re.search(r'sf-item-list-data.*?({.*?)</script>', html.text, re.S).group(1)
    data_dict = json.loads(data_str)
    for d in data_dict['data']:
        status = d['status']
        title = d['title']
        initialPrice = str(int(d['initialPrice']) / 10000) + '万'
        # Starting price (the API returns prices in yuan; /10000 converts to 万)
        currentPrice = str(int(d['currentPrice']) / 10000) + '万'
        # Current price
        consultPrice = str(int(d['consultPrice']) / 10000) + '万'
        # Appraised price
        start_time = d['start']
        start_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(round(int(start_time) / 1000)))
        # Auction start time (timestamps come back in milliseconds)
        end_time = d['end']
        end_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(round(int(end_time) / 1000)))
        # Auction end time
        viewerCount = d['viewerCount']
        # Number of viewers
        applyCount = d['applyCount']
        # Number of registered bidders
        xmppVersion = d['xmppVersion']

        url = 'https:' + d['itemUrl']

        info = [title, status, initialPrice, currentPrice, consultPrice, start_time, end_time, viewerCount, applyCount, xmppVersion, url]

        print(info)
        writer.writerow(info)

if __name__ == '__main__':
    with open('淘宝拍卖.csv','a',encoding='utf-8',newline='') as file:
        writer = csv.writer(file)
        get_first_into(writer)
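get_first_into only ever fetches page 1 because the page number is baked into the listing URL. A sketch of paging through more results might look like the following; get_page is a hypothetical helper of my own, max pages and cookie validity across pages are assumptions, and the cookie-bearing headers from above would be passed in:

import json
import random
import re
import time

import requests

def get_page(writer, headers, page):
    # Hypothetical pagination helper: the original URL carries page=1, so later
    # pages can be fetched by substituting a different page number
    url = ('https://sf.taobao.com/list/0_____%C9%EE%DB%DA.htm'
           f'?auction_source=0&st_param=-1&auction_start_seg=-1&page={page}')
    html = requests.get(url=url, headers=headers, timeout=20)
    data_str = re.search(r'sf-item-list-data.*?({.*?)</script>', html.text, re.S).group(1)
    data_dict = json.loads(data_str)
    for d in data_dict['data']:
        # Reduced row for brevity; extend with the full field list as above
        writer.writerow([d['title'], d['status'], 'https:' + d['itemUrl']])
    time.sleep(random.uniform(1, 3))  # pause between pages to stay polite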
Popup prank mini-game
import tkinter as tk
import random
import threading
import time

def boom():
    window = tk.Tk()
    width = window.winfo_screenwidth()
    height = window.winfo_screenheight()
    a = random.randrange(0, width)
    b = random.randrange(0, height)
    window.title('就是玩')
    window.geometry("200x50" + "+" + str(a) + "+" + str(b))
    tk.Label(window, text='你好毒,你好毒,你好毒毒毒毒毒', bg='white',
             font=('宋体', 17), width=20, height=4).pack()
    window.mainloop()

threads = []

# Note: tkinter is not thread-safe, so one Tk window per thread happens to work
# on some platforms but can crash on others (see the multiprocessing sketch below)
for i in range(100):
    t = threading.Thread(target=boom)
    threads.append(t)
    time.sleep(0.1)
    threads[i].start()
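Because of that thread-safety caveat, a more robust variant (my own suggestion, not part of the original) gives each window its own process instead of its own thread:

import multiprocessing
import time

# Keep the spawning code under the __main__ guard so that child processes do
# not re-execute it on import (required on Windows, where spawn is the default)
if __name__ == '__main__':
    processes = []
    for _ in range(100):
        p = multiprocessing.Process(target=boom)  # boom() as defined above
        processes.append(p)
        p.start()
        time.sleep(0.1)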

Auto-emailing content
# -*- coding: utf-8 -*-

import requests
import smtplib
import schedule
import time
from bs4 import BeautifulSoup
from email.mime.text import MIMEText
from email.header import Header

# account  = sender's QQ mailbox address
# password = QQ mailbox authorization code (not the login password)
# receiver = recipient's 163 or QQ mailbox address
account = '123456789@qq.com'
password = 'qwertyuiop'
receiver = 'raymand6325@163.com'
# Replace these with your own account and authorization code


# Scraper task: collect book titles and authors from sobooks.cc; adjust the
# page range below to suit your needs
def recipe_spider():
    list_all = ''
    num = 0
    for a in range(1, 3):
        url = '{0}{1}'.format('https://sobooks.cc/page/', a)
        res = requests.get(url)
        res.encoding = res.apparent_encoding
        bs = BeautifulSoup(res.text, 'html.parser')

        books = bs.find_all('h3')
        authors = bs.find_all('p')

        for i in range(len(books)):
            num = num + 1
            book = books[i].text.strip()
            # The i+1 offset matches this site's current layout, where the
            # first <p> is not an author line; it breaks if the template changes
            author = authors[i + 1].text.strip()

            entry = '''
 书名%s: %s,作者: %s
            ''' % (num, book, author)
            list_all = list_all + entry
    return list_all

# Email the scraped content
def send_email(list_all):
    mailhost = 'smtp.qq.com'
    qqmail = smtplib.SMTP()
    qqmail.connect(mailhost, 25)
    qqmail.login(account, password)
    content = '亲爱的,今天书单' + list_all
    print(content)
    message = MIMEText(content, 'plain', 'utf-8')

    subject = '今天看什么'
    message['Subject'] = Header(subject, 'utf-8')
    try:
        qqmail.sendmail(account, receiver, message.as_string())
        print('邮件发送成功')
    except Exception:
        print('邮件发送失败')
    qqmail.quit()


def job():
    print('开始一次任务')
    list_all = recipe_spider()
    send_email(list_all)
    print('任务完成')


if __name__ == '__main__':
    # Scheduled job: every(0.05).minutes fires roughly every 3 seconds; the
    # interval is in minutes and is usually given as a whole number
    schedule.every(0.05).minutes.do(job)
    while True:
        schedule.run_pending()
        time.sleep(1)
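QQ Mail generally requires an encrypted SMTP connection, so if the plain port-25 connect above is refused, delivering over implicit SSL (port 465, via smtplib.SMTP_SSL) is a common fallback. send_email_ssl below is a sketch of my own that reuses the message construction from send_email:

import smtplib
from email.mime.text import MIMEText
from email.header import Header

def send_email_ssl(account, password, receiver, content):
    # Same message as send_email, but delivered over an implicit-SSL connection
    message = MIMEText(content, 'plain', 'utf-8')
    message['Subject'] = Header('今天看什么', 'utf-8')
    try:
        with smtplib.SMTP_SSL('smtp.qq.com', 465) as qqmail:
            qqmail.login(account, password)
            qqmail.sendmail(account, receiver, message.as_string())
        print('邮件发送成功')
    except Exception as e:
        print(f'邮件发送失败: {e}')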