Most of my course projects and portfolio pieces use existing datasets, but a few rely on data collected with a crawler:
Below is the basic crawler code I reuse for course and personal projects; usually swapping the URL/headers or changing a few lines is all it takes to adapt it:
import requests
from bs4 import BeautifulSoup
import pandas as pd
import os

# Fetch the page content
def crawl(url):
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.2 Safari/605.1.15'
        }
        response = requests.get(url, headers=headers)
        response.raise_for_status()  # make sure the request succeeded
        return response.text
    except requests.RequestException as e:
        print(f"Request failed: {e}")
        return None

# Parse the HTML
def parse_html(html):
    bs = BeautifulSoup(html, 'html.parser')
    recipes = []
    # Find all elements containing recipe info; adjust the selector to match the actual HTML structure
    for div in bs.find_all('div', class_='info pure-u'):
        a_tag = div.find('a')
        if a_tag:
            name = a_tag.text.strip()
            link = 'https://www.xiachufang.com' + a_tag['href']
            # Extract the ingredient list
            ingredient_div = div.find('p', class_='ing ellipsis')
            ingredients = ingredient_div.text.strip() if ingredient_div else "No ingredients listed"
            recipes.append({'name': name, 'link': link, 'ingredients': ingredients})
    return recipes

# Save the results to an Excel file
def save_to_excel(recipes, filename='recipes.xlsx'):
    desktop_path = os.path.join(os.path.expanduser('~'), 'Desktop')
    file_path = os.path.join(desktop_path, filename)
    df = pd.DataFrame(recipes)
    df.to_excel(file_path, index=False)
    print(f"Data saved to {file_path}")

if __name__ == "__main__":
    url = 'https://www.xiachufang.com/explore/'
    html_content = crawl(url)
    if html_content:
        recipes = parse_html(html_content)
        save_to_excel(recipes)
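When a project needs more than one listing page, the same functions can be wrapped in a small loop. This is only a minimal sketch of how I adapt it: the "?page=N" query parameter and the page count are assumptions about the target site's pagination, and the delay is just a polite default, so adjust both to whatever the site actually uses.

import time

def crawl_pages(base_url, pages=3, delay=2):
    """Fetch several listing pages, reusing crawl() and parse_html() above."""
    all_recipes = []
    for page in range(1, pages + 1):
        page_url = f"{base_url}?page={page}"  # assumed pagination format, verify against the real site
        html = crawl(page_url)
        if html:
            all_recipes.extend(parse_html(html))
        time.sleep(delay)  # pause between requests to avoid hammering the server
    return all_recipes

if __name__ == "__main__":
    recipes = crawl_pages('https://www.xiachufang.com/explore/', pages=3)
    save_to_excel(recipes, filename='recipes_multi.xlsx')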