自从2022年3月为了处理疫情建档开始着手学习python办公自动化以及爬虫以来,python在提高我个人信息获取和解决实际办公问题的能力方面具有非常重要的作用,每当我在尝试解决一个技术问题时,我总会在CSDN社区中找到相对应的前人的解析以及构思。因此,作为对社区的回馈,作为对python知识的贡献,作为相信知识的传播可以引领更多人发展python的角度出发,我将我自己常用的几个代码写在下方,各位看客有需要的自取。
一、超级鹰—解决爬虫时登录验证码的问题。建议进超级鹰官网后,对其应用进行了解,非常实用。以下代码并非我本人所写,而是摘自超级鹰官网的模块中。
class Chaojiying_Client(object):
    """Client for the Chaojiying captcha-recognition service.

    Adapted from the official Chaojiying SDK: credentials are registered
    once at construction time and sent along with every API call.
    """

    def __init__(self, username, password, soft_id):
        # The service expects the password as an MD5 hex digest, never plain text.
        self.username = username
        self.password = md5(password.encode('utf8')).hexdigest()
        self.soft_id = soft_id
        # Common form fields attached to every request.
        self.base_params = {
            'user': self.username,
            'pass2': self.password,
            'softid': self.soft_id,
        }
        self.headers = {
            'Connection': 'Keep-Alive',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
        }

    def PostPic(self, im, codetype):
        """Submit an image for recognition and return the parsed JSON reply.

        im: raw image bytes
        codetype: captcha category, see http://www.chaojiying.com/price.html
        """
        payload = dict(self.base_params)
        payload['codetype'] = codetype
        resp = requests.post(
            'http://upload.chaojiying.net/Upload/Processing.php',
            data=payload,
            files={'userfile': ('ccc.jpg', im)},
            headers=self.headers,
        )
        return resp.json()

    def ReportError(self, im_id):
        """Report a wrongly solved captcha by image ID (for refund/credit)."""
        payload = dict(self.base_params)
        payload['id'] = im_id
        resp = requests.post(
            'http://upload.chaojiying.net/Upload/ReportError.php',
            data=payload,
            headers=self.headers,
        )
        return resp.json()
二、批量下载—利用url和requests模块,可对图片、视频等文件进行批量下载。优点是,前面集成了许多user_agent和headers,再用上random模块随机选取,就可以绕过部分网站的反爬虫机制。
def download_by_url_links(url, filename):
    """Download the resource at *url* and stream it to *filename*.

    A random browser User-Agent plus browser-like headers are sent with the
    request to slip past simple anti-scraping checks.

    url: direct link to the file (image, video, ...)
    filename: local path the content is written to
    Raises requests.HTTPError on a non-2xx response instead of silently
    saving an error page.
    """
    # Fixed: two entries previously carried copied "MAC: " / "Windows: "
    # labels inside the string (invalid User-Agent values), and one iPhone
    # entry was duplicated.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) Gecko/20100101 Firefox/61.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
        # Safari
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
        "Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
        "Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
    ]
    headers = {
        'Connection': 'close',
        'authority': 'https://cn.bing.com',
        'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
        'sec-ch-ua-mobile': '?0',
        'upgrade-insecure-requests': '1',
        'user-agent': random.choice(user_agent_list),
        'referer': 'https://cn.bing.com/',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'sec-fetch-site': 'none',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-user': '?1',
        'sec-fetch-dest': 'document',
        'accept-language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-TW;q=0.6'
    }
    # NOTE(review): verify=False disables TLS certificate checking; kept from
    # the original for hosts with broken certs — consider removing it.
    # "with" closes the connection even if writing fails (was leaked before).
    with requests.get(url, stream=True, verify=False, headers=headers) as response:
        response.raise_for_status()  # fail loudly instead of saving an HTML error page
        with open(filename, "wb") as file:  # "wb+" read mode was never used
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    file.write(chunk)
三、利用google chrome浏览器下载PDF—主要用于浏览一些信息时,对网页信息的及时保存,我之前曾用这个工具下载维基百科自己感兴趣的知识。
def Merged_plugin_and_downloadPDF(new_url, save_path, title_name,
                                  user_data_dir=r'/Users/XXXXXXX/Library/Application Support/Google/Chrome_3/',
                                  driver_path="/Users/XXXXXX/PycharmProjects/pythonProject/venv/Selenium模块学习/chromedriver"):
    """Open *new_url* in Chrome and silently print the page to a PDF.

    Uses Chrome's built-in "Save as PDF" print destination with kiosk
    printing, so no user interaction is required.

    new_url: page to render
    save_path: directory the PDF is written to
    title_name: becomes the page title and therefore the PDF file name
    user_data_dir: Chrome profile directory (was hard-coded; now a parameter
        defaulting to the original value — pass your own)
    driver_path: path to the chromedriver executable (same treatment)
    """
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--user-data-dir=' + user_data_dir)
    # Print-preview settings: A4, no header/footer, keep CSS backgrounds.
    settings = {
        "recentDestinations": [{
            "id": "Save as PDF",
            "origin": "local",
            "account": ""
        }],
        "selectedDestinationId": "Save as PDF",
        "version": 2,
        "isHeaderFooterEnabled": False,
        # "customMargins": {},
        # "marginsType": 2,        # margins (2 = minimum, 0 = default)
        # "scaling": 100,
        # "scalingType": 3,
        # "scalingTypePdf": 3,
        # "isLandscapeEnabled": True,  # defaults to portrait when unset
        "isCssBackgroundEnabled": True,
        "mediaSize": {
            "height_microns": 297000,
            "name": "ISO_A4",
            "width_microns": 210000,
            "custom_display_name": "A4"
        },
    }
    chrome_options.add_argument('--enable-print-browser')
    # chrome_options.add_argument('--headless')  # headless hides the window and is faster
    prefs = {
        'printing.print_preview_sticky_settings.appState': json.dumps(settings),
        'savefile.default_directory': save_path  # where the PDF lands
    }
    chrome_options.add_argument('--kiosk-printing')  # silent print: no confirm dialog
    chrome_options.add_experimental_option('prefs', prefs)
    new_browser = webdriver.Chrome(driver_path, options=chrome_options)
    new_browser.get(new_url)
    new_browser.maximize_window()
    time.sleep(0.1)
    # JS sets document.title (used as the PDF file name), then triggers the
    # print dialog without relying on the Ctrl+P hotkey.
    input_script = f'document.title="{title_name}";window.print();'
    new_browser.execute_script(input_script)
    new_browser.quit()
四、将下载的ts文件进行合并。
def merged_all_ts_files_2_single_file(folder_path, ouput_file_name):
    """Concatenate numbered .ts segments from *folder_path* into one file.

    Segments must be named either ``index0.ts, index1.ts, ...`` or
    ``0.ts, 1.ts, ...`` with no gaps; they are appended in numeric order
    and each segment is deleted right after it has been copied.

    folder_path: directory containing the .ts segments
    ouput_file_name: path of the merged output file
    Raises FileNotFoundError if the numbering has a gap.
    """
    file_list = [f for f in os.listdir(folder_path) if f.endswith(".ts")]
    # Support both "index<N>.ts" and plain "<N>.ts" naming schemes with one loop.
    prefix = 'index' if 'index0.ts' in file_list else ''
    # "with" guarantees the output handle is closed even if a segment read
    # fails (the original leaked the handle on any exception).
    with open(ouput_file_name, 'wb') as output_file:
        for i in range(len(file_list)):
            segment = os.path.join(folder_path, f'{prefix}{i}.ts')
            with open(segment, 'rb') as input_file:
                output_file.write(input_file.read())
            os.remove(segment)
五、将doc文件批量转换为docx—这个程序可能在办公领域会有所应用。
def transfer_doc_into_docx_files(
        source="/Users/XXXXXXX/PycharmProjects/pythonProject/venv/docx库应用与探索/",
        dest="/Users/XXXXXXXX/PycharmProjects/pythonProject/venv/docx库应用与探索/转换",
        app_path="/Applications/LibreOffice.app/Contents/MacOS/soffice"):
    """Batch-convert every .doc file in *source* to .docx using LibreOffice.

    source: directory scanned for .doc files (was hard-coded; now a
        parameter defaulting to the original value)
    dest: output directory — must already exist (same treatment)
    app_path: path to the LibreOffice ``soffice`` executable
    Raises subprocess.CalledProcessError if a conversion fails.
    """
    doc_files = [f for f in os.listdir(source) if f.endswith('.doc')]
    for name in doc_files:
        # os.path.join avoids the "dir//file" double slash the original
        # produced; list-form argv (shell=False) is safe for spaced paths.
        subprocess.check_output([
            app_path,
            "--headless",
            "--convert-to",
            "docx",
            os.path.join(source, name),
            "--outdir",
            dest,
        ])
先就这些吧,对于一些初学者想要解决一些技术问题,会有所帮助,后续将上传一些有意思的应用!