#coding=utf8
import os
import urllib
import random
# Fetch a URL and write its content to a file
def save_url_content(url, folder_path):
    """Download *url* and save the response body into *folder_path*
    under a random ``test_<N>.txt`` name.

    Returns the path of the written file on success, or an error
    message string (u'error' / u'folder_path not a folder') when the
    URL scheme or the target folder is invalid.
    """
    # A URL has exactly one scheme, so the check must be `or`; the
    # original `and` could never be satisfied, flagging every URL.
    # Also return instead of falling through to urlopen on bad input.
    if not (url.startswith('http://') or url.startswith('https://')):
        return u'error'
    if not os.path.isdir(folder_path):
        return u'folder_path not a folder'
    content = urllib.urlopen(url).read()
    # Random name in [1, 1000]: collisions are possible; acceptable for
    # this exercise, but note repeated runs may overwrite earlier files.
    random_name = 'test_%s.txt' % random.randint(1, 1000)
    filepath = os.path.join(folder_path, random_name)
    # `with` guarantees the handle is closed even if write() fails.
    with open(filepath, 'w') as file_handle:
        file_handle.write(content)
    return filepath
#print save_url_content('aa','dfsf')
#print save_url_content('http://www.baidu.com','fdsfsd')
#print save_url_content('http://www.baidu.com','F:\\')
# Count the number of links in the page at the given URL
def get_url_list(url):
    """Fetch *url* and return the number of '<a href=' anchors in the page.

    Returns 0 for URLs whose scheme is not http:// or https://
    (nothing is fetched in that case).
    """
    # `or`, not `and`: no URL starts with both schemes, so the original
    # condition rejected every URL and then fetched it anyway.
    if not (url.startswith('http://') or url.startswith('https://')):
        return 0
    content = urllib.urlopen(url).read()
    # str.count is the direct form of len(content.split(sep)) - 1.
    return content.count('<a href=')
#print get_url_list("http://www.baidu.com")
# Process every file under a directory tree recursively; without recursion an
# explicit stack would be needed to remember the traversal state.
import sys
# sys.setrecursionlimit(1000000)  # raises the maximum recursion depth if needed
'''
#open(file_path,'ab+')
if not os.path.exists(folder_path):
return 'no exist!'
print len(os.listdir(folder_path))
for f in os.listdir(folder_path):
file_path = os.path.join(folder_path,f)
print file_path
if os.path.isdir(file_path):
merge(folder_path)
else:
merge_file = open('F:\\merge_test','ab+')
content = open(file_path,'r').read()
print content
merge_file.write(content)
merge_file.close()
'''
import os

# Counter of how many sub-directories merge() has descended into.
# (The original module-level `global x` statement was a no-op and is removed.)
x = 0

def merge(folder_path, output_path='F:\\merge_test'):
    """Recursively append the contents of every file under *folder_path*
    to the file *output_path* (created on first use).

    Parameters:
        folder_path: root of the directory tree to walk.
        output_path: destination file; defaults to the original
            hard-coded path for backward compatibility.

    Returns 'not exists' when *folder_path* is missing, else None.
    Side effect: increments the module-level counter ``x`` once per
    sub-directory encountered.
    """
    global x
    if not os.path.exists(folder_path):
        return 'not exists'
    for name in os.listdir(folder_path):
        file_path = os.path.join(folder_path, name)
        if os.path.isdir(file_path):
            x += 1
            merge(file_path, output_path)
        else:
            # Read in binary so bytes are copied verbatim: mode 'r' would
            # apply newline translation on Windows while the output is
            # written in binary append mode.
            with open(file_path, 'rb') as src:
                content = src.read()
            # NOTE: 'ab+' appends, so repeated runs keep growing the output
            # file; truncate it beforehand if a fresh merge is wanted.
            with open(output_path, 'ab+') as dst:
                dst.write(content)
# Demo run: merge F:\test and report the directory counter.
# print(<single arg>) behaves identically under Python 2's print statement.
print("print merge('F:\\test')")
print(merge('F:\\test'))
print(x)
import urlparse
def qs(url):
    """Parse the query string of *url* into a dict mapping each
    parameter name to its first value.

    Example: qs('http://api/api?f=56&q=5') -> {'f': '56', 'q': '5'}
    """
    # Guarded import keeps the function working on both Python 2
    # (urlparse module) and Python 3 (urllib.parse).
    try:
        import urlparse as _parse
    except ImportError:
        from urllib import parse as _parse
    query = _parse.urlparse(url).query
    # parse_qs maps each key to a list of values; keep only the first,
    # matching the original dict([(k, v[0]) ...]) behavior.
    return {k: v[0] for k, v in _parse.parse_qs(query).items()}
#print qs('http://126.com')
# Demo runs of the query-string parser.
# print(<single arg>) behaves identically under Python 2's print statement.
print(qs('http://api/api?f=56&q=5&y=5'))
print(qs('http://api/api?ll=53'))
def delete(folder_path):
    """Recursively remove every regular file under *folder_path*.

    Directories themselves are left in place (only their files are
    removed).  Returns 'no exist!' when *folder_path* does not exist,
    otherwise None.
    """
    if not os.path.exists(folder_path):
        return 'no exist!'
    for entry in os.listdir(folder_path):
        entry_path = os.path.join(folder_path, entry)
        if os.path.isdir(entry_path):
            # Descend into sub-directories and clean them out too.
            delete(entry_path)
            continue
        os.remove(entry_path)
delete('F:\\test')
# Python self-study -- assignment 9
# (blog footer: latest recommended article published 2024-04-22 00:00:00)