对Twitter进行数据收集: 首先创建相应的文件,用于储存读取的数据
例:
os.path.join(os.path.expanduser("~"), "Data", "twitter")
或者写入数据
with open(file_name, 'w') as outf:
    json.dump(data, outf)#写入json格式的文件(注意:json.dump直接写入文件对象;json.dumps返回字符串;写入需以'w'模式打开)
保存训练模型或使用训练模型:
from sklearn.externals import joblib#注意:scikit-learn 0.23起已移除,新版本请直接 import joblib
joblib.dump(model_name, output_filename)#将模型对象model_name保存到文件output_filename中
model = joblib.load(output_filename)#从保存的文件中载入相应模型
model.predict(train_x)
通过从tweet内容中选出与目标内容相关的
例:
[tweets[i] for i in range(len(tweets)) if y_predict[i]==1]
#tweets中被预测分类为1的全为目标内容
获取好友信息
直接构建获取好友的函数
def get_friends(t, user_id):
    """Collect the friend ids of `user_id` via the Twitter API.

    Twitter pages results with cursors: cursor == -1 requests the first
    page and cursor == 0 signals that the last page has been consumed.
    Collection stops early once 10,000 ids have been gathered.
    """
    friends = []
    cursor = -1  # -1 asks for the first page of results
    results = None  # fix: must exist before the try so the except clause can inspect it
    while cursor != 0:  # cursor == 0 means the last page was reached
        try:
            # Ask the Twitter API for up to `count` friend ids per page
            results = t.friends.ids(user_id=user_id, cursor=cursor, count=5000)
            friends.extend(results['ids'])
            cursor = results['next_cursor']  # advance to the next page (think of it as a page number)
            if len(friends) >= 10000:
                break
            if cursor != 0:
                print("Collected {} friends so far, but there are more".format(len(friends)))
                sys.stdout.flush()  # fix: original `sys.stdout.flush` never called the method
        except TypeError as e:
            # A TypeError while results is still None usually means the
            # rate limit was hit: wait five minutes, then retry the loop.
            if results is None:
                print("You probably reached your API limit, waiting for 5 minutes")
                sys.stdout.flush()  # flush as we go instead of buffering until the end
                time.sleep(5 * 60)  # 5 minute wait
            else:
                raise e
        except twitter.TwitterHTTPError as e:
            break
        finally:
            time.sleep(60)  # wait 1 minute between requests to stay under rate limits
    return friends
构建网络进行说明
从最初得到的相关人士进行遍历,新建friends={},键为user_id,值为好友id
friends = {user_id:friends[user_id] for user_id in friends
if len(friends[user_id]) > 0}
由于相关用户太少,所以从现有用户好友中选取关系网最大,最密集的人
所以先统计好友数量
def count_friends(friends):
    """Return a mapping {friend_id: number of our users who follow it}."""
    occurrences = defaultdict(int)
    # Flatten every user's friend list into one stream of ids, then tally.
    all_ids = (fid for id_list in friends.values() for fid in id_list)
    for fid in all_ids:
        occurrences[fid] += 1
    return occurrences
通过计算关系最大的好友并对其进行排序sorted()
# Keep expanding the network until we have the friend lists of 150 users.
while len(friends) < 150:
    # Pick the most-followed candidate we have not crawled yet
    # (skipping the seed account 467407284).
    for user_id, count in best_friends:
        if user_id not in friends and str(user_id) != '467407284':
            break
    # NOTE(review): `user_id` leaks out of the for loop; if no candidate
    # matched, the last entry of best_friends would be (re)fetched.
    print("Getting friends of user {}".format(user_id))
    sys.stdout.flush()
    friends[user_id] = get_friends(t, user_id)
    print("Received {} friends".format(len(friends[user_id])))
    print("We now have the friends of {} users".format(len(friends)))
    sys.stdout.flush()
    # Fold the newly fetched ids into the running tally...
    for friend in friends[user_id]:
        friend_count[friend] += 1
    # ...and re-rank candidates by how many of our users follow them.
    best_friends = sorted(friend_count.items(), key=itemgetter(1), reverse=True)
ps:python中的字典与json格式可以轻松转换
创建关系网络图
pip install networkx
import networkx as nx
# Build a directed graph: one vertex per crawled user, one edge per
# "user follows friend" relation among those users.
G = nx.DiGraph()
main_users = friends.keys()
G.add_nodes_from(main_users)  # create the vertices
for user_id in friends:
    for friend in friends[user_id]:
        # only draw edges between users we actually crawled
        if friend in main_users:
            G.add_edge(user_id, friend)  # create the edge
G
nx.draw(G)  # render the network
可以将图设置为长方形的图
nx.draw(G, alpha=0.1, edge_color='b', node_color='g', node_size=2000)
创建用户关系图得用到杰克卡得相似系数
def compute_similarity(friends1, friends2):
    """Jaccard similarity of two friend collections: |A∩B| / |A∪B|.

    Both arguments must be sets (convert friend lists with set() first,
    since & and | are the set intersection/union operators).
    Fix: returns 0.0 when both sets are empty instead of raising
    ZeroDivisionError.
    """
    union = friends1 | friends2
    if not union:
        return 0.0
    return len(friends1 & friends2) / len(union)
#画用户相似图
def create_graph(followers, threshold=0):
    """Build a weighted user-similarity graph.

    followers: dict mapping user_id -> set of friend ids.
    threshold: minimum Jaccard similarity required to add an edge.

    Fix: the original body read the global `friends` instead of the
    `followers` parameter, so the argument was silently ignored.
    """
    G = nx.Graph()
    for user1 in followers:
        for user2 in followers:
            if user1 == user2:
                continue
            weight = compute_similarity(followers[user1], followers[user2])
            if weight >= threshold:  # keep only sufficiently similar pairs
                G.add_node(user1)
                G.add_node(user2)
                G.add_edge(user1, user2, weight=weight)
    return G
利用spring_layout将关系图展示的好看些;
具体用法如下:
pos = nx.spring_layout(G)# compute node positions with the spring (force-directed) layout
nx.draw_networkx_nodes(G, pos)# place the vertices at the positions chosen by the layout
edgewidth = [ d['weight'] for (u,v,d) in G.edges(data=True)]# collect each edge's weight
nx.draw_networkx_edges(G, pos, width=edgewidth)# draw edges, line width taken from the weights
寻找子图:
类似于聚类-
sub_graphs = (G.subgraph(c) for c in nx.connected_components(G))#寻找图中的连通分支,sub_graphs为生成器(旧接口nx.connected_component_subgraphs已于networkx 2.4移除)
nx.draw(list(sub_graphs)[index])#画相应index中的连通分支图
fig = plt.figure(figsize=(宽, 高))#注意figsize参数为(宽, 高),单位为英寸
fig.add_subplot()#对画的图确定好位置
#silhouette_score为计算总轮廓系数,它的参数为关系图中各顶点间的相似值Weight以及连通子图的标签
from sklearn.metrics import silhouette_score
def compute_silhouette(threshold, friends):
    """Silhouette score of the clustering induced by `threshold`.

    Builds the similarity graph for `friends`, labels each node by the
    connected component it belongs to, and scores that partition.
    Returns -99 as a sentinel when the graph is degenerate: fewer than
    2 nodes, or a component count outside the range silhouette_score
    accepts (it needs 2 <= n_labels <= n_samples - 1).
    Fix: removed leftover debug prints and dead commented-out code.
    """
    G = create_graph(friends, threshold=threshold)
    if len(G.nodes()) < 2:
        return -99
    if not (2 <= nx.number_connected_components(G) < len(G.nodes()) - 1):
        return -99
    # Label every node with the index of its connected component.
    label_dict = {}
    for i, component in enumerate(nx.connected_components(G)):
        for node in component:
            label_dict[node] = i
    labels = np.array([label_dict[node] for node in G.nodes()])
    X = nx.to_scipy_sparse_matrix(G)
    # NOTE(review): metric='precomputed' expects a *distance* matrix, but X
    # holds similarity weights — the original hinted at X = 1 - X; confirm.
    return silhouette_score(X, labels, metric='precomputed')