Implementing PageRank with PySpark
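PageRank repeatedly spreads each page's current rank over its out-links and then recombines the contributions each page receives, dampened toward a baseline. The simplified update both snippets below implement (damping factor 0.85, no normalization by the total number of pages) is: new_rank(u) = 0.15 + 0.85 * sum of rank(v) / out_degree(v) over all pages v that link to u. The generator compute_contribs (and the function f in the second snippet) produces the rank(v) / out_degree(v) terms, and reduceByKey plus mapValues sums them and applies the damping.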

#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import re
import sys
from operator import add
from pyspark import SparkConf, SparkContext

def compute_contribs(urls, rank):
    """
    Distribute a url's rank evenly among its adjacent urls.
    Args:
        urls: the urls adjacent to the target url
        rank: the current rank of the target url

    Yields:
        url: one of the adjacent urls
        rank: the contribution of rank that url receives
    """
    num_urls = len(urls)
    for url in urls:
        yield (url, rank / num_urls)


def split_url(url_line):
    """
    Split one line into a (url, neighbor_url) pair.
    Args:
        url_line: one line of input, e.g. "1 2"

    Returns:
        url, neighbor_url
    """
    parts = re.split(r'\s+', url_line)  # split on whitespace
    return parts[0], parts[1]


def compute_pagerank(sc, url_data_file, iterations):
    """
    Compute the rank of each page.
    Args:
        sc: SparkContext
        url_data_file: path to the input data file
        iterations: number of iterations

    Returns:
        status: 0 on success
    """

    # Read the url file, e.g. ['1 2', '1 3', '2 1', '3 1']
    lines = sc.textFile(url_data_file)

    # Build a pair RDD of (url, neighbor_urls), e.g. [('1', ['2', '3']), ('2', ['1']), ('3', ['1'])]
    links = lines.map(split_url).distinct().groupByKey().mapValues(list).cache()
    # Initialize every url's rank to 1.0, e.g. [('1', 1.0), ('2', 1.0), ('3', 1.0)]
    ranks = links.map(lambda url_neighbors: (url_neighbors[0], 1.0))

    for i in range(iterations):
        # join links and ranks into (url, (neighbor_urls, rank)),
        # then distribute the current url's rank to each of its neighbor urls as (url, contribution)
        contribs = links.join(ranks).flatMap(
            lambda url_urls_rank: compute_contribs(url_urls_rank[1][0], url_urls_rank[1][1])
            )
        # Sum the contributions received by each url, then apply the damping factor
        ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank * 0.85 + 0.15)

    for (link, rank) in ranks.collect():
        print("%s has rank %s." % (link, rank))

    return 0

if __name__ == '__main__':
    data_file = "/data/zz/data.txt"
    iterations = 10

    conf = SparkConf().setAppName('PythonPageRank')
    sc = SparkContext(conf=conf)

    ret = compute_pagerank(sc, data_file, iterations)
    sys.exit(ret)
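As a usage sketch (not part of the original script): assuming the code above is saved as pagerank.py, its toy graph can be checked locally as below. The module name pagerank, the local[*] master, and the temporary-file approach are illustrative assumptions; in the script itself the input path /data/zz/data.txt is hard-coded.

import tempfile

from pyspark import SparkConf, SparkContext
from pagerank import compute_pagerank  # hypothetical module name for the script above

# Write the four sample edges mentioned in compute_pagerank's comment to a temporary file.
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write("1 2\n1 3\n2 1\n3 1\n")
    sample_path = tmp.name

# In local mode the plain path is read from the local filesystem.
sc = SparkContext(conf=SparkConf().setAppName('PageRankLocalTest').setMaster('local[*]'))
compute_pagerank(sc, sample_path, 10)  # prints "1 has rank ...", "2 has rank ...", "3 has rank ..."
sc.stop()

The standalone snippet below performs a single PageRank update on a small hard-coded four-page graph.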
# coding:utf-8
from pyspark import SparkConf, SparkContext


def f(x):
    # x is (page, (neighbor_pages, rank)); split the rank evenly among the neighbors
    contribs = []
    num_neighbors = len(x[1][0])
    for y in x[1][0]:
        contribs.append((y, x[1][1] / num_neighbors))
    return contribs


if __name__ == "__main__":
    # adjacency list: each page and the tuple of pages it links to
    link_data = [('A', ('D',)), ('B', ('A',)), ('C', ('A', 'B')), ('D', ('A', 'C'))]
    conf = SparkConf().setAppName('PythonPageRank').setMaster('local')
    sc = SparkContext(conf=conf)

    pages = sc.parallelize(link_data).map(lambda x: (x[0], tuple(x[1]))).partitionBy(4).cache()

    # Initialize every page's rank to 1.0
    links = sc.parallelize(['A', 'B', 'C', 'D']).map(lambda x: (x, 1.0))
    # join merges pages and links by key: ('A', ('D',)) and ('A', 1.0) become ('A', (('D',), 1.0))
    # flatMap applies f and flattens the resulting contribution lists
    rank = pages.join(links).flatMap(f)
    # Sum the contributions received by each page
    links = rank.reduceByKey(lambda x, y: x + y)
    # Apply the damping factor
    links = links.mapValues(lambda x: 0.15 + 0.85 * x)
    links.saveAsTextFile("./pagerank")

    # Print the resulting ranks
    for page, pr in links.collect():
        print(page, pr)
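The example above applies only one update step before saving the result to ./pagerank. As a sketch (reusing sc, pages and f defined above), the same join / flatMap / reduceByKey pattern can be repeated for a fixed number of iterations, mirroring the first script:

# Iterate the single update step (a sketch; reuses sc, pages and f from the example above)
ranks = sc.parallelize(['A', 'B', 'C', 'D']).map(lambda x: (x, 1.0))
for _ in range(10):
    contribs = pages.join(ranks).flatMap(f)  # spread each page's rank to the pages it links to
    ranks = contribs.reduceByKey(lambda x, y: x + y).mapValues(lambda x: 0.15 + 0.85 * x)

for page, pr in ranks.collect():
    print(page, pr)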
