RDD Analysis

MapPartitionsRDD Analysis

package org.apache.spark.day02

import org.apache.spark.rdd.{MapPartitionsRDD, RDD}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Operating on an RDD really means operating on each of its partitions, and each partition
 * corresponds to one iterator. Calling map on an RDD therefore applies map to the iterator
 * of every partition.
 *
 * Note: MapPartitionsRDD is private[spark], which is why this demo lives under the
 * org.apache.spark package -- otherwise it could not be instantiated directly.
 */
object MapPartitionsRddDemo {

  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("data").setMaster("local[*]")
    val sc = new SparkContext(conf)

    // Grouped to hint at the three partitions: (1,2,3), (4,5,6), (7,8,9,10)
    val array = Array(1,2,3, 4,5,6, 7,8,9,10)
    val dataRDD: RDD[Int] = sc.parallelize(array, 3)

    val mapRDD: RDD[Int] = dataRDD.map(_ * 10)

    /**
     *   def map[U: ClassTag](f: T => U): RDD[U] = withScope {
     *     val cleanF = sc.clean(f)
     *     new MapPartitionsRDD[U, T](this, (context, pid, iter) => iter.map(cleanF))
     *   }
     */

    // Hand-built equivalent of map(): wrap each partition's iterator with func1.
    val func1 = (e: Int) => e * 10
    val mapPartitionsRdd: MapPartitionsRDD[Int, Int] =
      new MapPartitionsRDD[Int, Int](dataRDD, (_, _, iter) => iter.map(func1))

    println("map(): ", mapRDD.collect().toBuffer)
    println("MapPartitionsRDD--map: ", mapPartitionsRdd.collect().toBuffer)

    /*--------------------------------------------------------------*/
    val filterRDD: RDD[Int] = dataRDD.filter(_ % 2 == 0)

    // Hand-built equivalent of filter(); Spark's own filter() additionally
    // passes preservesPartitioning = true (see its source below).
    val func2 = (e: Int) => e % 2 == 0
    val mapPartitionsRDDFilter: MapPartitionsRDD[Int, Int] =
      new MapPartitionsRDD[Int, Int](dataRDD, (_, _, iter) => iter.filter(func2))

    println("filterRDD: ", filterRDD.collect().toBuffer)
    println("MapPartitionsRDD--filter: ", mapPartitionsRDDFilter.collect().toBuffer)

    sc.stop()
  }

}
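To make the partition-to-iterator correspondence visible, here is a minimal sketch (object name is my own) that tags each element with the index of the partition whose iterator produced it, using the standard mapPartitionsWithIndex API:

package org.apache.spark.day02

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object PartitionIteratorDemo {

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("partitions").setMaster("local[*]"))

    // Same data and partition count as above: 3 partitions, hence 3 iterators.
    val dataRDD: RDD[Int] = sc.parallelize(1 to 10, 3)

    // mapPartitionsWithIndex hands us each partition's iterator together with
    // its index, so we can see exactly which elements each iterator yields.
    val tagged: RDD[String] = dataRDD.mapPartitionsWithIndex { (pid, iter) =>
      iter.map(e => s"partition $pid -> $e")
    }

    tagged.collect().foreach(println)
    // Expected grouping: partition 0 -> 1..3, partition 1 -> 4..6, partition 2 -> 7..10

    sc.stop()
  }
}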

About the map narrow dependency


package cn.huq.day02

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object MapDemo {

  def main(args: Array[String]): Unit = {

    val sc: SparkContext = new SparkContext(new SparkConf().setAppName("map demo").setMaster("local"))

    val dataRDD = sc.textFile(args(0))

    // Each transformation below creates a MapPartitionsRDD with a one-to-one
    // (narrow) dependency on its parent -- no shuffle anywhere in this chain.
    val rdd1: RDD[Int] = dataRDD.map(_.toInt)

    val rdd2: RDD[Int] = rdd1.filter(_ % 2 == 0)

    val rdd3: RDD[Int] = rdd2.map(_ * 10)

    rdd3.saveAsTextFile(args(1))

    sc.stop()
  }

}
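Each map and filter above yields a MapPartitionsRDD whose dependency on its parent is one-to-one, i.e. narrow. A minimal sketch (object name is my own) that verifies this through the public dependencies field and the lineage dump:

package org.apache.spark.day02

import org.apache.spark.{OneToOneDependency, SparkConf, SparkContext}

object NarrowDependencyDemo {

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("deps").setMaster("local"))

    val rdd1 = sc.parallelize(1 to 10, 3)
    val rdd2 = rdd1.map(_ * 10)      // MapPartitionsRDD over rdd1
    val rdd3 = rdd2.filter(_ > 50)   // MapPartitionsRDD over rdd2

    // Every dependency in this chain is a OneToOneDependency: each child
    // partition reads exactly one parent partition, so no shuffle is needed
    // and the whole chain runs as a single stage.
    println(rdd3.dependencies.head.isInstanceOf[OneToOneDependency[_]]) // true
    println(rdd3.toDebugString) // one stage: no ShuffledRDD in the lineage

    sc.stop()
  }
}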

The relevant Spark source, starting with SparkContext.textFile:

  def textFile(
      path: String,
      minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
    assertNotStopped()
    hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
      minPartitions).map(pair => pair._2.toString).setName(path)
  }
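Note that textFile itself already goes through map: the HadoopRDD of (LongWritable, Text) pairs is mapped to just the line text, so even "just reading a file" produces a MapPartitionsRDD. The lineage makes this visible (the path is a placeholder, and the printed output is approximate):

    val lines = sc.textFile("/tmp/data.txt")
    println(lines.toDebugString)
    // (2) /tmp/data.txt MapPartitionsRDD[1] at textFile ...
    //  |  /tmp/data.txt HadoopRDD[0] at textFile ...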
RDD.map and RDD.filter follow the same pattern:

  def map[U: ClassTag](f: T => U): RDD[U] = withScope {
    val cleanF = sc.clean(f)
    new MapPartitionsRDD[U, T](this, (context, pid, iter) => iter.map(cleanF))
  }

  def filter(f: T => Boolean): RDD[T] = withScope {
    val cleanF = sc.clean(f)
    new MapPartitionsRDD[T, T](
      this,
      (context, pid, iter) => iter.filter(cleanF),
      preservesPartitioning = true)
  }
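Two things stand out in these snippets. First, filter passes preservesPartitioning = true because dropping elements can never move a key to another partition, while map may rewrite keys and so makes no such promise. Second, the per-partition function merely wraps the parent's iterator, so chained transformations fuse lazily into a single pass over each partition. A plain-Scala sketch of that fusion (names are my own):

object IteratorFusionDemo {

  def main(args: Array[String]): Unit = {
    // Stand-in for one partition's data.
    val partition: Iterator[Int] = Iterator(1, 2, 3, 4, 5)

    // This is what a map-then-filter chain builds per partition: nested
    // iterator wrappers. Nothing runs yet; both lambdas fire element by
    // element only when the result is consumed (in Spark: when a task runs).
    val fused: Iterator[Int] = partition.map(_ * 10).filter(_ % 20 == 0)

    println(fused.toList) // List(20, 40) -- one pass, no intermediate collection
  }
}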
Finally, RDD.saveAsTextFile:

  def saveAsTextFile(path: String): Unit = withScope {
    val nullWritableClassTag = implicitly[ClassTag[NullWritable]]
    val textClassTag = implicitly[ClassTag[Text]]
    // ... (remainder elided: each line is wrapped as a (NullWritable, Text)
    // pair and written out via saveAsHadoopFile with a text output format)
  }