
GroupByKey


package sparkcore

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo07GroupByKey {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setAppName("GroupByKey").setMaster("local")

    val sc = new SparkContext(conf)

    val linesRDD: RDD[String] = sc.textFile("data/words.txt")
    // flatMap splits each line on commas, turning one line into multiple words
    val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

    // wordsRDD.foreach(println)
    // map each word into key-value form: (word, 1)
    val kvRDD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

    /**
     * groupByKey: groups by key and collects the values into an iterator
     * groupBy: groups by a user-specified key expression
     *
     * Both trigger a shuffle
     */
    val groupByKeyRDD: RDD[(String, Iterable[Int])] = kvRDD.groupByKey()

    val countRDD: RDD[(String, Int)] = groupByKeyRDD.map {
      case (word: String, values: Iterable[Int]) =>
        (word, values.sum)
    }
    countRDD.foreach(println)
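
    // Alternative sketch: for a plain sum like this word count, reduceByKey
    // combines values on the map side before the shuffle, so it moves less
    // data across the network than groupByKey followed by map.
    val reducedRDD: RDD[(String, Int)] = kvRDD.reduceByKey(_ + _)
    reducedRDD.foreach(println)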

    val groupByRDD: RDD[(String, Iterable[(String, Int)])] = kvRDD.groupBy(kv => kv._1)

    groupByRDD.foreach(println)
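
    // Sketch of the same count via groupBy: each group keeps the full
    // (word, 1) tuples, so the count is the sum of the second elements.
    val countFromGroupBy: RDD[(String, Int)] = groupByRDD.map {
      case (word, pairs) => (word, pairs.map(_._2).sum)
    }
    countFromGroupBy.foreach(println)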

    sc.stop()
  }
}
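
If data/words.txt contained, for example, the single line java,scala,java (the file's contents are hypothetical here), countRDD and groupByRDD would print something like:

(java,2)
(scala,1)
(java,CompactBuffer((java,1), (java,1)))
(scala,CompactBuffer((scala,1)))

The first two lines come from countRDD; the groupBy result keeps the whole (word, 1) tuples in each group, which is why its values print as buffers of pairs rather than plain counts.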

Source: https://www.cnblogs.com/xiguabigdata/p/15022271.html