【Spark RDD】RDD Programming

%spark

/* Create an RDD */
val data0=sc.parallelize(Array(1,2,3,3))


/************************************* Transformations on a single RDD ************************************************/
/*map*/
val data1=data0.map(x=>x+1) // add 1 to each element
data1.collect().foreach(print)
println()


/*flatMap*/
val data2=data0.flatMap(x=>x.to(3)) // x.to(3) is the range from x up to 3; flatMap flattens all ranges into one RDD
data2.collect().foreach(print)
println()
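Unlike map, which produces exactly one output element per input element, flatMap lets each input element produce zero or more output elements and flattens them into a single RDD. A minimal sketch of the classic word-splitting use case (lines and words are just illustrative names):

val lines=sc.parallelize(Array("hello spark","hello rdd"))
val words=lines.flatMap(line=>line.split(" ")) // each line can yield several words
words.collect().foreach(println) // hello, spark, hello, rdd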


/*filter*/
val data3=data0.filter(x=>x!=1) // keep only the elements that are not equal to 1
data3.collect().foreach(print)
println()


/*distinct*/
val data4=data0.distinct() // remove duplicates (requires a shuffle)
data4.collect().foreach(print)
println()


/************************************* Actions on a single RDD ************************************************/
/*collect*/
val data5=data0.collect() // return all elements of the RDD to the driver as an array
/*count*/
val data6=data0.count() // number of elements in the RDD
/*countByValue*/
val data7=data0.countByValue() // number of times each element occurs in the RDD
/*take*/
val data8=data0.take(2) // return the first num elements of the RDD (here 2)
/*top*/
val data9=data0.top(2) // return the num largest elements of the RDD (here 2)
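top uses the natural descending ordering, so it returns the largest elements; takeOrdered is its counterpart for the smallest ones. A minimal sketch:

val smallest=data0.takeOrdered(2) // Array(1, 2): the 2 smallest elements, in ascending order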


/*reduce*/
val data10=data0.reduce((x,y)=>(x+y)) // combine all elements in parallel, e.g. a sum


/*fold*/
val data11=data0.fold(0)((x,y)=>(x+y)) // like reduce, but takes an initial (zero) value
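The zero value given to fold is applied once per partition and once more when merging partition results, so it should be the identity of the operation (0 for addition here). When the result type differs from the element type, aggregate is the more general action; a minimal sketch that computes the sum and the count in one pass (sumCount and avg are just illustrative names):

val sumCount=data0.aggregate((0,0))(
  (acc,x)=>(acc._1+x,acc._2+1),   // fold one element into the per-partition accumulator
  (a,b)=>(a._1+b._1,a._2+b._2)    // merge accumulators from different partitions
)
val avg=sumCount._1.toDouble/sumCount._2 // 9 / 4 = 2.25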


/************************************* Transformations on two RDDs ************************************************/
val aa=sc.parallelize(Array(1,2,3))
val bb=sc.parallelize(Array(3,4,5))


/*union*/
val aa_bb_union=aa.union(bb) // an RDD containing all elements of both RDDs (duplicates are kept)
aa_bb_union.collect().foreach(print)
println()


/*intersection*/
val join=aa.intersection(bb) // elements common to both RDDs (duplicates removed; requires a shuffle)
join.collect().foreach(println)


/*subtract*/
val quchu=aa.subtract(bb) // elements of aa that do not appear in bb
quchu.collect().foreach(println)


/*cartesian*/
val cc=aa.cartesian(bb) // Cartesian product with bb: every (a, b) pair, 3 x 3 = 9 pairs here, so the result can be very large
cc.collect().foreach(println)


result:


data0: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[18494] at parallelize at <console>:27
data1: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[18495] at map at <console>:31
2344
data2: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[18496] at flatMap at <console>:30
1232333
data3: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[18497] at filter at <console>:30
233

data4: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[18500] at distinct at <console>:30
213
data5: Array[Int] = Array(1, 2, 3, 3)
data6: Long = 4
data7: scala.collection.Map[Int,Long] = Map(2 -> 1, 1 -> 1, 3 -> 2)
data8: Array[Int] = Array(1, 2)
data9: Array[Int] = Array(3, 3)
data10: Int = 9
data11: Int = 9
aa: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[18505] at parallelize at <console>:31
bb: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[18506] at parallelize at <console>:26
aa_bb_union: org.apache.spark.rdd.RDD[Int] = UnionRDD[18507] at union at <console>:32
123345
join: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[18513] at intersection at <console>:32
3
quchu: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[18517] at subtract at <console>:33
2
1
cc: org.apache.spark.rdd.RDD[(Int, Int)] = CartesianRDD[18518] at cartesian at <console>:32
(1,3)
(1,4)
(1,5)
(2,3)
(3,3)
(2,4)
(2,5)
(3,4)
(3,5)