
Learning Spark Python API Functions: pyspark API (3) – 过往记忆

 dazheng 2015-11-05

histogram

# histogram (example #1)
x = sc.parallelize([1,3,1,2,3])
y = x.histogram(buckets = 2)
print(x.collect())
print(y)

[1, 3, 1, 2, 3]
([1, 2, 3], [2, 3])

# histogram (example #2)
x = sc.parallelize([1,3,1,2,3])
y = x.histogram([0,0.5,1,1.5,2,2.5,3,3.5])
print(x.collect())
print(y)

[1, 3, 1, 2, 3]
([0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5], [0, 0, 2, 0, 1, 0, 2])
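A note on the bucket semantics (my own summary, not from the original post): with buckets = 2, histogram splits the range [min, max] into two evenly sized buckets, and every bucket is half-open except the last, which includes its upper bound. A minimal sketch, assuming the same SparkContext sc as above:

# two values (1, 1) fall in [1, 2); three values (2, 3, 3) fall in [2, 3],
# because 3 equals the upper bound of the last, closed bucket
buckets, counts = sc.parallelize([1,3,1,2,3]).histogram(buckets = 2)
print(buckets)  # [1, 2, 3]
print(counts)   # [2, 3]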

mean

# mean
x = sc.parallelize([1,3,2])
y = x.mean()
print(x.collect())
print(y)

[1, 3, 2]
2.0
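If several of the statistics below are needed at once, RDD.stats() computes them in a single pass over the data and returns a StatCounter; a minimal sketch (this usage is my addition, not from the original post):

st = sc.parallelize([1,3,2]).stats()
print(st.mean())      # 2.0
print(st.variance())  # population variance, same as x.variance()
print(st.stdev())     # population standard deviation, same as x.stdev()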

variance

# variance
x = sc.parallelize([1,3,2])
y = x.variance()  # divides by N
print(x.collect())
print(y)

[1, 3, 2]
0.666666666667

stdev

# stdev
x = sc.parallelize([1,3,2])
y = x.stdev()  # divides by N
print(x.collect())
print(y)

[1, 3, 2]
0.816496580928

sampleStdev

# sampleStdev
x = sc.parallelize([1,3,2])
y = x.sampleStdev()  # divides by N-1
print(x.collect())
print(y)

[1, 3, 2]
1.0

sampleVariance

# sampleVariance
x = sc.parallelize([1,3,2])
y = x.sampleVariance()  # divides by N-1
print(x.collect())
print(y)

[1, 3, 2]
1.0
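To make the N versus N-1 distinction concrete, the same numbers can be checked locally against Python's statistics module (a sketch of my own, no Spark required):

import statistics

data = [1, 3, 2]
print(statistics.pvariance(data))  # 0.666..., divides by N, matches variance()
print(statistics.variance(data))   # 1.0, divides by N-1, matches sampleVariance()
print(statistics.pstdev(data))     # 0.816..., matches stdev()
print(statistics.stdev(data))      # 1.0, matches sampleStdev()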

countByValue

# countByValue
x = sc.parallelize([1,3,1,2,3])
y = x.countByValue()
print(x.collect())
print(y)

[1, 3, 1, 2, 3]
defaultdict(<type 'int'>, {1: 2, 2: 1, 3: 2})
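countByValue is an action: the whole defaultdict is materialized on the driver, so it only suits data with few distinct values. For high-cardinality data, the usual distributed equivalent (my sketch, not from the original post) keeps the counts in an RDD:

from operator import add

x = sc.parallelize([1,3,1,2,3])
counts = x.map(lambda v: (v, 1)).reduceByKey(add)
print(counts.collect())  # [(1, 2), (2, 1), (3, 2)] -- order may vary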

top

# top
x = sc.parallelize([1,3,1,2,3])
y = x.top(num = 3)
print(x.collect())
print(y)

[1, 3, 1, 2, 3]
[3, 3, 2]

takeOrdered

# takeOrdered
x = sc.parallelize([1,3,1,2,3])
y = x.takeOrdered(num = 3)
print(x.collect())
print(y)

[1, 3, 1, 2, 3]
[1, 1, 2]
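top and takeOrdered are mirror images: top returns the largest elements in descending order, takeOrdered the smallest in ascending order. Both also accept a key function, so either can reproduce the other; a small sketch of my own:

x = sc.parallelize([1,3,1,2,3])
print(x.takeOrdered(3, key=lambda v: -v))  # [3, 3, 2], same as x.top(3)
print(x.top(3, key=lambda v: -v))          # [1, 1, 2], same as x.takeOrdered(3)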

take

# take
x = sc.parallelize([1,3,1,2,3])
y = x.take(num = 3)
print(x.collect())
print(y)

[1, 3, 1, 2, 3]
[1, 3, 1]

first

# first
x = sc.parallelize([1,3,1,2,3])
y = x.first()
print(x.collect())
print(y)

[1, 3, 1, 2, 3]
1

collectAsMap

# collectAsMap
x = sc.parallelize([('C',3),('A',1),('B',2)])
y = x.collectAsMap()
print(x.collect())
print(y)

[('C', 3), ('A', 1), ('B', 2)]
{'A': 1, 'C': 3, 'B': 2}
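Because the result is a plain Python dict built on the driver, duplicate keys collapse and the later entry wins; a minimal sketch of my own illustrating this:

x = sc.parallelize([('A',1),('A',2),('B',3)])
print(x.collectAsMap())  # {'A': 2, 'B': 3} -- the later ('A', 2) overwrites ('A', 1)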

keys

# keys
x = sc.parallelize([('C',3),('A',1),('B',2)])
y = x.keys()
print(x.collect())
print(y.collect())

[('C', 3), ('A', 1), ('B', 2)]
['C', 'A', 'B']

values

# values
x = sc.parallelize([('C',3),('A',1),('B',2)])
y = x.values()
print(x.collect())
print(y.collect())

[('C', 3), ('A', 1), ('B', 2)]
[3, 1, 2]

reduceByKey

# reduceByKey
x = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])
y = x.reduceByKey(lambda agg, obj: agg + obj)
print(x.collect())
print(y.collect())

[('B', 1), ('B', 2), ('A', 3), ('A', 4), ('A', 5)]
[('A', 12), ('B', 3)]
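The function passed to reduceByKey must be associative and commutative, because values are first merged inside each partition and the partial results are then merged across partitions. For sums, operator.add is the idiomatic choice; a word-count-style sketch of my own:

from operator import add

words = sc.parallelize(['a', 'b', 'a', 'c', 'b', 'a'])
counts = words.map(lambda w: (w, 1)).reduceByKey(add)
print(counts.collect())  # [('a', 3), ('b', 2), ('c', 1)] -- order may vary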

reduceByKeyLocally

# reduceByKeyLocally
x = sc.parallelize([('B',1),('B',2),('A',3),('A',4),('A',5)])
y = x.reduceByKeyLocally(lambda agg, obj: agg + obj)
print(x.collect())
print(y)

[('B', 1), ('B', 2), ('A', 3), ('A', 4), ('A', 5)]
{'A': 12, 'B': 3}
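The difference from reduceByKey is the return type: reduceByKeyLocally is an action that merges all results into a Python dict on the driver, while reduceByKey stays distributed and returns an RDD. A quick sketch of the contrast (my own):

x = sc.parallelize([('B',1),('B',2),('A',3)])
print(type(x.reduceByKey(lambda a, b: a + b)))         # an RDD subclass, still distributed
print(type(x.reduceByKeyLocally(lambda a, b: a + b)))  # <class 'dict'>, on the driver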
