Spark pattern matching: a problem point

The snippet below filters tuple records with a match expression inside mapPartitions. The open question, noted in the commented-out line, is why the same match does not work when the data is read from a file instead of being built with parallelize.

import org.apache.spark.{SparkConf, SparkContext}

object hello {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("WordFreq_Spark")
      .setMaster("local")
    val sc = new SparkContext(conf)
    val rdddata = sc.parallelize(Array(("201800001", 83, 1), ("201900002", 38, 2), ("201900003", 90, 3)), 3)
    //val rdddata = sc.textFile("D:\\\\杂七杂八\\\\瞎画\\\\test.csv")   // question: why doesn't the file-based form work? (see the sketch after this listing)
    // mapPartitions works on one partition's iterator at a time; the match
    // keeps records whose third field (num) is at most 10 and formats them.
    val rdddata02 = rdddata.mapPartitions(iter => {
      var result = List[String]()
      while (iter.hasNext) {
        result = iter.next() match {
          case (id, grade, num) if num <= 10 => id + "_has_grade_" + grade :: result
          case _ => result
        }
      }
      result.iterator
    })
    rdddata02.foreach(println)
  }
}
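
As for the question in the comment: sc.textFile returns an RDD[String] with one element per line of the file, so each element is a plain String and can never match the tuple pattern (id, grade, num); matching a String against a tuple pattern does not even compile. A minimal sketch of the fix, continuing the listing above and assuming test.csv holds comma-separated lines such as 201800001,83,1 (the path below is a placeholder, not the author's):

    // Parse each CSV line into a (String, Int, Int) tuple before matching;
    // after this step the mapPartitions/match code above works unchanged.
    val rdddataFromFile = sc.textFile("D:\\test.csv")   // placeholder path
      .map(_.split(","))                                // "201800001,83,1" -> Array("201800001", "83", "1")
      .map(f => (f(0), f(1).toInt, f(2).toInt))         // -> ("201800001", 83, 1)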
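
As a side note, the manual while loop can be written more compactly with Iterator.collect, which applies a partial function and keeps only the elements the pattern matches. This variant is my rewrite, not from the original post; it also preserves input order, whereas the original prepends to a list and so reverses each partition:

    val rdddata03 = rdddata.mapPartitions(_.collect {
      case (id, grade, num) if num <= 10 => id + "_has_grade_" + grade
    })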

Reposted from www.cnblogs.com/tangsonghuai/p/12005818.html