Spark Examples

1. Parquet

import org.apache.spark.sql.SparkSession

object testSparkReadParquet {
    def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().appName("TestSparkSession").master("local").getOrCreate()

        // Read a Parquet file into a DataFrame and inspect its schema
        val df = spark.read.parquet("D:\\tools\\testSparkFile\\users.parquet")
        df.printSchema()

        df.select("name", "favorite_color", "favorite_numbers").show()

        // Write two of the columns back out as Parquet, overwriting any existing output
        df.select("name", "favorite_color").write.mode("overwrite").save("D:\\tools\\testSparkFile\\namesAndFavColors.parquet")

        // Read the file that was just written and verify its schema
        val df2 = spark.read.parquet("D:\\tools\\testSparkFile\\namesAndFavColors.parquet")
        df2.printSchema()
    }
}
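
Besides the DataFrame API used above, the same Parquet data can also be queried with SQL by registering it as a temporary view. A minimal sketch, assuming the same users.parquet file; the view name and the query itself are illustrative, not part of the original post:

import org.apache.spark.sql.SparkSession

object TestParquetSql {
    def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().appName("TestParquetSql").master("local").getOrCreate()

        // Register the Parquet file as a temporary view so it can be queried with SQL
        val df = spark.read.parquet("D:\\tools\\testSparkFile\\users.parquet")
        df.createOrReplaceTempView("users")

        // Illustrative query against the view
        spark.sql("SELECT name, favorite_color FROM users WHERE favorite_color IS NOT NULL").show()

        spark.stop()
    }
}
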
import org.apache.spark.sql.SparkSession

object TestParquet {
    def main(args: Array[String]): Unit = {
        val spark = SparkSession
                    .builder()
                    .appName("Java Spark SQL basic example")
                    .config("spark.some.config.option", "some-value")
                    .master("local")
                    .getOrCreate()
        import spark.implicits._

        // Write a DataFrame of (value, square) pairs into partition directory key=1
        val squaresDF = spark.sparkContext.makeRDD(1 to 5).map(i => (i, i * i)).toDF("value", "square")
        squaresDF.write.mode("append").parquet("D:\\tools\\testSparkFile\\test\\key=1")

        // Write a DataFrame of (value, cube) pairs into partition directory key=2
        val cubesDF = spark.sparkContext.makeRDD(6 to 10).map(i => (i, i * i * i)).toDF("value", "cube")
        cubesDF.write.mode("append").parquet("D:\\tools\\testSparkFile\\test\\key=2")

        // Read the partitioned table with schema merging enabled;
        // the partition column "key" is inferred from the directory names
        val mergedDF = spark.read.option("mergeSchema", "true").parquet("D:\\tools\\testSparkFile\\test\\")
        mergedDF.select("value", "square", "key").show()
        mergedDF.printSchema()
    }
}
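
For reference: with mergeSchema enabled the two file schemas are reconciled, and the partition column key is picked up from the key=1/key=2 directory names, so the final printSchema() call should print roughly the following:

root
 |-- value: integer (nullable = true)
 |-- square: integer (nullable = true)
 |-- cube: integer (nullable = true)
 |-- key: integer (nullable = true)
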

2. DataFrame

import org.apache.spark.sql.SparkSession

object DFExample {
    case class Student(id: Int, name: String, phone: String, email: String, age: Int)

    def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder().appName("DFExample").master("local").getOrCreate()
        import spark.implicits._

        // Load a pipe-delimited text file, map each line to a Student, and convert to a DataFrame
        val studentDF = spark.sparkContext.textFile("D:\\tools\\testSparkFile\\dfTestFile.txt")
            .map(_.split("\\|"))
            .map(line => Student(line(0).trim.toInt, line(1).trim, line(2).trim, line(3).trim, line(4).trim.toInt))
            .toDF()

        val studentDF2 = studentDF

        studentDF.show()

        // Filter out rows whose name is empty or the literal string "NULL"
        // studentDF.filter("name != '' OR name != 'NULL'").show()  // has no effect: no name can be both '' and 'NULL', so the OR condition is true for every row
        studentDF.filter("name != 'NULL'").filter("name != ''").show()

        // Find students whose name starts with "l"
        studentDF.filter("substr(name,0,1) = 'l'").show()

        // spark.sql("show functions").show(2000)
        import org.apache.spark.sql.functions._

        // Sort by name, ascending and then descending
        studentDF.sort("name").show()
        studentDF.sort(studentDF.col("name").desc).show()

        // Rename a column
        studentDF.select(studentDF.col("name").as("student_name")).show()

        // Join the DataFrame with itself on id
        studentDF.join(studentDF2, studentDF.col("id") === studentDF2.col("id")).sort(studentDF.col("id")).show()
    }
}
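
The DFExample code assumes dfTestFile.txt is a pipe-delimited file with one student per line in the order id|name|phone|email|age. The original post does not show the file; hypothetical sample rows like these would match the Student case class and exercise the empty/"NULL"-name filters and the "starts with l" filter:

1|lily|13800000001|lily@example.com|20
2|lucy|13800000002|lucy@example.com|22
3|NULL|13800000003|unknown@example.com|21
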


Reposted from www.cnblogs.com/redhat0019/p/11423811.html