Scala implementation:
package com.lm.spark.sql

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object RDD2DataFrame {

  // The case class must be declared outside main (see the note below)
  // so that toDF() can derive the schema by reflection.
  case class Person(id: Int, name: String, age: Int)

  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("RDDTODATAFRAME").setMaster("local")
    val sc = new SparkContext(conf)
    val sqlcontext = new SQLContext(sc)
    import sqlcontext.implicits._

    // Parse each comma-separated line of the input file into a Person.
    val lines = sc.textFile("resources/person.txt")
    val df = lines.map(_.split(",")).map { splited =>
      Person(splited(0).trim().toInt, splited(1), splited(2).trim().toInt)
    }.toDF()

    // Register the DataFrame as a temporary table and query it with SQL.
    df.registerTempTable("persons")
    val bigDatas = sqlcontext.sql("select * from persons where age >= 6")

    // Collect the matching rows to the driver and print them.
    val personList = bigDatas.javaRDD.collect()
    for (p <- personList.toArray) {
      println(p)
    }
    sc.stop()
  }
}
Note: the case class
case class Person(id: Int, name: String, age: Int)
must be defined outside the main function; if it is declared inside main, Scala reflection cannot produce a TypeTag for it and toDF() fails to derive the schema.
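Both implementations assume person.txt holds one record per line as comma-separated id, name, age. The rows below are a made-up illustration of that format, not data from the original:

1,zhangsan,25
2,lisi,30
3,wangwu,5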
Java implementation:
package org.lm.spark.sql;
import java.util.List;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
public class RDD2DataFrameByReflection {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("RDD2DATAFRAME").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        SQLContext sqlcontext = new SQLContext(sc);

        // Parse each comma-separated line of the input file into a Person bean.
        JavaRDD<String> lines = sc.textFile("D:\\workspace\\SparkApps\\resources\\person.txt");
        JavaRDD<Person> persons = lines.map(new Function<String, Person>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Person call(String line) throws Exception {
                String[] splited = line.split(",");
                Person p = new Person();
                p.setId(Integer.valueOf(splited[0].trim()));
                p.setName(splited[1].trim());
                p.setAge(Integer.valueOf(splited[2].trim()));
                return p;
            }
        });

        // Infer the schema from the Person JavaBean via reflection.
        Dataset<Row> df = sqlcontext.createDataFrame(persons, Person.class);
        df.registerTempTable("persons");
        Dataset<Row> bigdatas = sqlcontext.sql("select * from persons where age >= 6");
        JavaRDD<Row> bigdataRDD = bigdatas.javaRDD();

        // Alternatively, the raw rows can be printed directly:
        // List<Row> resultrdd = bigdataRDD.collect();
        // for (Row r : resultrdd) {
        //     System.out.println(r);
        // }

        // Map each Row back to a Person. Note: reflection-based schema
        // inference orders the columns alphabetically by property name
        // (age, id, name), so the column indices below do not match the
        // field order declared in the Person class.
        JavaRDD<Person> result = bigdataRDD.map(new Function<Row, Person>() {
            private static final long serialVersionUID = 1L;

            @Override
            public Person call(Row row) throws Exception {
                Person p = new Person();
                p.setId(row.getInt(1));      // column 1: id
                p.setName(row.getString(2)); // column 2: name
                p.setAge(row.getInt(0));     // column 0: age
                return p;
            }
        });

        List<Person> personlist = result.collect();
        for (Person p : personlist) {
            System.out.println(p);
        }
        sc.stop();
    }
}
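If you are on Spark 2.x (the Dataset<Row> imports above already are), the manual Row-to-Person mapping and its index bookkeeping can be avoided with a bean encoder. A minimal sketch, assuming the same Person class and an added import of org.apache.spark.sql.Encoders:

// Decode the query result directly into Person objects; the encoder matches
// columns to bean properties by name, so no index juggling is needed.
Dataset<Person> typed = bigdatas.as(Encoders.bean(Person.class));
for (Person p : typed.collectAsList()) {
    System.out.println(p);
}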
Note: the Person class must be declared public and placed in its own source file; otherwise Spark's reflection-based schema inference cannot access the bean's properties.
package org.lm.spark.sql;

import java.io.Serializable;

// JavaBean used for reflection-based schema inference; it must implement
// Serializable because instances are shipped between Spark tasks.
public class Person implements Serializable {
    private static final long serialVersionUID = 1L;

    private int id;
    private String name;
    private int age;

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public int getAge() {
        return age;
    }

    public void setAge(int age) {
        this.age = age;
    }

    @Override
    public String toString() {
        return "Person [id=" + id + ", name=" + name + ", age=" + age + "]";
    }
}
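As noted in the comments in main, reflecting over this JavaBean yields columns sorted alphabetically by property name (age, id, name), which is why the Row accessors use shuffled indices. To check the ordering in your own environment, you can print the inferred schema right after createDataFrame; printSchema is a standard Dataset method, and the alphabetical listing is what Spark's bean introspection is expected to produce:

// Placed after createDataFrame in main: prints the inferred schema,
// which should list the columns as age, id, name.
df.printSchema();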