Project dependencies
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>org.example</groupId>
    <artifactId>spark01</artifactId>
    <version>1.0-SNAPSHOT</version>
    <!-- Repositories, in order: aliyun, cloudera, jboss, and scala-tools -->
    <repositories>
        <repository>
            <id>aliyun</id>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
        </repository>
        <repository>
            <id>cloudera</id>
            <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
        </repository>
        <repository>
            <id>jboss</id>
            <url>http://repository.jboss.com/nexus/content/groups/public</url>
        </repository>
        <repository>
            <id>scala-tools.org</id>
            <name>Scala-tools Maven2 Repository</name>
            <url>http://scala-tools.org/repo-releases</url>
        </repository>
    </repositories>
    <pluginRepositories>
        <pluginRepository>
            <id>scala-tools.org</id>
            <name>Scala-tools Maven2 Repository</name>
            <url>http://scala-tools.org/repo-releases</url>
        </pluginRepository>
    </pluginRepositories>
    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <encoding>UTF-8</encoding>
        <scala.version>2.11.8</scala.version>
        <scala.compat.version>2.11</scala.compat.version>
        <hadoop.version>2.7.4</hadoop.version>
        <spark.version>2.2.0</spark.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-hive-thriftserver_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!--<dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>-->
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql-kafka-0-10_2.11</artifactId>
            <version>${spark.version}</version>
        </dependency>
        <!--<dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.0-mr1-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>1.2.0-cdh5.14.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>1.2.0-cdh5.14.0</version>
        </dependency>-->
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>com.typesafe</groupId>
            <artifactId>config</artifactId>
            <version>1.3.3</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.38</version>
        </dependency>
    </dependencies>
    <build>
        <sourceDirectory>src/main/scala</sourceDirectory>
        <testSourceDirectory>src/test/scala</testSourceDirectory>
        <plugins>
            <!-- plugin for compiling the Java sources -->
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.5.1</version>
            </plugin>
            <!-- plugin for compiling the Scala sources -->
            <plugin>
                <groupId>net.alchim31.maven</groupId>
                <artifactId>scala-maven-plugin</artifactId>
                <version>3.2.2</version>
                <executions>
                    <execution>
                        <goals>
                            <goal>compile</goal>
                            <goal>testCompile</goal>
                        </goals>
                        <configuration>
                            <args>
                                <arg>-dependencyfile</arg>
                                <arg>${project.build.directory}/.scala_dependencies</arg>
                            </args>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <version>2.18.1</version>
                <configuration>
                    <useFile>false</useFile>
                    <disableXmlReport>true</disableXmlReport>
                    <includes>
                        <include>**/*Test.*</include>
                        <include>**/*Suite.*</include>
                    </includes>
                </configuration>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>2.3</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <filters>
                                <filter>
                                    <artifact>*:*</artifact>
                                    <excludes>
                                        <exclude>META-INF/*.SF</exclude>
                                        <exclude>META-INF/*.DSA</exclude>
                                        <exclude>META-INF/*.RSA</exclude>
                                    </excludes>
                                </filter>
                            </filters>
                            <transformers>
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <!-- left blank: fill in the main class, or pass it to spark-submit at submit time -->
                                    <mainClass></mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
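Because the shade plugin is bound to the package phase, an ordinary mvn package already produces a runnable fat jar under target/. Since <mainClass> is left blank above, the entry point has to be passed at submit time; a sketch of the two commands, assuming the Kafka example from section 3 below is the desired entry point:

mvn clean package
spark-submit --master local[*] --class StructStreamingKafka target/spark01-1.0-SNAPSHOT.jar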
1. Reading data from nc over a socket
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * @author 红尘丶世界
 * @version v 1.0
 * Before running, start `nc -lk 9999` on the Linux host.
 */
object StructStreamingSocket {
  def main(args: Array[String]): Unit = {
    // create the SparkSession
    val spark = SparkSession.builder().master("local[*]").appName("StructStreamingSocket").getOrCreate()
    // read from the socket source
    val df: DataFrame = spark.readStream.format("socket")
      .option("host", "hadoop01").option("port", "9999").load()
    // apply the business logic
    import spark.implicits._
    val ds: Dataset[String] = df.as[String]
    // split each line into words
    val word: Dataset[String] = ds.flatMap(_.split(" "))
    // count the words
    val wordCount: Dataset[Row] = word.groupBy("value").count().sort($"count")
    // write out the result
    wordCount.writeStream
      .format("console")                  // print to the console
      .outputMode("complete")             // emit the full result table on every trigger
      .trigger(Trigger.ProcessingTime(0)) // run micro-batches as fast as possible
      .start()
      .awaitTermination()
  }
}
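Complete mode reprints the entire result table on every trigger, which gets expensive as the number of distinct words grows. A minimal sketch of the same query in update mode, which only emits rows that changed in the current batch, reusing the word Dataset from the example above; note that sorting an aggregated stream is only supported in complete mode, so the sort step is dropped here:

// Sketch: the same word count in "update" output mode.
// Sorting requires "complete" mode, so the sort step is removed.
word.groupBy("value").count()
  .writeStream
  .format("console")
  .outputMode("update")                         // only changed rows each trigger
  .trigger(Trigger.ProcessingTime("5 seconds")) // micro-batch at most every 5s
  .start()
  .awaitTermination()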
2. Reading JSON files
The JSON file content is as follows:
{"name":"json","age":23,"hobby":"running"}
{"name":"charles","age":32,"hobby":"basketball"}
{"name":"tom","age":28,"hobby":"football"}
{"name":"lili","age":24,"hobby":"running"}
{"name":"bob","age":20,"hobby":"swimming"}
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import org.apache.spark.sql.types.StructType

/**
 * @author 红尘丶世界
 * @version v 1.0
 */
object StructStreamingJson {
  def main(args: Array[String]): Unit = {
    // create the SparkSession
    val spark: SparkSession = SparkSession.builder().master("local[*]")
      .appName("StructStreamingJson")
      .getOrCreate()
    // set the log level
    spark.sparkContext.setLogLevel("ERROR")
    // define the schema of the input files
    val structType: StructType = new StructType()
      .add("name", "string")
      .add("age", "integer")
      .add("hobby", "string")
    // TODO: the path must point to a directory, not a single file
    val fileData = spark.readStream.schema(structType).json("D:\\dev\\大数据\\大数据资料\\spark\\input\\json")
    // import the implicit conversions
    import spark.implicits._
    // aggregate the data by hobby
    val hobbyDs: Dataset[Row] = fileData.groupBy("hobby").count().sort($"count".asc)
    hobbyDs.writeStream.format("console")
      .outputMode("complete")
      .start()
      .awaitTermination()
  }
}
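The query above is untyped: fields are addressed by string column names. If you want compile-time checking of field access, the same stream can be viewed as a typed Dataset through a case class. A minimal sketch, reusing the spark session, schema, and path from the example above; the case class must be defined at the top level (outside main) so that an Encoder can be derived for it:

// Sketch: the same JSON stream as a typed Dataset.
// Define the case class at top level, not inside main,
// so spark.implicits can derive an Encoder for it.
case class Person(name: String, age: Int, hobby: String)

val people: Dataset[Person] = spark.readStream
  .schema(structType) // streaming file sources require an explicit schema
  .json("D:\\dev\\大数据\\大数据资料\\spark\\input\\json")
  .as[Person]

// typed operations are now checked at compile time
val runners: Dataset[Person] = people.filter(_.hobby == "running")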
3. Kafka integration
Preparation
Create a topic in Kafka:
bin/kafka-topics.sh --zookeeper hadoop01:2181,hadoop02:2181,hadoop03:2181 --create --replication-factor 2 --partitions 2 --topic test01
Simulate a producer sending data:
bin/kafka-console-producer.sh --broker-list hadoop01:9092,hadoop02:9092,hadoop03:9092 --topic test01
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * @author 红尘丶世界
 * @version v 1.0
 * @date 2020.4.16
 */
object StructStreamingKafka {
  def main(args: Array[String]): Unit = {
    // create the SparkSession
    val spark: SparkSession = SparkSession.builder().master("local[*]").appName("StructStreamingKafka")
      .getOrCreate()
    // set the log level
    spark.sparkContext.setLogLevel("ERROR")
    // TODO: read from Kafka; configure the Kafka source options
    val kafkaData: DataFrame = spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092")
      .option("subscribe", "test01").load()
    // process the data
    import spark.implicits._
    // cast the binary key/value columns to string key-value pairs
    val kafkaDataString = kafkaData.selectExpr("CAST(key AS string)", "CAST(value AS string)").as[(String, String)]
    val wordCount = kafkaDataString.flatMap(_._2.split(" "))
      .groupBy("value")
      .count()
      .sort($"count".desc)
    // write out the result
    wordCount.writeStream.format("console")
      .outputMode("complete")
      .start()
      .awaitTermination()
  }
}
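Instead of the console, the counts can be written back to Kafka: as of Spark 2.2, the spark-sql-kafka-0-10 module also provides a Kafka sink. A minimal sketch, reusing kafkaDataString from above and assuming an output topic test01-out already exists; the topic name and checkpoint path are placeholders. The sink reads string (or binary) "key"/"value" columns from the result, and a streaming query with state requires a checkpointLocation. The sort step is dropped because sorting requires complete mode:

// Sketch: writing the word counts back to Kafka instead of the console.
kafkaDataString.flatMap(_._2.split(" "))
  .groupBy("value").count()
  // the Kafka sink picks up the "key" and "value" columns of the result
  .selectExpr("CAST(value AS STRING) AS key", "CAST(count AS STRING) AS value")
  .writeStream
  .format("kafka")
  .option("kafka.bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092")
  .option("topic", "test01-out")                        // placeholder output topic
  .option("checkpointLocation", "/tmp/kafka-sink-ckpt") // placeholder path
  .outputMode("update")
  .start()
  .awaitTermination()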