Prerequisites:
1. Spring Boot is already integrated with Spark.
2. A JavaSparkContext bean has been configured.
3. A SparkSession bean has been configured.
@Resource
private SparkSession sparkSession;
@Resource
private JavaSparkContext javaSparkContext;
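The two beans injected above are assumed to be provided by the application's Spark configuration; a minimal sketch of such a configuration class (the class name SparkConfig, the application name and the local[*] master are assumptions for local testing, not taken from the original) could look like this:
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class SparkConfig {

    // Local master for testing only; point this at a real cluster in production (assumption)
    @Bean
    public SparkSession sparkSession() {
        return SparkSession.builder()
                .appName("springboot-spark-demo")
                .master("local[*]")
                .getOrCreate();
    }

    // Reuse the SparkSession's underlying SparkContext for the RDD API
    @Bean
    public JavaSparkContext javaSparkContext(SparkSession sparkSession) {
        return new JavaSparkContext(sparkSession.sparkContext());
    }
}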
●flatMap: split each line into individual words, so one input element can produce many output elements
●mapToPair: turn every element into a key-value pair, e.g. <123,1>, <456,1>
●reduceByKey: pre-aggregate and then combine all values that share the same key
public void testSparkText() {
    String file = "D:\\TEMP\\word.txt";
    JavaRDD<String> fileRDD = javaSparkContext.textFile(file);
    // Split each line into words
    JavaRDD<String> wordsRDD = fileRDD.flatMap(line -> Arrays.asList(line.split(" ")).iterator());
    // Map each word to a <word, 1> pair
    JavaPairRDD<String, Integer> wordAndOneRDD = wordsRDD.mapToPair(word -> new Tuple2<>(word, 1));
    // Sum the counts for each word
    JavaPairRDD<String, Integer> wordAndCountRDD = wordAndOneRDD.reduceByKey((a, b) -> a + b);
    // Print the result
    List<Tuple2<String, Integer>> result = wordAndCountRDD.collect();
    result.forEach(System.out::println);
}
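For example, if word.txt contained the two lines "hello spark" and "hello world" (a hypothetical input), the printed tuples would be (hello,2), (spark,1) and (world,1); the order in which they appear is not guaranteed.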
public void testSparkCsv() {
    String file = "D:\\TEMP\\testcsv.csv";
    JavaRDD<String> fileRDD = javaSparkContext.textFile(file);
    // Split each line on commas, producing one RDD element per field
    JavaRDD<String> wordsRDD = fileRDD.flatMap(line -> Arrays.asList(line.split(",")).iterator());
    // Print the result
    System.out.println(wordsRDD.collect());
}
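Splitting on commas flattens every field into one string RDD and loses the row/column structure; when the structure matters, the same file can be loaded through Spark's built-in CSV data source instead (a sketch; the method name is illustrative, and the header option assumes the first line of the test file holds column names):
public void testSparkCsvAsDataset() {
    String file = "D:\\TEMP\\testcsv.csv";
    // Built-in CSV source; "header" = "true" treats the first line as column names (assumption)
    Dataset<Row> csvDF = sparkSession.read()
            .option("header", "true")
            .csv(file);
    csvDF.printSchema();
    csvDF.show();
}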
●option.url: the MySQL JDBC connection URL
●option.user: the MySQL user name
●option.password: the MySQL password
●option.dbtable: the table name, or an SQL query wrapped as a subquery
●option.driver: the JDBC driver class; for MySQL use com.mysql.cj.jdbc.Driver
public void testSparkMysql() throws IOException {
    Dataset<Row> jdbcDF = sparkSession.read()
            .format("jdbc")
            .option("url", "jdbc:mysql://192.168.140.1:3306/user?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai")
            .option("dbtable", "(SELECT * FROM xxxtable) tmp")
            .option("user", "root")
            .option("password", "xxxxxxxxxx*k")
            .option("driver", "com.mysql.cj.jdbc.Driver")
            .load();
    jdbcDF.printSchema();
    jdbcDF.show();
    // Convert to an RDD
    JavaRDD<Row> rowJavaRDD = jdbcDF.javaRDD();
    System.out.println(rowJavaRDD.collect());
}
// Persist the collected rows to a local text file, one row per line
List<Row> list = rowJavaRDD.collect();
BufferedWriter bw = new BufferedWriter(new FileWriter("d:/test.txt"));
for (int j = 0; j < list.size(); j++) {
    bw.write(list.get(j).toString());
    bw.newLine();
    bw.flush();
}
bw.close();
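Writing in the other direction uses the same JDBC options on the write path; a minimal sketch for saving a Dataset<Row> back to MySQL (the target table result_table and the append save mode are assumptions):
public void testSparkMysqlWrite(Dataset<Row> df) {
    df.write()
            .format("jdbc")
            .option("url", "jdbc:mysql://192.168.140.1:3306/user?useUnicode=true&characterEncoding=UTF-8&serverTimezone=Asia/Shanghai")
            .option("dbtable", "result_table")
            .option("user", "root")
            .option("password", "xxxxxxxxxx*k")
            .option("driver", "com.mysql.cj.jdbc.Driver")
            // Append to the target table instead of overwriting it (assumption)
            .mode(SaveMode.Append)
            .save();
}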
Sample contents of D:\\TEMP\\testjson.json:
[{
"name": "name1",
"age": "1"
}, {
"name": "name2",
"age": "2"
}, {
"name": "name3",
"age": "3"
}, {
"name": "name4",
"age": "4"
}]
●sparkSession.sql: query the temporary view t with SQL and output the rows whose age is greater than 3
public void testSparkJson() {
    Dataset<Row> df = sparkSession.read().json("D:\\TEMP\\testjson.json");
    df.printSchema();
    // Register the DataFrame as a temporary view named t
    df.createOrReplaceTempView("t");
    Dataset<Row> row = sparkSession.sql("select age,name from t where age > 3");
    JavaRDD<Row> rowJavaRDD = row.javaRDD();
    System.out.println(rowJavaRDD.collect());
}
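The same filter can also be written with the Dataset API instead of registering a temporary view (a sketch; the method name is illustrative, functions refers to org.apache.spark.sql.functions, and Spark casts the string age column for the numeric comparison just as the SQL version does):
public void testSparkJsonDatasetApi() {
    Dataset<Row> df = sparkSession.read().json("D:\\TEMP\\testjson.json");
    // Equivalent to: select age,name from t where age > 3
    Dataset<Row> filtered = df.filter(functions.col("age").gt(3)).select("age", "name");
    filtered.show();
}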
textFile decodes its input as UTF-8, so a GBK-encoded CSV file read that way comes out garbled; reading it through hadoopFile and decoding each Text record with the GBK charset avoids the problem:
public void testSparkCsv() {
    String file = "D:\\TEMP\\testcsv.csv";
    String code = "gbk";
    // Read raw Text records and decode them with the GBK charset
    JavaRDD<String> gbkRDD = javaSparkContext
            .hadoopFile(file, TextInputFormat.class, LongWritable.class, Text.class)
            .map(p -> new String(p._2.getBytes(), 0, p._2.getLength(), code));
    JavaRDD<String> gbkWordsRDD = gbkRDD.flatMap(line -> Arrays.asList(line.split(",")).iterator());
    // Print the result
    System.out.println(gbkWordsRDD.collect());
}
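The same decoding pattern can be pulled out into a small helper so that the charset becomes a parameter instead of a hard-coded string (a sketch; the method name readTextFile is illustrative):
// Read a text file in an arbitrary charset by decoding each Hadoop Text record manually
private JavaRDD<String> readTextFile(String path, String charset) {
    return javaSparkContext
            .hadoopFile(path, TextInputFormat.class, LongWritable.class, Text.class)
            .map(p -> new String(p._2.getBytes(), 0, p._2.getLength(), charset));
}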