Task requirement: extract the incremental data of the customer_inf table in the ds_db01 database into the customer_inf table of Hive's ods database. Use the modified_time column of the ods.customer_inf table as the incremental field and load only the newly added rows, keeping field names and types unchanged. At the same time, add a static partition whose partition field is etl_date, of type String, with its value set to the day before the current date, formatted as yyyyMMdd. Finally, run show partitions ods.customer_inf in the Hive CLI to verify.
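The insert statements below assume the target Hive table already exists and is partitioned by etl_date. For reference, here is a minimal DDL sketch of such a table; the column names are taken from the extraction query further down, but every column type here is an assumption and should mirror the real ds_db01.customer_inf schema:

-- Hypothetical DDL sketch; column types are assumptions, not the real schema
CREATE TABLE IF NOT EXISTS ods.customer_inf (
  customer_inf_id    BIGINT,
  customer_id        BIGINT,
  customer_name      STRING,
  identity_card_type TINYINT,
  identity_card_no   STRING,
  mobile_phone       STRING,
  customer_email     STRING,
  gender             STRING,
  customer_point     INT,
  register_time      TIMESTAMP,
  birthday           DATE,
  customer_level     TINYINT,
  customer_money     DECIMAL(10,2),
  modified_time      TIMESTAMP
)
PARTITIONED BY (etl_date STRING);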
Code implementation:
package org.example

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import java.time.LocalDate

object Demo {
  def main(args: Array[String]): Unit = {
    // Build the Spark configuration
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("one")
      .set("spark.testing.memory", "2147480000")
      .set("dfs.client.use.datanode.hostname", "true")
    System.setProperty("HADOOP_USER_NAME", "root")

    // Connect to Hive
    val spark = SparkSession.builder()
      // Hive Metastore connection address
      .config("hive.metastore.uris", "thrift://192.168.23.60:9083")
      // Hive warehouse location on HDFS
      .config("hive.metastore.warehouse.dir", "hdfs://192.168.23.60:9000/user/hive/warehouse")
      // Use the LEGACY store-assignment policy for Spark SQL inserts
      .config("spark.sql.storeAssignmentPolicy", "LEGACY")
      // Apply the custom Spark configuration defined above
      .config(conf)
      // Enable Hive support so Hive tables and queries can be used
      .enableHiveSupport()
      .getOrCreate()

    // Read the source table from MySQL and register it as a temporary view
    spark.read.format("jdbc")
      .option("url", "jdbc:mysql://192.168.23.60:3306/ds_db01?characterEncoding=UTF-8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("user", "root")
      .option("password", "123456")
      .option("dbtable", "customer_inf")
      .load()
      .createOrReplaceTempView("v") // view over the source table
    spark.sql("select * from v") // lazy preview; append .show() to inspect

    // Get the day before today as a yyyyMMdd string
    val unit = LocalDate.now().minusDays(1).toString.replace("-", "")
    val unit1 = unit.toInt
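    // A hedged alternative for building the same yyyyMMdd string with a date
    // formatter instead of toString/replace (commented out so it does not
    // redefine `unit` above):
    // val unit = LocalDate.now().minusDays(1)
    //   .format(java.time.format.DateTimeFormatter.ofPattern("yyyyMMdd"))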
    // Full load (kept for reference):
    // spark.sql(
    //   s"""
    //      |insert overwrite table gh_test.customer_inf
    //      |partition (etl_date = '${unit}')
    //      |select * from v
    //      |""".stripMargin).show()
    //
    // spark.sql("select * from gh_test.customer_inf").show()

    // Reformat modified_time to yyyyMMdd so it can be compared with the cutoff date
    spark.sql(
      s"""
         |select customer_inf_id, customer_id, customer_name, identity_card_type, identity_card_no, mobile_phone,
         |       customer_email, gender, customer_point, register_time, birthday, customer_level, customer_money,
         |       from_unixtime(unix_timestamp(modified_time, 'yyyy-MM-dd'), 'yyyyMMdd') as modified_time
         |from v
         |""".stripMargin).createOrReplaceTempView("v1")
    // spark.sql("select count(*) from gh_test.customer_inf").show()

    // Incremental load from MySQL into Hive: keep only rows newer than the cutoff date.
    // The filter uses v1, whose modified_time is already in yyyyMMdd form; note the
    // task statement names ods.customer_inf, while gh_test is the working database here.
    spark.sql(
      s"""
         |insert overwrite table gh_test.customer_inf
         |partition (etl_date = '${unit}')
         |select * from v1 where modified_time > ${unit1}
         |""".stripMargin).show()
    // spark.sql("select * from gh_test.customer_inf").show()

    // Row count after the extraction
    spark.sql("select count(*) from gh_test.customer_inf").show()
    // spark.sql("desc gh_test.customer_inf")
  }
}
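After the job finishes, verify the result from the Hive CLI as the task requires (substitute gh_test for ods if that is where the data was written):

hive> show partitions ods.customer_inf;

This should list a partition of the form etl_date=yyyyMMdd for the previous day.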