Spark SQL supports reading and writing data stored in Hive. However, Hive has a large number of dependencies that are not included in the default Spark distribution, so these dependencies must be made available on the classpath when Hive support is used. Configuration of Hive is done by placing your hive-site.xml, core-site.xml (for security configuration), and hdfs-site.xml (for HDFS configuration) files in conf/.
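If you prefer not to rely only on the files in conf/, the key connection settings can also be supplied programmatically when building the SparkSession. A minimal sketch, assuming a Hive metastore service at thrift://hadoop:9083 (a hypothetical address, adjust to your cluster) and the same warehouse directory used later in this article:

import org.apache.spark.sql.SparkSession

object HiveConfigSketch {
  def main(args: Array[String]): Unit = {
    // Assumption: metastore reachable at thrift://hadoop:9083; adjust to your cluster
    val spark: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("HiveConfigSketch")
      .config("hive.metastore.uris", "thrift://hadoop:9083")
      .config("spark.sql.warehouse.dir", "hdfs://hadoop:9000/user/hive/warehouse")
      .enableHiveSupport()
      .getOrCreate()

    // Quick check that the Hive catalog is visible
    spark.sql("show databases").show()
    spark.stop()
  }
}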
Below is a test program that reads from and writes to Hive:
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

// Case class describing one advertisement log record
case class Info(timestamp: String, pro: String, city: String, category: String, ad: String)

object WriteToHDFS {
  def main(args: Array[String]): Unit = {
    // Silence verbose Spark logging
    Logger.getLogger("org").setLevel(Level.ERROR)

    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("WriteToHDFS")

    // enableHiveSupport() turns on the Hive metastore connection and HiveQL support
    val spark: SparkSession = SparkSession.builder()
      .config(conf)
      .config("spark.sql.warehouse.dir", "hdfs://hadoop:9000/user/hive/warehouse")
      .enableHiveSupport()
      .getOrCreate()
    import spark.implicits._

    // Read an existing Hive table
    val df: DataFrame = spark.table("ad_info")
    // Query the same table with SQL, filtering on the ad column
    val df2: DataFrame = spark.sql("select * from ad_info where cast(ad as int) >= 10")

    // Repartition and write the result back to Hive as a new table
    val df3: DataFrame = df.repartition(5).toDF()
    df3.write.mode(SaveMode.Append).saveAsTable("hive_records03")
    df3.show()

    spark.stop()
  }
}
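If you also want to control the storage format of the new table or partition it by a column, the DataFrameWriter options can be chained before saveAsTable. A small sketch as a variation on the write above (the table name hive_records03_parquet is hypothetical):

// Hypothetical variation: store the table as Parquet, partitioned by city
df3.write
  .mode(SaveMode.Overwrite)
  .format("parquet")
  .partitionBy("city")
  .saveAsTable("hive_records03_parquet")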
For more details, see the official documentation.