// Bootstrap a local Spark session (reuses an already-running one if present).
val spark = SparkSession.builder.master("local").getOrCreate()
// Keep a handle on the underlying SparkContext for the RDD-based example below.
val sc = spark.sparkContext
// Smoke test: a one-column DataFrame holding 0..999, printed to stdout.
val numbers = spark.range(1000).toDF("number")
numbers.show()
// Sample rows laid out as (name, age, phone).
val data = Seq(
  Row("A", 10, 112233),
  Row("B", 20, 223311),
  Row("C", 30, 331122))
// Explicit schema matching the Row layout above, built field by field.
// NOTE(review): a real-world phone number overflows IntegerType — StringType would be
// safer if this sample grows; kept as Int here to preserve the original output.
val schema = new StructType()
  .add(StructField("name", StringType))
  .add(StructField("age", IntegerType))
  .add(StructField("phone", IntegerType))
// Turn the Seq[Row] into an RDD, pair it with the schema, and print the DataFrame.
spark.createDataFrame(sc.makeRDD(data), schema).show()
/* Expected contents of data.json:
{"name":"A","age":10,"phone":112233}
{"name":"B", "age":20,"phone":223311}
{"name":"C", "age":30,"phone":331122}
*/
// spark.read.json(path) is the shorthand for format("json").load(path) — same behavior.
spark.read.json("/Users/tobe/temp2/data.json").show()
/* Expected contents of data.csv:
name,age,phone
A,10,112233
B,20,223311
C,30,331122
*/
// Read the CSV, treating its first line as the header row.
spark.read.format("csv").option("header", true).load("/Users/tobe/temp2/data.csv").show()
// NOTE(review): this repeats the CSV read above on the same file (name,age,phone rows) —
// likely a copy/paste leftover; confirm whether a different example was intended.
// "header" is supplied through an options map this time; behavior is identical.
spark.read.options(Map("header" -> "true")).csv("/Users/tobe/temp2/data.csv").show()
// NOTE(review): another verbatim repeat of the header-CSV read (same name,age,phone file) —
// probably meant to demonstrate something different; flagging for the author.
val csvWithHeader = spark.read.option("header", "true").csv("/Users/tobe/temp2/data.csv")
csvWithHeader.show()
// Spark's TimestampType corresponds to Java's java.sql.Timestamp.
// NOTE(review): the note above mentions TimestampType, yet this re-reads the same
// name,age,phone CSV — presumably a timestamp-typed sample was intended; confirm.
spark.read
  .format("csv")
  .option("header", true)
  .load("/Users/tobe/temp2/data.csv")
  .show()
// Spark's DateType corresponds to Java's java.sql.Date.
// NOTE(review): the note above refers to DateType, but this reads the same
// name,age,phone CSV once more — a date-typed sample file was probably intended.
val plainCsv = spark.read.option("header", "true").csv("/Users/tobe/temp2/data.csv")
plainCsv.show()