本文使用 PostgreSQL 数据库;若改用 MySQL 等其他数据库,连接配置需要更改为如下内容,同时写回 MySQL 时使用的参数也应做相应修改。更新操作采用"先删除、再添加"的方式实现:
# Connect to a MySQL database (alternative to the PostgreSQL connection used below)
jdbcDF = spark.read.format("jdbc")\
.option("driver","com.mysql.jdbc.Driver")\
.option("url","jdbc:mysql://***.***.**.***:3306/Gadaite")\
.option("dbtable","RedditNews")\
.option("user","root")\
.option("password","*******")\
.load()
代码如下:
未解决的问题为:sparksql操作临时表进行插入和更新报错:
规避方式:将spark的dataframe转为pandas的dataframe
#该单元报错,待解决,使用方式二从spark的dataframe转为pandas的dataframe处理
# if count==0:
# str_insert = """insert into temptable (imsi,col1,col2,col3,col4) values(%s,%s,%s,%s,%s)"""%(imsi,col1,col2,col3,col4)
# print(str_insert)
# outputDF = spark.sql(str_insert)
# outputDF.show()
主要实现功能:
1.假定imsi为主键
2.在sql原始表中,没有出现该imsi,则将数据插入到sql表中
3.在sql原始表中,已经包含该imsi,则将数据更新替换原sql表中imsi所在行的数据
完整代码如下:(IP以及password已用***隐藏)
#%%
# NOTE(review): the next two imports are unused in this script and reference
# pandas *internal* modules (removed/relocated in modern pandas) -- they are
# candidates for deletion; kept here only to preserve the original file.
from pandas.core.dtypes.common import is_numeric_v_string_like
from pandas.io.parsers import count_empty_vals
from pyspark import SparkConf,SparkContext
from pyspark.sql import SparkSession
# Create (or reuse) the SparkSession that drives all reads/writes below.
spark = SparkSession.builder.config(conf=SparkConf()).getOrCreate()
# %%
import pandas as pd
print(pd.__version__)
#%%
# Connect to the PostgreSQL database and load table "newtable" into a Spark DataFrame.
pg_options = {
    "driver": "org.postgresql.Driver",
    "url": "jdbc:postgresql://***.***.**.***:5432/trajectory",
    "dbtable": "newtable",
    "user": "postgres",
    "password": "*******",
}
jdbcDF = spark.read.format("jdbc").options(**pg_options).load()
jdbcDF.show()
# If this raises java.lang.ClassNotFoundException: org.postgresql.Driver,
# the PostgreSQL JDBC jar is missing from the Spark classpath.
# %%
# One sample record to upsert, keyed by imsi.
dict_pd = {
    'imsi': [103],
    'col1': [112.1],
    'col2': [113.1],
    'col3': [114.1],
    'col4': [115.1],
}
pd_df = pd.DataFrame(dict_pd)
print(pd_df)
# %%
# Lift the pandas frame into Spark so it can be compared against the DB table.
inputDF = spark.createDataFrame(pd_df)
inputDF.show()
#%%
# Pull the staged row(s) back to the driver and unpack the five columns
# into module-level variables used by the upsert cells below.
for row in inputDF.collect():
    imsi, col1, col2, col3, col4 = row
    print(imsi, col1, col2, col3, col4)
#%%
# Expose the DB snapshot to Spark SQL under the name "temptable".
jdbcDF.createOrReplaceTempView("temptable")
query = """
select * from temptable
"""
spark.sql(query).show()
#%%
# Check whether the primary key (imsi) already exists in the table:
# count == 0 -> plain insert; count != 0 -> update via delete-then-add.
str_count = "select count(imsi) from temptable where imsi=%s"%(imsi)
print(str_count, type(str_count))
result_rows = spark.sql(str_count).collect()
for result_row in result_rows:
    count = result_row[0]
    print(count, type(count))
#%%
# Upsert the staged row into a pandas copy of the DB table.
# (spark.sql("insert into temptable ...") fails against a temp view backed by
# a JDBC read, so the workaround is: Spark -> pandas, edit, pandas -> Spark.)
#%%
convert_pandasDF = jdbcDF.toPandas()
print(convert_pandasDF)
# The staged row as a one-element list of dicts, keyed by the imsi "primary key".
str_insert_dict = [{'imsi': imsi, 'col1': col1, 'col2': col2,
                    'col3': col3, 'col4': col4}]
if count == 0:
    print("No duplicate IMSI!!")
else:
    # imsi already present: drop the old row first (update = delete + add).
    print("Duplicate IMSI!!")
    convert_pandasDF = convert_pandasDF[(convert_pandasDF.imsi != imsi)]
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the replacement. As with the original append(ignore_index=False),
# the appended row keeps its own index (0).
convert_pandasDF = pd.concat([convert_pandasDF, pd.DataFrame(str_insert_dict)])
print(convert_pandasDF)
outputDF = spark.createDataFrame(convert_pandasDF)
outputDF.show()
#%%
# Write the merged result back to PostgreSQL, replacing the whole table.
prop = {
    'user': 'postgres',
    'password': '*******',
    'driver': "org.postgresql.Driver",
}
outputDF.write.jdbc("jdbc:postgresql://***.***.**.***:5432/trajectory",'newtable','overwrite', prop)
# %%