The code is shown below; each step of the workflow is explained in the inline comments:
# -*- coding: utf-8 -*-
import pandas as pd
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext

if __name__ == "__main__":
    # Initialize the data: a pandas DataFrame
    df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],
                      index=['row1', 'row2'],
                      columns=['c1', 'c2', 'c3'])
    # Print the pandas DataFrame
    print(df)

    # Initialize a Spark DataFrame
    sc = SparkContext()
    spark = SparkSession \
        .builder \
        .appName("testDataFrame") \
        .getOrCreate()
    sentenceData = spark.createDataFrame([
        (0.0, "I like Spark"),
        (1.0, "Pandas is useful"),
        (2.0, "They are coded by Python")
    ], ["label", "sentence"])
    # Show the data
    sentenceData.select("label").show()

    # Convert pandas.DataFrame to pyspark.sql.DataFrame
    sqlContext = SQLContext(sc)
    spark_df = sqlContext.createDataFrame(df)
    # Show the data
    spark_df.select("c1").show()

    # Convert pyspark.sql.DataFrame to pandas.DataFrame
    pandas_df = sentenceData.toPandas()
    # Print the data
    print(pandas_df)
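On newer PySpark releases (2.3+/3.x) the same round trip can be written without SQLContext, and Apache Arrow can be enabled to speed up the conversion. The snippet below is a minimal sketch under those assumptions, not part of the original example: the config key spark.sql.execution.arrow.pyspark.enabled applies to Spark 3.x (Spark 2.3/2.4 use spark.sql.execution.arrow.enabled), and pyarrow must be installed for Arrow to take effect.

# -*- coding: utf-8 -*-
import pandas as pd
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession.builder.appName("arrowConversion").getOrCreate()
    # Assumption: Spark 3.x config key; Spark 2.3/2.4 use "spark.sql.execution.arrow.enabled".
    spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")

    pdf = pd.DataFrame({"c1": [1, 4], "c2": [2, 5], "c3": [3, 6]})

    # pandas.DataFrame -> pyspark.sql.DataFrame (no SQLContext needed)
    sdf = spark.createDataFrame(pdf)
    sdf.show()

    # pyspark.sql.DataFrame -> pandas.DataFrame (Arrow speeds up this step when enabled)
    back_to_pandas = sdf.toPandas()
    print(back_to_pandas)

    spark.stop()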
Program output:
That concludes this example of converting between pyspark.sql.DataFrame and pandas.DataFrame. I hope it serves as a useful reference, and thank you for supporting 武林站長站.