Python For Data Science: PySpark SQL Cheat Sheet

Spark SQL is Apache Spark's module for working with structured data.

> Initializing SparkSession

A SparkSession can be used to create DataFrames, register DataFrames as views, and execute SQL over those views.

>>> from pyspark.sql import SparkSession
>>> spark = SparkSession \
      .builder \
      .getOrCreate()
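The builder also accepts an application name and configuration options before getOrCreate(); the app name and config values below are placeholders:

>>> spark = SparkSession \
      .builder \
      .appName("Python Spark SQL basic example") \
      .config("spark.some.config.option", "some-value") \
      .getOrCreate()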
> Creating DataFrames

From RDDs

Infer Schema
>>> from pyspark.sql import Row
>>> sc = spark.sparkContext
>>> lines = sc.textFile("people.txt")
>>> parts = lines.map(lambda l: l.split(","))
>>> people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
>>> peopledf = spark.createDataFrame(people)

Specify Schema
>>> from pyspark.sql.types import StructType, StructField, StringType
>>> people = parts.map(lambda p: Row(name=p[0],
                                     age=int(p[1].strip())))
>>> schemaString = "name age"
>>> fields = [StructField(field_name, StringType(), True)
              for field_name in schemaString.split()]
>>> schema = StructType(fields)
>>> spark.createDataFrame(people, schema).show()
From Spark Data Sources

JSON
>>> df = spark.read.json("customer.json")
>>> df2 = spark.read.load("people.json", format="json")

Parquet files
>>> df3 = spark.read.load("users.parquet")

TXT files
>>> df4 = spark.read.text("people.txt")
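CSV files
CSV sources load through the same reader; a minimal sketch (the file name people.csv is illustrative):
>>> csvdf = spark.read.csv("people.csv", header=True, inferSchema=True)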
> Inspect Data

>>> df.dtypes             #Return df column names and data types
>>> df.show()             #Display the content of df
>>> df.head()             #Return first n rows
>>> df.count()            #Count the number of rows in df
>>> df.distinct().count() #Count the number of distinct rows in df
>>> df.printSchema()      #Print the schema of df
>>> df.explain()          #Print the (logical and physical) plans
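A few more standard inspection calls in the same vein, all part of the public DataFrame API:

>>> df.first()            #Return the first row
>>> df.take(2)            #Return the first n rows
>>> df.schema             #Return the schema of df
>>> df.columns            #Return the columns of df
>>> df.describe().show()  #Compute summary statistics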
> Duplicate Values

>>> df = df.dropDuplicates() #Return df with duplicate rows removed

> Queries

>>> from pyspark.sql import functions as F
>>> from pyspark.sql.functions import explode

Select
>>> df.select("firstName", "lastName") \
      .show()
>>> df.select("firstName",              #Show firstName, age, and the
              "age",                    #exploded phoneNumber entries
              explode("phoneNumber")
                .alias("contactInfo")) \
      .select("contactInfo.type",
              "firstName",
              "age") \
      .show()
>>> df.select(df["firstName"], df["age"] + 1).show() #Show all entries in firstName and age, add 1 to the entries of age
>>> df.select(df['age'] > 24).show() #Show all entries where age > 24

When
>>> df.select("firstName",              #Show firstName and 0 or 1 depending on age > 30
              F.when(df.age > 30, 1)
               .otherwise(0)) \
      .show()

Like
>>> df.select("firstName",              #Show firstName, and lastName is TRUE if lastName is like Smith
              df.lastName.like("Smith")) \
      .show()

Startswith - Endswith
>>> df.select("firstName",              #Show firstName, and TRUE if lastName starts with Sm
              df.lastName
                .startswith("Sm")) \
      .show()
>>> df.select(df.lastName.endswith("th")).show() #Show last names ending in th
Substring
>>> df.select(df.firstName.substr(1, 3) #Return substrings of firstName
                .alias("name")) \
      .collect()

Between
>>> df.select(df.age.between(22, 24)).show() #Show age: values are TRUE if between 22 and 24
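Isin
Membership tests use the standard Column.isin method; the names below are illustrative:
>>> df[df.firstName.isin("Jane", "Boris")].collect() #Show rows whose firstName is one of the given options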
> Add, Update & Remove Columns

Adding Columns
>>> df = df.withColumn('city', df.address.city) \
           .withColumn('postalCode', df.address.postalCode) \
           .withColumn('state', df.address.state) \
           .withColumn('streetAddress', df.address.streetAddress) \
           .withColumn('telePhoneNumber', explode(df.phoneNumber.number))

Removing Columns
>>> df = df.drop("address", "phoneNumber")
>>> df = df.drop(df.address).drop(df.phoneNumber)
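Updating Columns
Renaming a column uses the standard withColumnRenamed method:
>>> df = df.withColumnRenamed('telePhoneNumber', 'phoneNumber')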
> GroupBy

>>> df.groupBy("age").count().show() #Group by age, count the members in the groups

> Filter

>>> df.filter(df["age"] > 24).show() #Filter entries of age, only keep those records of which the values are >24
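groupBy also feeds aggregate functions from pyspark.sql.functions (imported as F above); a minimal sketch:

>>> df.groupBy("age") \
      .agg(F.count("*").alias("n"),
           F.collect_list("firstName").alias("names")) \
      .show()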
> Sort

>>> df.orderBy(["age", "city"], ascending=[0, 1]) \
      .collect()
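Two equivalent sort spellings, using the standard sort API:

>>> peopledf.sort(peopledf.age.desc()).collect()
>>> df.sort("age", ascending=False).collect()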
> Missing & Replacing Values
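A minimal sketch of the standard null-handling calls under df.na (the fill value 50 and the 10 → 20 replacement are illustrative):

>>> df.na.fill(50).show()        #Replace null values with 50
>>> df.na.drop().show()          #Return new df omitting rows with null values
>>> df.na.replace(10, 20).show() #Return new df replacing one value with another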
> Repartitioning

>>> df.repartition(10).rdd.getNumPartitions() #df with 10 partitions
>>> df.coalesce(1).rdd.getNumPartitions()     #df with 1 partition
> Running SQL Queries Programmatically

Registering DataFrames as Views
>>> peopledf.createGlobalTempView("people")
>>> df.createTempView("customer")

Query Views
>>> df5 = spark.sql("SELECT * FROM customer").show()
>>> peopledf2 = spark.sql("SELECT * FROM global_temp.people") \
      .show()
> Output

Data Structures
>>> df.toJSON().first() #Convert df into an RDD of strings
>>> df.toPandas()       #Return the contents of df as a pandas DataFrame

Write & Save to Files
>>> df.select("firstName", "city") \
      .write \
      .save("nameAndCity.parquet")
>>> df.select("firstName", "age") \
      .write \
      .save("namesAndAges.json", format="json")
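To spot-check the saved files, they can be read back with the generic loader (same paths as above):

>>> spark.read.load("nameAndCity.parquet").show()
>>> spark.read.load("namesAndAges.json", format="json").show()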
> Stopping SparkSession

>>> spark.stop()