Reputation: 33
I have a DataFrame which contains one struct field. I want to remove the null values from the struct field.
from pyspark.sql.functions import struct

temp_df_struct = Df.withColumn(
    "VIN_COUNTRY_CD",
    struct(
        'BXSR_VEHICLE_1_VIN_COUNTRY_CD',
        'BXSR_VEHICLE_2_VIN_COUNTRY_CD',
        'BXSR_VEHICLE_3_VIN_COUNTRY_CD',
        'BXSR_VEHICLE_4_VIN_COUNTRY_CD',
        'BXSR_VEHICLE_5_VIN_COUNTRY_CD'
    )
)
Some of these columns contain NULLs. Is there any way to remove the nulls from the struct field?
Upvotes: 0
Views: 4470
Reputation: 13581
You should always provide a small reproducible example, but here's my guess as to what you want:
data = [("1", "10", "20", None, "30", "40"), ("2", None, "15", "25", "35", None)]
names_of_cols = [
    "id",
    "BXSR_VEHICLE_1_VIN_COUNTRY_CD",
    "BXSR_VEHICLE_2_VIN_COUNTRY_CD",
    "BXSR_VEHICLE_3_VIN_COUNTRY_CD",
    "BXSR_VEHICLE_4_VIN_COUNTRY_CD",
    "BXSR_VEHICLE_5_VIN_COUNTRY_CD",
]
df = spark.createDataFrame(data, names_of_cols)
df.show(truncate=False)
# +---+-----------------------------+-----------------------------+-----------------------------+-----------------------------+-----------------------------+
# | id|BXSR_VEHICLE_1_VIN_COUNTRY_CD|BXSR_VEHICLE_2_VIN_COUNTRY_CD|BXSR_VEHICLE_3_VIN_COUNTRY_CD|BXSR_VEHICLE_4_VIN_COUNTRY_CD|BXSR_VEHICLE_5_VIN_COUNTRY_CD|
# +---+-----------------------------+-----------------------------+-----------------------------+-----------------------------+-----------------------------+
# | 1| 10| 20| null| 30| 40|
# | 2| null| 15| 25| 35| null|
# +---+-----------------------------+-----------------------------+-----------------------------+-----------------------------+-----------------------------+
You first want to collect the values from multiple columns into an array:
import re
from pyspark.sql.functions import col, array
collect_cols = [c for c in df.columns if re.match(r'BXSR_VEHICLE_\d_VIN_COUNTRY_CD', c)]
collect_cols
# ['BXSR_VEHICLE_1_VIN_COUNTRY_CD', 'BXSR_VEHICLE_2_VIN_COUNTRY_CD', 'BXSR_VEHICLE_3_VIN_COUNTRY_CD', 'BXSR_VEHICLE_4_VIN_COUNTRY_CD', 'BXSR_VEHICLE_5_VIN_COUNTRY_CD']
(
    df.
    withColumn(
        "VIN_COUNTRY_CD",
        array(*collect_cols)
    ).
    select('id', 'VIN_COUNTRY_CD').
    show(truncate=False)
)
# +---+-----------------+
# |id |VIN_COUNTRY_CD |
# +---+-----------------+
# |1 |[10, 20,, 30, 40]|
# |2 |[, 15, 25, 35,] |
# +---+-----------------+
And then remove the NULLs from the array:
from pyspark.sql.functions import array, col, lit, array_except
(
    df.
    withColumn(
        "VIN_COUNTRY_CD",
        array(*collect_cols)
    ).
    withColumn(
        'VIN_COUNTRY_CD',
        # drop every element equal to null (cast to string to match the array type)
        array_except(
            col('VIN_COUNTRY_CD'),
            array(lit(None).cast('string'))
        )
    ).
    select('id', 'VIN_COUNTRY_CD').
    show(truncate=False)
)
# +---+----------------+
# |id |VIN_COUNTRY_CD |
# +---+----------------+
# |1 |[10, 20, 30, 40]|
# |2 |[15, 25, 35] |
# +---+----------------+
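One caveat: array_except also deduplicates its result, so repeated country codes within a row would be collapsed. If duplicates matter to you, the SQL filter higher-order function (available since Spark 2.4 via expr) removes the nulls while preserving duplicates and order. A minimal sketch using the same df and collect_cols as above:

from pyspark.sql.functions import array, expr

(
    df.
    withColumn(
        "VIN_COUNTRY_CD",
        array(*collect_cols)
    ).
    withColumn(
        'VIN_COUNTRY_CD',
        # keep only the non-null entries, without deduplicating
        expr("filter(VIN_COUNTRY_CD, x -> x is not null)")
    ).
    select('id', 'VIN_COUNTRY_CD').
    show(truncate=False)
)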
Upvotes: 3