TRCL

Reputation: 35

Get the last value from the previous partition in PySpark

I have this dataframe:

+------+-------------------+------+----------+------+
|catulz|             hatulz|ccontr|    dmovto|amount|
+------+-------------------+------+----------+------+
|     I|1900-01-01 16:00:00|   123|2022-09-01|300.00|
|     U|1900-01-01 17:00:00|   123|2022-09-02|500.00|
|     I|1900-01-01 16:00:00|   123|2022-09-02|150.00|
|     U|1900-01-01 18:00:00|   123|2022-09-03|500.00|
|     I|1900-01-01 16:00:00|   123|2022-09-03|150.00|
|     I|1900-01-01 16:00:00|   123|2022-09-04|150.00|
|     U|1900-01-01 19:00:00|   123|2022-09-04|150.00|
|     I|1900-01-01 16:00:00|   123|2022-09-05|150.00|
|     I|1900-01-01 16:00:00|   123|2022-09-06|150.00|
|     I|1900-01-01 16:00:00|   123|2022-09-07|150.00|
+------+-------------------+------+----------+------+

I need to compute a new_amount column with this rule: for each row, new_amount should be the amount from the previous dmovto partition (same ccontr), giving priority to the "U" row when the previous day has one; the first day keeps its own amount.

Like this:

+------+-------------------+------+----------+------+----------+
|catulz|             hatulz|ccontr|    dmovto|amount|new_amount|
+------+-------------------+------+----------+------+----------+
|     I|1900-01-01 16:00:00|   123|2022-09-01|300.00|  300.00  |
|     U|1900-01-01 17:00:00|   123|2022-09-02|500.00|  300.00  |
|     I|1900-01-01 16:00:00|   123|2022-09-02|150.00|  300.00  |
|     U|1900-01-01 18:00:00|   123|2022-09-03|500.00|  500.00  |
|     I|1900-01-01 16:00:00|   123|2022-09-03|150.00|  500.00  |
|     I|1900-01-01 16:00:00|   123|2022-09-04|150.00|  500.00  |
|     U|1900-01-01 19:00:00|   123|2022-09-04|150.00|  500.00  |
|     I|1900-01-01 16:00:00|   123|2022-09-05|150.00|  150.00  |
|     I|1900-01-01 16:00:00|   123|2022-09-06|150.00|  150.00  |
|     I|1900-01-01 16:00:00|   123|2022-09-07|150.00|  150.00  |
+------+-------------------+------+----------+------+----------+

PS: "U" is from "updated", and it's the priority. If I create a window by "ccontr" + "dmovto" and order by "hatulz", it could work

I tried creating a Window.partitionBy(["ccontr","dmovto"]).orderBy("hatulz") and using lag and last, but without success. Here is a reproducible setup (my attempt is sketched after it):

from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, DateType, DecimalType, \
    TimestampType
from pyspark.sql.window import Window
from datetime import datetime
from decimal import Decimal

spark = SparkSession.builder.master("local[4]").appName("tests").getOrCreate()

vdata = [
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-01','%Y-%m-%d'),Decimal(300)),
    ('U',datetime.strptime('17:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-02','%Y-%m-%d'),Decimal(500)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-02','%Y-%m-%d'),Decimal(150)),
    ('U',datetime.strptime('18:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-03','%Y-%m-%d'),Decimal(500)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-03','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-04','%Y-%m-%d'),Decimal(150)),
    ('U',datetime.strptime('19:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-04','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-05','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-06','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-07','%Y-%m-%d'),Decimal(150)),
]

schema = StructType([
    StructField("catulz",StringType(),False),
    StructField("hatulz",TimestampType(),False),
    StructField("ccontr",IntegerType(),False),
    StructField("dmovto",DateType(),False),
    StructField("amount",DecimalType(10,2),False)])

df = spark.createDataFrame(vdata,schema)
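
For reference, the attempt looked roughly like this (a minimal sketch; the window only covers rows inside the same dmovto partition, so last/lag can never reach the previous day):

w = Window.partitionBy("ccontr", "dmovto").orderBy("hatulz")

# last("amount") over this window stays inside the current (ccontr, dmovto) partition,
# so it only ever returns values from the same day, never from the previous dmovto
df.withColumn("new_amount", last("amount").over(w)).show()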

Upvotes: 2

Views: 707

Answers (2)

Luiz Viola

Reputation: 2436

First, create a pivoted df on the catulz column, picking the amount for each pivoted value:

import pyspark.sql.functions as F
from pyspark.sql.functions import col, first, sum, max, count, when
from pyspark.sql.window import Window

pvt_df = df\
            .groupby('ccontr','dmovto', 'amount')\
            .pivot("catulz")\
            .agg(first("amount"))\
            .select(col('ccontr'),
                    col('dmovto').alias('lag_dmovto'),
                    col('I'),
                    col('U'))\
            .groupby('ccontr','lag_dmovto')\
            .agg(sum("I").alias('I'), sum("U").alias('U'))\
            .orderBy('lag_dmovto')
    
pvt_df.show()

# +------+----------+------+------+
# |ccontr|lag_dmovto|     I|     U|
# +------+----------+------+------+
# |   123|2022-09-01|300.00|  null|
# |   123|2022-09-02|150.00|500.00|
# |   123|2022-09-03|150.00|500.00|
# |   123|2022-09-04|150.00|150.00|
# |   123|2022-09-05|150.00|  null|
# |   123|2022-09-06|150.00|  null|
# |   123|2022-09-07|150.00|  null|
# +------+----------+------+------+

Create a df with the dates and the corresponding previous day (a lag column):

dates_df = df.groupBy('dmovto').agg(max('dmovto'))\
             .withColumn('lag_dmovto', F.lag(F.col('dmovto')).over(Window.partitionBy().orderBy('dmovto')))\
             .select('dmovto', 'lag_dmovto')
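
For this sample data, dates_df should look roughly like this (row order aside; the first date has no previous day, so lag_dmovto is null):

dates_df.show()

# +----------+----------+
# |    dmovto|lag_dmovto|
# +----------+----------+
# |2022-09-01|      null|
# |2022-09-02|2022-09-01|
# |2022-09-03|2022-09-02|
# |2022-09-04|2022-09-03|
# |2022-09-05|2022-09-04|
# |2022-09-06|2022-09-05|
# |2022-09-07|2022-09-06|
# +----------+----------+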

Create a count_dmovto column. If the count is 1, take the amount from that row. Otherwise:

  • join with the dates_df to find the previous day
  • join with the pivoted df to bring the I and U amounts
  • now that we have all the columns, introduce the logic using when to pick the appropriate column:

df\
    .withColumn('count_dmovto', count(F.col('dmovto')).over(Window.partitionBy('dmovto')))\
    .join(dates_df, 'dmovto', 'left')\
    .join(pvt_df, ['ccontr','lag_dmovto'], 'left')\
    .withColumn('new_amount', when(col('count_dmovto') == 1, col('amount'))
                             .when(col('U').isNotNull(), col('U'))
                             .otherwise(col('I')))\
    .select('catulz','ccontr','dmovto','amount','new_amount')\
    .show()

# +------+------+----------+------+----------+
# |catulz|ccontr|    dmovto|amount|new_amount|
# +------+------+----------+------+----------+
# |     I|   123|2022-09-01|300.00|    300.00|
# |     I|   123|2022-09-02|150.00|    300.00|
# |     U|   123|2022-09-02|500.00|    300.00|
# |     I|   123|2022-09-03|150.00|    500.00|
# |     U|   123|2022-09-03|500.00|    500.00|
# |     I|   123|2022-09-04|150.00|    500.00|
# |     U|   123|2022-09-04|150.00|    500.00|
# |     I|   123|2022-09-05|150.00|    150.00|
# |     I|   123|2022-09-06|150.00|    150.00|
# |     I|   123|2022-09-07|150.00|    150.00|
# +------+------+----------+------+----------+

Upvotes: 1

Deku07

Reputation: 146

-----------
Solution
-----------

from pyspark.sql import SparkSession
import pyspark.sql.functions as F
from pyspark.sql.window import Window
from pyspark.sql.types import StructType, StructField, IntegerType, StringType, DateType, DecimalType, \
    TimestampType
from datetime import datetime
from decimal import Decimal

spark = SparkSession.builder.master("local[4]").appName("tests").getOrCreate()

vdata = [
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-01','%Y-%m-%d'),Decimal(300)),
    ('U',datetime.strptime('17:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-02','%Y-%m-%d'),Decimal(500)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-02','%Y-%m-%d'),Decimal(150)),
    ('U',datetime.strptime('18:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-03','%Y-%m-%d'),Decimal(500)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-03','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-04','%Y-%m-%d'),Decimal(150)),
    ('U',datetime.strptime('19:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-04','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-05','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-06','%Y-%m-%d'),Decimal(150)),
    ('I',datetime.strptime('16:00:00:00',"%H:%M:%S:%f"),123,datetime.strptime('2022-09-07','%Y-%m-%d'),Decimal(150)),
]
schema = StructType([
    StructField("catulz",StringType(),False),
    StructField("hatulz",TimestampType(),False),
    StructField("ccontr",IntegerType(),False),
    StructField("dmovto",DateType(),False),
    StructField("amount",DecimalType(10,2),False)])

df = spark.createDataFrame(vdata,schema)


-----------
Actual implementation
-----------


# window over a single (ccontr, dmovto) partition, ordered by hatulz
Window_spec = Window.partitionBy(["ccontr", "dmovto"])
window_previous_partition = Window_spec.orderBy(F.asc("hatulz")).rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)

# keep only the (catulz, amount) pairs whose catulz is "U"
previous_partition_value_filter = F.filter("previous_amount_values_lst", lambda x: x['0'] == "U")
# prefer the single "U" pair when it exists, otherwise fall back to all pairs of the day
previous_partition_value_cond_ = F.when(F.size("previous_amount_values_flt") == 1, F.col("previous_amount_values_flt")).otherwise(F.col("previous_amount_values_lst"))

# window over all strictly earlier rows within the same ccontr, ordered by dmovto
window_spec_previous_amount_values = Window.partitionBy(["ccontr"]).orderBy(["dmovto"]).rowsBetween(Window.unboundedPreceding, Window.currentRow - 1)
# zip each earlier dmovto with the amount chosen for that day ("U" if present, otherwise "I")
dmtvo_distinct_amount_values_cond_ = F.arrays_zip(F.collect_list("dmovto").over(window_spec_previous_amount_values), F.collect_list(F.col("final_previous_partition_values")[0]['1']).over(window_spec_previous_amount_values))

# when the day has two rows (an "I" and a "U"), take the latest previous day's amount; otherwise keep the row's own amount
final_amount_cond_ = F.when(F.col("count_rows_per_partition") == 2, F.col("max_previous_amount_part").getItem("1")).otherwise(F.col("amount"))

# all (catulz, amount) pairs of the current (ccontr, dmovto) partition, ordered by hatulz
df_fnl = df\
    .withColumn("previous_amount_values_lst",
                F.arrays_zip(F.collect_list("catulz").over(window_previous_partition), F.collect_list("amount").over(window_previous_partition)))

df_fnl_flt = df_fnl.withColumn("previous_amount_values_flt", previous_partition_value_filter)\
    .withColumn("final_previous_partition_values", previous_partition_value_cond_)\
    .withColumn("count_rows_per_partition", F.count("*").over(Window_spec))\
    .withColumn("dmtvo_distinct_amount_values", dmtvo_distinct_amount_values_cond_)\
    .withColumn("max_previous_amount_part", F.array_max(F.filter("dmtvo_distinct_amount_values", lambda x: x.getItem('0') < F.col("dmovto"))))\
    .withColumn("final_amount", final_amount_cond_)\
    .drop("previous_amount_values_lst", "previous_amount_values_flt",
          "dmtvo_distinct_amount_values", "count_rows_per_partition", "final_previous_partition_values", "max_previous_amount_part")



df_fnl_flt.show(10,0)

-----------
Final output
-----------
+------+-------------------+------+----------+------+------------+
|catulz|hatulz             |ccontr|dmovto    |amount|final_amount|
+------+-------------------+------+----------+------+------------+
|I     |1900-01-01 16:00:00|123   |2022-09-01|300.00|300.00      |
|I     |1900-01-01 16:00:00|123   |2022-09-02|150.00|300.00      |
|U     |1900-01-01 17:00:00|123   |2022-09-02|500.00|300.00      |
|I     |1900-01-01 16:00:00|123   |2022-09-03|150.00|500.00      |
|U     |1900-01-01 18:00:00|123   |2022-09-03|500.00|500.00      |
|I     |1900-01-01 16:00:00|123   |2022-09-04|150.00|500.00      |
|U     |1900-01-01 19:00:00|123   |2022-09-04|150.00|500.00      |
|I     |1900-01-01 16:00:00|123   |2022-09-05|150.00|150.00      |
|I     |1900-01-01 16:00:00|123   |2022-09-06|150.00|150.00      |
|I     |1900-01-01 16:00:00|123   |2022-09-07|150.00|150.00      |
+------+-------------------+------+----------+------+------------+

Kindly upvote if you like my solution.

Upvotes: 1
