# Business Data Challenge - Team 1

import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings

# Import cleaning and merge functions
exec(open('BDC-team-1/0_KPI_functions.py').read())
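# NOTE: 0_KPI_functions.py is expected to define, in this namespace, the cleaned
# dataframes df1_campaigns_information, df1_products_purchased_reduced and
# df1_customerplus_clean, the helpers campaigns_kpi_function and
# tickets_kpi_function, and the s3fs filesystem handle `fs` used for the export
# below (inferred from how these names are used in this script).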
|
2024-02-12 22:08:35 +01:00
|
|
|
## 2 - Construction of KPIs on a given period
|
|
|
|
|
2024-02-14 12:44:18 +01:00
|
|
|
def explanatory_variables(min_date, max_date, df_campaigns_information = df1_campaigns_information, df_products_purchased_reduced = df1_products_purchased_reduced, df_customerplus_clean = df1_customerplus_clean):
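    """Build the explanatory variables over the period [min_date, max_date].

    Filters campaigns and purchases to the period, computes campaign,
    purchase-behaviour and socio-demographic KPIs, and merges them into a
    single dataframe keyed by customer_id (and event_type_id via the
    tickets KPIs).
    """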
    # Consistency filter: restrict the data to the chosen period
    max_date = pd.to_datetime(max_date, utc = True, format = 'ISO8601')
    min_date = pd.to_datetime(min_date, utc = True, format = 'ISO8601')

    # Filter the df_campaigns_information base
    df_campaigns_information = df_campaigns_information[(df_campaigns_information['sent_at'] <= max_date) & (df_campaigns_information['sent_at'] >= min_date)].copy()
    # Censor opens recorded after the period end (.loc avoids chained assignment)
    df_campaigns_information.loc[df_campaigns_information['opened_at'] >= max_date, 'opened_at'] = np.datetime64('NaT')

    # Filter the df_products_purchased_reduced base
    df_products_purchased_reduced = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= max_date) & (df_products_purchased_reduced['purchase_date'] >= min_date)].copy()

    print("Data filtering : SUCCESS")
    # Merge everything and build the KPIs

    # KPIs on advertising campaigns
    df_campaigns_kpi = campaigns_kpi_function(campaigns_information = df_campaigns_information)

    # KPIs on purchasing behaviour
    df_tickets_kpi = tickets_kpi_function(tickets_information = df_products_purchased_reduced)

    # KPIs on socio-demographic data
    # (work on a copy so the shared input dataframe is not mutated across calls)
    df_customerplus_clean = df_customerplus_clean.copy()

    ## Gender
    df_customerplus_clean["gender_label"] = df_customerplus_clean["gender"].map({
        0: 'female',
        1: 'male',
        2: 'other'
    })
    gender_dummies = pd.get_dummies(df_customerplus_clean["gender_label"], prefix='gender').astype(int)
    df_customerplus_clean = pd.concat([df_customerplus_clean, gender_dummies], axis=1)

    ## Indicator for whether the individual lives in France
    df_customerplus_clean["country_fr"] = df_customerplus_clean["country"].apply(lambda x : int(x == "fr") if pd.notna(x) else np.nan)

    print("KPIs construction : SUCCESS")
    # Merge with customer-level campaign KPIs
    df_customer = pd.merge(df_customerplus_clean, df_campaigns_kpi, on = 'customer_id', how = 'left')

    # Fill NaN values: customers never targeted by a campaign get zero counts
    df_customer[['nb_campaigns', 'nb_campaigns_opened']] = df_customer[['nb_campaigns', 'nb_campaigns_opened']].fillna(0)

    # Merge with purchasing-behaviour KPIs
    df_customer_product = pd.merge(df_tickets_kpi, df_customer, on = 'customer_id', how = 'outer')

    # Fill NaN values: customers with no purchase in the period get zero counts
    df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']] = df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']].fillna(0)

    print("Explanatory variable construction : SUCCESS")

    return df_customer_product
# Function to build the explained variable
def explained_variable(min_date, max_date, df_products_purchased_reduced = df1_products_purchased_reduced):
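    """Build the explained variable: a binary purchase indicator over (min_date, max_date].

    Returns one row per (customer_id, event_type_id) pair observed with at
    least one purchase in the period, flagged y_has_purchased = 1.
    """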
    # Convert the bounds, for consistency with explanatory_variables
    max_date = pd.to_datetime(max_date, utc = True, format = 'ISO8601')
    min_date = pd.to_datetime(min_date, utc = True, format = 'ISO8601')

    # Filter the purchase base to the target period
    df_products_purchased_reduced = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= max_date) & (df_products_purchased_reduced['purchase_date'] > min_date)].copy()

    # Purchase indicator
    df_products_purchased_reduced['y_has_purchased'] = 1

    y = df_products_purchased_reduced[['customer_id', 'event_type_id', 'y_has_purchased']].drop_duplicates()

    print("Explained variable construction : SUCCESS")

    return y
## Export

# Export folder
BUCKET_OUT = "projet-bdc2324-team1/1_Output/Logistique Regression databases - First approach"
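# The filesystem handle `fs` used below is expected to be created in
# 0_KPI_functions.py. A minimal fallback sketch, assuming the SSP Cloud MinIO
# endpoint (an assumption, not confirmed by this file):
if 'fs' not in globals():
    fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': 'https://minio.lab.sspcloud.fr'})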
# Test dataset
X_test = explanatory_variables(min_date = "2021-08-01", max_date = "2023-08-01", df_campaigns_information = df1_campaigns_information, df_products_purchased_reduced = df1_products_purchased_reduced, df_customerplus_clean = df1_customerplus_clean)

y_test = explained_variable(min_date = "2023-08-01", max_date = "2023-11-01", df_products_purchased_reduced = df1_products_purchased_reduced)

dataset_test = pd.merge(X_test, y_test, on = ['customer_id', 'event_type_id'], how = 'left')
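# After this left merge, customers with no purchase in the target window carry
# NaN in y_has_purchased; downstream modelling code is assumed to fill these
# with 0 to obtain the binary label (not shown in this file).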
# Export
FILE_KEY_OUT_S3 = "dataset_test.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    dataset_test.to_csv(file_out, index = False)

print("Test dataset export : SUCCESS")
# Train dataset
X_train = explanatory_variables(min_date = "2021-05-01", max_date = "2023-05-01", df_campaigns_information = df1_campaigns_information, df_products_purchased_reduced = df1_products_purchased_reduced, df_customerplus_clean = df1_customerplus_clean)

y_train = explained_variable(min_date = "2023-05-01", max_date = "2023-08-01", df_products_purchased_reduced = df1_products_purchased_reduced)

dataset_train = pd.merge(X_train, y_train, on = ['customer_id', 'event_type_id'], how = 'left')
# Export
FILE_KEY_OUT_S3 = "dataset_train.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    dataset_train.to_csv(file_out, index = False)

print("Train dataset export : SUCCESS")
print("END OF DATASET GENERATION : SUCCESS")