# Business Data Challenge - Team 1
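"""Builds the train and test datasets for the purchase-prediction models
and exports them to S3 as CSV files.

Explanatory variables are computed over a two-year feature window; the
explained variable y_has_purchased flags customers who made a purchase
during the following three months.
"""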
import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings
# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})
# Import cleaning and merge functions
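# 0_KPI_functions.py provides display_databases, campaigns_kpi_function and
# tickets_kpi_function, which exec() pulls into this module's namespace.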
exec(open('0_KPI_functions.py').read())

# Ignore warnings
warnings.filterwarnings('ignore')

def dataset_construction(min_date, end_features_date, max_date, directory_path):
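    """Build one modelling dataset for a given time split.

    Explanatory variables are computed from events observed between
    min_date and end_features_date; the explained variable y_has_purchased
    equals 1 when the customer purchased between end_features_date
    (exclusive) and max_date (inclusive), 0 otherwise. directory_path is
    forwarded to display_databases to locate the cleaned input files.
    """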
    # Import the cleaned databases
    df_customerplus_clean = display_databases(directory_path, file_name = "customerplus_cleaned")
    df_campaigns_information = display_databases(directory_path, file_name = "campaigns_information", datetime_col = ['opened_at', 'sent_at', 'campaign_sent_at'])
    df_products_purchased_reduced = display_databases(directory_path, file_name = "products_purchased_reduced", datetime_col = ['purchase_date'])
    # Consistency filter so the data match the time windows of our method
    max_date = pd.to_datetime(max_date, utc = True, format = 'ISO8601')
    end_features_date = pd.to_datetime(end_features_date, utc = True, format = 'ISO8601')
    min_date = pd.to_datetime(min_date, utc = True, format = 'ISO8601')

    # Filter the df_campaigns_information table
    df_campaigns_information = df_campaigns_information[(df_campaigns_information['sent_at'] <= end_features_date) & (df_campaigns_information['sent_at'] >= min_date)].copy()
    # Opens occurring after the feature window must not leak into the features
    df_campaigns_information.loc[df_campaigns_information['opened_at'] >= end_features_date, 'opened_at'] = np.datetime64('NaT')

    # Filter the df_products_purchased_reduced table (feature window only)
    # NOTE: keep the unfiltered frame under its original name, since the
    # explained variable below needs purchases made after end_features_date
    df_products_purchased_features = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= end_features_date) & (df_products_purchased_reduced['purchase_date'] >= min_date)]

    print("Data filtering : SUCCESS")
    # Merge everything and build the KPIs

    # KPIs on advertising campaigns
    df_campaigns_kpi = campaigns_kpi_function(campaigns_information = df_campaigns_information)

    # KPIs on purchasing behaviour
    df_tickets_kpi = tickets_kpi_function(tickets_information = df_products_purchased_features)

    # KPIs on socio-demographic data
    ## Gender
    df_customerplus_clean["gender_label"] = df_customerplus_clean["gender"].map({
        0: 'female',
        1: 'male',
        2: 'other'
    })
    gender_dummies = pd.get_dummies(df_customerplus_clean["gender_label"], prefix='gender').astype(int)
    df_customerplus_clean = pd.concat([df_customerplus_clean, gender_dummies], axis=1)

    ## Indicator for whether the individual lives in France
    df_customerplus_clean["country_fr"] = df_customerplus_clean["country"].apply(lambda x : int(x == "fr") if pd.notna(x) else np.nan)

    print("KPIs construction : SUCCESS")
    # Merge with the customer-related KPIs
    df_customer = pd.merge(df_customerplus_clean, df_campaigns_kpi, on = 'customer_id', how = 'left')

    # Fill NaN values
    df_customer[['nb_campaigns', 'nb_campaigns_opened']] = df_customer[['nb_campaigns', 'nb_campaigns_opened']].fillna(0)

    # Merge with the purchasing-behaviour KPIs
    df_customer_product = pd.merge(df_tickets_kpi, df_customer, on = 'customer_id', how = 'outer')

    # Fill NaN values
    df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']] = df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']].fillna(0)

    print("Explanatory variable construction : SUCCESS")
    # 2. Construction of the explained variable
    df_products_purchased_to_predict = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= max_date) & (df_products_purchased_reduced['purchase_date'] > end_features_date)].copy()

    # Purchase indicator
    df_products_purchased_to_predict['y_has_purchased'] = 1

    y = df_products_purchased_to_predict[['customer_id', 'y_has_purchased']].drop_duplicates()

    print("Explained variable construction : SUCCESS")

    # 3. Merge between explained and explanatory variables
    dataset = pd.merge(df_customer_product, y, on = ['customer_id'], how = 'left')

    # 0 if there is no purchase
    dataset['y_has_purchased'] = dataset['y_has_purchased'].fillna(0)

    return dataset
## Export
# Export folder
BUCKET_OUT = "projet-bdc2324-team1/2_Output/Logistique Regression databases - First approach"
# Test dataset
dataset_test = dataset_construction(min_date = "2021-08-01", end_features_date = "2023-08-01", max_date = "2023-11-01", directory_path = "1")

# Export
FILE_KEY_OUT_S3 = "dataset_test.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    dataset_test.to_csv(file_out, index = False)

print("Test dataset export : SUCCESS")
# Train dataset
dataset_train = dataset_construction(min_date = "2021-05-01", end_features_date = "2023-05-01", max_date = "2023-08-01", directory_path = "1")

# Export
FILE_KEY_OUT_S3 = "dataset_train.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    dataset_train.to_csv(file_out, index = False)

print("Train dataset export : SUCCESS")
print("END OF DATASET GENERATION : SUCCESS")