# Business Data Challenge - Team 1

import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings
from datetime import date, timedelta, datetime

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})
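# Assumption: the AWS_S3_ENDPOINT environment variable holds the S3 endpoint host and the
# usual AWS credential environment variables are available to s3fs.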
# Import KPI construction functions
exec(open('0_KPI_functions.py').read())
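# 0_KPI_functions.py is assumed to define the helpers used below:
# display_databases, campaigns_kpi_function, tickets_kpi_function and customerplus_kpi_function.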

# Ignore warnings
warnings.filterwarnings('ignore')
def display_covering_time(df, company, datecover):
    """
    Record the daily time coverage of one company in the datecover dictionary and print it.
    """
    min_date = df['purchase_date'].min().strftime("%Y-%m-%d")
    max_date = df['purchase_date'].max().strftime("%Y-%m-%d")
    datecover[company] = [datetime.strptime(min_date, "%Y-%m-%d") + timedelta(days=x)
                          for x in range((datetime.strptime(max_date, "%Y-%m-%d") - datetime.strptime(min_date, "%Y-%m-%d")).days)]
    print(f'Company {company} coverage: {min_date} - {max_date}')
    return datecover
def compute_time_intersection(datecover):
    """
    Return the sorted list of dates (as "YYYY-MM-DD" strings) covered by all companies at once.
    """
    timestamps_sets = [set(timestamps) for timestamps in datecover.values()]
    intersection = set.intersection(*timestamps_sets)
    intersection_list = list(intersection)
    formated_dates = [dt.strftime("%Y-%m-%d") for dt in intersection_list]
    return sorted(formated_dates)
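
# Illustrative example (hypothetical data): with two companies whose coverages overlap,
# only the jointly covered days are kept.
#   datecover = {'A': [datetime(2022, 1, 1) + timedelta(days=x) for x in range(5)],
#                'B': [datetime(2022, 1, 3) + timedelta(days=x) for x in range(5)]}
#   compute_time_intersection(datecover)  # -> ['2022-01-03', '2022-01-04', '2022-01-05']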
def df_coverage_modelization(sport, coverage_features = 0.7):
    """
    Return the start date, end-of-features date and final date (as "YYYY-MM-DD" strings)
    used to build the train and test datasets, based on the period covered by every company.
    """
    datecover = {}
    for company in sport:
        df_products_purchased_reduced = display_databases(company, file_name = "products_purchased_reduced",
                                                          datetime_col = ['purchase_date'])
        datecover = display_covering_time(df_products_purchased_reduced, company, datecover)
    # print(datecover.keys())
    dt_coverage = compute_time_intersection(datecover)
    start_date = dt_coverage[0]
    end_of_features = dt_coverage[int(coverage_features * len(dt_coverage))]
    final_date = dt_coverage[-1]
    return start_date, end_of_features, final_date
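
# Example usage (also shown commented out further below): the three returned dates bound the
# feature period [start_date, end_of_features] and the prediction period (end_of_features, final_date].
#   start_date, end_of_features, final_date = df_coverage_modelization(list_of_comp, coverage_features = 0.7)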
def dataset_construction(min_date, end_features_date, max_date, directory_path):

    # Import the cleaned datasets
    df_customerplus_clean_0 = display_databases(directory_path, file_name = "customerplus_cleaned")
    df_campaigns_information = display_databases(directory_path, file_name = "campaigns_information", datetime_col = ['opened_at', 'sent_at', 'campaign_sent_at'])
    df_products_purchased_reduced = display_databases(directory_path, file_name = "products_purchased_reduced", datetime_col = ['purchase_date'])

    # if directory_path == "101":
    #     df_products_purchased_reduced_1 = display_databases(directory_path, file_name = "products_purchased_reduced_1", datetime_col = ['purchase_date'])
    #     df_products_purchased_reduced = pd.concat([df_products_purchased_reduced, df_products_purchased_reduced_1])

    # Consistency filter before applying our method
    max_date = pd.to_datetime(max_date, utc = True, format = 'ISO8601')
    end_features_date = pd.to_datetime(end_features_date, utc = True, format = 'ISO8601')
    min_date = pd.to_datetime(min_date, utc = True, format = 'ISO8601')

    # Filter the df_campaigns_information table
    df_campaigns_information = df_campaigns_information[(df_campaigns_information['sent_at'] <= end_features_date) & (df_campaigns_information['sent_at'] >= min_date)]
    df_campaigns_information.loc[df_campaigns_information['opened_at'] >= end_features_date, 'opened_at'] = np.datetime64('NaT')

    # Filter the df_products_purchased_reduced table
    df_products_purchased_features = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= end_features_date) & (df_products_purchased_reduced['purchase_date'] >= min_date)]

    print("Data filtering : SUCCESS")

    # 1. Merge everything and build the KPIs

    # KPIs on marketing campaigns
    df_campaigns_kpi = campaigns_kpi_function(campaigns_information = df_campaigns_information)

    # KPIs on purchasing behaviour
    df_tickets_kpi = tickets_kpi_function(tickets_information = df_products_purchased_features)

    # KPIs on socio-demographic data
    df_customerplus_clean = customerplus_kpi_function(customerplus_clean = df_customerplus_clean_0)

    print("KPIs construction : SUCCESS")

    # Merge with customer-related KPIs
    df_customer = pd.merge(df_customerplus_clean, df_campaigns_kpi, on = 'customer_id', how = 'left')

    # Fill NaN values
    df_customer[['nb_campaigns', 'nb_campaigns_opened']] = df_customer[['nb_campaigns', 'nb_campaigns_opened']].fillna(0)

    # Merge with purchasing-behaviour KPIs
    df_customer_product = pd.merge(df_tickets_kpi, df_customer, on = 'customer_id', how = 'outer')

    # Fill NaN values
    df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']] = df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']].fillna(0)

    print("Explanatory variable construction : SUCCESS")

    # 2. Construction of the explained variable
    df_products_purchased_to_predict = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= max_date) & (df_products_purchased_reduced['purchase_date'] > end_features_date)]

    # Purchase indicator
    df_products_purchased_to_predict['y_has_purchased'] = 1

    y = df_products_purchased_to_predict[['customer_id', 'y_has_purchased']].drop_duplicates()

    print("Explained variable construction : SUCCESS")

    # 3. Merge between explained and explanatory variables
    dataset = pd.merge(df_customer_product, y, on = ['customer_id'], how = 'left')

    # 0 if there is no purchase in the prediction period
    dataset['y_has_purchased'] = dataset['y_has_purchased'].fillna(0)

    # Add the company id as a prefix to customer_id
    dataset['customer_id'] = directory_path + '_' + dataset['customer_id'].astype('str')
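    # e.g. customer 12184 of company '2' gets customer_id '2_12184'; the anonymous ids below are matched on this form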

    return dataset
## Exportation

companies = {'musee' : ['1', '2', '3', '4'], # , '101'
             'sport': ['5', '6', '7', '8', '9'],
             'musique' : ['10', '11', '12', '13', '14']}

type_of_comp = input('Choose the company type: sport? musique? musee? ')
list_of_comp = companies[type_of_comp]

# Export folder
BUCKET_OUT = f'projet-bdc2324-team1/Generalization/{type_of_comp}'
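# Output files land under this prefix, e.g. projet-bdc2324-team1/Generalization/sport/Test_set/dataset_test5.csv
# for company '5' of the sport group.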

# Create the test and train datasets for the selected companies

# start_date, end_of_features, final_date = df_coverage_modelization(list_of_comp, coverage_features = 0.7)
start_date = "2021-05-01"
end_of_features = "2022-11-01"
final_date = "2023-11-01"

# Anonymous customer of each company; the values already carry the company prefix,
# since customer_id is compared after the prefix is added in dataset_construction
anonymous_customer = {'1' : '1_1', '2' : '2_12184', '3' : '3_1', '4' : '4_2', '101' : '101_1',
                      '5' : '5_191835', '6' : '6_591412', '7' : '7_49632', '8' : '8_1942', '9' : '9_19683'}

for company in list_of_comp:
    dataset = dataset_construction(min_date = start_date, end_features_date = end_of_features,
                                   max_date = final_date, directory_path = company)

    # Drop the anonymous customer
    dataset = dataset[dataset['customer_id'] != anonymous_customer[company]]

    # Train / test split (70 % / 30 %) after a reproducible shuffle
    np.random.seed(42)

    split_ratio = 0.7
    split_index = int(len(dataset) * split_ratio)
    dataset = dataset.sample(frac=1).reset_index(drop=True)
    dataset_train = dataset.iloc[:split_index]
    dataset_test = dataset.iloc[split_index:]

    # Export the test set
    FILE_KEY_OUT_S3 = "dataset_test" + company + ".csv"
    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Test_set/" + FILE_KEY_OUT_S3

    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
        dataset_test.to_csv(file_out, index = False)

    print("Exportation dataset test : SUCCESS")

    # Export the train set
    FILE_KEY_OUT_S3 = "dataset_train" + company + ".csv"
    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Train_set/" + FILE_KEY_OUT_S3

    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
        dataset_train.to_csv(file_out, index = False)

    print("Exportation dataset train : SUCCESS")

print("END OF DATASET GENERATION : SUCCESS")