Add KPI target variables

This commit is contained in:
Antoine JOUBREL 2024-03-23 11:51:18 +00:00
parent 1a0a5a40cf
commit 7a9548f295
3 changed files with 303 additions and 4087 deletions

View File

@@ -1,5 +1,8 @@
# Purpose of the script : Construction of training and test datasets for modelling by company
# Input : KPI construction function and clean databases in the 0_Input folder
# Output : Train and test datasets by company

# Packages
import pandas as pd
import numpy as np
import os
@@ -9,12 +12,10 @@ import warnings
from datetime import date, timedelta, datetime
from sklearn.model_selection import train_test_split

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Import KPI construction functions
exec(open('0_KPI_functions.py').read())
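# 0_KPI_functions.py starts with a digit and so cannot be loaded with a plain
# `import`; exec() is one workaround that drops its functions straight into
# this namespace. An importlib-based sketch of the same idea (an alternative,
# assuming the file sits in the working directory; functions would then be
# reached as kpi_functions.campaigns_kpi_function(...)):
#   import importlib.util
#   spec = importlib.util.spec_from_file_location("kpi_functions", "0_KPI_functions.py")
#   kpi_functions = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(kpi_functions)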
@@ -24,50 +25,59 @@ warnings.filterwarnings('ignore')

def dataset_construction(min_date, end_features_date, max_date, directory_path):

    # Import of cleaned and merged datasets
    df_customerplus_clean_0 = display_input_databases(directory_path, file_name = "customerplus_cleaned")
    df_campaigns_information = display_input_databases(directory_path, file_name = "campaigns_information", datetime_col = ['opened_at', 'sent_at', 'campaign_sent_at'])
    df_products_purchased_reduced = display_input_databases(directory_path, file_name = "products_purchased_reduced", datetime_col = ['purchase_date'])
    df_target_information = display_input_databases(directory_path, file_name = "target_information")

    # Dates in datetime format
    max_date = pd.to_datetime(max_date, utc = True, format = 'ISO8601')
    end_features_date = pd.to_datetime(end_features_date, utc = True, format = 'ISO8601')
    min_date = pd.to_datetime(min_date, utc = True, format = 'ISO8601')

    # Filter for database df_campaigns_information
    df_campaigns_information = df_campaigns_information[(df_campaigns_information['sent_at'] <= end_features_date) & (df_campaigns_information['sent_at'] >= min_date)]
    # .loc avoids pandas' chained-assignment pitfall, which can silently write to a copy
    df_campaigns_information.loc[df_campaigns_information['opened_at'] >= end_features_date, 'opened_at'] = np.datetime64('NaT')
    # Filter for database df_products_purchased_reduced
    df_products_purchased_features = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= end_features_date) & (df_products_purchased_reduced['purchase_date'] >= min_date)]

    print("Data filtering : SUCCESS")

    # Building and merging features

    # Campaigns features
    df_campaigns_kpi = campaigns_kpi_function(campaigns_information = df_campaigns_information, max_date = end_features_date)

    # Purchasing behavior features
    df_tickets_kpi = tickets_kpi_function(tickets_information = df_products_purchased_features)

    # Socio-demographic features
    df_customerplus_clean = customerplus_kpi_function(customerplus_clean = df_customerplus_clean_0)

    # Targets features
    df_targets_kpi = targets_KPI(df_target = df_target_information)

    print("KPIs construction : SUCCESS")

    # Merge - campaigns features
    df_customer = pd.merge(df_customerplus_clean, df_campaigns_kpi, on = 'customer_id', how = 'left')

    # Fill NaN values
    df_customer[['nb_campaigns', 'nb_campaigns_opened']] = df_customer[['nb_campaigns', 'nb_campaigns_opened']].fillna(0)

    # Merge - targets features
    df_customer = pd.merge(df_customer, df_targets_kpi, on = 'customer_id', how = 'left')

    # Fill NaN values
    df_customer[['nb_targets', 'target_jeune', 'target_optin', 'target_optout', 'target_scolaire', 'target_entreprise', 'target_famille', 'target_newsletter', 'target_abonne']] = df_customer[['nb_targets', 'target_jeune', 'target_optin', 'target_optout', 'target_scolaire', 'target_entreprise', 'target_famille', 'target_newsletter', 'target_abonne']].fillna(0)

    # Standardise the number of targets, which is closely linked to the company's operations
    df_customer['nb_targets'] = (df_customer['nb_targets'] - (df_customer['nb_targets'].mean())) / (df_customer['nb_targets'].std())
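    # A guarded variant of the z-score above (a sketch, not in this commit):
    # .std() returns 0 for a constant column and NaN for a single row, and
    # either value would propagate through the division.
    #   nb_std = df_customer['nb_targets'].std()
    #   if nb_std > 0:
    #       df_customer['nb_targets'] = (df_customer['nb_targets'] - df_customer['nb_targets'].mean()) / nb_std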
    # Merge - purchasing behavior features
    df_customer_product = pd.merge(df_customer, df_tickets_kpi, on = 'customer_id', how = 'outer')

    # Fill NaN values
    df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']] = df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']].fillna(0)
@@ -84,7 +94,7 @@ def dataset_construction(min_date, end_features_date, max_date, directory_path):
    # 2. Construction of the explained variable
    df_products_purchased_to_predict = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= max_date) & (df_products_purchased_reduced['purchase_date'] > end_features_date)]

    # Construction of the dependent variable
    df_products_purchased_to_predict['y_has_purchased'] = 1

    y = df_products_purchased_to_predict[['customer_id', 'y_has_purchased']].drop_duplicates()
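    # The lines between this hunk and the next are unchanged and hidden by the
    # diff; presumably y is joined back onto the features there, with customers
    # absent from the prediction window coded as non-buyers. A sketch of that
    # assumed step (not shown in this diff):
    #   dataset = pd.merge(df_customer_product, y, on = 'customer_id', how = 'left')
    #   dataset['y_has_purchased'] = dataset['y_has_purchased'].fillna(0)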
@@ -103,28 +113,24 @@ def dataset_construction(min_date, end_features_date, max_date, directory_path):
    return dataset

## Export

# Sectors
companies = {'musee' : ['1', '2', '3', '4'], # , '101'
             'sport': ['5', '6', '7', '8', '9'],
             'musique' : ['10', '11', '12', '13', '14']}

# Chosen sector
type_of_comp = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
list_of_comp = companies[type_of_comp]

# Export folder
BUCKET_OUT = f'projet-bdc2324-team1/Generalization/{type_of_comp}'

# Dates used for the construction of features and the dependent variable
# start_date, end_of_features, final_date = df_coverage_modelization(list_of_comp, coverage_train = 0.7)
start_date = "2021-05-01"
end_of_features = "2022-11-01"
final_date = "2023-11-01"

# Anonymous customer to be deleted from the datasets
anonymous_customer = {'1' : '1_1', '2' : '2_12184', '3' : '3_1', '4' : '4_2', '101' : '101_1',
                      '5' : '5_191835', '6' : '6_591412', '7' : '7_49632', '8' : '8_1942', '9' : '9_19683',
                      '10' : '10_19521', '11' : '11_36', '12' : '12_1706757', '13' : '13_8422', '14' : '14_6354'}
@@ -133,33 +139,23 @@ for company in list_of_comp:
    dataset = dataset_construction(min_date = start_date, end_features_date = end_of_features,
                                   max_date = final_date, directory_path = company)

    # Deletion of the anonymous customer
    dataset = dataset[dataset['customer_id'] != anonymous_customer[company]]

    # Split between train and test
    dataset_train, dataset_test = train_test_split(dataset, test_size=0.3, random_state=42)
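    # If y_has_purchased is strongly imbalanced, a stratified variant would
    # keep the class ratio identical in both sets (an alternative sketch,
    # assuming the column carries no NaN at this point):
    #   dataset_train, dataset_test = train_test_split(dataset, test_size=0.3, random_state=42, stratify=dataset['y_has_purchased'])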
    # Dataset Test
    # Export
    FILE_KEY_OUT_S3 = "dataset_test" + company + ".csv"
    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Test_set/" + FILE_KEY_OUT_S3

    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
        dataset_test.to_csv(file_out, index = False)

    print("Export of dataset test : SUCCESS")

    # Dataset train
    # Export
    FILE_KEY_OUT_S3 = "dataset_train" + company + ".csv"
    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Train_set/" + FILE_KEY_OUT_S3
@@ -167,7 +163,7 @@ for company in list_of_comp:
    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
        dataset_train.to_csv(file_out, index = False)

    print("Export of dataset train : SUCCESS")

print("End of dataset generation for ", type_of_comp, " companies : SUCCESS")

View File

@@ -44,7 +44,6 @@ def campaigns_kpi_function(campaigns_information = None, max_date = None):
    return campaigns_reduced

def tickets_kpi_function(tickets_information = None):
    tickets_information_copy = tickets_information.copy()
@@ -128,3 +127,38 @@ def customerplus_kpi_function(customerplus_clean = None):
    return customerplus_clean
def concatenate_names(names):
    return ', '.join(names)

def targets_KPI(df_target = None):

    df_target['target_name'] = df_target['target_name'].fillna('').str.lower()

    # Target name categories for museum companies
    df_target['target_jeune'] = df_target['target_name'].str.contains('|'.join(['jeune', 'pass_culture', 'etudiant', '12-25 ans', 'student', 'jeunesse']), case=False).astype(int)
    df_target['target_optin'] = df_target['target_name'].str.contains('|'.join(['optin', 'opt-in']), case=False).astype(int)
    df_target['target_optout'] = df_target['target_name'].str.contains('|'.join(['optout', 'unsubscribed']), case=False).astype(int)
    df_target['target_scolaire'] = df_target['target_name'].str.contains('|'.join(['scolaire', 'enseignant', 'chercheur', 'schulen', 'école']), case=False).astype(int)
    df_target['target_entreprise'] = df_target['target_name'].str.contains('|'.join(['b2b', 'btob', 'cse']), case=False).astype(int)
    df_target['target_famille'] = df_target['target_name'].str.contains('|'.join(['famille', 'enfants', 'family']), case=False).astype(int)
    df_target['target_newsletter'] = df_target['target_name'].str.contains('|'.join(['nl', 'newsletter']), case=False).astype(int)

    # Target name category for sport companies
    df_target['target_abonne'] = ((
        df_target['target_name']
        .str.contains('|'.join(['abo', 'adh']), case=False)
        & ~df_target['target_name'].str.contains('|'.join(['hors abo', 'anciens abo']), case=False)
    ).astype(int))

    df_target_categorie = df_target.groupby('customer_id')[['target_jeune', 'target_optin', 'target_optout', 'target_scolaire', 'target_entreprise', 'target_famille', 'target_newsletter', 'target_abonne']].max()

    target_agg = df_target.groupby('customer_id').agg(
        nb_targets=('target_name', 'nunique') # tuples specify the output column names
        # all_targets=('target_name', concatenate_names),
        # all_target_types=('target_type_name', concatenate_names)
    ).reset_index()

    target_agg = pd.merge(target_agg, df_target_categorie, how='left', on='customer_id')

    return target_agg
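# Toy usage sketch of targets_KPI on hypothetical data (every value below is
# invented for illustration):
#   toy = pd.DataFrame({'customer_id': ['1_1', '1_1', '1_2'],
#                       'target_name': ['Newsletter jeunesse', 'optin', 'abonnés 2022']})
#   targets_KPI(df_target = toy)
#   -> one row per customer_id: nb_targets plus one 0/1 flag per category
#      ('1_1' gets target_newsletter = target_jeune = target_optin = 1,
#       '1_2' gets target_abonne = 1)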

File diff suppressed because it is too large