Compare commits

No commits in common. "main" and "generalization" have entirely different histories.

main ... generalization

0_6_Segmentation.py (Normal file, 40 lines)

@@ -0,0 +1,40 @@
import pandas as pd
import numpy as np
import os
import io
import s3fs
import re
import pickle
import warnings


exec(open('utils_segmentation.py').read())
warnings.filterwarnings('ignore')

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# choose the type of companies for which you want to run the pipeline
type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')

# load test set
dataset_test = load_test_file(type_of_activity)

# Load Model
model = load_model(type_of_activity, 'LogisticRegression_Benchmark')

# Processing
X_test = dataset_test[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'purchase_date_min', 'purchase_date_max',
            'time_between_purchase', 'nb_tickets_internet', 'is_email_true', 'opt_in', #'is_partner',
            'gender_female', 'gender_male', 'gender_other', 'nb_campaigns', 'nb_campaigns_opened']]

y_test = dataset_test[['y_has_purchased']]

# Prediction
y_pred_prob = model.predict_proba(X_test)[:, 1]

# Add probability to dataset_test
dataset_test['Probability_to_buy'] = y_pred_prob
print('probability added to dataset_test')
print(dataset_test.head())
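Note: the exec(open('utils_segmentation.py').read()) pattern above pulls load_test_file and load_model into the global namespace. A regular import is the more idiomatic equivalent; a minimal sketch, assuming utils_segmentation.py defines those two functions at module top level (the module layout is an assumption, utils_segmentation.py itself is not part of this diff):

import warnings
# assumed layout: utils_segmentation.py defines load_test_file and load_model
from utils_segmentation import load_test_file, load_model

warnings.filterwarnings('ignore')
type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
dataset_test = load_test_file(type_of_activity)
model = load_model(type_of_activity, 'LogisticRegression_Benchmark')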
0_6_segmentation_V2TP.py (Normal file, 99 lines)

@@ -0,0 +1,99 @@
### imports ###
### not necessary ?? As we exec the utils .py file associated

"""
import pandas as pd
import numpy as np
import os
import io
import s3fs
import re
import pickle
import warnings
import matplotlib.pyplot as plt
"""

### --- beginning of the code --- ###


### hyperparameters of the code ###

###################################

# choose the type of companies for which you want to run the pipeline
type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')

# choose the model we use for the segmentation
model_name = "LogisticRegression_Benchmark"

###################################


# execute file including functions we need
exec(open('utils_segmentation_V2TP.py').read())

warnings.filterwarnings('ignore')

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# load test set
dataset_test = load_test_file(type_of_activity)

# Load Model
model = load_model(type_of_activity, model_name)


### Preprocessing of data
X_test = dataset_test[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'purchase_date_min', 'purchase_date_max',
            'time_between_purchase', 'nb_tickets_internet', 'is_email_true', 'opt_in', #'is_partner',
            'gender_female', 'gender_male', 'gender_other', 'nb_campaigns', 'nb_campaigns_opened', 'country_fr']]

y_test = dataset_test[['y_has_purchased']]

X_test_segment = X_test

# add y_has_purchased to X_test
X_test_segment["has_purchased"] = y_test

# Add prediction and probability to dataset_test
y_pred = model.predict(X_test)
X_test_segment["has_purchased_estim"] = y_pred

y_pred_prob = model.predict_proba(X_test)[:, 1]
X_test_segment['score'] = y_pred_prob

X_test_segment["segment"] = np.where(X_test_segment['score']<0.25, '1',
                   np.where(X_test_segment['score']<0.5, '2',
                   np.where(X_test_segment['score']<0.75, '3', '4')))

### 1. business KPIs

business_var = ["nb_tickets", "nb_purchases", "total_amount", "nb_campaigns"]
X_test_business_fig = df_business_fig(X_test_segment, "segment", business_var)

# save histogram to MinIO
hist_segment_business_KPIs(X_test_business_fig, "segment", "size", "nb_tickets",
                           "nb_purchases", "total_amount", "nb_campaigns")
save_file_s3_mp(File_name = "segments_business_KPI_", type_of_activity = type_of_activity)


### 2. description of marketing personae (spider chart)

# table summarizing variables relative to marketing personae
X_test_segment_mp = df_segment_mp(X_test_segment, "segment", "gender_female",
                                  "gender_male", "gender_other", "country_fr")

# table relative to purchasing behaviour
X_test_segment_pb = df_segment_pb(X_test_segment, "segment", "nb_tickets_internet", "nb_tickets",
                                  "nb_campaigns_opened", "nb_campaigns", "opt_in")

# concatenation of tables to prepare the plot
X_test_segment_caract = pd.concat([X_test_segment_pb, X_test_segment_mp[['share_known_gender', 'share_of_women', 'country_fr']]], axis=1)

# visualization and save the graphic to MinIO
categories = list(X_test_segment_caract.drop("segment", axis=1).columns)
radar_mp_plot_all(df=X_test_segment_caract, categories=categories)
save_file_s3_mp(File_name = "spider_chart_all_", type_of_activity = type_of_activity)
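For reference, the nested np.where ladder above maps scores into four labelled buckets at the 0.25/0.5/0.75 cut points. A minimal sketch of the same bucketing with a single pd.cut call (not from the repo; note that pd.cut's right-closed intervals put a score of exactly 0.25 in bucket '1', where the strict < above puts it in '2'):

import numpy as np
import pandas as pd

scores = pd.Series([0.1, 0.3, 0.6, 0.9])  # stand-in for X_test_segment['score']
segment = pd.cut(scores,
                 bins=[-np.inf, 0.25, 0.5, 0.75, np.inf],
                 labels=['1', '2', '3', '4']).astype(str)
print(segment.tolist())  # ['1', '2', '3', '4']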
@@ -19,16 +19,15 @@ S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# import functions defined elsewhere
exec(open('utils_sales_forecast.py').read())
exec(open('utils_CA_segment.py').read())
# from utils_CA_segment import *

# define type of activity
type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
PATH = f"projet-bdc2324-team1/2_Output/2_3_Sales_Forecast/{type_of_activity}/"
PATH = f"projet-bdc2324-team1/Output_expected_CA/{type_of_activity}/"

# type of model for the score
type_of_model = "LogisticRegression_cv"
# type_of_model = "LogisticRegression_Benchmark"

# load train and test sets
dataset_train, dataset_test = load_train_test(type_of_activity)
@@ -69,10 +68,6 @@ save_file_s3_ca("hist_score_adjusted_", type_of_activity)
X_test_table_adjusted_scores = (100 * X_test_segment.groupby("quartile")[["score","score_adjusted", "has_purchased"]].mean()).round(2).reset_index()
X_test_table_adjusted_scores = X_test_table_adjusted_scores.rename(columns = {col : f"{col} (%)" for col in X_test_table_adjusted_scores.columns if col in ["score","score_adjusted", "has_purchased"]})

print("Table of scores :\n")
print(X_test_table_adjusted_scores)
print("\n")

# save table
file_name = "table_adjusted_score_"
FILE_PATH_OUT_S3 = PATH + file_name + type_of_activity + ".csv"
@@ -86,24 +81,14 @@ X_test_segment = project_tickets_CA (X_test_segment, "nb_purchases", "nb_tickets


### 3. table summarizing projections (nb tickets, revenue)
"""
X_test_expected_CA = round(summary_expected_CA(df=X_test_segment, segment="quartile",
                    nb_tickets_expected="nb_tickets_expected", total_amount_expected="total_amount_expected",
                    total_amount="total_amount", pace_purchase="pace_purchase"),2)
                    """

X_test_expected_CA = round(summary_expected_CA(df=X_test_segment, segment="quartile",
                    nb_tickets_expected="nb_tickets_expected", total_amount_expected="total_amount_expected",
                    total_amount="total_amount_corrected", pace_purchase="pace_purchase"),2)

# rename columns
mapping_dict = {col: col.replace("perct", "(%)").replace("_", " ") for col in X_test_expected_CA.columns}
X_test_expected_CA = X_test_expected_CA.rename(columns=mapping_dict)

print("Summary of forecast :\n")
print(X_test_expected_CA)
print("\n")

# save table
file_name = "table_expected_CA_"
FILE_PATH_OUT_S3 = PATH + file_name + type_of_activity + ".csv"
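The hunk above builds FILE_PATH_OUT_S3 but the actual write falls outside the excerpt. A sketch of the save step, assuming the fs object created at the top of the script and the same s3fs pattern used by save_file_s3 later in this diff (the call itself is an assumption, not shown in the hunk):

with fs.open(FILE_PATH_OUT_S3, 'w') as f:
    X_test_expected_CA.to_csv(f, index=False)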
@@ -14,7 +14,7 @@ fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})


# Import KPI construction functions
exec(open('utils_features_construction.py').read())
exec(open('0_KPI_functions.py').read())

# Ignore warning
warnings.filterwarnings('ignore')
@@ -5,12 +5,6 @@ import io
import s3fs
import re
import warnings
from datetime import date, timedelta, datetime
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns



# Ignore warning
warnings.filterwarnings('ignore')
@@ -27,56 +21,54 @@ companies = {'musee' : ['1', '2', '3', '4'], # , '101'
            'musique' : ['10', '11', '12', '13', '14']}


# type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
for type_of_activity in ['musee', 'sport', 'musique'] :
type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
list_of_comp = companies[type_of_activity]

    list_of_comp = companies[type_of_activity]
# Load files
customer, campaigns_kpi, campaigns_brut, tickets, products, targets = load_files(list_of_comp)

    # Load files
    customer, campaigns_kpi, campaigns_brut, tickets, products, targets = load_files(list_of_comp)
# Identify anonymous customer for each company and remove them from our datasets
outlier_list = outlier_detection(tickets, list_of_comp)

    # Identify anonymous customer for each company and remove them from our datasets
    outlier_list = outlier_detection(tickets, list_of_comp)
# Identify valid customer (customer who bought tickets after starting date or received mails after starting date)
customer_valid_list = valid_customer_detection(products, campaigns_brut)

    # Identify valid customer (customer who bought tickets after starting date or received mails after starting date)
    customer_valid_list = valid_customer_detection(products, campaigns_brut)
databases = [customer, campaigns_kpi, campaigns_brut, tickets, products]

    databases = [customer, campaigns_kpi, campaigns_brut, tickets, products]

    for dataset in databases:
for dataset in databases:
    dataset['customer_id'] = dataset['customer_id'].apply(lambda x: remove_elements(x, outlier_list))  # remove outlier
    dataset = dataset[dataset['customer_id'].isin(customer_valid_list)]  # keep only valid customer
    #print(f'shape of {dataset} : ', dataset.shape)

    # Identify customer who bought during the period of y
    customer_target_period = identify_purchase_during_target_periode(products)
    customer['has_purchased_target_period'] = np.where(customer['customer_id'].isin(customer_target_period), 1, 0)
# Identify customer who bought during the period of y
customer_target_period = identify_purchase_during_target_periode(products)
customer['has_purchased_target_period'] = np.where(customer['customer_id'].isin(customer_target_period), 1, 0)

    # Generate graph and automatically saved them in the bucket
    compute_nb_clients(customer, type_of_activity)
# Generate graph and automatically saved them in the bucket
compute_nb_clients(customer, type_of_activity)

    #maximum_price_paid(customer, type_of_activity)
#maximum_price_paid(customer, type_of_activity)

    target_proportion(customer, type_of_activity)
target_proportion(customer, type_of_activity)

    mailing_consent(customer, type_of_activity)
mailing_consent(customer, type_of_activity)

    mailing_consent_by_target(customer, type_of_activity)
mailing_consent_by_target(customer)

    gender_bar(customer, type_of_activity)
gender_bar(customer, type_of_activity)

    country_bar(customer, type_of_activity)
country_bar(customer, type_of_activity)

    lazy_customer_plot(campaigns_kpi, type_of_activity)
lazy_customer_plot(campaigns_kpi, type_of_activity)

    campaigns_effectiveness(customer, type_of_activity)
campaigns_effectiveness(customer, type_of_activity)

    sale_dynamics(products, campaigns_brut, type_of_activity)
sale_dynamics(products, campaigns_brut, type_of_activity)

    tickets_internet(tickets, type_of_activity)
tickets_internet(tickets, type_of_activity)

    already_bought_online(tickets, type_of_activity)
already_bought_online(tickets, type_of_activity)

    box_plot_price_tickets(tickets, type_of_activity)
box_plot_price_tickets(tickets, type_of_activity)

    target_description(targets, type_of_activity)
target_description(targets, type_of_activity)
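This hunk interleaves two variants of the same pipeline: one branch wraps the body in a for loop over the three activity types, the other drives type_of_activity from input(). A sketch of how both entry points could share one body (run_stat_desc is a hypothetical name; the helpers are the ones called above):

def run_stat_desc(type_of_activity):
    # hypothetical wrapper around the pipeline shown in this hunk
    list_of_comp = companies[type_of_activity]
    customer, campaigns_kpi, campaigns_brut, tickets, products, targets = load_files(list_of_comp)
    compute_nb_clients(customer, type_of_activity)
    # ... remaining KPI plots exactly as in the hunk ...

# batch mode (loop variant); the input() variant would call run_stat_desc once
for type_of_activity in ['musee', 'sport', 'musique']:
    run_stat_desc(type_of_activity)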
@@ -1,86 +0,0 @@

# Packages
import pandas as pd
import numpy as np
import os
import io
import s3fs
import re
import pickle
import warnings
import matplotlib.pyplot as plt
from tabulate import tabulate

###################################

# choose the model we use for the segmentation
# model_name = "LogisticRegression_Benchmark"
model_name = "LogisticRegression_cv"

###################################


# execute file including functions we need
exec(open('utils_segmentation.py').read())

warnings.filterwarnings('ignore')

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})


# choose the type of companies for which you want to run the pipeline
# type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
for type_of_activity in ['musee', 'sport', 'musique'] :


    # load test set
    dataset_test = load_test_file(type_of_activity)

    # Load Model
    model = load_model(type_of_activity, model_name)


    ### Preprocessing of data
    X_test = dataset_test.drop(columns = 'y_has_purchased')

    y_test = dataset_test[['y_has_purchased']]

    X_test_segment = X_test

    # add y_has_purchased to X_test
    X_test_segment["has_purchased"] = y_test

    # Add prediction and probability to dataset_test
    y_pred = model.predict(X_test)
    X_test_segment["has_purchased_estim"] = y_pred

    y_pred_prob = model.predict_proba(X_test)[:, 1]
    X_test_segment['score'] = y_pred_prob

    X_test_segment["segment"] = np.where(X_test_segment['score']<0.25, '1',
                       np.where(X_test_segment['score']<0.5, '2',
                       np.where(X_test_segment['score']<0.75, '3', '4')))

    ### 1. business KPIs

    business_var = ["nb_tickets", "nb_purchases", "total_amount", "nb_campaigns"]
    X_test_business_fig = df_business_fig(X_test_segment, "segment", business_var)
    print(f"business figures for {type_of_activity} companies :\n")
    print(X_test_business_fig)
    print("\n")

    # save histogram to MinIO
    hist_segment_business_KPIs(X_test_business_fig, "segment", "size", "nb_tickets",
                               "nb_purchases", "total_amount", "nb_campaigns", type_of_activity)
    save_file_s3_mp(File_name = "segments_business_KPI_", type_of_activity = type_of_activity)


    ### 2. description of marketing personae
    ## A. Spider chart
    radar_mp_plot_all(df = X_test_segment, type_of_activity = type_of_activity)
    save_file_s3_mp(File_name = "spider_chart_all_", type_of_activity = type_of_activity)

    ## B. Latex table
    known_sociodemo_caracteristics(df = X_test_segment, type_of_activity = type_of_activity)
Descriptive_statistics/debug.ipynb (Normal file, 148 lines)

File diff suppressed because one or more lines are too long

Descriptive_statistics/generate_stat_desc.py (Normal file, 68 lines)

@@ -0,0 +1,68 @@
import pandas as pd
import numpy as np
import os
import io
import s3fs
import re
import warnings

# Ignore warning
warnings.filterwarnings('ignore')

exec(open('../0_KPI_functions.py').read())
exec(open('plot.py').read())

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

companies = {'musee' : ['1', '2', '3', '4'], # , '101'
            'sport': ['5'],
            'musique' : ['10', '11', '12', '13', '14']}


type_of_activity = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
list_of_comp = companies[type_of_activity]

# Load files
customer, campaigns_kpi, campaigns_brut, tickets, products = load_files(list_of_comp)

# Identify anonymous customer for each company and remove them from our datasets
outlier_list = outlier_detection(tickets, list_of_comp)

# Identify valid customer (customer who bought tickets after starting date or received mails after starting date)
customer_valid_list = valid_customer_detection(products, campaigns_brut)

databases = [customer, campaigns_kpi, campaigns_brut, tickets, products]

for dataset in databases:
    dataset['customer_id'] = dataset['customer_id'].apply(lambda x: remove_elements(x, outlier_list))  # remove outlier
    dataset = dataset[dataset['customer_id'].isin(customer_valid_list)]  # keep only valid customer
    #print(f'shape of {dataset} : ', dataset.shape)

# Identify customer who bought during the period of y
customer_target_period = identify_purchase_during_target_periode(products)
customer['has_purchased_target_period'] = np.where(customer['customer_id'].isin(customer_target_period), 1, 0)

# Generate graph and automatically saved them in the bucket
compute_nb_clients(customer, type_of_activity)

maximum_price_paid(customer, type_of_activity)

mailing_consent(customer, type_of_activity)

mailing_consent_by_target(customer)

gender_bar(customer, type_of_activity)

country_bar(customer, type_of_activity)

lazy_customer_plot(campaigns_kpi, type_of_activity)

#campaigns_effectiveness(customer, type_of_activity)

sale_dynamics(products, campaigns_brut, type_of_activity)

tickets_internet(tickets, type_of_activity)

box_plot_price_tickets(tickets, type_of_activity)
Descriptive_statistics/plot.py (Normal file, 328 lines)

@@ -0,0 +1,328 @@
import pandas as pd
import os
import s3fs
import io
import warnings
from datetime import date, timedelta, datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns


def load_files(nb_compagnie):
    customer = pd.DataFrame()
    campaigns_brut = pd.DataFrame()
    campaigns_kpi = pd.DataFrame()
    products = pd.DataFrame()
    tickets = pd.DataFrame()

    # start of the loop generating aggregated datasets for the 5 entertainment companies
    for directory_path in nb_compagnie:
        df_customerplus_clean_0 = display_databases(directory_path, file_name = "customerplus_cleaned")
        df_campaigns_brut = display_databases(directory_path, file_name = "campaigns_information", datetime_col = ['opened_at', 'sent_at', 'campaign_sent_at'])
        df_products_purchased_reduced = display_databases(directory_path, file_name = "products_purchased_reduced", datetime_col = ['purchase_date'])
        df_target_information = display_databases(directory_path, file_name = "target_information")

        df_campaigns_kpi = campaigns_kpi_function(campaigns_information = df_campaigns_brut)
        df_tickets_kpi = tickets_kpi_function(tickets_information = df_products_purchased_reduced)
        df_customerplus_clean = customerplus_kpi_function(customerplus_clean = df_customerplus_clean_0)

        # create the number_company column, used to aggregate the results
        df_tickets_kpi["number_company"] = int(directory_path)
        df_campaigns_brut["number_company"] = int(directory_path)
        df_campaigns_kpi["number_company"] = int(directory_path)
        df_customerplus_clean["number_company"] = int(directory_path)
        df_target_information["number_company"] = int(directory_path)

        # index handling: prefix ids with the company number
        df_tickets_kpi["customer_id"] = directory_path + '_' + df_tickets_kpi['customer_id'].astype('str')
        df_campaigns_brut["customer_id"] = directory_path + '_' + df_campaigns_brut['customer_id'].astype('str')
        df_campaigns_kpi["customer_id"] = directory_path + '_' + df_campaigns_kpi['customer_id'].astype('str')
        df_customerplus_clean["customer_id"] = directory_path + '_' + df_customerplus_clean['customer_id'].astype('str')
        df_products_purchased_reduced["customer_id"] = directory_path + '_' + df_products_purchased_reduced['customer_id'].astype('str')

        # concatenation
        customer = pd.concat([customer, df_customerplus_clean], ignore_index=True)
        campaigns_kpi = pd.concat([campaigns_kpi, df_campaigns_kpi], ignore_index=True)
        campaigns_brut = pd.concat([campaigns_brut, df_campaigns_brut], ignore_index=True)
        tickets = pd.concat([tickets, df_tickets_kpi], ignore_index=True)
        products = pd.concat([products, df_products_purchased_reduced], ignore_index=True)

    return customer, campaigns_kpi, campaigns_brut, tickets, products


def save_file_s3(File_name, type_of_activity):
    image_buffer = io.BytesIO()
    plt.savefig(image_buffer, format='png')
    image_buffer.seek(0)
    FILE_PATH = f"projet-bdc2324-team1/stat_desc/{type_of_activity}/"
    FILE_PATH_OUT_S3 = FILE_PATH + File_name + type_of_activity + '.png'
    with fs.open(FILE_PATH_OUT_S3, 'wb') as s3_file:
        s3_file.write(image_buffer.read())
    plt.close()


def outlier_detection(tickets, company_list, show_diagram=False):

    outlier_list = list()

    for company in company_list:
        total_amount_share = tickets[tickets['number_company']==int(company)].groupby('customer_id')['total_amount'].sum().reset_index()
        total_amount_share['CA'] = total_amount_share['total_amount'].sum()
        total_amount_share['share_total_amount'] = total_amount_share['total_amount']/total_amount_share['CA']

        total_amount_share_index = total_amount_share.set_index('customer_id')
        df_circulaire = total_amount_share_index['total_amount'].sort_values(axis = 0, ascending = False)
        #print('df circulaire : ', df_circulaire.head())
        top = df_circulaire[:1]
        #print('top : ', top)
        outlier_list.append(top.index[0])
        rest = df_circulaire[1:]

        rest_sum = rest.sum()

        new_series = pd.concat([top, pd.Series([rest_sum], index=['Autre'])])

        if show_diagram:
            plt.figure(figsize=(3, 3))
            plt.pie(new_series, labels=new_series.index, autopct='%1.1f%%', startangle=140, pctdistance=0.5)
            plt.axis('equal')
            plt.title(f'Répartition des montants totaux pour la compagnie {company}')
            plt.show()
    return outlier_list


def valid_customer_detection(products, campaigns_brut):
    products_valid = products[products['purchase_date']>="2021-05-01"]
    consumer_valid_product = products_valid['customer_id'].to_list()

    campaigns_valid = campaigns_brut[campaigns_brut["sent_at"]>="2021-05-01"]
    consumer_valid_campaigns = campaigns_valid['customer_id'].to_list()

    consumer_valid = consumer_valid_product + consumer_valid_campaigns
    return consumer_valid


def identify_purchase_during_target_periode(products):
    products_target_period = products[(products['purchase_date']>="2022-11-01")
    & (products['purchase_date']<="2023-11-01")]
    customer_target_period = products_target_period['customer_id'].to_list()
    return customer_target_period


def remove_elements(lst, elements_to_remove):
    return ''.join([x for x in lst if x not in elements_to_remove])


def compute_nb_clients(customer, type_of_activity):
    company_nb_clients = customer[customer["purchase_count"]>0].groupby("number_company")["customer_id"].count().reset_index()
    plt.bar(company_nb_clients["number_company"], company_nb_clients["customer_id"]/1000)

    plt.xlabel('Company')
    plt.ylabel("Number of clients (thousands)")
    plt.title(f"Number of clients for {type_of_activity}")
    plt.xticks(company_nb_clients["number_company"], ["{}".format(i) for i in company_nb_clients["number_company"]])
    plt.show()
    save_file_s3("nb_clients_", type_of_activity)


def maximum_price_paid(customer, type_of_activity):
    company_max_price = customer.groupby("number_company")["max_price"].max().reset_index()
    plt.bar(company_max_price["number_company"], company_max_price["max_price"])

    plt.xlabel('Company')
    plt.ylabel("Maximal price of a ticket")
    plt.title(f"Maximal price of a ticket for {type_of_activity}")
    plt.xticks(company_max_price["number_company"], ["{}".format(i) for i in company_max_price["number_company"]])
    plt.show()
    save_file_s3("Maximal_price_", type_of_activity)


def mailing_consent(customer, type_of_activity):
    mailing_consent = customer.groupby("number_company")["opt_in"].mean().reset_index()

    plt.bar(mailing_consent["number_company"], mailing_consent["opt_in"])

    plt.xlabel('Company')
    plt.ylabel('Consent')
    plt.title(f'Consent of mailing for {type_of_activity}')
    plt.xticks(mailing_consent["number_company"], ["{}".format(i) for i in mailing_consent["number_company"]])
    plt.show()
    save_file_s3("mailing_consent_", type_of_activity)


def mailing_consent_by_target(customer):
    df_graph = customer.groupby(["number_company", "has_purchased_target_period"])["opt_in"].mean().reset_index()
    # build the grouped barplot
    fig, ax = plt.subplots(figsize=(10, 6))

    categories = df_graph["number_company"].unique()
    bar_width = 0.35
    bar_positions = np.arange(len(categories))

    # group the data by label and create the grouped bars
    for label in df_graph["has_purchased_target_period"].unique():
        label_data = df_graph[df_graph['has_purchased_target_period'] == label]
        values = [label_data[label_data['number_company'] == category]['opt_in'].values[0]*100 for category in categories]

        label_printed = "purchased" if label else "no purchase"
        ax.bar(bar_positions, values, bar_width, label=label_printed)

        # update bar positions for the next group
        bar_positions = [pos + bar_width for pos in bar_positions]

    # add labels, legend, etc.
    ax.set_xlabel('Company')
    ax.set_ylabel('Consent')
    # note: type_of_activity is not a parameter here; it resolves from the enclosing scope at call time
    ax.set_title(f'Consent of mailing according to target for {type_of_activity}')
    ax.set_xticks([pos + bar_width / 2 for pos in np.arange(len(categories))])
    ax.set_xticklabels(categories)
    ax.legend()

    # display the plot
    plt.show()
    save_file_s3("mailing_consent_target_", type_of_activity)


def gender_bar(customer, type_of_activity):
    company_genders = customer.groupby("number_company")[["gender_male", "gender_female", "gender_other"]].mean().reset_index()

    # build the stacked barplot
    plt.bar(company_genders["number_company"], company_genders["gender_male"], label = "Homme")
    plt.bar(company_genders["number_company"], company_genders["gender_female"],
            bottom = company_genders["gender_male"], label = "Femme")
    plt.bar(company_genders["number_company"], company_genders["gender_other"],
            bottom = company_genders["gender_male"] + company_genders["gender_female"], label = "Inconnu")

    plt.xlabel('Company')
    plt.ylabel("Gender")
    plt.title(f"Gender of Customer for {type_of_activity}")
    plt.legend()
    plt.xticks(company_genders["number_company"], ["{}".format(i) for i in company_genders["number_company"]])
    plt.show()
    save_file_s3("gender_bar_", type_of_activity)


def country_bar(customer, type_of_activity):
    company_country_fr = customer.groupby("number_company")["country_fr"].mean().reset_index()
    plt.bar(company_country_fr["number_company"], company_country_fr["country_fr"])

    plt.xlabel('Company')
    plt.ylabel("Share of French Customer")
    plt.title(f"Share of French Customer for {type_of_activity}")
    plt.xticks(company_country_fr["number_company"], ["{}".format(i) for i in company_country_fr["number_company"]])
    plt.show()
    save_file_s3("country_bar_", type_of_activity)


def lazy_customer_plot(campaigns_kpi, type_of_activity):
    company_lazy_customers = campaigns_kpi.groupby("number_company")["nb_campaigns_opened"].mean().reset_index()
    plt.bar(company_lazy_customers["number_company"], company_lazy_customers["nb_campaigns_opened"])

    plt.xlabel('Company')
    plt.ylabel("Share of Customers who did not open mail")
    plt.title(f"Share of Customers who did not open mail for {type_of_activity}")
    plt.xticks(company_lazy_customers["number_company"], ["{}".format(i) for i in company_lazy_customers["number_company"]])
    plt.show()
    save_file_s3("lazy_customer_", type_of_activity)


def campaigns_effectiveness(customer, type_of_activity):

    campaigns_effectiveness = customer.groupby("number_company")["opt_in"].mean().reset_index()

    plt.bar(campaigns_effectiveness["number_company"], campaigns_effectiveness["opt_in"])

    plt.xlabel('Company')
    plt.ylabel("Number of Customers (thousands)")
    plt.title(f"Number of Customers who have bought or received mails for {type_of_activity}")
    plt.legend()
    plt.xticks(campaigns_effectiveness["number_company"], ["{}".format(i) for i in campaigns_effectiveness["number_company"]])
    plt.show()
    save_file_s3("campaigns_effectiveness_", type_of_activity)


def sale_dynamics(products, campaigns_brut, type_of_activity):
    purchase_min = products.groupby(['customer_id'])['purchase_date'].min().reset_index()
    purchase_min.rename(columns = {'purchase_date' : 'first_purchase_event'}, inplace = True)
    purchase_min['first_purchase_event'] = pd.to_datetime(purchase_min['first_purchase_event'])
    purchase_min['first_purchase_month'] = pd.to_datetime(purchase_min['first_purchase_event'].dt.strftime('%Y-%m'))

    # month of the first email received
    first_mail_received = campaigns_brut.groupby('customer_id')['sent_at'].min().reset_index()
    first_mail_received.rename(columns = {'sent_at' : 'first_email_reception'}, inplace = True)
    first_mail_received['first_email_reception'] = pd.to_datetime(first_mail_received['first_email_reception'])
    first_mail_received['first_email_month'] = pd.to_datetime(first_mail_received['first_email_reception'].dt.strftime('%Y-%m'))

    # merge
    known_customer = pd.merge(purchase_min[['customer_id', 'first_purchase_month']],
                      first_mail_received[['customer_id', 'first_email_month']], on = 'customer_id', how = 'outer')

    # month from which the customer is considered known
    known_customer['known_date'] = pd.to_datetime(known_customer[['first_email_month', 'first_purchase_month']].min(axis = 1), utc = True, format = 'ISO8601')

    # number of orders per month
    purchases_count = pd.merge(products[['customer_id', 'purchase_id', 'purchase_date']].drop_duplicates(), known_customer[['customer_id', 'known_date']], on = ['customer_id'], how = 'inner')
    purchases_count['is_customer_known'] = purchases_count['purchase_date'] > purchases_count['known_date'] + pd.DateOffset(months=1)
    purchases_count['purchase_date_month'] = pd.to_datetime(purchases_count['purchase_date'].dt.strftime('%Y-%m'))
    purchases_count = purchases_count[purchases_count['customer_id'] != 1]

    # number of orders per month by customer type
    nb_purchases_graph = purchases_count.groupby(['purchase_date_month', 'is_customer_known'])['purchase_id'].count().reset_index()
    nb_purchases_graph.rename(columns = {'purchase_id' : 'nb_purchases'}, inplace = True)

    nb_purchases_graph_2 = purchases_count.groupby(['purchase_date_month', 'is_customer_known'])['customer_id'].nunique().reset_index()
    nb_purchases_graph_2.rename(columns = {'customer_id' : 'nb_new_customer'}, inplace = True)

    # plot of order counts
    purchases_graph = nb_purchases_graph

    purchases_graph_used = purchases_graph[purchases_graph["purchase_date_month"] >= datetime(2021,3,1)]
    purchases_graph_used_0 = purchases_graph_used[purchases_graph_used["is_customer_known"]==False]
    purchases_graph_used_1 = purchases_graph_used[purchases_graph_used["is_customer_known"]==True]


    merged_data = pd.merge(purchases_graph_used_0, purchases_graph_used_1, on="purchase_date_month", suffixes=("_new", "_old"))

    plt.bar(merged_data["purchase_date_month"], merged_data["nb_purchases_new"], width=12, label="Nouveau client")
    plt.bar(merged_data["purchase_date_month"], merged_data["nb_purchases_old"],
            bottom=merged_data["nb_purchases_new"], width=12, label="Ancien client")


    # format the x-axis to show abbreviated month and year only
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%b%y'))

    plt.xlabel('Month')
    plt.ylabel("Number of Sales")
    plt.title(f"Number of Sales for {type_of_activity}")
    plt.legend()
    plt.show()
    save_file_s3("sale_dynamics_", type_of_activity)


def tickets_internet(tickets, type_of_activity):
    nb_tickets_internet = tickets.groupby("number_company")[["nb_tickets", "nb_tickets_internet"]].sum().reset_index()
    nb_tickets_internet["Share_ticket_internet"] = nb_tickets_internet["nb_tickets_internet"]*100 / nb_tickets_internet["nb_tickets"]

    plt.bar(nb_tickets_internet["number_company"], nb_tickets_internet["Share_ticket_internet"])

    plt.xlabel('Company')
    plt.ylabel("Share of Tickets Bought Online")
    plt.title(f"Share of Tickets Bought Online for {type_of_activity}")
    plt.xticks(nb_tickets_internet["number_company"], ["{}".format(i) for i in nb_tickets_internet["number_company"]])
    plt.show()
    save_file_s3("tickets_internet_", type_of_activity)


def box_plot_price_tickets(tickets, type_of_activity):
    price_tickets = tickets[(tickets['total_amount'] > 0)]
    sns.boxplot(data=price_tickets, y="total_amount", x="number_company", showfliers=False, showmeans=True)
    plt.title(f"Box plot of price tickets for {type_of_activity}")
    plt.xticks(price_tickets["number_company"], ["{}".format(i) for i in price_tickets["number_company"]])
    plt.show()
    save_file_s3("box_plot_price_tickets_", type_of_activity)
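A note on remove_elements above: it iterates the characters of a single customer_id string and keeps those not present in elements_to_remove, so when called with a list of full outlier IDs it returns most IDs unchanged. If the intent is to drop outlier customers entirely, the usual row filter would be (an assumption about intent, not what the file currently does):

customer = customer[~customer['customer_id'].isin(outlier_list)]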
Exploration_billet_AJ.ipynb (Normal file, 1964 lines)

File diff suppressed because one or more lines are too long

Identification_entreprise.ipynb (Normal file, 1610 lines)

File diff suppressed because it is too large

Notebook_AR.ipynb (Normal file, 247 lines)

File diff suppressed because one or more lines are too long

Notebook_Fanta.ipynb (Normal file, 825 lines)

@@ -0,0 +1,825 @@
 | 
			
		|||
{
 | 
			
		||||
 "cells": [
 | 
			
		||||
  {
 | 
			
		||||
   "cell_type": "markdown",
 | 
			
		||||
   "id": "aa74dbe0-f974-4b5c-94f4-4dba9fbc64fa",
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "source": [
 | 
			
		||||
    "# Business Data Challenge - Team 1"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
   "cell_type": "code",
 | 
			
		||||
   "execution_count": 1,
 | 
			
		||||
   "id": "94c498e7-7c50-45f9-b3f4-a1ab19b7ccc4",
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "outputs": [],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "import pandas as pd\n",
 | 
			
		||||
    "import numpy as np\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "\n"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
   "cell_type": "markdown",
 | 
			
		||||
   "id": "7a3b50ac-b1ff-4f3d-9938-e048fdc8e027",
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "source": [
 | 
			
		||||
    "Configuration de l'accès aux données"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
   "cell_type": "code",
 | 
			
		||||
   "execution_count": 2,
 | 
			
		||||
   "id": "0b029d42-fb02-481e-a407-7e41886198a6",
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "outputs": [
 | 
			
		||||
    {
 | 
			
		||||
     "data": {
 | 
			
		||||
      "text/plain": [
 | 
			
		||||
       "['bdc2324-data/1',\n",
 | 
			
		||||
       " 'bdc2324-data/10',\n",
 | 
			
		||||
       " 'bdc2324-data/101',\n",
 | 
			
		||||
       " 'bdc2324-data/11',\n",
 | 
			
		||||
       " 'bdc2324-data/12',\n",
 | 
			
		||||
       " 'bdc2324-data/13',\n",
 | 
			
		||||
       " 'bdc2324-data/14',\n",
 | 
			
		||||
       " 'bdc2324-data/2',\n",
 | 
			
		||||
       " 'bdc2324-data/3',\n",
 | 
			
		||||
       " 'bdc2324-data/4',\n",
 | 
			
		||||
       " 'bdc2324-data/5',\n",
 | 
			
		||||
       " 'bdc2324-data/6',\n",
 | 
			
		||||
       " 'bdc2324-data/7',\n",
 | 
			
		||||
       " 'bdc2324-data/8',\n",
 | 
			
		||||
       " 'bdc2324-data/9']"
 | 
			
		||||
      ]
 | 
			
		||||
     },
 | 
			
		||||
     "execution_count": 2,
 | 
			
		||||
     "metadata": {},
 | 
			
		||||
     "output_type": "execute_result"
 | 
			
		||||
    }
 | 
			
		||||
   ],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "import os\n",
 | 
			
		||||
    "import s3fs\n",
 | 
			
		||||
    "# Create filesystem object\n",
 | 
			
		||||
    "S3_ENDPOINT_URL = \"https://\" + os.environ[\"AWS_S3_ENDPOINT\"]\n",
 | 
			
		||||
    "fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "BUCKET = \"bdc2324-data\"\n",
 | 
			
		||||
    "fs.ls(BUCKET)"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
   "cell_type": "code",
 | 
			
		||||
   "execution_count": 4,
 | 
			
		||||
   "id": "fbaf9aa7-ff70-4dbe-a969-b801c593510b",
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "outputs": [],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "# Chargement des fichiers campaign_stats.csv\n",
 | 
			
		||||
    "FILE_PATH_S3 = 'bdc2324-data/1/1campaign_stats.csv'\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "with fs.open(FILE_PATH_S3, mode=\"rb\") as file_in:\n",
 | 
			
		||||
    "    campaign_stats_1 = pd.read_csv(file_in, sep=\",\")\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "FILE_PATH_S3 = 'bdc2324-data/2/2campaign_stats.csv'\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "with fs.open(FILE_PATH_S3, mode=\"rb\") as file_in:\n",
 | 
			
		||||
    "    campaign_stats_2 = pd.read_csv(file_in, sep=\",\")\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "FILE_PATH_S3 = 'bdc2324-data/3/3campaign_stats.csv'\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "with fs.open(FILE_PATH_S3, mode=\"rb\") as file_in:\n",
 | 
			
		||||
    "    campaign_stats_3 = pd.read_csv(file_in, sep=\",\")"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
   "cell_type": "code",
 | 
			
		||||
   "execution_count": 5,
 | 
			
		||||
   "id": "1e0418bc-8e97-4a04-b7f3-bda3bef7d36e",
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "outputs": [],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "# Conversion des dates 'sent_at'\n",
 | 
			
		||||
    "campaign_stats_1['sent_at'] = pd.to_datetime(campaign_stats_1['sent_at'], format = 'ISO8601', utc = True)\n",
 | 
			
		||||
    "campaign_stats_2['sent_at'] = pd.to_datetime(campaign_stats_2['sent_at'], format = 'ISO8601', utc = True)\n",
 | 
			
		||||
    "campaign_stats_3['sent_at'] = pd.to_datetime(campaign_stats_3['sent_at'], format = 'ISO8601', utc = True)"
 | 
			
		||||
   ]
 | 
			
		||||
  },
 | 
			
		||||
  {
 | 
			
		||||
   "cell_type": "code",
 | 
			
		||||
   "execution_count": 6,
 | 
			
		||||
   "id": "cc5c20ba-e827-4e5a-97a5-7f3947e0621c",
 | 
			
		||||
   "metadata": {},
 | 
			
		||||
   "outputs": [
 | 
			
		||||
    {
 | 
			
		||||
     "name": "stdout",
 | 
			
		||||
     "output_type": "stream",
 | 
			
		||||
     "text": [
 | 
			
		||||
      "2023-11-09 18:10:45+00:00\n",
 | 
			
		||||
      "2020-06-02 08:24:08+00:00\n",
 | 
			
		||||
      "2023-10-12 01:39:48+00:00\n",
 | 
			
		||||
      "2023-10-10 17:06:29+00:00\n",
 | 
			
		||||
      "2023-11-01 09:20:48+00:00\n",
 | 
			
		||||
      "2021-03-31 14:59:02+00:00\n"
 | 
			
		||||
     ]
 | 
			
		||||
    }
 | 
			
		||||
   ],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "# Chaque unites correspond à une période ? --> Non, les dossiers ont juste pour but de réduire la taille des fichiers\n",
 | 
			
		||||
    "print(campaign_stats_1['sent_at'].max())\n",
 | 
			
		||||
    "print(campaign_stats_1['sent_at'].min())\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "print(campaign_stats_2['sent_at'].max())\n",
 | 
			
		||||
    "print(campaign_stats_2['sent_at'].min())\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "print(campaign_stats_3['sent_at'].max())\n",
 | 
			
		||||
    "print(campaign_stats_3['sent_at'].min())"
 | 
			
		||||
   ]
 | 
			
		||||
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c75632df-b018-4bb8-a99d-83f15af94369",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0         2021-03-28 16:01:09+00:00\n",
       "1         2021-03-28 16:01:09+00:00\n",
       "2         2021-03-28 16:00:59+00:00\n",
       "3         2021-03-28 16:00:59+00:00\n",
       "4         2021-03-28 16:01:06+00:00\n",
       "                     ...           \n",
       "6214803   2023-10-23 09:32:33+00:00\n",
       "6214804   2023-10-23 09:32:49+00:00\n",
       "6214805   2023-10-23 09:33:28+00:00\n",
       "6214806   2023-10-23 09:31:53+00:00\n",
       "6214807   2023-10-23 09:33:54+00:00\n",
       "Name: sent_at, Length: 6214808, dtype: datetime64[ns, UTC]"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "campaign_stats_1['sent_at']"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f4c0c63e-0418-4cfe-a57d-7af57bca0c22",
   "metadata": {},
   "source": [
    "### Customersplus.csv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "d3bf880d-1065-4d5b-9954-1830aa5081af",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_1362/4118060109.py:9: DtypeWarning: Columns (20) have mixed types. Specify dtype option on import or set low_memory=False.\n",
      "  customers_plus_2 = pd.read_csv(file_in, sep=\",\")\n"
     ]
    }
   ],
   "source": [
    "FILE_PATH_S3 = 'bdc2324-data/1/1customersplus.csv'\n",
    "\n",
    "with fs.open(FILE_PATH_S3, mode=\"rb\") as file_in:\n",
    "    customers_plus_1 = pd.read_csv(file_in, sep=\",\")\n",
    "\n",
    "FILE_PATH_S3 = 'bdc2324-data/2/2customersplus.csv'\n",
    "\n",
    "with fs.open(FILE_PATH_S3, mode=\"rb\") as file_in:\n",
    "    customers_plus_2 = pd.read_csv(file_in, sep=\",\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "7368f381-db8e-4a4d-9fe2-5947eb55be58",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Index(['id', 'lastname', 'firstname', 'birthdate', 'email', 'street_id',\n",
       "       'created_at', 'updated_at', 'civility', 'is_partner', 'extra',\n",
       "       'deleted_at', 'reference', 'gender', 'is_email_true', 'extra_field',\n",
       "       'identifier', 'opt_in', 'structure_id', 'note', 'profession',\n",
       "       'language', 'mcp_contact_id', 'need_reload', 'last_buying_date',\n",
       "       'max_price', 'ticket_sum', 'average_price', 'fidelity',\n",
       "       'average_purchase_delay', 'average_price_basket',\n",
       "       'average_ticket_basket', 'total_price', 'preferred_category',\n",
       "       'preferred_supplier', 'preferred_formula', 'purchase_count',\n",
       "       'first_buying_date', 'last_visiting_date', 'zipcode', 'country', 'age',\n",
       "       'tenant_id'],\n",
       "      dtype='object')"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "customers_plus_1.columns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "08091935-b159-47fa-806c-e1444f3b227e",
   "metadata": {},
   "outputs": [],
   "source": [
    "customers_plus_1.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f8c8868-c1ac-4cee-af08-533d928f6764",
   "metadata": {},
   "outputs": [],
   "source": [
    "customers_plus_1['id'].nunique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf95daf2-4852-4718-b474-207a1ebd8ac4",
   "metadata": {},
   "outputs": [],
   "source": [
    "customers_plus_2['id'].nunique()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1425c385-3216-4e4f-ae8f-a121624721ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "common_id = set(customers_plus_2['id']).intersection(customers_plus_1['id'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "92533026-e27c-4f1f-81ca-64eda32a34c0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1"
      ]
     },
     "execution_count": 61,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "common_id = set(customers_plus_2['id']).intersection(customers_plus_1['id'])\n",
 | 
			
		||||
    "# Exemple id commun = caractéristiques communes\n",
 | 
			
		||||
    "print(customers_plus_2[customers_plus_2['id'] == list(common_id)[0]])\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "print(customers_plus_1[customers_plus_1['id'] == list(common_id)[0]])"
 | 
			
		||||
   ]
 | 
			
		||||
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "bf9ebc94-0ba6-443d-8e53-22477a6e79a7",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "id                          0.000000\n",
      "lastname                   43.461341\n",
      "firstname                  44.995588\n",
      "birthdate                  96.419870\n",
      "email                       8.622075\n",
      "street_id                   0.000000\n",
      "created_at                  0.000000\n",
      "updated_at                  0.000000\n",
      "civility                  100.000000\n",
      "is_partner                  0.000000\n",
      "extra                     100.000000\n",
      "deleted_at                100.000000\n",
      "reference                 100.000000\n",
      "gender                      0.000000\n",
      "is_email_true               0.000000\n",
      "extra_field               100.000000\n",
      "identifier                  0.000000\n",
      "opt_in                      0.000000\n",
      "structure_id               88.072380\n",
      "note                       99.403421\n",
      "profession                 95.913503\n",
      "language                   99.280945\n",
      "mcp_contact_id             34.876141\n",
      "need_reload                 0.000000\n",
      "last_buying_date           51.653431\n",
      "max_price                  51.653431\n",
      "ticket_sum                  0.000000\n",
      "average_price               8.639195\n",
      "fidelity                    0.000000\n",
      "average_purchase_delay     51.653431\n",
      "average_price_basket       51.653431\n",
      "average_ticket_basket      51.653431\n",
      "total_price                43.014236\n",
      "preferred_category        100.000000\n",
      "preferred_supplier        100.000000\n",
      "preferred_formula         100.000000\n",
      "purchase_count              0.000000\n",
      "first_buying_date          51.653431\n",
      "last_visiting_date        100.000000\n",
      "zipcode                    71.176564\n",
      "country                     5.459418\n",
      "age                        96.419870\n",
      "tenant_id                   0.000000\n",
      "dtype: float64\n"
     ]
    }
   ],
   "source": [
    "pd.DataFrame(customers_plus_1.isna().mean()*100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "6d62e73f-3925-490f-9fd4-d0e838903cb2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Chargement de toutes les données\n",
 | 
			
		||||
    "liste_base = ['customer_target_mappings', 'customersplus', 'target_types', 'tags', 'events', 'tickets', 'representations', 'purchases', 'products']\n",
 | 
			
		||||
    "\n",
 | 
			
		||||
    "for nom_base in liste_base:\n",
 | 
			
		||||
    "    FILE_PATH_S3 = 'bdc2324-data/11/11' + nom_base + '.csv'\n",
 | 
			
		||||
    "    with fs.open(FILE_PATH_S3, mode=\"rb\") as file_in:\n",
 | 
			
		||||
    "        globals()[nom_base] = pd.read_csv(file_in, sep=\",\")"
 | 
			
		||||
   ]
 | 
			
		||||
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "12b24f1c-eb3e-45be-aaf3-b9273180caa3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
 | 
			
		||||
       "<div>\n",
 | 
			
		||||
       "<style scoped>\n",
 | 
			
		||||
       "    .dataframe tbody tr th:only-of-type {\n",
 | 
			
		||||
       "        vertical-align: middle;\n",
 | 
			
		||||
       "    }\n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "    .dataframe tbody tr th {\n",
 | 
			
		||||
       "        vertical-align: top;\n",
 | 
			
		||||
       "    }\n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "    .dataframe thead th {\n",
 | 
			
		||||
       "        text-align: right;\n",
 | 
			
		||||
       "    }\n",
 | 
			
		||||
       "</style>\n",
 | 
			
		||||
       "<table border=\"1\" class=\"dataframe\">\n",
 | 
			
		||||
       "  <thead>\n",
 | 
			
		||||
       "    <tr style=\"text-align: right;\">\n",
 | 
			
		||||
       "      <th></th>\n",
 | 
			
		||||
       "      <th>id</th>\n",
 | 
			
		||||
       "      <th>lastname</th>\n",
 | 
			
		||||
       "      <th>firstname</th>\n",
 | 
			
		||||
       "      <th>birthdate</th>\n",
 | 
			
		||||
       "      <th>email</th>\n",
 | 
			
		||||
       "      <th>street_id</th>\n",
 | 
			
		||||
       "      <th>created_at</th>\n",
 | 
			
		||||
       "      <th>updated_at</th>\n",
 | 
			
		||||
       "      <th>civility</th>\n",
 | 
			
		||||
       "      <th>is_partner</th>\n",
 | 
			
		||||
       "      <th>...</th>\n",
 | 
			
		||||
       "      <th>tenant_id</th>\n",
 | 
			
		||||
       "      <th>id_x</th>\n",
 | 
			
		||||
       "      <th>customer_id</th>\n",
 | 
			
		||||
       "      <th>purchase_date</th>\n",
 | 
			
		||||
       "      <th>type_of</th>\n",
 | 
			
		||||
       "      <th>is_from_subscription</th>\n",
 | 
			
		||||
       "      <th>amount</th>\n",
 | 
			
		||||
       "      <th>is_full_price</th>\n",
 | 
			
		||||
       "      <th>start_date_time</th>\n",
 | 
			
		||||
       "      <th>event_name</th>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "  </thead>\n",
 | 
			
		||||
       "  <tbody>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>0</th>\n",
 | 
			
		||||
       "      <td>405082</td>\n",
 | 
			
		||||
       "      <td>lastname405082</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2023-01-12 06:30:31.197484+01:00</td>\n",
 | 
			
		||||
       "      <td>2023-01-12 06:30:31.197484+01:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>992423</td>\n",
 | 
			
		||||
       "      <td>405082</td>\n",
 | 
			
		||||
       "      <td>2023-01-11 17:08:41+01:00</td>\n",
 | 
			
		||||
       "      <td>3</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>13.0</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2023-02-06 20:00:00+01:00</td>\n",
 | 
			
		||||
       "      <td>zaide</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>1</th>\n",
 | 
			
		||||
       "      <td>405082</td>\n",
 | 
			
		||||
       "      <td>lastname405082</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2023-01-12 06:30:31.197484+01:00</td>\n",
 | 
			
		||||
       "      <td>2023-01-12 06:30:31.197484+01:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>992423</td>\n",
 | 
			
		||||
       "      <td>405082</td>\n",
 | 
			
		||||
       "      <td>2023-01-11 17:08:41+01:00</td>\n",
 | 
			
		||||
       "      <td>3</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>13.0</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2023-02-06 20:00:00+01:00</td>\n",
 | 
			
		||||
       "      <td>zaide</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>2</th>\n",
 | 
			
		||||
       "      <td>411168</td>\n",
 | 
			
		||||
       "      <td>lastname411168</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2023-03-17 06:30:35.431967+01:00</td>\n",
 | 
			
		||||
       "      <td>2023-03-17 06:30:35.431967+01:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1053934</td>\n",
 | 
			
		||||
       "      <td>411168</td>\n",
 | 
			
		||||
       "      <td>2023-03-16 16:23:10+01:00</td>\n",
 | 
			
		||||
       "      <td>3</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>62.0</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2023-03-19 16:00:00+01:00</td>\n",
 | 
			
		||||
       "      <td>luisa miller</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>3</th>\n",
 | 
			
		||||
       "      <td>411168</td>\n",
 | 
			
		||||
       "      <td>lastname411168</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2023-03-17 06:30:35.431967+01:00</td>\n",
 | 
			
		||||
       "      <td>2023-03-17 06:30:35.431967+01:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1053934</td>\n",
 | 
			
		||||
       "      <td>411168</td>\n",
 | 
			
		||||
       "      <td>2023-03-16 16:23:10+01:00</td>\n",
 | 
			
		||||
       "      <td>3</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>62.0</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2023-03-19 16:00:00+01:00</td>\n",
 | 
			
		||||
       "      <td>luisa miller</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>4</th>\n",
 | 
			
		||||
       "      <td>4380</td>\n",
 | 
			
		||||
       "      <td>lastname4380</td>\n",
 | 
			
		||||
       "      <td>firstname4380</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>1</td>\n",
 | 
			
		||||
       "      <td>2021-04-22 14:51:55.432952+02:00</td>\n",
 | 
			
		||||
       "      <td>2022-04-14 11:41:33.738500+02:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1189141</td>\n",
 | 
			
		||||
       "      <td>4380</td>\n",
 | 
			
		||||
       "      <td>2020-11-26 13:12:53+01:00</td>\n",
 | 
			
		||||
       "      <td>3</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>51.3</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2020-12-01 20:00:00+01:00</td>\n",
 | 
			
		||||
       "      <td>iphigenie en tauride</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>...</th>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>318964</th>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>lastname19095</td>\n",
 | 
			
		||||
       "      <td>firstname19095</td>\n",
 | 
			
		||||
       "      <td>1979-07-16</td>\n",
 | 
			
		||||
       "      <td>email19095</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2021-04-22 15:06:30.120537+02:00</td>\n",
 | 
			
		||||
       "      <td>2023-09-12 18:27:36.904104+02:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1090839</td>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>2019-05-19 21:18:36+02:00</td>\n",
 | 
			
		||||
       "      <td>1</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>4.5</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2019-05-27 20:00:00+02:00</td>\n",
 | 
			
		||||
       "      <td>entre femmes</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>318965</th>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>lastname19095</td>\n",
 | 
			
		||||
       "      <td>firstname19095</td>\n",
 | 
			
		||||
       "      <td>1979-07-16</td>\n",
 | 
			
		||||
       "      <td>email19095</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2021-04-22 15:06:30.120537+02:00</td>\n",
 | 
			
		||||
       "      <td>2023-09-12 18:27:36.904104+02:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1090839</td>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>2019-05-19 21:18:36+02:00</td>\n",
 | 
			
		||||
       "      <td>1</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>4.5</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2019-05-27 20:00:00+02:00</td>\n",
 | 
			
		||||
       "      <td>entre femmes</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>318966</th>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>lastname19095</td>\n",
 | 
			
		||||
       "      <td>firstname19095</td>\n",
 | 
			
		||||
       "      <td>1979-07-16</td>\n",
 | 
			
		||||
       "      <td>email19095</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2021-04-22 15:06:30.120537+02:00</td>\n",
 | 
			
		||||
       "      <td>2023-09-12 18:27:36.904104+02:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1090839</td>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>2019-05-19 21:18:36+02:00</td>\n",
 | 
			
		||||
       "      <td>1</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>4.5</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2019-05-27 20:00:00+02:00</td>\n",
 | 
			
		||||
       "      <td>entre femmes</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>318967</th>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>lastname19095</td>\n",
 | 
			
		||||
       "      <td>firstname19095</td>\n",
 | 
			
		||||
       "      <td>1979-07-16</td>\n",
 | 
			
		||||
       "      <td>email19095</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2021-04-22 15:06:30.120537+02:00</td>\n",
 | 
			
		||||
       "      <td>2023-09-12 18:27:36.904104+02:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1244277</td>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>2019-12-31 11:04:07+01:00</td>\n",
 | 
			
		||||
       "      <td>1</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>5.5</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2020-02-03 20:00:00+01:00</td>\n",
 | 
			
		||||
       "      <td>a boire et a manger</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "    <tr>\n",
 | 
			
		||||
       "      <th>318968</th>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>lastname19095</td>\n",
 | 
			
		||||
       "      <td>firstname19095</td>\n",
 | 
			
		||||
       "      <td>1979-07-16</td>\n",
 | 
			
		||||
       "      <td>email19095</td>\n",
 | 
			
		||||
       "      <td>6</td>\n",
 | 
			
		||||
       "      <td>2021-04-22 15:06:30.120537+02:00</td>\n",
 | 
			
		||||
       "      <td>2023-09-12 18:27:36.904104+02:00</td>\n",
 | 
			
		||||
       "      <td>NaN</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>...</td>\n",
 | 
			
		||||
       "      <td>1556</td>\n",
 | 
			
		||||
       "      <td>1244277</td>\n",
 | 
			
		||||
       "      <td>19095</td>\n",
 | 
			
		||||
       "      <td>2019-12-31 11:04:07+01:00</td>\n",
 | 
			
		||||
       "      <td>1</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>5.5</td>\n",
 | 
			
		||||
       "      <td>False</td>\n",
 | 
			
		||||
       "      <td>2020-02-03 20:00:00+01:00</td>\n",
 | 
			
		||||
       "      <td>a boire et a manger</td>\n",
 | 
			
		||||
       "    </tr>\n",
 | 
			
		||||
       "  </tbody>\n",
 | 
			
		||||
       "</table>\n",
 | 
			
		||||
       "<p>318969 rows × 52 columns</p>\n",
 | 
			
		||||
       "</div>"
 | 
			
		||||
      ],
 | 
			
		||||
      "text/plain": [
 | 
			
		||||
       "            id        lastname       firstname   birthdate       email  \\\n",
 | 
			
		||||
       "0       405082  lastname405082             NaN         NaN         NaN   \n",
 | 
			
		||||
       "1       405082  lastname405082             NaN         NaN         NaN   \n",
 | 
			
		||||
       "2       411168  lastname411168             NaN         NaN         NaN   \n",
 | 
			
		||||
       "3       411168  lastname411168             NaN         NaN         NaN   \n",
 | 
			
		||||
       "4         4380    lastname4380   firstname4380         NaN         NaN   \n",
 | 
			
		||||
       "...        ...             ...             ...         ...         ...   \n",
 | 
			
		||||
       "318964   19095   lastname19095  firstname19095  1979-07-16  email19095   \n",
 | 
			
		||||
       "318965   19095   lastname19095  firstname19095  1979-07-16  email19095   \n",
 | 
			
		||||
       "318966   19095   lastname19095  firstname19095  1979-07-16  email19095   \n",
 | 
			
		||||
       "318967   19095   lastname19095  firstname19095  1979-07-16  email19095   \n",
 | 
			
		||||
       "318968   19095   lastname19095  firstname19095  1979-07-16  email19095   \n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "        street_id                        created_at  \\\n",
 | 
			
		||||
       "0               6  2023-01-12 06:30:31.197484+01:00   \n",
 | 
			
		||||
       "1               6  2023-01-12 06:30:31.197484+01:00   \n",
 | 
			
		||||
       "2               6  2023-03-17 06:30:35.431967+01:00   \n",
 | 
			
		||||
       "3               6  2023-03-17 06:30:35.431967+01:00   \n",
 | 
			
		||||
       "4               1  2021-04-22 14:51:55.432952+02:00   \n",
 | 
			
		||||
       "...           ...                               ...   \n",
 | 
			
		||||
       "318964          6  2021-04-22 15:06:30.120537+02:00   \n",
 | 
			
		||||
       "318965          6  2021-04-22 15:06:30.120537+02:00   \n",
 | 
			
		||||
       "318966          6  2021-04-22 15:06:30.120537+02:00   \n",
 | 
			
		||||
       "318967          6  2021-04-22 15:06:30.120537+02:00   \n",
 | 
			
		||||
       "318968          6  2021-04-22 15:06:30.120537+02:00   \n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "                              updated_at  civility  is_partner  ...  \\\n",
 | 
			
		||||
       "0       2023-01-12 06:30:31.197484+01:00       NaN       False  ...   \n",
 | 
			
		||||
       "1       2023-01-12 06:30:31.197484+01:00       NaN       False  ...   \n",
 | 
			
		||||
       "2       2023-03-17 06:30:35.431967+01:00       NaN       False  ...   \n",
 | 
			
		||||
       "3       2023-03-17 06:30:35.431967+01:00       NaN       False  ...   \n",
 | 
			
		||||
       "4       2022-04-14 11:41:33.738500+02:00       NaN       False  ...   \n",
 | 
			
		||||
       "...                                  ...       ...         ...  ...   \n",
 | 
			
		||||
       "318964  2023-09-12 18:27:36.904104+02:00       NaN       False  ...   \n",
 | 
			
		||||
       "318965  2023-09-12 18:27:36.904104+02:00       NaN       False  ...   \n",
 | 
			
		||||
       "318966  2023-09-12 18:27:36.904104+02:00       NaN       False  ...   \n",
 | 
			
		||||
       "318967  2023-09-12 18:27:36.904104+02:00       NaN       False  ...   \n",
 | 
			
		||||
       "318968  2023-09-12 18:27:36.904104+02:00       NaN       False  ...   \n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "        tenant_id     id_x  customer_id              purchase_date  type_of  \\\n",
 | 
			
		||||
       "0            1556   992423       405082  2023-01-11 17:08:41+01:00        3   \n",
 | 
			
		||||
       "1            1556   992423       405082  2023-01-11 17:08:41+01:00        3   \n",
 | 
			
		||||
       "2            1556  1053934       411168  2023-03-16 16:23:10+01:00        3   \n",
 | 
			
		||||
       "3            1556  1053934       411168  2023-03-16 16:23:10+01:00        3   \n",
 | 
			
		||||
       "4            1556  1189141         4380  2020-11-26 13:12:53+01:00        3   \n",
 | 
			
		||||
       "...           ...      ...          ...                        ...      ...   \n",
 | 
			
		||||
       "318964       1556  1090839        19095  2019-05-19 21:18:36+02:00        1   \n",
 | 
			
		||||
       "318965       1556  1090839        19095  2019-05-19 21:18:36+02:00        1   \n",
 | 
			
		||||
       "318966       1556  1090839        19095  2019-05-19 21:18:36+02:00        1   \n",
 | 
			
		||||
       "318967       1556  1244277        19095  2019-12-31 11:04:07+01:00        1   \n",
 | 
			
		||||
       "318968       1556  1244277        19095  2019-12-31 11:04:07+01:00        1   \n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "        is_from_subscription amount  is_full_price            start_date_time  \\\n",
 | 
			
		||||
       "0                      False   13.0          False  2023-02-06 20:00:00+01:00   \n",
 | 
			
		||||
       "1                      False   13.0          False  2023-02-06 20:00:00+01:00   \n",
 | 
			
		||||
       "2                      False   62.0          False  2023-03-19 16:00:00+01:00   \n",
 | 
			
		||||
       "3                      False   62.0          False  2023-03-19 16:00:00+01:00   \n",
 | 
			
		||||
       "4                      False   51.3          False  2020-12-01 20:00:00+01:00   \n",
 | 
			
		||||
       "...                      ...    ...            ...                        ...   \n",
 | 
			
		||||
       "318964                 False    4.5          False  2019-05-27 20:00:00+02:00   \n",
 | 
			
		||||
       "318965                 False    4.5          False  2019-05-27 20:00:00+02:00   \n",
 | 
			
		||||
       "318966                 False    4.5          False  2019-05-27 20:00:00+02:00   \n",
 | 
			
		||||
       "318967                 False    5.5          False  2020-02-03 20:00:00+01:00   \n",
 | 
			
		||||
       "318968                 False    5.5          False  2020-02-03 20:00:00+01:00   \n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "                  event_name  \n",
 | 
			
		||||
       "0                      zaide  \n",
 | 
			
		||||
       "1                      zaide  \n",
 | 
			
		||||
       "2               luisa miller  \n",
 | 
			
		||||
       "3               luisa miller  \n",
 | 
			
		||||
       "4       iphigenie en tauride  \n",
 | 
			
		||||
       "...                      ...  \n",
 | 
			
		||||
       "318964          entre femmes  \n",
 | 
			
		||||
       "318965          entre femmes  \n",
 | 
			
		||||
       "318966          entre femmes  \n",
 | 
			
		||||
       "318967   a boire et a manger  \n",
 | 
			
		||||
       "318968   a boire et a manger  \n",
 | 
			
		||||
       "\n",
 | 
			
		||||
       "[318969 rows x 52 columns]"
 | 
			
		||||
      ]
 | 
			
		||||
     },
 | 
			
		||||
     "execution_count": 12,
 | 
			
		||||
     "metadata": {},
 | 
			
		||||
     "output_type": "execute_result"
 | 
			
		||||
    }
 | 
			
		||||
   ],
 | 
			
		||||
   "source": [
 | 
			
		||||
    "# Jointure\n",
 | 
			
		||||
    "merge_1 = pd.merge(purchases, tickets, left_on='id', right_on='purchase_id', how='inner')[['id_x', 'customer_id','product_id', 'purchase_date', 'type_of', 'is_from_subscription']]\n",
 | 
			
		||||
    "merge_2 = pd.merge(products, merge_1, left_on='id', right_on='product_id', how='inner')[['id_x', 'customer_id', 'representation_id', 'purchase_date', 'type_of', 'is_from_subscription', 'amount', 'is_full_price']]\n",
 | 
			
		||||
    "merge_3 = pd.merge(representations, merge_2, left_on='id', right_on='representation_id', how='inner')[['id_x', 'customer_id', 'event_id', 'purchase_date', 'type_of', 'is_from_subscription', 'amount', 'is_full_price', 'start_date_time']]\n",
 | 
			
		||||
    "merge_4 = pd.merge(events, merge_3, left_on='id', right_on='event_id', how='inner')[['id_x', 'customer_id', 'purchase_date', 'type_of', 'is_from_subscription', 'amount', 'is_full_price', 'start_date_time', 'name']]\n",
 | 
			
		||||
    "merge_4 = merge_4.rename(columns={'name': 'event_name'})\n",
 | 
			
		||||
    "df_customer_event = pd.merge(customersplus, merge_4, left_on = 'id', right_on = 'customer_id', how = 'inner')[['id_x', 'purchase_date', 'type_of', 'is_from_subscription', 'amount', 'is_full_price', 'start_date_time', 'event_name']]\n",
 | 
			
		||||
    "df_customer_event"
 | 
			
		||||
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
54	README.md
@ -1,15 +1,7 @@
# Business data challenge 2023-2024 | ENSAE Paris

# Arenametrix : customer segmentation

<p align="center">
    <img src="https://dev.arenametrix.fr/assets/logo_ax-806e8204f49bcc2c5e8cd34e9748d16a6038404e37fdb2dc9d61455bb06c6461.png" width=300>
</p>

## Team 1
## Team 1 :

* Antoine JOUBREL
* Alexis REVELLE
@ -17,53 +9,25 @@
* Thomas PIQUÉ

## Coaches
## Coaches :

* Elia LAPENTA
* Michael VISSER

## Support team

* Patrice MICHEL (Datastorm)
* Hassan MAISSORO (Datastorm)
* Alexandre PRINC (Arenametrix)

## Microeconomics coordinator

* Yuanzhe TANG
### Description of the problem
The goal of this project is to create segments of customers for 15 companies belonging to 3 different types of activities (sports companies, museums, and music companies).

### More detailed instructions provided by Arenametrix
- Definition of “marketing personae” that can be matched with a probability to buy a future event
- Matching between future events and people in the database (with, for instance, a probability to buy a future event)
- And thus, a forecast of the quantity of tickets sold per event, by “marketing personae” or by segment of the database
- BONUS : What is the best timing to send a communication to each contact in the database and each “marketing personae”
- BONUS : What should we tell each contact in the database and each “marketing personae” to make them come back
### Our approach
We opted for a sector-based approach, which means that 3 segmentations have been performed (one for each type of activity).
As the segments have to be linked to a probability of future purchase, we directly used the probability of purchase during the coming year to build the segments. The first step of the modelling is a pipeline that fits 3 ML models (Naive Bayes, Random Forest, and Logistic Regression) on the data to predict whether the customer will purchase during the year. We then use the estimated probability of purchase to split the customers into 4 segments (a minimal sketch of this score-based segmentation is shown below). For each segment, we can estimate the potential number of tickets and revenue for the coming year.
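
The sketch below illustrates this score-based segmentation. It is a minimal, self-contained example: the synthetic scores, the fixed thresholds (0.25 / 0.5 / 0.75) and the 4 labels are illustrative assumptions, not the exact values used by the pipeline.

```python
import numpy as np
import pandas as pd

# Hypothetical propensity scores; in the pipeline they come from a fitted
# classifier, e.g. model.predict_proba(X_test)[:, 1].
rng = np.random.default_rng(0)
scores = rng.uniform(0, 1, size=1_000)

# Split customers into 4 segments using fixed score thresholds
# (the thresholds actually used by the pipeline may differ).
segment = pd.cut(scores,
                 bins=[0, 0.25, 0.5, 0.75, 1],
                 labels=['1', '2', '3', '4'],
                 include_lowest=True)

segments = pd.DataFrame({'score': scores, 'segment': segment})
# Size and mean propensity of each segment: the basis for the
# per-segment ticket and revenue estimates.
print(segments.groupby('segment', observed=True)['score'].agg(['count', 'mean']))
```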
### How to run the code
Scripts have to be run in the order given by their numbers. Each of them is described below:

- `1_Input_cleaning.py` \
Clean raw data and generate dataframes that will be used to build datasets with insightful variables. Datasets are exported to location 0_Input/.
- `2_Datasets_generation.py` \
Use the dataframes previously created and aggregate them to create a test and a train set for each company. Databases are exported to location 1_Temp/1_0_Modelling_Datasets/ in a folder containing all 5 databases for a type of activity.
- `3_Modelling_datasets.py` \
For each type of activity, the test and train sets of the 5 tenants are concatenated. Databases are exported to location 1_Temp/1_0_Modelling_Datasets/.
- `4_Descriptive_statistics.py` \
Generate graphics providing some descriptive statistics about the data at the activity level. All graphics are exported to location 2_Output/2_0_Descriptive_Statistics/.
- `5_Modelling.py` \
3 ML models will be fitted on the data, and results will be exported for all 3 types of activities. \
3 pipelines are built, one per type of model (Naive Bayes, Random Forest, Logistic Regression). For the latter 2 ML methods, cross-validation was performed to ensure generalization. Graphics displaying the quality of the training are provided. The optimal parameters found are saved in a pickle file (which will be used in the 6th step to add propensity scores to the test set and then determine the customer segments). All these files are exported to location 2_Output/2_1_Modeling_results/
- `6_Segmentation_and_Marketing_Personae.py` \
The test set will be fitted with the optimal parameters computed previously, and a propensity score (probability of a future purchase) will be assigned to each customer of this dataset. Segmentation is performed according to the scores provided. Graphics describing the marketing personae associated with the segments as well as their business value are exported to location 2_Output/2_2_Segmentation_and_Marketing_Personae/.
- `7_Sales_Forecast.py` \
To ensure a decent recall, and because the target variable y is unbalanced (the global probability of purchase is between 4 and 14 %), the probabilities of purchasing are overestimated. The scores are therefore adjusted so that their mean approximates the overall probability of a purchase (see the sketch after this list). The adjusted score is used to estimate, for each customer, the number of tickets sold and the revenue generated during the coming year. Results are aggregated at segment level. A histogram displaying the adjusted propensity scores and 2 tables summarizing the forecast outcome are exported to location 2_Output/2_3_Sales_Forecast/.
- run 0_1_Input_cleaning.py to clean the raw data and generate dataframes that will be used to build datasets with insightful variables.
- run 0_2_Dataset_construction.py.
- run 0_3_General_modelization_dataset.py to generate test and train sets for the 3 types of activities.
- run the script 0_4_Generate_stat_desc.py to generate graphics describing the data.
- run 0_5_Machine_Learning.py. 3 ML models will be fitted on the data, and results will be exported for all 3 types of activities.
- run 0_6_Segmentation.py. The test set will be fitted with the optimal parameters computed previously, which yields a propensity score (probability of a future purchase) for each customer. Segmentation is performed according to the scores provided. This script exports graphics describing the marketing personae associated with the segments as well as their business value.
- run 0_7_CA_segment.py. The scores will be adjusted to better fit the overall probability of a purchase. This adjusted score is used to estimate the number of tickets sold and the revenue generated during the coming year.
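
The score adjustment mentioned for `7_Sales_Forecast.py` / `0_7_CA_segment.py` can be sketched as follows. Shifting the log-odds by a constant until the mean score matches the observed purchase rate is one simple way to do it; the function name `adjust`, the synthetic scores and the 10 % target rate below are illustrative assumptions, not the repository's exact method.

```python
import numpy as np

# Hypothetical propensity scores whose mean overestimates the true
# purchase rate; in the pipeline they come from the fitted model.
rng = np.random.default_rng(1)
scores = rng.beta(2, 3, size=10_000)  # mean around 0.40
target_rate = 0.10                    # observed purchase rate (4-14 %)

def adjust(scores, target_rate, tol=1e-6):
    """Shift the log-odds by a constant (found by bisection) so that the
    mean adjusted score approximates the target purchase rate."""
    logit = np.log(scores / (1 - scores))
    lo, hi = -20.0, 20.0
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if (1 / (1 + np.exp(-(logit + mid)))).mean() > target_rate:
            hi = mid  # mean still too high: shift further down
        else:
            lo = mid
    return 1 / (1 + np.exp(-(logit + (lo + hi) / 2)))

adjusted = adjust(scores, target_rate)
print(round(scores.mean(), 3), round(adjusted.mean(), 3))  # ~0.4 -> ~0.1

# Expected tickets and revenue per customer then follow by multiplying the
# adjusted score with per-customer averages, aggregated at segment level.
```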
									
								
								Spectacle/2_Modelization_spectacle.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2075
									
								
								Spectacle/2_Modelization_spectacle.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										2866
									
								
								Spectacle/2_bis_logit_baseline_statsmodels.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2866
									
								
								Spectacle/2_bis_logit_baseline_statsmodels.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										2176
									
								
								Spectacle/Exploration_spectacle.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2176
									
								
								Spectacle/Exploration_spectacle.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										9083
									
								
								Spectacle/Stat_desc.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										9083
									
								
								Spectacle/Stat_desc.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										1608
									
								
								Sport/Descriptive_statistics/stat_desc_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1608
									
								
								Sport/Descriptive_statistics/stat_desc_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										2821
									
								
								Sport/Modelization/2_Modelization_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2821
									
								
								Sport/Modelization/2_Modelization_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										8910
									
								
								Sport/Modelization/3_logit_cross_val_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										8910
									
								
								Sport/Modelization/3_logit_cross_val_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										18751
									
								
								Sport/Modelization/3_model_cv_sport+CA.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										18751
									
								
								Sport/Modelization/3_model_cv_sport+CA.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										3658
									
								
								Sport/Modelization/CA_segment_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										3658
									
								
								Sport/Modelization/CA_segment_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										2524
									
								
								Sport/Modelization/segment_analysis_sport_0_6.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2524
									
								
								Sport/Modelization/segment_analysis_sport_0_6.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										2296
									
								
								Sport/exploration_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2296
									
								
								Sport/exploration_sport.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because one or more lines are too long
											
										
									
								
							
							
								
								
									
										1833
									
								
								Traitement_Fanta.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										1833
									
								
								Traitement_Fanta.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							| 
@ -1,249 +0,0 @@
Package                   Version
------------------------- ---------------
aiohttp                   3.9.1
aiosignal                 1.3.1
alembic                   1.13.1
anyio                     4.2.0
archspec                  0.2.2
argon2-cffi               23.1.0
argon2-cffi-bindings      21.2.0
arrow                     1.3.0
astroid                   3.0.2
asttokens                 2.4.1
async-lru                 2.0.4
attrs                     23.2.0
Babel                     2.14.0
bcrypt                    4.1.2
beautifulsoup4            4.12.3
bleach                    6.1.0
blinker                   1.7.0
bokeh                     3.3.4
boltons                   23.1.1
boto3                     1.34.29
botocore                  1.34.29
branca                    0.7.0
Brotli                    1.1.0
cached-property           1.5.2
cachetools                5.3.2
certifi                   2023.11.17
cffi                      1.16.0
charset-normalizer        3.3.2
click                     8.1.7
click-plugins             1.1.1
cligj                     0.7.2
cloudpickle               3.0.0
colorama                  0.4.6
comm                      0.2.1
conda                     23.11.0
conda-libmamba-solver     23.12.0
conda-package-handling    2.2.0
conda_package_streaming   0.9.0
configparser              5.3.0
contourpy                 1.2.0
cryptography              41.0.7
cycler                    0.12.1
cytoolz                   0.12.2
dask                      2024.1.1
databricks-cli            0.18.0
debugpy                   1.8.0
decorator                 5.1.1
defusedxml                0.7.1
dill                      0.3.8
distributed               2024.1.1
distro                    1.8.0
docker                    7.0.0
duckdb                    0.9.2
entrypoints               0.4
exceptiongroup            1.2.0
executing                 2.0.1
fastjsonschema            2.19.1
fiona                     1.9.5
flake8                    7.0.0
Flask                     3.0.1
folium                    0.15.1
fonttools                 4.47.2
fqdn                      1.5.1
frozenlist                1.4.1
fsspec                    2023.12.2
GDAL                      3.8.3
gensim                    4.3.2
geopandas                 0.14.2
gitdb                     4.0.11
GitPython                 3.1.41
google-auth               2.27.0
greenlet                  3.0.3
gunicorn                  21.2.0
hvac                      2.1.0
idna                      3.6
importlib-metadata        7.0.1
importlib-resources       6.1.1
ipykernel                 6.29.0
ipython                   8.20.0
ipywidgets                8.1.1
isoduration               20.11.0
isort                     5.13.2
itsdangerous              2.1.2
jedi                      0.19.1
Jinja2                    3.1.3
jmespath                  1.0.1
joblib                    1.3.2
json5                     0.9.14
jsonpatch                 1.33
jsonpointer               2.4
jsonschema                4.21.1
jsonschema-specifications 2023.12.1
jupyter-cache             1.0.0
jupyter_client            8.6.0
jupyter_core              5.7.1
jupyter-events            0.9.0
jupyter-lsp               2.2.2
jupyter_server            2.12.5
jupyter-server-mathjax    0.2.6
jupyter_server_terminals  0.5.2
jupyterlab                4.0.11
jupyterlab_git            0.50.0
jupyterlab_pygments       0.3.0
jupyterlab_server         2.25.2
jupyterlab-widgets        3.0.9
kiwisolver                1.4.5
kubernetes                29.0.0
libmambapy                1.5.5
llvmlite                  0.41.1
locket                    1.0.0
lz4                       4.3.3
Mako                      1.3.1
mamba                     1.5.5
mapclassify               2.6.1
Markdown                  3.5.2
MarkupSafe                2.1.4
matplotlib                3.8.2
matplotlib-inline         0.1.6
mccabe                    0.7.0
menuinst                  2.0.2
mistune                   3.0.2
mlflow                    2.10.0
msgpack                   1.0.7
multidict                 6.0.4
munkres                   1.1.4
mypy                      1.8.0
mypy-extensions           1.0.0
nbclient                  0.8.0
nbconvert                 7.14.2
nbdime                    4.0.1
nbformat                  5.9.2
nest_asyncio              1.6.0
networkx                  3.2.1
nltk                      3.8.1
notebook_shim             0.2.3
numba                     0.58.1
numpy                     1.26.3
oauthlib                  3.2.2
opencv-python-headless    4.9.0.80
overrides                 7.7.0
packaging                 23.2
pandas                    2.2.0
pandocfilters             1.5.0
paramiko                  3.4.0
parso                     0.8.3
partd                     1.4.1
patsy                     0.5.6
pexpect                   4.9.0
pickleshare               0.7.5
pillow                    10.2.0
pip                       23.3.2
pkgutil_resolve_name      1.3.10
platformdirs              4.1.0
plotly                    5.18.0
pluggy                    1.3.0
polars                    0.20.6
prometheus-client         0.19.0
prometheus-flask-exporter 0.23.0
prompt-toolkit            3.0.42
protobuf                  4.24.4
psutil                    5.9.8
ptyprocess                0.7.0
pure-eval                 0.2.2
pyarrow                   14.0.2
pyarrow-hotfix            0.6
pyasn1                    0.5.1
pyasn1-modules            0.3.0
pycodestyle               2.11.1
pycosat                   0.6.6
pycparser                 2.21
pyflakes                  3.2.0
Pygments                  2.17.2
PyJWT                     2.8.0
pylint                    3.0.3
PyNaCl                    1.5.0
pyOpenSSL                 23.3.0
pyparsing                 3.1.1
pyproj                    3.6.1
PySocks                   1.7.1
python-dateutil           2.8.2
python-json-logger        2.0.7
pytz                      2023.3.post1
pyu2f                     0.1.5
PyYAML                    6.0.1
pyzmq                     25.1.2
querystring-parser        1.2.4
referencing               0.32.1
regex                     2023.12.25
requests                  2.31.0
requests-oauthlib         1.3.1
rfc3339-validator         0.1.4
rfc3986-validator         0.1.1
rpds-py                   0.17.1
rsa                       4.9
Rtree                     1.2.0
ruamel.yaml               0.18.5
ruamel.yaml.clib          0.2.7
s3fs                      0.4.2
s3transfer                0.10.0
scikit-learn              1.4.0
scipy                     1.12.0
seaborn                   0.13.2
Send2Trash                1.8.2
setuptools                68.2.2
shapely                   2.0.2
six                       1.16.0
smart-open                6.4.0
smmap                     5.0.0
sniffio                   1.3.0
sortedcontainers          2.4.0
soupsieve                 2.5
SQLAlchemy                2.0.25
sqlparse                  0.4.4
stack-data                0.6.2
statsmodels               0.14.1
tabulate                  0.9.0
tblib                     3.0.0
tenacity                  8.2.3
terminado                 0.18.0
threadpoolctl             3.2.0
tinycss2                  1.2.1
tomli                     2.0.1
tomlkit                   0.12.3
toolz                     0.12.1
tornado                   6.3.3
tqdm                      4.66.1
 | 
			
		||||
traitlets                 5.14.1
 | 
			
		||||
truststore                0.8.0
 | 
			
		||||
types-python-dateutil     2.8.19.20240106
 | 
			
		||||
typing_extensions         4.9.0
 | 
			
		||||
typing-utils              0.1.0
 | 
			
		||||
tzdata                    2023.4
 | 
			
		||||
uri-template              1.3.0
 | 
			
		||||
urllib3                   1.26.18
 | 
			
		||||
wcwidth                   0.2.13
 | 
			
		||||
webcolors                 1.13
 | 
			
		||||
webencodings              0.5.1
 | 
			
		||||
websocket-client          1.7.0
 | 
			
		||||
Werkzeug                  3.0.1
 | 
			
		||||
wheel                     0.42.0
 | 
			
		||||
widgetsnbextension        4.0.9
 | 
			
		||||
xgboost                   2.0.3
 | 
			
		||||
xyzservices               2023.10.1
 | 
			
		||||
yarl                      1.9.4
 | 
			
		||||
zict                      3.0.0
 | 
			
		||||
zipp                      3.17.0
 | 
			
		||||
zstandard                 0.22.0
 | 
			
		||||
							
								
								
									
460	code_base_train_test.ipynb	Normal file

@@ -0,0 +1,460 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "bf34b03c-536f-4f93-93a5-e452552653aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "Choisissez le type de compagnie : sport ? musique ? musee ? musique\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "File path :  projet-bdc2324-team1/0_Input/Company_10/products_purchased_reduced.csv\n",
      "Couverture Company 10 : 2016-03-07 - 2023-09-25\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_11/products_purchased_reduced.csv\n",
      "Couverture Company 11 : 2015-06-26 - 2023-11-08\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_12/products_purchased_reduced.csv\n",
      "Couverture Company 12 : 2016-06-14 - 2023-11-08\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_13/products_purchased_reduced.csv\n",
      "Couverture Company 13 : 2010-07-31 - 2023-11-08\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_14/products_purchased_reduced.csv\n",
      "Couverture Company 14 : 1901-01-01 - 2023-11-08\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_10/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_10/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_10/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset test : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_10/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_10/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_10/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset train : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_11/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_11/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_11/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset test : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_11/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_11/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_11/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset train : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_12/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_12/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_12/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset test : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_12/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_12/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_12/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset train : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_13/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_13/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_13/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset test : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_13/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_13/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_13/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset train : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_14/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_14/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_14/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset test : SUCCESS\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_14/customerplus_cleaned.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_14/campaigns_information.csv\n",
      "File path :  projet-bdc2324-team1/0_Input/Company_14/products_purchased_reduced.csv\n",
      "Data filtering : SUCCESS\n",
      "KPIs construction : SUCCESS\n",
      "Explanatory variable construction : SUCCESS\n",
      "Explained variable construction : SUCCESS\n",
      "Exportation dataset train : SUCCESS\n",
      "FIN DE LA GENERATION DES DATASETS : SUCCESS\n"
     ]
    }
   ],
   "source": [
    "# Business Data Challenge - Team 1\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import os\n",
    "import s3fs\n",
    "import re\n",
    "import warnings\n",
    "from datetime import date, timedelta, datetime\n",
    "\n",
    "# Create filesystem object\n",
    "S3_ENDPOINT_URL = \"https://\" + os.environ[\"AWS_S3_ENDPOINT\"]\n",
    "fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})\n",
    "\n",
    "\n",
    "# Import KPI construction functions\n",
    "exec(open('0_KPI_functions.py').read())\n",
    "\n",
    "# Ignore warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n",
    "\n",
    "def display_covering_time(df, company, datecover):\n",
    "    \"\"\"\n",
    "    This function draws the time coverage of each company\n",
    "    \"\"\"\n",
    "    min_date = df['purchase_date'].min().strftime(\"%Y-%m-%d\")\n",
    "    max_date = df['purchase_date'].max().strftime(\"%Y-%m-%d\")\n",
    "    datecover[company] = [datetime.strptime(min_date, \"%Y-%m-%d\") + timedelta(days=x) for x in range((datetime.strptime(max_date, \"%Y-%m-%d\") - datetime.strptime(min_date, \"%Y-%m-%d\")).days)]\n",
    "    print(f'Couverture Company {company} : {min_date} - {max_date}')\n",
    "    return datecover\n",
    "\n",
    "\n",
    "def compute_time_intersection(datecover):\n",
    "    \"\"\"\n",
    "    This function returns the time coverage for all companies\n",
    "    \"\"\"\n",
    "    timestamps_sets = [set(timestamps) for timestamps in datecover.values()]\n",
    "    intersection = set.intersection(*timestamps_sets)\n",
    "    intersection_list = list(intersection)\n",
    "    formated_dates = [dt.strftime(\"%Y-%m-%d\") for dt in intersection_list]\n",
    "    return sorted(formated_dates)\n",
    "\n",
    "\n",
    "def df_coverage_modelization(sport, coverage_train = 0.7):\n",
    "    \"\"\"\n",
    "    This function returns start_date, end_of_features and final dates\n",
    "    that help to construct train and test datasets\n",
    "    \"\"\"\n",
    "    datecover = {}\n",
    "    for company in sport:\n",
    "        df_products_purchased_reduced = display_databases(company, file_name = \"products_purchased_reduced\",\n",
    "                                                          datetime_col = ['purchase_date'])\n",
    "        datecover = display_covering_time(df_products_purchased_reduced, company, datecover)\n",
    "    #print(datecover.keys())\n",
    "    dt_coverage = compute_time_intersection(datecover)\n",
    "    start_date = dt_coverage[0]\n",
    "    # use the coverage_train argument rather than a hard-coded 0.7\n",
    "    end_of_features = dt_coverage[int(coverage_train * len(dt_coverage))]\n",
    "    final_date = dt_coverage[-1]\n",
    "    return start_date, end_of_features, final_date\n",
    "    \n",
    "\n",
    "def dataset_construction(min_date, end_features_date, max_date, directory_path):\n",
    "    \n",
    "    # Import customerplus\n",
    "    df_customerplus_clean_0 = display_databases(directory_path, file_name = \"customerplus_cleaned\")\n",
    "    df_campaigns_information = display_databases(directory_path, file_name = \"campaigns_information\", datetime_col = ['opened_at', 'sent_at', 'campaign_sent_at'])\n",
    "    df_products_purchased_reduced = display_databases(directory_path, file_name = \"products_purchased_reduced\", datetime_col = ['purchase_date'])\n",
    "    \n",
    "    # Consistency filter for the application of our method\n",
    "    max_date =  pd.to_datetime(max_date, utc = True, format = 'ISO8601') \n",
    "    end_features_date = pd.to_datetime(end_features_date, utc = True, format = 'ISO8601')\n",
    "    min_date = pd.to_datetime(min_date, utc = True, format = 'ISO8601')\n",
    "\n",
    "    # Filter the df_campaigns_information table\n",
    "    df_campaigns_information = df_campaigns_information[(df_campaigns_information['sent_at'] <= end_features_date) & (df_campaigns_information['sent_at'] >= min_date)]\n",
    "    df_campaigns_information['opened_at'][df_campaigns_information['opened_at'] >= end_features_date] = np.datetime64('NaT')\n",
    "    \n",
    "    # Filter the df_products_purchased_reduced table\n",
    "    df_products_purchased_reduced = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= end_features_date) & (df_products_purchased_reduced['purchase_date'] >= min_date)]\n",
    "\n",
    "    print(\"Data filtering : SUCCESS\")\n",
    "    \n",
    "    # Merge everything and create the KPIs\n",
    "\n",
    "    # KPIs on advertising campaigns\n",
    "    df_campaigns_kpi = campaigns_kpi_function(campaigns_information = df_campaigns_information) \n",
    "\n",
    "    # KPIs on purchasing behaviour\n",
    "    df_tickets_kpi = tickets_kpi_function(tickets_information = df_products_purchased_reduced)\n",
    "\n",
    "    # KPIs on socio-demographic data\n",
    "    df_customerplus_clean = customerplus_kpi_function(customerplus_clean = df_customerplus_clean_0)\n",
    "    \n",
    "    print(\"KPIs construction : SUCCESS\")\n",
    "    \n",
    "    # Merge with customer-related KPIs\n",
    "    df_customer = pd.merge(df_customerplus_clean, df_campaigns_kpi, on = 'customer_id', how = 'left')\n",
    "    \n",
    "    # Fill NaN values\n",
    "    df_customer[['nb_campaigns', 'nb_campaigns_opened']] = df_customer[['nb_campaigns', 'nb_campaigns_opened']].fillna(0)\n",
    "    \n",
    "    # Merge with purchasing-behaviour KPIs\n",
    "    df_customer_product = pd.merge(df_tickets_kpi, df_customer, on = 'customer_id', how = 'outer')\n",
    "    \n",
    "    # Fill NaN values\n",
    "    df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']] = df_customer_product[['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'nb_tickets_internet']].fillna(0)\n",
    "\n",
    "    print(\"Explanatory variable construction : SUCCESS\")\n",
    "\n",
    "    # 2. Construction of the explained variable \n",
    "    df_products_purchased_to_predict = df_products_purchased_reduced[(df_products_purchased_reduced['purchase_date'] <= max_date) & (df_products_purchased_reduced['purchase_date'] > end_features_date)]\n",
    "\n",
    "    # Purchase indicator\n",
    "    df_products_purchased_to_predict['y_has_purchased'] = 1\n",
    "\n",
    "    y = df_products_purchased_to_predict[['customer_id', 'y_has_purchased']].drop_duplicates()\n",
    "\n",
    "    print(\"Explained variable construction : SUCCESS\")\n",
    "    \n",
    "    # 3. Merge between explained and explanatory variables\n",
    "    dataset = pd.merge(df_customer_product, y, on = ['customer_id'], how = 'left')\n",
    "\n",
    "    # 0 if there is no purchase; assign the result, otherwise the NaN values remain\n",
    "    dataset['y_has_purchased'] = dataset['y_has_purchased'].fillna(0)\n",
    "\n",
    "    # add id_company prefix to customer_id\n",
    "    dataset['customer_id'] = directory_path + '_' + dataset['customer_id'].astype('str')\n",
    "    \n",
    "    return dataset\n",
    "\n",
    "## Export\n",
    "\n",
    "companies = {'musee' : ['1', '2', '3', '4', '101'],\n",
    "            'sport': ['5', '6', '7', '8', '9'],\n",
    "            'musique' : ['10', '11', '12', '13', '14']}\n",
    "\n",
    "type_of_comp = input('Choisissez le type de compagnie : sport ? musique ? musee ?')\n",
    "list_of_comp = companies[type_of_comp] \n",
    "# Export folder\n",
    "BUCKET_OUT = f'projet-bdc2324-team1/Generalization/{type_of_comp}'\n",
    "\n",
    "# Create test and train datasets for the selected companies\n",
    "\n",
    "start_date, end_of_features, final_date = df_coverage_modelization(list_of_comp, coverage_train = 0.7)\n",
    "\n",
    "for company in list_of_comp:\n",
    "    dataset_test = dataset_construction(min_date = start_date, end_features_date = end_of_features,\n",
    "                                        max_date = final_date, directory_path = company)    \n",
    "\n",
    "    # Export\n",
    "    FILE_KEY_OUT_S3 = \"dataset_test\" + company +  \".csv\"\n",
    "    FILE_PATH_OUT_S3 = BUCKET_OUT + \"/Test_set/\" + FILE_KEY_OUT_S3\n",
    "    \n",
    "    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:\n",
    "        dataset_test.to_csv(file_out, index = False)\n",
    "    \n",
    "    print(\"Exportation dataset test : SUCCESS\")\n",
    "\n",
    "    # Dataset train\n",
    "    dataset_train = dataset_construction(min_date = start_date, end_features_date = end_of_features,\n",
    "                                        max_date = final_date, directory_path = company)\n",
    "    # Export\n",
    "    FILE_KEY_OUT_S3 = \"dataset_train\" + company + \".csv\" \n",
    "    FILE_PATH_OUT_S3 = BUCKET_OUT + \"/Train_test/\" + FILE_KEY_OUT_S3\n",
    "    \n",
    "    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:\n",
    "        dataset_train.to_csv(file_out, index = False)\n",
    "        \n",
    "    print(\"Exportation dataset train : SUCCESS\")\n",
    "\n",
    "\n",
    "print(\"FIN DE LA GENERATION DES DATASETS : SUCCESS\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3721427e-5957-4556-b278-2e7ffca892f4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'projet-bdc2324-team1/Generalization/musique/Train_test/dataset_train14.csv'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "FILE_PATH_OUT_S3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "f8546992-f425-4d1e-ad75-ad26a8052a18",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'projet' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[10], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mprojet\u001b[49m\u001b[38;5;241m-\u001b[39mbdc2324\u001b[38;5;241m-\u001b[39mteam1\u001b[38;5;241m/\u001b[39mGeneralization\u001b[38;5;241m/\u001b[39mmusique\u001b[38;5;241m/\u001b[39mTrain_test\n",
      "\u001b[0;31mNameError\u001b[0m: name 'projet' is not defined"
     ]
    }
   ],
   "source": [
    "projet-bdc2324-team1/Generalization/musique/Train_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "0dd34710-6da2-4438-9e1d-0ac092c1d28c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(343126, 41)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dataset_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "a3bfeeb6-2db0-4f1d-866c-8721343e97c5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "customer_id               0.000000\n",
       "nb_tickets                0.000000\n",
       "nb_purchases              0.000000\n",
       "total_amount              0.000000\n",
       "nb_suppliers              0.000000\n",
       "vente_internet_max        0.000000\n",
       "purchase_date_min         0.858950\n",
       "purchase_date_max         0.858950\n",
       "time_between_purchase     0.858950\n",
       "nb_tickets_internet       0.000000\n",
       "street_id                 0.000000\n",
       "structure_id              0.869838\n",
       "mcp_contact_id            0.276677\n",
       "fidelity                  0.000000\n",
       "tenant_id                 0.000000\n",
       "is_partner                0.000000\n",
       "deleted_at                1.000000\n",
       "gender                    0.000000\n",
       "is_email_true             0.000000\n",
       "opt_in                    0.000000\n",
       "last_buying_date          0.709626\n",
       "max_price                 0.709626\n",
       "ticket_sum                0.000000\n",
       "average_price             0.709626\n",
       "average_purchase_delay    0.709731\n",
       "average_price_basket      0.709731\n",
       "average_ticket_basket     0.709731\n",
       "total_price               0.000000\n",
       "purchase_count            0.000000\n",
       "first_buying_date         0.709626\n",
       "country                   0.152090\n",
       "gender_label              0.000000\n",
       "gender_female             0.000000\n",
       "gender_male               0.000000\n",
       "gender_other              0.000000\n",
       "country_fr                0.152090\n",
       "has_tags                  0.000000\n",
       "nb_campaigns              0.000000\n",
       "nb_campaigns_opened       0.000000\n",
       "time_to_open              0.848079\n",
       "y_has_purchased           1.000000\n",
       "dtype: float64"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dataset_train.isna().sum()/dataset_train.shape[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "75f9a672-641f-49a2-a8d6-7673845506f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the dummy dependent variable: 1 if the individual made a purchase during the train period, 0 otherwise\n",
    "\n",
    "dataset_train_modif = dataset_train\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c121c1e2-d8e4-4b93-a882-9385581b63c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "dataset_train_modif[\""
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
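The notebook writes one test CSV per company under BUCKET_OUT + "/Test_set/". For downstream scripts, those splits can be read back and stacked; a minimal sketch, assuming the 'musique' bucket layout created above and the same s3fs filesystem object fs:

import pandas as pd

# gather the per-company test sets into a single frame
test_sets = []
for company in ['10', '11', '12', '13', '14']:
    path = f"projet-bdc2324-team1/Generalization/musique/Test_set/dataset_test{company}.csv"
    with fs.open(path, mode="rb") as file_in:
        test_sets.append(pd.read_csv(file_in))
dataset_test = pd.concat(test_sets, ignore_index=True)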
							
								
								
									
2880	code_valeur manquante.ipynb	Normal file
	File diff suppressed because it is too large.

7990	exploratory_analysis/TP_exploratory_analysis-Copy1.ipynb	Normal file
	File diff suppressed because it is too large.

1768	notebooks_merge/TP_merge_target_campaigns_links.ipynb	Normal file
	File diff suppressed because it is too large.

2850	useless/0_Cleaning_and_merge.ipynb	Normal file
	File diff suppressed because it is too large.

2101	useless/1_Descriptive_Statistics.ipynb	Normal file
	File diff suppressed because one or more lines are too long.

374	useless/2_Regression_logistique.ipynb	Normal file
	File diff suppressed because one or more lines are too long.

2770	useless/2_modelisation_pipeline+visu.ipynb	Normal file
	File diff suppressed because one or more lines are too long.

1215	useless/TP_access_merge_data.ipynb	Normal file
	File diff suppressed because it is too large.

958	useless/Temporary_barplot_example_TP.ipynb	Normal file
	File diff suppressed because one or more lines are too long.
@@ -13,19 +13,7 @@ import io
# functions

def load_train_test(type_of_activity):
    """
    Loads the training and test datasets from S3 storage for the type of activity specified.

    Args:
    - type_of_activity (str)

    Returns:
    DataFrame: Training dataset.
    DataFrame: Test dataset.
    """

    # BUCKET = f"projet-bdc2324-team1/Generalization/{type_of_activity}"
    BUCKET = f"projet-bdc2324-team1/1_Temp/1_0_Modelling_Datasets/{type_of_activity}"
    BUCKET = f"projet-bdc2324-team1/Generalization/{type_of_activity}"
    File_path_train = BUCKET + "/Train_set.csv"
    File_path_test = BUCKET + "/Test_set.csv"
@@ -41,47 +29,29 @@ def load_train_test(type_of_activity):


def features_target_split(dataset_train, dataset_test):
    """
    Splits the dataset into features and target variables for training and testing.

    Args:
    - dataset_train (DataFrame): Training dataset.
    - dataset_test (DataFrame): Test dataset.

    Returns:
    DataFrame: Features of the training dataset.
    DataFrame: Features of the test dataset.
    DataFrame: Target variable of the training dataset.
    DataFrame: Target variable of the test dataset.
    """

    features_l = ['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 'purchase_date_min', 'purchase_date_max', 
            'time_between_purchase', 'fidelity',  'is_email_true', 'opt_in', #'is_partner', 'nb_tickets_internet',
            'time_between_purchase', 'nb_tickets_internet', 'fidelity',  'is_email_true', 'opt_in', #'is_partner',
            'gender_female', 'gender_male', 'gender_other', 'nb_campaigns', 'nb_campaigns_opened']

    X_train = dataset_train # [features_l]
    # we suppress fidelity, time between purchase, and gender other (colinearity issue)
    """
    features_l = ['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'vente_internet_max', 
                  'purchase_date_min', 'purchase_date_max', 'nb_tickets_internet',  'is_email_true', 
                  'opt_in', 'gender_female', 'gender_male', 'nb_campaigns', 'nb_campaigns_opened']
    """

    X_train = dataset_train[features_l]
    y_train = dataset_train[['y_has_purchased']]

    X_test = dataset_test # [features_l]
    X_test = dataset_test[features_l]
    y_test = dataset_test[['y_has_purchased']]

    return X_train, X_test, y_train, y_test


def load_model(type_of_activity, model):
    """
    Loads from S3 storage the optimal parameters of the chosen ML model saved in a pickle file.

    Args:
    - type_of_activity (str)
    - model (str)

    Returns:
    Model: machine learning model pre-trained with a scikit learn pipeline.
    """

    # BUCKET = f"projet-bdc2324-team1/Output_model/{type_of_activity}/{model}/"
    BUCKET = f"projet-bdc2324-team1/2_Output/2_1_Modeling_results/standard/{type_of_activity}/{model}/"
    BUCKET = f"projet-bdc2324-team1/Output_model/{type_of_activity}/{model}/"
    filename = model + '.pkl'
    file_path = BUCKET + filename
    with fs.open(file_path, mode="rb") as f:
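Taken together, these helpers form the entry point of the segmentation pipeline; a typical call sequence, as a sketch (the activity and model names are placeholders):

# load the splits, derive features/targets, then fetch a pre-trained pipeline
dataset_train, dataset_test = load_train_test('musique')
X_train, X_test, y_train, y_test = features_target_split(dataset_train, dataset_test)
model = load_model('musique', 'LogisticRegression_Benchmark')  # placeholder model name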
				
			
@@ -92,17 +62,6 @@ def load_model(type_of_activity, model):


def df_segment(df, y, model) :
    """
    Segments customers into 4 groups based on the propensity scores given by a previously-loaded ML model.

    Args:
    - df (DataFrame): DataFrame to be segmented.
    - y (Series): True target variable.
    - model (Model): Pre-trained machine learning model for prediction.

    Returns:
    DataFrame: Segmented DataFrame with predicted values and true values for y.
    """

    y_pred = model.predict(df)
    y_pred_prob = model.predict_proba(df)[:, 1]
@@ -122,7 +81,7 @@ def df_segment(df, y, model) :
def odd_ratio(score) :
    """
    Args:
    - score (Union[float, int])
    - score (Union[float, int]): Score value.

    Returns:
    float: Odd ratio value.
@@ -137,7 +96,7 @@ def adjust_score_1(score) :
    Allows odd ratios to be computed afterwards.

    Args:
    - score (List[Union[float, int]])
    - score (List[Union[float, int]]): List of score values.

    Returns:
    np.ndarray: Adjusted score values.
@@ -153,8 +112,8 @@ def adjusted_score(odd_ratio, bias) :
    Adjust the score based on the odd ratio and bias.

    Args:
    - odd_ratio (Union[float, int])
    - bias (Union[float, int])
    - odd_ratio (Union[float, int]): Odd ratio value.
    - bias (Union[float, int]): Bias value.

    Returns:
    float: Adjusted score value.
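The bodies of odd_ratio, adjust_score_1 and adjusted_score are truncated in these hunks; from the docstrings, the intended computation appears to be the usual odds-based recalibration. A sketch of that construction, under that assumption (the exact formulas in the repository may differ):

import numpy as np

def odd_ratio(score):
    # odds of a probability-like score: p / (1 - p)
    return score / (1 - score)

def adjust_score_1(score):
    # pull scores of exactly 1 slightly below 1 so the odds stay finite
    return np.minimum(np.array(score), 1 - 1e-6)

def adjusted_score(odd_ratio, bias):
    # divide the odds by the bias, then map back to a probability
    return (odd_ratio / bias) / (1 + odd_ratio / bias)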
				
			
@@ -166,12 +125,12 @@ def adjusted_score(odd_ratio, bias) :

def find_bias(odd_ratios, y_objective, initial_guess=10) :
    """
    Find the bias needed to adjust scores so that their sum is equal to the total number of purchases observed. 
    Find the bias needed to adjust scores according to the purchases observed

    Args:
    - odd_ratios (List[float]): List of odd ratios associated to the scores that have to be adjusted.
    - y_objective (Union[float, int]): Objective value => total number of purchases.
    - initial_guess (Union[float, int], optional): Initial guess for the bias. Default is 10 (bias is approximately 6 for sports, 10 for music and 22 for museums)
    - odd_ratios (List[float]): List of odd ratios.
    - y_objective (Union[float, int]): Objective value to achieve.
    - initial_guess (Union[float, int], optional): Initial guess for the bias. Default is 6.

    Returns:
    float: Estimated bias value.
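Per the main-branch docstring, find_bias searches for the bias that makes the adjusted scores sum to the observed number of purchases; a root-finding sketch with scipy.optimize.fsolve, reusing the hypothetical adjusted_score above:

from scipy.optimize import fsolve

def find_bias(odd_ratios, y_objective, initial_guess=10):
    # root of f(bias) = sum of adjusted scores - observed purchases
    f = lambda bias: sum(adjusted_score(r, bias) for r in odd_ratios) - y_objective
    return fsolve(f, initial_guess)[0]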
				
			
@@ -208,52 +167,28 @@ def plot_hist_scores(df, score, score_adjusted, type_of_activity) :

def project_tickets_CA (df, nb_purchases, nb_tickets, total_amount, score_adjusted, duration_ref, duration_projection) : 
    """
    Project tickets sold and total amount based on the adjusted scores and the duration of periods of study / projection. 
    Project ticket counts and total amount for a given duration and adjust based on a score.

    Args:
    - df (DataFrame): DataFrame containing information about past sales.
    - df (DataFrame): DataFrame containing ticket data.
    - nb_purchases (str) : Name of the column in df representing the number of purchases.
    - nb_tickets (str): Name of the column in df representing the number of tickets.
    - total_amount (str): Name of the column in df representing the total amount.
    - score_adjusted (str): Name of the column in df representing the adjusted score.
    - duration_ref (int or float): Duration of the period of reference for the construction of the variables X.
    - duration_ref (int or float): duration of the period of reference for the construction of the variables X.
    - duration_projection (int or float): Duration of the period of projection of sales / revenue. 

    Returns:
    DataFrame: DataFrame completed with sales and total amount projections. 
    DataFrame: DataFrame with projected ticket counts and total amount adjusted based on the score.
    duration_ratio = duration_ref/duration_projection
    """

    duration_ratio = duration_ref/duration_projection

    df_output = df

    # project number of tickets : at least 1 ticket purchased if the customer purchased
    df_output.loc[:,"nb_tickets_projected"] = df_output.loc[:,nb_tickets].apply(lambda x : max(1, x /duration_ratio))

    # project amount : if the customer buys a ticket, we expect the amount to be at least the average price of tickets 
    # for customers purchasing exactly one ticket
    if df_output.loc[df_output[nb_tickets]==1].shape[0] > 0 :
        avg_price = df_output.loc[df_output[nb_tickets]==1][total_amount].mean()
    else :
        avg_price = df_output[total_amount].mean()

    # we compute the avg price of ticket for each customer
    df_output["avg_ticket_price"] = df_output[total_amount]/df_output[nb_tickets]

    # correct negative total amounts
    df_output.loc[:,"total_amount_corrected"] = np.where(df_output[total_amount] < 0, 
                                                         avg_price * df_output[nb_tickets],
                                                         df_output[total_amount])

    df_output.loc[:,"total_amount_projected"] = np.where(
        # if no ticket bought in the past, we take the average price
        df_output[nb_tickets]==0, avg_price,
        # if avg prices of tickets are negative, we recompute the expected amount based on the avg price of a ticket
        # observed on the whole population
        np.where(df_output["avg_ticket_price"] < 0, avg_price * df_output.loc[:,"nb_tickets_projected"],
        # else, the amount projected is the average price of tickets bought by the customer * nb tickets projected
                 df_output["avg_ticket_price"] * df_output.loc[:,"nb_tickets_projected"])
        )
    df_output.loc[:,"nb_tickets_projected"] = df_output.loc[:,nb_tickets] / duration_ratio
    df_output.loc[:,"total_amount_projected"] = df_output.loc[:,total_amount] / duration_ratio

    df_output.loc[:,"nb_tickets_expected"] = df_output.loc[:,score_adjusted] * df_output.loc[:,"nb_tickets_projected"]
    df_output.loc[:,"total_amount_expected"] = df_output.loc[:,score_adjusted] * df_output.loc[:,"total_amount_projected"]
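For reference, a call matching the main-branch signature, with the 17-month reference window and 12-month projection horizon that summary_expected_CA uses as defaults ("score_adjusted" is an assumed column name):

X_test_segment = project_tickets_CA(X_test_segment, "nb_purchases", "nb_tickets", "total_amount",
                                    "score_adjusted", duration_ref=17, duration_projection=12)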
				
			
@@ -266,7 +201,7 @@ def project_tickets_CA (df, nb_purchases, nb_tickets, total_amount, score_adjust
def summary_expected_CA(df, segment, nb_tickets_expected, total_amount_expected, total_amount, pace_purchase,
                       duration_ref=17, duration_projection=12) :  
    """
    Generate a summary of expected customer sales based on segments.
    Generate a summary of expected customer acquisition based on segments.

    Args:
    - df (DataFrame): DataFrame containing customer data.
@@ -274,12 +209,9 @@ def summary_expected_CA(df, segment, nb_tickets_expected, total_amount_expected,
    - nb_tickets_expected (str): Name of the column in df representing the expected number of tickets.
    - total_amount_expected (str): Name of the column in df representing the expected total amount.
    - total_amount (str): Name of the column in df representing the total amount.
    - pace_purchase (str) : Name of the column in df representing the average time between 2 purchases in months.
    - duration_ref (int or float): Duration of the period of reference for the construction of the variables X.
    - duration_projection (int or float): Duration of the period of projection of sales / revenue. 

    Returns:
    DataFrame: Summary DataFrame containing expected customer sales metrics.
    DataFrame: Summary DataFrame containing expected customer acquisition metrics.
    """

    # compute nb tickets estimated and total amount expected
@@ -297,9 +229,6 @@ def summary_expected_CA(df, segment, nb_tickets_expected, total_amount_expected,
    df_expected_CA["revenue_recovered_perct"] = 100 * duration_ratio * df_expected_CA[total_amount_expected] / \
    df.groupby(segment)[total_amount].sum().values

    df_expected_CA["share_future_revenue_perct"] = 100 * duration_ratio * df_expected_CA[total_amount_expected] / \
    df[total_amount].sum()

    df_drop_null_pace = df.dropna(subset=[pace_purchase])
    df_expected_CA["pace_purchase"] = df_drop_null_pace.groupby(segment)[pace_purchase].mean().values
@@ -307,18 +236,10 @@ def summary_expected_CA(df, segment, nb_tickets_expected, total_amount_expected,


def save_file_s3_ca(File_name, type_of_activity):
    """
    Saves a file in S3 storage.

    Args:
    - File_name (str)
    - type_of_activity (str)
    """

    image_buffer = io.BytesIO()
    plt.savefig(image_buffer, format='png', dpi=120)
    plt.savefig(image_buffer, format='png')
    image_buffer.seek(0)
    PATH = f"projet-bdc2324-team1/2_Output/2_3_Sales_Forecast/{type_of_activity}/"
    PATH = f"projet-bdc2324-team1/Output_expected_CA/{type_of_activity}/"
    FILE_PATH_OUT_S3 = PATH + File_name + type_of_activity + '.png'
    with fs.open(FILE_PATH_OUT_S3, 'wb') as s3_file:
        s3_file.write(image_buffer.read())
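save_file_s3_ca serializes whatever matplotlib figure is currently open, so it is meant to be called right after a plotting helper; a sketch (column names assumed):

plot_hist_scores(X_test_segment, "score", "score_adjusted", type_of_activity)
save_file_s3_ca("hist_scores_", type_of_activity)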
							
								
								
									
23	utils_ml.py
@@ -49,9 +49,6 @@ def load_train_test(type_of_activity, type_of_model):


def save_file_s3(File_name, type_of_activity, type_of_model, model):
-    """
-    save plot into s3 storage
-    """
    image_buffer = io.BytesIO()
    plt.savefig(image_buffer, format='png')
    image_buffer.seek(0)

@@ -63,9 +60,6 @@ def save_file_s3(File_name, type_of_activity, type_of_model, model):


def save_result_set_s3(result_set, File_name, type_of_activity, type_of_model, model=None, model_path=False):
-    """
-    save result into s3 storage
-    """
    if model_path:
        FILE_PATH_OUT_S3 = f"projet-bdc2324-team1/2_Output/2_1_Modeling_results/{type_of_model}/{type_of_activity}/{model}/" + File_name + '.csv'
    else:

@@ -75,9 +69,6 @@ def save_result_set_s3(result_set, File_name, type_of_activity, type_of_model, m


def save_model_s3(File_name, type_of_activity, type_of_model, model, classifier):
-    """
-    save model into pickle file
-    """
    model_bytes = pickle.dumps(classifier)
    FILE_PATH_OUT_S3 = f"projet-bdc2324-team1/2_Output/2_1_Modeling_results/{type_of_model}/{type_of_activity}/{model}/" + File_name + '.pkl'
    with fs.open(FILE_PATH_OUT_S3, 'wb') as f:

@@ -97,9 +88,6 @@ def compute_recall_companies(dataset_test, y_pred, type_of_activity, model):


def features_target_split(dataset_train, dataset_test):
-    """
-    return train and test set
-    """
    features_l = ['nb_campaigns', 'taux_ouverture_mail', 'prop_purchases_internet', 'nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers', 'time_to_open',
                           'purchases_10_2021','purchases_10_2022', 'purchases_11_2021', 'purchases_12_2021','purchases_1_2022', 'purchases_2_2022', 'purchases_3_2022',
                            'purchases_4_2022', 'purchases_5_2021', 'purchases_5_2022', 'purchases_6_2021', 'purchases_6_2022', 'purchases_7_2021', 'purchases_7_2022', 'purchases_8_2021',

@@ -117,9 +105,6 @@ def features_target_split(dataset_train, dataset_test):


def preprocess(type_of_model,  type_of_activity):
-    """
-    preprocess variables before running machine learning pipeline
-    """

    numeric_features = ['nb_campaigns', 'taux_ouverture_mail', 'prop_purchases_internet', 'nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers',
                           'purchases_10_2021','purchases_10_2022', 'purchases_11_2021', 'purchases_12_2021','purchases_1_2022', 'purchases_2_2022', 'purchases_3_2022',

@@ -161,7 +146,7 @@ def preprocess(type_of_model,  type_of_activity):
def draw_confusion_matrix(y_test, y_pred, model):
    conf_matrix = confusion_matrix(y_test, y_pred)
-    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=['Class 0', 'Class 1'], yticklabels=['Class 0', 'Class 1'], annot_kws={"size": 14})
+    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues', xticklabels=['Class 0', 'Class 1'], yticklabels=['Class 0', 'Class 1'])
    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.title('Confusion Matrix')

@@ -180,10 +165,10 @@ def draw_roc_curve(X_test, y_pred_prob, model):
    plt.plot(fpr, tpr, label="ROC curve(area = %0.3f)" % roc_auc)
    plt.plot([0, 1], [0, 1], color="red", label="Random Baseline", linestyle="--")
    plt.grid(color='gray', linestyle='--', linewidth=0.5)
-    plt.xlabel("False Positive Rate", fontsize=14)
-    plt.ylabel("True Positive Rate", fontsize=14)
+    plt.xlabel("False Positive Rate")
+    plt.ylabel("True Positive Rate")
    plt.title("ROC Curve", size=18)
-    plt.legend(loc="lower right", fontsize=14)
+    plt.legend(loc="lower right")
    plt.show()
    save_file_s3("Roc_curve_", type_of_activity, type_of_model, model)
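Under the hood, save_model_s3 and the loaders are a plain pickle round trip over s3fs. A minimal self-contained sketch (the helper name and path are illustrative, and `fs` mirrors the module-level s3fs.S3FileSystem these utilities assume is already configured):

    import pickle
    import s3fs

    fs = s3fs.S3FileSystem()  # assumes S3 credentials/endpoint are configured in the environment

    def roundtrip_model(classifier, path):
        # serialize the fitted estimator to bytes and push them to S3
        with fs.open(path, 'wb') as f:
            f.write(pickle.dumps(classifier))
        # pull the bytes back and rebuild an identical estimator
        with fs.open(path, 'rb') as f:
            return pickle.loads(f.read())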
@@ -1,18 +1,15 @@
# functions for segmentation and associated graphics
import pandas as pd
import numpy as np
import os
import io
import s3fs
import re
import pickle
import warnings


def load_model(type_of_activity, model):
    """
    Loads from S3 storage the optimal parameters of the chosen ML model saved in a pickle file.

    Args:
    - type_of_activity (str)
    - model (str)

    Returns:
    Model: machine learning model pre-trained with a scikit-learn pipeline.
    """
-    BUCKET = f"projet-bdc2324-team1/2_Output/2_1_Modeling_results/standard/{type_of_activity}/{model}/"
+    BUCKET = f"projet-bdc2324-team1/Output_model/{type_of_activity}/{model}/"
    filename = model + '.pkl'
    file_path = BUCKET + filename
    with fs.open(file_path, mode="rb") as f:

@@ -23,313 +20,8 @@ def load_model(type_of_activity, model):
def load_test_file(type_of_activity):
    """
    Load the test dataset from S3 storage for the type of activity specified.

    Args:
    - type_of_activity (str)

    Returns:
    DataFrame: Test dataset.
    """
-    file_path_test = f"projet-bdc2324-team1/1_Temp/1_0_Modelling_Datasets/{type_of_activity}/Test_set.csv"
+    file_path_test = f"projet-bdc2324-team1/Generalization/{type_of_activity}/Test_set.csv"
    with fs.open(file_path_test, mode="rb") as file_in:
        dataset_test = pd.read_csv(file_in, sep=",")
    return dataset_test


def save_file_s3_mp(File_name, type_of_activity):
    """
    Save a matplotlib figure to S3 storage at the location assigned to the type of activity specified.

    Args:
    - File_name (str)
    - type_of_activity (str)

    Returns:
    None
    """
    image_buffer = io.BytesIO()
    plt.savefig(image_buffer, format='png', dpi=110)
    image_buffer.seek(0)
    PATH = f"projet-bdc2324-team1/2_Output/2_2_Segmentation_and_Marketing_Personae/{type_of_activity}/"
    FILE_PATH_OUT_S3 = PATH + File_name + type_of_activity + '.png'
    with fs.open(FILE_PATH_OUT_S3, 'wb') as s3_file:
        s3_file.write(image_buffer.read())
    plt.close()
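These savers act on matplotlib's implicit current figure, so they are meant to be called right after a plotting helper has drawn it; for example, with the histogram helper defined below (the file-name prefix here is illustrative):

    hist_segment_business_KPIs(df_kpi, "segment", "size", "nb_tickets", "nb_purchases", "total_amount", "nb_campaigns", type_of_activity)
    save_file_s3_mp("hist_segment_", type_of_activity)  # uploads the figure just drawn, then closes it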
def save_txt_file_s3(file_name, type_of_activity, content):
    """
    Save a text file to S3 storage at the location assigned to the type of activity specified.

    Args:
    - file_name (str)
    - type_of_activity (str)
    - content (str)

    Returns:
    None
    """
    FILE_PATH = f"projet-bdc2324-team1/2_Output/2_2_Segmentation_and_Marketing_Personae/{type_of_activity}/"
    FILE_PATH_OUT_S3 = FILE_PATH + file_name + type_of_activity + '.txt'
    with fs.open(FILE_PATH_OUT_S3, 'w') as s3_file:
        s3_file.write(content)


def df_business_fig(df, segment, list_var):
    """
    Compute business key performance indicators (KPIs) based on segment-wise aggregation of variables.

    Args:
    - df (DataFrame): The DataFrame containing the data.
    - segment (str): The column name representing segments.
    - list_var (list of str): The list of variable names to be aggregated.

    Returns:
    DataFrame: The DataFrame containing business KPIs.
    """
    df_business_kpi = df.groupby(segment)[list_var].sum().reset_index()
    df_business_kpi.insert(1, "size", df.groupby(segment).size().values)
    all_var = ["size"] + list_var
    df_business_kpi[all_var] = 100 * df_business_kpi[all_var] / df_business_kpi[all_var].sum()

    return df_business_kpi
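For instance, with a toy frame of two segments, the final division turns the raw sums into each segment's share of the column totals (illustrative numbers, not project data):

    import pandas as pd

    df = pd.DataFrame({"segment": [1, 1, 2], "nb_tickets": [10, 10, 80]})
    kpi = df.groupby("segment")[["nb_tickets"]].sum().reset_index()
    kpi.insert(1, "size", df.groupby("segment").size().values)
    all_var = ["size", "nb_tickets"]
    kpi[all_var] = 100 * kpi[all_var] / kpi[all_var].sum()
    # segment 1: size 66.67 %, nb_tickets 20 %; segment 2: size 33.33 %, nb_tickets 80 %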
def hist_segment_business_KPIs(df, segment, size, nb_tickets, nb_purchases, total_amount, nb_campaigns, type_of_activity):
    """
    Plot a histogram stacking the relative weight of each segment regarding some key business indicators.

    Args:
    - df (DataFrame): The DataFrame containing pre-aggregated data about some key business indicators
    - segment (str): The column name representing segments.
    - size (str): The column name representing the size.
    - nb_tickets (str): The column name representing the number of tickets.
    - nb_purchases (str): The column name representing the number of purchases.
    - total_amount (str): The column name representing the total amount.
    - nb_campaigns (str): The column name representing the number of campaigns.
    - type_of_activity (str)

    Returns:
    None
    """
    plt.figure()

    df_plot = df[[segment, size, nb_tickets, nb_purchases, total_amount, nb_campaigns]]

    x = ["number of\ncustomers", "number of\ntickets", "number of\npurchases", "total\namount",
         "number of\ncampaigns"]

    bottom = np.zeros(5)

    # shades of blue
    colors = plt.cm.Blues(np.linspace(0.1, 0.9, 4))

    for i in range(4):
        height = list(df_plot.loc[i, size:].values)
        plt.bar(x=x, height=height, label=str(df_plot[segment][i]), bottom=bottom, color=colors[i])
        bottom += height

    # adjust margins
    plt.subplots_adjust(left=0.125, right=0.8, bottom=0.1, top=0.9)

    plt.legend(title="segment", loc="upper right", bbox_to_anchor=(1.2, 1))
    plt.ylabel("Fraction represented by the segment (%)")
    plt.title(f"Relative weight of each segment regarding business KPIs\nfor {type_of_activity} companies", size=12)
    # plt.show()


# def df_segment_mp(df):
#     df_mp = df.groupby("segment")[["gender_female", "gender_male", "gender_other", "country_fr"]].mean().reset_index()
#     df_mp.insert(3, "share_known_gender", df_mp["gender_female"]+df_mp["gender_male"])
#     df_mp.insert(4, "share_of_women", df_mp["gender_female"]/(df_mp["share_known_gender"]))
#     return df_mp


# def df_segment_pb(df):
#     df_pb = df.groupby("segment")[["prop_purchases_internet", "taux_ouverture_mail", "opt_in"]].mean().reset_index()
#     return df_pb


def radar_mp_plot(df, categories, index):
    """
    Plot a radar chart describing the marketing personae of the segment associated with index, for the given categories and the type of activity specified.

    Args:
    - df (DataFrame): The DataFrame containing data about the categories describing the marketing personae associated with each segment
    - categories (list of str)
    - index (int): The index (between 0 and 3) identifying the segment. Here, index = number of the segment - 1

    Returns:
    None
    """
    # true values are used to print the true value in parentheses
    tvalues = list(df.loc[index, categories])

    max_values = df[categories].max()

    # values are true values / max among the 4 segments, which puts each value
    # in relation with the values of the other segments: if a point has a
    # maximal abscissa, the value is maximal for the segment considered,
    # even if it is not equal to 1
    values = list(df.loc[index, categories] / max_values)

    # normalized values are used to adjust each value around the circle:
    # for instance, if the maximum of values is 0.8, we want the point to sit
    # at 8/10th of the circle radius, not at the edge
    values_normalized = [max(values) * elt for elt in values]

    # number of categories
    num_categories = len(categories)

    angles = np.linspace(0, 2 * np.pi, num_categories, endpoint=False).tolist()

    # initialize the graphic
    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))

    # a transparent line (alpha=0) of values is drawn first to set the radius
    # of the circle, which is based on max(values); without this transparent
    # line the radius of the circle would be too small
    ax.plot(angles + angles[:1], values + values[:1], color='skyblue', alpha=0, linewidth=1.5)
    ax.plot(angles + angles[:1], values_normalized + values_normalized[:1], color='black', alpha=0.5, linewidth=1.2)

    # fill the sector
    ax.fill(angles, values_normalized, color='orange', alpha=0.4)

    # labels
    ax.set_yticklabels([])
    ax.set_xticks(angles)
    ticks = [categories[i].replace("_", " ") + f"\n({round(100 * tvalues[i], 2)}%)" for i in range(len(categories))]
    ax.set_xticklabels(ticks, color="black")

    ax.spines['polar'].set_visible(False)

    plt.title(f'Characteristics of segment {index+1}\n')

    # plt.show()
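A small worked example of that two-step scaling (illustrative numbers):

    # ratios of one segment's true values to the per-category maxima
    values = [0.8, 0.4]                                    # max(values) = 0.8
    values_normalized = [max(values) * v for v in values]  # [0.64, 0.32]
    # the invisible plot of `values` fixes the radial limit at 0.8, so each
    # point is displayed at values_normalized[i] / 0.8 = values[i] of the
    # radius: the strongest category sits at 8/10 of the circle, never at the edge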
def radar_mp_plot_all(df, type_of_activity):
    """
    Plot exactly the same radar charts as radar_mp_plot, but for all segments.

    Args:
    - df (DataFrame)
    - type_of_activity (str)

    Returns:
    None
    """
    # table summarizing variables relative to marketing personae
    df_mp = df.groupby("segment")[["gender_female", "gender_male", "gender_other", "age"]].mean().reset_index()
    # df_mp.insert(3, "share_known_gender", df_mp["gender_female"]+df_mp["gender_male"])
    df_mp.insert(4, "share_of_women", df_mp["gender_female"]/(df_mp["gender_female"]+df_mp["gender_male"]))

    # table relative to purchasing behaviour
    df_pb = df.groupby("segment")[["prop_purchases_internet", "taux_ouverture_mail", "opt_in"]].mean().reset_index()

    # concatenate the tables to prepare the plot
    df_used = pd.concat([df_pb, df_mp[['share_of_women', 'age']]], axis=1)

    # rename columns for the plot
    df_used = df_used.rename(columns={'taux_ouverture_mail': 'mails_opened', 'prop_purchases_internet': 'purchases_internet'})

    # visualization
    nb_segments = df_used.shape[0]
    categories = list(df_used.drop("segment", axis=1).columns)

    var_not_perc = ["age"]

    # initialize the graphic
    fig, ax = plt.subplots(2, 2, figsize=(20, 21), subplot_kw=dict(polar=True))

    for index in range(nb_segments):
        row = index // 2  # integer division to get the row number
        col = index % 2

        # true values are used to print the true value in parentheses
        tvalues = list(df_used.loc[index, categories])

        max_values = df_used[categories].max()

        # values are true values / max among the 4 segments, which puts each
        # value in relation with the values of the other segments (see
        # radar_mp_plot for details)
        values = list(df_used.loc[index, categories] / max_values)

        # normalized values adjust each value around the circle
        values_normalized = [max(values) * elt for elt in values]

        # number of categories
        num_categories = len(categories)

        angles = np.linspace(0, 2 * np.pi, num_categories, endpoint=False).tolist()

        # a transparent line (alpha=0) of values is drawn first to set the
        # circle radius, based on max(values)
        ax[row, col].plot(angles + angles[:1], values + values[:1], color='skyblue', alpha=0, linewidth=1.5)
        ax[row, col].plot(angles + angles[:1], values_normalized + values_normalized[:1], color='black', alpha=0.5,
                          linewidth=1.2)

        # fill the sector
        ax[row, col].fill(angles, values_normalized, color='orange', alpha=0.4, label=index)

        # labels
        ax[row, col].set_yticklabels([])
        ax[row, col].set_xticks(angles)

        # define the ticks
        values_printed = [str(round(tvalues[i], 2)) if categories[i] in var_not_perc else f"{round(100 * tvalues[i], 2)}%" for i in range(len(categories))]
        ticks = [categories[i].replace("_", " ") + f"\n({values_printed[i]})" for i in range(len(categories))]
        ax[row, col].set_xticklabels(ticks, color="black", size=20)

        ax[row, col].spines['polar'].set_visible(False)

        ax[row, col].set_title(f'Segment {index+1}\n', size=24)

    fig.suptitle(f"Characteristics of marketing personae of {type_of_activity} companies", size=32)

    plt.tight_layout()
    # plt.show()


def known_sociodemo_caracteristics(df, type_of_activity):
    """
    Compute the share of non-NaN values for some sociodemographic characteristic features and save the result in a LaTeX table.

    Args:
    - df (DataFrame)
    - type_of_activity (str)

    Returns:
    None
    """
    table_share_known = df.groupby("segment")[["is_profession_known", "is_zipcode_known", "categorie_age_inconnue", "gender_other"]].mean().mul(100).reset_index()
    table_share_known.columns = ['Segment', 'Share of Known Profession (%)', 'Share of Known Zipcode (%)', 'Share of Unknown Age (%)', 'Share of Unknown Gender (%)']
    table_share_known = table_share_known.pivot_table(index=None, columns='Segment')

    # round the DataFrame values to one decimal
    table_share_known_rounded = table_share_known.round(1)

    # convert the DataFrame to LaTeX with the rounded values and the '%' symbol escaped
    latex_table = tabulate(table_share_known_rounded, headers='keys', tablefmt='latex_raw', floatfmt=".1f")
    latex_table = latex_table.replace('%', '\\%')

    save_txt_file_s3("table_known_socio_demo_caracteristics", type_of_activity, latex_table)
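As a quick illustration of the tabulate call (toy values, a single share column rather than the pivoted table the function actually builds):

    import pandas as pd
    from tabulate import tabulate

    toy = pd.DataFrame({"Segment": [1, 2], "Share of Known Zipcode (%)": [71.3, 64.8]}).set_index("Segment")
    latex_table = tabulate(toy.round(1), headers='keys', tablefmt='latex_raw', floatfmt=".1f")
    print(latex_table.replace('%', '\\%'))  # a ready-to-paste LaTeX tabular with \% already escaped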
    
201	utils_segmentation_V2TP.py	Normal file
@@ -0,0 +1,201 @@
### importations ###

import pandas as pd
import numpy as np
import os
import io
import s3fs
import re
import pickle
import warnings
import matplotlib.pyplot as plt


### functions for segmentation and associated graphics ###

def load_model(type_of_activity, model):
    BUCKET = f"projet-bdc2324-team1/Output_model/{type_of_activity}/{model}/"
    filename = model + '.pkl'
    file_path = BUCKET + filename
    with fs.open(file_path, mode="rb") as f:
        model_bytes = f.read()

    model = pickle.loads(model_bytes)
    return model


def load_test_file(type_of_activity):
    file_path_test = f"projet-bdc2324-team1/Generalization/{type_of_activity}/Test_set.csv"
    with fs.open(file_path_test, mode="rb") as file_in:
        dataset_test = pd.read_csv(file_in, sep=",")
    return dataset_test


def save_file_s3_mp(File_name, type_of_activity):
    image_buffer = io.BytesIO()
    plt.savefig(image_buffer, format='png', dpi=110)
    image_buffer.seek(0)
    PATH = f"projet-bdc2324-team1/Output_marketing_personae_analysis/{type_of_activity}/"
    FILE_PATH_OUT_S3 = PATH + File_name + type_of_activity + '.png'
    with fs.open(FILE_PATH_OUT_S3, 'wb') as s3_file:
        s3_file.write(image_buffer.read())
    plt.close()


def df_business_fig(df, segment, list_var):
    df_business_kpi = df.groupby(segment)[list_var].sum().reset_index()
    df_business_kpi.insert(1, "size", df.groupby(segment).size().values)
    all_var = ["size"] + list_var
    df_business_kpi[all_var] = 100 * df_business_kpi[all_var] / df_business_kpi[all_var].sum()

    return df_business_kpi


def hist_segment_business_KPIs(df, segment, size, nb_tickets, nb_purchases, total_amount, nb_campaigns):

    plt.figure()

    df_plot = df[[segment, size, nb_tickets, nb_purchases, total_amount, nb_campaigns]]

    x = ["number of\ncustomers", "number of\ntickets", "number of\npurchases", "total\namount",
         "number of\ncampaigns"]

    bottom = np.zeros(5)

    # shades of blue
    colors = plt.cm.Blues(np.linspace(0.1, 0.9, 4))

    for i in range(4):
        height = list(df_plot.loc[i, size:].values)
        plt.bar(x=x, height=height, label=str(df_plot[segment][i]), bottom=bottom, color=colors[i])
        bottom += height

    # adjust margins
    plt.subplots_adjust(left=0.125, right=0.8, bottom=0.1, top=0.9)

    plt.legend(title="segment", loc="upper right", bbox_to_anchor=(1.2, 1))
    plt.ylabel("Fraction represented by the segment (%)")
    plt.title(f"Relative weight of each segment regarding business KPIs\nfor {type_of_activity} companies", size=12)
    # plt.show()


def df_segment_mp(df, segment, gender_female, gender_male, gender_other, country_fr):
    df_mp = df.groupby(segment)[[gender_female, gender_male, gender_other, country_fr]].mean().reset_index()
    df_mp.insert(3, "share_known_gender", df_mp[gender_female] + df_mp[gender_male])
    df_mp.insert(4, "share_of_women", df_mp[gender_female] / (df_mp["share_known_gender"]))
    return df_mp


def df_segment_pb(df, segment, nb_tickets_internet, nb_tickets, nb_campaigns_opened, nb_campaigns, opt_in):
    df_used = df
    df_used["share_tickets_internet"] = df_used[nb_tickets_internet] / df_used[nb_tickets]
    df_used["share_campaigns_opened"] = df_used[nb_campaigns_opened] / df_used[nb_campaigns]
    df_pb = df_used.groupby(segment)[["share_tickets_internet", "share_campaigns_opened", opt_in]].mean().reset_index()
    return df_pb


def radar_mp_plot(df, categories, index):
    # true values are used to print the true value in parentheses
    tvalues = list(df.loc[index, categories])

    max_values = df[categories].max()

    # values are true values / max among the 4 segments, which puts each value
    # in relation with the values of the other segments: if a point has a
    # maximal abscissa, the value is maximal for the segment considered,
    # even if it is not equal to 1
    values = list(df.loc[index, categories] / max_values)

    # normalized values adjust each value around the circle: if the maximum of
    # values is 0.8, the point sits at 8/10th of the circle radius, not at the edge
    values_normalized = [max(values) * elt for elt in values]

    # number of categories
    num_categories = len(categories)

    angles = np.linspace(0, 2 * np.pi, num_categories, endpoint=False).tolist()

    # initialize the graphic
    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))

    # a transparent line (alpha=0) of values is drawn first to set the circle
    # radius, based on max(values)
    ax.plot(angles + angles[:1], values + values[:1], color='skyblue', alpha=0, linewidth=1.5)
    ax.plot(angles + angles[:1], values_normalized + values_normalized[:1], color='black', alpha=0.5, linewidth=1.2)

    # fill the sector
    ax.fill(angles, values_normalized, color='orange', alpha=0.4)

    # labels
    ax.set_yticklabels([])
    ax.set_xticks(angles)
    ticks = [categories[i].replace("_", " ") + f"\n({round(100 * tvalues[i], 2)}%)" for i in range(len(categories))]
    ax.set_xticklabels(ticks, color="black")

    ax.spines['polar'].set_visible(False)

    plt.title(f'Characteristics of segment {index+1}\n')

    # plt.show()


def radar_mp_plot_all(df, categories):

    nb_segments = df.shape[0]

    # initialize the graphic
    fig, ax = plt.subplots(2, 2, figsize=(25, 20), subplot_kw=dict(polar=True))

    for index in range(nb_segments):
        row = index // 2  # integer division to get the row number
        col = index % 2

        # true values are used to print the true value in parentheses
        tvalues = list(df.loc[index, categories])

        max_values = df[categories].max()

        # values are true values / max among the 4 segments (see radar_mp_plot)
        values = list(df.loc[index, categories] / max_values)

        # normalized values adjust each value around the circle
        values_normalized = [max(values) * elt for elt in values]

        # number of categories
        num_categories = len(categories)

        angles = np.linspace(0, 2 * np.pi, num_categories, endpoint=False).tolist()

        # a transparent line (alpha=0) of values is drawn first to set the
        # circle radius, based on max(values)
        ax[row, col].plot(angles + angles[:1], values + values[:1], color='skyblue', alpha=0, linewidth=1.5)
        ax[row, col].plot(angles + angles[:1], values_normalized + values_normalized[:1], color='black', alpha=0.5,
                          linewidth=1.2)

        # fill the sector
        ax[row, col].fill(angles, values_normalized, color='orange', alpha=0.4, label=index)

        # labels
        ax[row, col].set_yticklabels([])
        ax[row, col].set_xticks(angles)
        ticks = [categories[i].replace("_", " ") + f"\n({round(100 * tvalues[i], 2)}%)" for i in range(len(categories))]
        ax[row, col].set_xticklabels(ticks, color="black", size=20)

        ax[row, col].spines['polar'].set_visible(False)

        ax[row, col].set_title(f'Segment {index+1}\n', size=24)

    fig.suptitle(f"Characteristics of marketing personae of {type_of_activity} companies", size=32)
    # plt.show()
@@ -1,10 +1,16 @@
import pandas as pd
import os
import s3fs
import io
import warnings
from datetime import date, timedelta, datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns


def load_files(nb_compagnie):
    """
    load and preprocess dataframes
    """
    customer = pd.DataFrame()
    campaigns_brut = pd.DataFrame()
    campaigns_kpi = pd.DataFrame()

@@ -12,6 +18,7 @@ def load_files(nb_compagnie):
    tickets = pd.DataFrame()
    targets = pd.DataFrame()

+    # start of the loop that generates aggregated datasets for the 5 performing-arts companies
    for directory_path in nb_compagnie:
        df_customerplus_clean_0 = display_input_databases(directory_path, file_name = "customerplus_cleaned")
        df_campaigns_brut = display_input_databases(directory_path, file_name = "campaigns_information", datetime_col = ['opened_at', 'sent_at', 'campaign_sent_at'])

@@ -29,7 +36,7 @@ def load_files(nb_compagnie):
        targets_columns.remove('customer_id')
        df_target_KPI[targets_columns] = df_target_KPI[targets_columns].fillna(0)

-    # Create company identifier
+    # create the "number_company" column, which allows the results to be aggregated
        df_tickets_kpi["number_company"]=int(directory_path)
        df_campaigns_brut["number_company"]=int(directory_path)
        df_campaigns_kpi["number_company"]=int(directory_path)

@@ -37,7 +44,7 @@ def load_files(nb_compagnie):
        df_target_information["number_company"]=int(directory_path)
        df_target_KPI["number_company"]=int(directory_path)

-    # Clean index
+    # process the indexes
        df_tickets_kpi["customer_id"]= directory_path + '_' +  df_tickets_kpi['customer_id'].astype('str')
        df_campaigns_brut["customer_id"]= directory_path + '_' +  df_campaigns_brut['customer_id'].astype('str')
        df_campaigns_kpi["customer_id"]= directory_path + '_' +  df_campaigns_kpi['customer_id'].astype('str')
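Prefixing each customer_id with the company directory makes the ids unique once the per-company frames are concatenated; schematically (toy values):

    import pandas as pd

    df = pd.DataFrame({"customer_id": [42, 43]})
    df["customer_id"] = "5" + '_' + df["customer_id"].astype('str')
    print(df["customer_id"].tolist())  # ['5_42', '5_43']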
@@ -54,7 +61,7 @@ def load_files(nb_compagnie):
        df_target_KPI["customer_id"]= directory_path + '_' +  df_target_KPI['customer_id'].astype('str')

-    # Concatenation
+    # Concaténation
        customer = pd.concat([customer, df_customerplus_clean], ignore_index=True)
        campaigns_kpi = pd.concat([campaigns_kpi, df_campaigns_kpi], ignore_index=True)
        campaigns_brut = pd.concat([campaigns_brut, df_campaigns_brut], ignore_index=True)

@@ -65,7 +72,7 @@ def load_files(nb_compagnie):
    return customer, campaigns_kpi, campaigns_brut, tickets, products, targets


-def remove_outlier_total_amount(tickets : pd.DataFrame):
+def remove_outlier_total_amount(tickets):
    Q1 = tickets['total_amount'].quantile(0.25)
    Q3 = tickets['total_amount'].quantile(0.75)
    IQR = Q3 - Q1
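The cutoff lines are elided by the diff viewer here; a sketch of the conventional 1.5 × IQR fence the function presumably applies (variable names below are assumptions, not the repository's exact code):

    lower_bound = Q1 - 1.5 * IQR
    upper_bound = Q3 + 1.5 * IQR
    tickets = tickets[(tickets['total_amount'] >= lower_bound) & (tickets['total_amount'] <= upper_bound)]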
@@ -76,11 +83,8 @@ def remove_outlier_total_amount(tickets : pd.DataFrame):


def save_file_s3(File_name, type_of_activity):
-    """
-    save plots into s3 storage
-    """
    image_buffer = io.BytesIO()
-    plt.savefig(image_buffer, format='png', pad_inches=1, bbox_inches="tight", dpi = 150)
+    plt.savefig(image_buffer, format='png')
    image_buffer.seek(0)
    FILE_PATH = f"projet-bdc2324-team1/2_Output/2_0_Descriptive_Statistics/{type_of_activity}/"
    FILE_PATH_OUT_S3 = FILE_PATH + File_name + type_of_activity + '.png'

@@ -89,10 +93,8 @@ def save_file_s3(File_name, type_of_activity):
    plt.close()


-def outlier_detection(tickets : pd.DataFrame, company_list, show_diagram=False):
-    """
-    detect anonymous customers
-    """
+def outlier_detection(tickets, company_list, show_diagram=False):

    outlier_list = list()

    for company in company_list:

@@ -116,15 +118,12 @@ def outlier_detection(tickets : pd.DataFrame, company_list, show_diagram=False):
            plt.figure(figsize=(3, 3))
            plt.pie(new_series, labels=new_series.index, autopct='%1.1f%%', startangle=140, pctdistance=0.5)
            plt.axis('equal')
-            # plt.title(f'Répartition des montants totaux pour la compagnie {company}')
+            plt.title(f'Répartition des montants totaux pour la compagnie {company}')
            plt.show()
    return outlier_list


-def valid_customer_detection(products : pd.DataFrame, campaigns_brut : pd.DataFrame):
-    """
-    identify customers that are in our time perimeter
-    """
+def valid_customer_detection(products, campaigns_brut):
    products_valid = products[products['purchase_date']>="2021-05-01"]
    consumer_valid_product = products_valid['customer_id'].to_list()
@@ -135,10 +134,7 @@ def valid_customer_detection(products : pd.DataFrame, campaigns_brut : pd.DataFrame):
    return consumer_valid


-def identify_purchase_during_target_periode(products : pd.DataFrame):
-    """
-    identify customers who purchased a ticket during the target period
-    """
+def identify_purchase_during_target_periode(products):
    products_target_period = products[(products['purchase_date']>="2022-11-01")
    & (products['purchase_date']<="2023-11-01")]
    customer_target_period = products_target_period['customer_id'].to_list()

@@ -149,60 +145,60 @@ def remove_elements(lst, elements_to_remove):
    return ''.join([x for x in lst if x not in elements_to_remove])


-def compute_nb_clients(customer: pd.DataFrame, type_of_activity: str):
+def compute_nb_clients(customer, type_of_activity):
    company_nb_clients = customer[customer["purchase_count"]>0].groupby("number_company")["customer_id"].count().reset_index()
    plt.figure(figsize=(4,3))
    plt.bar(company_nb_clients["number_company"], company_nb_clients["customer_id"]/1000)
-    plt.xlabel('Company Number')
+    plt.xlabel('Company')
    plt.ylabel("Number of clients (thousands)")
-    # plt.title(f"Number of clients Across {type_of_activity} Companies")
+    plt.title(f"Number of clients Across {type_of_activity} Companies")
    plt.xticks(company_nb_clients["number_company"], ["{}".format(i) for i in company_nb_clients["number_company"]])
    plt.show()
    save_file_s3("nb_clients_", type_of_activity)


-def maximum_price_paid(customer: pd.DataFrame, type_of_activity: str):
+def maximum_price_paid(customer, type_of_activity):
    company_max_price = customer.groupby("number_company")["max_price"].max().reset_index()
    plt.bar(company_max_price["number_company"], company_max_price["max_price"])

    plt.xlabel('Company Number')
    plt.ylabel("Maximal price of a ticket")
-    # plt.title(f"Maximal price of a ticket Across {type_of_activity} Companies")
+    plt.title(f"Maximal price of a ticket Across {type_of_activity} Companies")
    plt.xticks(company_max_price["number_company"], ["{}".format(i) for i in company_max_price["number_company"]])
    plt.show()
    save_file_s3("Maximal_price_", type_of_activity)


-def target_proportion(customer: pd.DataFrame, type_of_activity: str):
+def target_proportion(customer, type_of_activity):
    df_y = customer.groupby(["number_company"]).agg({"has_purchased_target_period" : 'sum',
                                                 'customer_id' : 'nunique'}).reset_index()
    df_y['prop_has_purchased_target_period'] = (df_y["has_purchased_target_period"]/df_y['customer_id'])*100
    plt.bar(df_y["number_company"], df_y["prop_has_purchased_target_period"])
    plt.xlabel('Company Number')
    plt.ylabel('Share (%)')
-    # plt.title(f'Share of Customers who Bought during the Target Period Across {type_of_activity} Companies')
+    plt.title(f'Share of Customers who Bought during the Target Period Across {type_of_activity} Companies')
    plt.xticks(df_y["number_company"], ["{}".format(i) for i in df_y["number_company"]])
    plt.show()
    save_file_s3("share_target_", type_of_activity)


-def mailing_consent(customer: pd.DataFrame, type_of_activity: str):
+def mailing_consent(customer, type_of_activity):
    mailing_consent = customer.groupby("number_company")["opt_in"].mean().reset_index()
    mailing_consent["opt_in"] *= 100
    plt.bar(mailing_consent["number_company"], mailing_consent["opt_in"])

    plt.xlabel('Company Number')
    plt.ylabel('Mailing Consent (%)')
-    # plt.title(f'Consent of mailing Across {type_of_activity} Companies')
+    plt.title(f'Consent of mailing Across {type_of_activity} Companies')
    plt.xticks(mailing_consent["number_company"], ["{}".format(i) for i in mailing_consent["number_company"]])
    plt.show()
    save_file_s3("mailing_consent_", type_of_activity)


-def mailing_consent_by_target(customer: pd.DataFrame, type_of_activity: str):
+def mailing_consent_by_target(customer):
    df_graph = customer.groupby(["number_company", "has_purchased_target_period"])["opt_in"].mean().reset_index()
    # grouped barplot
-    fig, ax = plt.subplots(figsize=(5, 3))
+    fig, ax = plt.subplots(figsize=(10, 6))

    categories = df_graph["number_company"].unique()
    bar_width = 0.35
@@ -222,7 +218,7 @@ def mailing_consent_by_target(customer: pd.DataFrame, type_of_activity: str):
    # add the labels, the legend, etc.
    ax.set_xlabel('Company Number')
    ax.set_ylabel('Mailing Consent (%)')
-    # ax.set_title(f'Consent of mailing according to target Across {type_of_activity} Companies')
+    ax.set_title(f'Consent of mailing according to target Across {type_of_activity} Companies')
    ax.set_xticks([pos + bar_width / 2 for pos in np.arange(len(categories))])
    ax.set_xticklabels(categories)
    ax.legend()

@@ -232,7 +228,7 @@ def mailing_consent_by_target(customer: pd.DataFrame, type_of_activity: str):
    save_file_s3("mailing_consent_target_", type_of_activity)


-def gender_bar(customer: pd.DataFrame, type_of_activity: str):
+def gender_bar(customer, type_of_activity):
    company_genders = customer.groupby("number_company")[["gender_male", "gender_female", "gender_other"]].mean().reset_index()

    company_genders["gender_male"] *= 100

@@ -240,7 +236,6 @@ def gender_bar(customer: pd.DataFrame, type_of_activity: str):
    company_genders["gender_other"] *= 100

    # barplot
-    plt.figure(figsize=(4,3))
    plt.bar(company_genders["number_company"], company_genders["gender_male"], label = "Male")
    plt.bar(company_genders["number_company"], company_genders["gender_female"],
            bottom = company_genders["gender_male"], label = "Female")

@@ -249,65 +244,42 @@ def gender_bar(customer: pd.DataFrame, type_of_activity: str):

    plt.xlabel('Company Number')
    plt.ylabel("Frequency (%)")
-    # plt.title(f"Gender Distribution of Customers Across {type_of_activity} Companies")
+    plt.title(f"Gender Distribution of Customers Across {type_of_activity} Companies")
    plt.legend()
    plt.xticks(company_genders["number_company"], ["{}".format(i) for i in company_genders["number_company"]])
    plt.show()
    save_file_s3("gender_bar_", type_of_activity)


-def country_bar(customer: pd.DataFrame, type_of_activity: str):
+def country_bar(customer, type_of_activity):
    company_country_fr = customer.groupby("number_company")["country_fr"].mean().reset_index()
    company_country_fr["country_fr"] *= 100
-    plt.figure(figsize=(4,3))
    plt.bar(company_country_fr["number_company"], company_country_fr["country_fr"])

    plt.xlabel('Company Number')
    plt.ylabel("Share of French Customer (%)")
-    # plt.title(f"Share of French Customer Across {type_of_activity} Companies")
+    plt.title(f"Share of French Customer Across {type_of_activity} Companies")
    plt.xticks(company_country_fr["number_company"], ["{}".format(i) for i in company_country_fr["number_company"]])
    plt.show()
    save_file_s3("country_bar_", type_of_activity)


-def lazy_customer_plot(campaigns_kpi: pd.DataFrame, type_of_activity: str):
-    company_lazy_customers = campaigns_kpi.groupby("number_company")[["nb_campaigns", "taux_ouverture_mail"]].mean().reset_index()
-    company_lazy_customers["taux_ouverture_mail"] *= 100
-
-    # Initialize the figure
-    fig, ax1 = plt.subplots(figsize=(6, 3))
-    width = 0.4
-    x = range(len(company_lazy_customers))
-
-    # Plot the bars for "nb_campaigns" on the first y-axis
-    ax1.bar([i - width/2 for i in x], company_lazy_customers['nb_campaigns'], width=width, align='center', label='Amount of Campaigns', color = 'steelblue')
-
-    # Set labels and title for the first y-axis
-    ax1.set_ylabel('Number of Mails Received', color='steelblue')
-    ax1.tick_params(axis='y', labelcolor='steelblue')
-
-    # Create another y-axis for "taux_ouverture_mail"
-    ax2 = ax1.twinx()
-
-    # Plot the bars for "taux_ouverture_mail" on the second y-axis
-    ax2.bar([i + width/2 for i in x], company_lazy_customers['taux_ouverture_mail'], width=width, align='center', label='Open Mail Rate', color = 'darkorange')
-
-    # Set labels and title for the second y-axis
-    ax2.set_ylabel('Open Mail Rate (%)', color='darkorange')
-    ax2.tick_params(axis='y', labelcolor='darkorange')
-
-    # Set x-axis ticks and labels
-    ax1.set_xticks(x)
-    ax1.set_xticklabels(company_lazy_customers['number_company'])
+def lazy_customer_plot(campaigns_kpi, type_of_activity):
+    company_lazy_customers = campaigns_kpi.groupby("number_company")["nb_campaigns_opened"].mean().reset_index()
+    plt.bar(company_lazy_customers["number_company"], company_lazy_customers["nb_campaigns_opened"])
+
+    plt.xlabel('Company Number')
+    plt.title(f"Share of Customers who did not Open Mail Across {type_of_activity} Companies")
+    plt.xticks(company_lazy_customers["number_company"], ["{}".format(i) for i in company_lazy_customers["number_company"]])
+    plt.show()
+    save_file_s3("lazy_customer_", type_of_activity)


-def campaigns_effectiveness(customer: pd.DataFrame, type_of_activity: str):
+def campaigns_effectiveness(customer, type_of_activity):

    campaigns_effectiveness = customer.groupby(["number_company", "has_purchased_target_period"])["opt_in"].mean().reset_index()

-    fig, ax = plt.subplots(figsize=(5, 3))
+    fig, ax = plt.subplots(figsize=(10, 6))

    categories = campaigns_effectiveness["number_company"].unique()
    bar_width = 0.35
@@ -335,7 +307,7 @@ def campaigns_effectiveness(customer: pd.DataFrame, type_of_activity: str):
    # add the labels, the legend, etc.
    ax.set_xlabel('Company Number')
    ax.set_ylabel('Share of Consent (%)')
-    # ax.set_title(f"Proportion of customers who have given their consent to receive emails, by customer class ({type_of_activity} companies)")
+    ax.set_title(f"Proportion of customers who have given their consent to receive emails, by customer class ({type_of_activity} companies)")
    ax.set_xticks([pos + bar_width / 2 for pos in np.arange(len(categories))])
    ax.set_xticklabels(categories)
    ax.legend()

    save_file_s3("campaigns_effectiveness_", type_of_activity)


-def sale_dynamics(products : pd.DataFrame, campaigns_brut : pd.DataFrame, type_of_activity):
+def sale_dynamics(products, campaigns_brut, type_of_activity):
    purchase_min = products.groupby(['customer_id'])['purchase_date'].min().reset_index()
    purchase_min.rename(columns = {'purchase_date' : 'first_purchase_event'}, inplace = True)
    purchase_min['first_purchase_event'] = pd.to_datetime(purchase_min['first_purchase_event'])

@@ -377,7 +349,6 @@ def sale_dynamics(products : pd.DataFrame, campaigns_brut : pd.DataFrame, type_of_activity):


    merged_data = pd.merge(purchases_graph_used_0, purchases_graph_used_1, on="purchase_date_month", suffixes=("_new", "_old"))
-    plt.figure(figsize=(5.5,4))

    plt.bar(merged_data["purchase_date_month"], merged_data["nb_purchases_new"], width=12, label="New Customers")
    plt.bar(merged_data["purchase_date_month"], merged_data["nb_purchases_old"],

@@ -389,26 +360,26 @@ def sale_dynamics(products : pd.DataFrame, campaigns_brut : pd.DataFrame, type_of_activity):

    plt.xlabel('Month')
    plt.ylabel("Number of Sales")
-    # plt.title(f"Number of Sales Across {type_of_activity} Companies")
+    plt.title(f"Number of Sales Across {type_of_activity} Companies")
    plt.legend()
    plt.show()
    save_file_s3("sale_dynamics_", type_of_activity)


-def tickets_internet(tickets: pd.DataFrame, type_of_activity: str):
+def tickets_internet(tickets, type_of_activity):
    nb_tickets_internet = tickets.groupby("number_company")['prop_purchases_internet'].mean().reset_index()
    nb_tickets_internet['prop_purchases_internet'] *= 100
    plt.bar(nb_tickets_internet["number_company"], nb_tickets_internet["prop_purchases_internet"])

    plt.xlabel('Company Number')
    plt.ylabel("Share of Purchases Bought Online (%)")
-    # plt.title(f"Share of Online Purchases Across {type_of_activity} Companies")
+    plt.title(f"Share of Online Purchases Across {type_of_activity} Companies")
    plt.xticks(nb_tickets_internet["number_company"], ["{}".format(i) for i in nb_tickets_internet["number_company"]])
    plt.show()
    save_file_s3("tickets_internet_", type_of_activity)


-def already_bought_online(tickets: pd.DataFrame, type_of_activity: str):
+def already_bought_online(tickets, type_of_activity):
    nb_consumers_online = (tickets.groupby("number_company").agg({'achat_internet' : 'sum',
                                                                        'customer_id' : 'nunique'}
                                                                        ).reset_index())

@@ -418,23 +389,20 @@ def already_bought_online(tickets: pd.DataFrame, type_of_activity: str):

    plt.xlabel('Company Number')
    plt.ylabel("Share of Customer who Bought Online at least once (%)")
-    # plt.title(f"Share of Customer who Bought Online at least once Across {type_of_activity} Companies")
+    plt.title(f"Share of Customer who Bought Online at least once Across {type_of_activity} Companies")
    plt.xticks(nb_consumers_online["number_company"], ["{}".format(i) for i in nb_consumers_online["number_company"]])
    plt.show()
    save_file_s3("First_buy_internet_", type_of_activity)


-def box_plot_price_tickets(tickets: pd.DataFrame, type_of_activity: str):
+def box_plot_price_tickets(tickets, type_of_activity):
    price_tickets = tickets[(tickets['total_amount'] > 0)]
-    plt.figure(figsize=(4,3))
    sns.boxplot(data=price_tickets, y="total_amount", x="number_company", showfliers=False, showmeans=True)
-    # plt.title(f"Box plot of price tickets Across {type_of_activity} Companies")
    plt.xlabel('Company Number')
    plt.ylabel("Total Amount Spent")
+    plt.title(f"Box plot of price tickets Across {type_of_activity} Companies")
    plt.show()
    save_file_s3("box_plot_price_tickets_", type_of_activity)

-def target_description(targets : pd.DataFrame, type_of_activity: str):
+def target_description(targets, type_of_activity):

    describe_target = targets.groupby('number_company').agg(
        prop_target_jeune=('target_jeune', lambda x: (x.sum() / x.count())*100),

@@ -449,7 +417,7 @@ def target_description(targets : pd.DataFrame, type_of_activity: str):
    plot = describe_target.plot.bar()

    # add a title
-    # plot.set_title(f"Distribution of Targets by Category for {type_of_activity} companies")
+    plot.set_title(f"Distribution of Targets by Category for {type_of_activity} companies")

    # add labels for the x and y axes
    plot.set_xlabel("Company Number")