# Business Data Challenge - Team 1

import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings
from datetime import date, timedelta, datetime

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Import KPI construction functions (defines display_databases and the *_kpi_function helpers)
exec(open('0_KPI_functions.py').read())

# Ignore warnings
warnings.filterwarnings('ignore')


def display_covering_time(df, company, datecover):
    """
    Record the list of days covered by a company's purchase data.
    """
    min_date = df['purchase_date'].min().strftime("%Y-%m-%d")
    max_date = df['purchase_date'].max().strftime("%Y-%m-%d")
    # + 1 so that max_date itself counts as covered
    datecover[company] = [datetime.strptime(min_date, "%Y-%m-%d") + timedelta(days=x)
                          for x in range((datetime.strptime(max_date, "%Y-%m-%d")
                                          - datetime.strptime(min_date, "%Y-%m-%d")).days + 1)]
    print(f'Coverage of company {company}: {min_date} - {max_date}')
    return datecover


def compute_time_intersection(datecover):
    """
    Return the time coverage shared by all companies, as sorted date strings.
    """
    timestamps_sets = [set(timestamps) for timestamps in datecover.values()]
    intersection = set.intersection(*timestamps_sets)
    formated_dates = [dt.strftime("%Y-%m-%d") for dt in intersection]
    return sorted(formated_dates)


def df_coverage_modelization(companies_list, coverage_train=0.7):
    """
    Return the start_date, end_of_features and final_date that delimit
    the train and test datasets.
    """
    datecover = {}
    for company in companies_list:
        df_products_purchased_reduced = display_databases(company, file_name="products_purchased_reduced",
                                                          datetime_col=['purchase_date'])
        datecover = display_covering_time(df_products_purchased_reduced, company, datecover)
    #print(datecover.keys())
    dt_coverage = compute_time_intersection(datecover)
    start_date = dt_coverage[0]
    # Use the coverage_train argument instead of a hard-coded 0.7
    end_of_features = dt_coverage[int(coverage_train * len(dt_coverage))]
    final_date = dt_coverage[-1]
    return start_date, end_of_features, final_date
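# A minimal worked example of the coverage_train split (hypothetical dates, for
# illustration only): with a shared daily coverage spanning 2021, the 70% cut
# lands on 2021-09-13. Features are then built on [start_date, end_of_features]
# and the purchase target on (end_of_features, final_date].
# >>> cover = pd.date_range('2021-01-01', '2021-12-31')  # 365 days
# >>> cover[int(0.7 * len(cover))]
# Timestamp('2021-09-13 00:00:00')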
def dataset_construction(min_date, end_features_date, max_date, directory_path):
    # Import the cleaned databases
    df_customerplus_clean_0 = display_databases(directory_path, file_name="customerplus_cleaned")
    df_campaigns_information = display_databases(directory_path, file_name="campaigns_information",
                                                 datetime_col=['opened_at', 'sent_at', 'campaign_sent_at'])
    df_products_purchased_reduced = display_databases(directory_path, file_name="products_purchased_reduced",
                                                      datetime_col=['purchase_date'])

    # Consistency filter for applying our method
    max_date = pd.to_datetime(max_date, utc=True, format='ISO8601')
    end_features_date = pd.to_datetime(end_features_date, utc=True, format='ISO8601')
    min_date = pd.to_datetime(min_date, utc=True, format='ISO8601')

    # Filter df_campaigns_information to the feature window
    df_campaigns_information = df_campaigns_information[
        (df_campaigns_information['sent_at'] <= end_features_date) &
        (df_campaigns_information['sent_at'] >= min_date)].copy()
    # .loc avoids a chained assignment, which does not reliably write through
    df_campaigns_information.loc[df_campaigns_information['opened_at'] >= end_features_date, 'opened_at'] = np.datetime64('NaT')

    # Filter the purchases to the feature window; keep the unfiltered frame,
    # since the explained variable needs the purchases made after end_features_date
    df_products_purchased_features = df_products_purchased_reduced[
        (df_products_purchased_reduced['purchase_date'] <= end_features_date) &
        (df_products_purchased_reduced['purchase_date'] >= min_date)]

    print("Data filtering : SUCCESS")

    # 1. Merge the databases and build the KPIs

    # KPIs on marketing campaigns
    df_campaigns_kpi = campaigns_kpi_function(campaigns_information=df_campaigns_information)
    # KPIs on purchasing behaviour
    df_tickets_kpi = tickets_kpi_function(tickets_information=df_products_purchased_features)
    # KPIs on socio-demographic data
    df_customerplus_clean = customerplus_kpi_function(customerplus_clean=df_customerplus_clean_0)

    print("KPIs construction : SUCCESS")

    # Merge with customer-related KPIs
    df_customer = pd.merge(df_customerplus_clean, df_campaigns_kpi, on='customer_id', how='left')
    # Fill NaN values
    df_customer[['nb_campaigns', 'nb_campaigns_opened']] = df_customer[['nb_campaigns', 'nb_campaigns_opened']].fillna(0)

    # Merge with purchasing-behaviour KPIs
    df_customer_product = pd.merge(df_tickets_kpi, df_customer, on='customer_id', how='outer')
    # Fill NaN values
    purchase_kpi_cols = ['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers',
                         'vente_internet_max', 'nb_tickets_internet']
    df_customer_product[purchase_kpi_cols] = df_customer_product[purchase_kpi_cols].fillna(0)

    print("Explanatory variable construction : SUCCESS")

    # 2. Construction of the explained variable: purchases in the prediction window
    df_products_purchased_to_predict = df_products_purchased_reduced[
        (df_products_purchased_reduced['purchase_date'] <= max_date) &
        (df_products_purchased_reduced['purchase_date'] > end_features_date)].copy()

    # Purchase indicator
    df_products_purchased_to_predict['y_has_purchased'] = 1
    y = df_products_purchased_to_predict[['customer_id', 'y_has_purchased']].drop_duplicates()

    print("Explained variable construction : SUCCESS")

    # 3. Merge the explained and explanatory variables
    dataset = pd.merge(df_customer_product, y, on=['customer_id'], how='left')

    # 0 if there is no purchase (fillna returns a new object, so assign it back)
    dataset['y_has_purchased'] = dataset['y_has_purchased'].fillna(0)

    # Add the company id as a prefix to customer_id
    dataset['customer_id'] = directory_path + '_' + dataset['customer_id'].astype('str')

    return dataset


## Exportation

companies = {'musee': ['1', '2', '3', '4', '101'],
             'sport': ['5', '6', '7', '8', '9'],
             'musique': ['10', '11', '12', '13', '14']}

type_of_comp = input('Choose the company type: sport? musique? musee? ')
list_of_comp = companies[type_of_comp]

# Export folder
BUCKET_OUT = f'projet-bdc2324-team1/Generalization/{type_of_comp}'

# Create the test and train datasets for the selected companies
start_date, end_of_features, final_date = df_coverage_modelization(list_of_comp, coverage_train=0.7)

for company in list_of_comp:
    # Dataset test
    dataset_test = dataset_construction(min_date=start_date, end_features_date=end_of_features,
                                        max_date=final_date, directory_path=company)

    # Export
    FILE_KEY_OUT_S3 = "dataset_test" + company + ".csv"
    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Test_set/" + FILE_KEY_OUT_S3

    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
        dataset_test.to_csv(file_out, index=False)

    print("Exportation dataset test : SUCCESS")

    # Dataset train
    dataset_train = dataset_construction(min_date=start_date, end_features_date=end_of_features,
                                         max_date=final_date, directory_path=company)

    # Export
    FILE_KEY_OUT_S3 = "dataset_train" + company + ".csv"
    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Train_set/" + FILE_KEY_OUT_S3

    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
        dataset_train.to_csv(file_out, index=False)

    print("Exportation dataset train : SUCCESS")

print("END OF DATASET GENERATION : SUCCESS")
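# Optional sanity check (a minimal sketch, not part of the pipeline): re-read
# the last exported file to confirm the S3 write succeeded; FILE_PATH_OUT_S3
# still points at the train set of the final loop iteration. Uncomment to run.
# with fs.open(FILE_PATH_OUT_S3, 'r') as file_in:
#     print(pd.read_csv(file_in).shape)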