# Business Data Challenge - Team 1
#
# Stage 0: load every raw CSV for company 1 from S3, clean the customer /
# target / campaign / ticket areas, and export intermediate datasets.

import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings

# Import cleaning and merge functions.
# NOTE(review): exec() on local repo files works but hides these names from
# linters/IDEs; consider turning the two scripts into importable modules.
exec(open('BDC-team-1/0_Cleaning_and_merge_functions.py').read())
exec(open('BDC-team-1/0_KPI_functions.py').read())

# Create filesystem object pointing at the project S3 endpoint.
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Ignore warnings (pandas chained-assignment noise, mainly).
warnings.filterwarnings('ignore')

# Data loading: list every CSV in the company-1 bucket prefix.
BUCKET = "bdc2324-data/1"
liste_database = fs.ls(BUCKET)

# All files share the same client number (first path component after the
# bucket); dataframes are named df<client>_<basename>, e.g. df1_customersplus.
client_number = liste_database[0].split("/")[1]
df_prefix = "df" + str(client_number) + "_"

# Hoisted out of the loop and precompiled: matches ".../<client>/<client><name>.csv",
# group(3) being the alphabetic <name> part of the file.
FILENAME_PATTERN = re.compile(r'\/(\d+)\/(\d+)([a-zA-Z_]+)\.csv$')

# Iterate the paths directly instead of indexing by range(len(...)).
for current_path in liste_database:
    with fs.open(current_path, mode="rb") as file_in:
        df = pd.read_csv(file_in)
    # the pattern of the name is df1xxx
    nom_dataframe = df_prefix + FILENAME_PATTERN.search(current_path).group(3)
    # Dynamically publish each dataframe as a module-level name.
    globals()[nom_dataframe] = df

## 1 - Cleaning of the datasets

# Cleaning customerplus
df1_customerplus_clean = preprocessing_customerplus(df1_customersplus)

# Cleaning target area
df1_target_information = preprocessing_target_area(
    targets=df1_targets,
    target_types=df1_target_types,
    customer_target_mappings=df1_customer_target_mappings,
)

# Cleaning campaign area
df1_campaigns_information = preprocessing_campaigns_area(
    campaign_stats=df1_campaign_stats,
    campaigns=df1_campaigns,
)

# Exportation of the cleaned campaigns dataset.
BUCKET_OUT = "projet-bdc2324-team1"
FILE_KEY_OUT_S3 = "0_Temp/Company 1 - Campaigns dataset clean.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    df1_campaigns_information.to_csv(file_out, index=False)

## Cleaning product area

# Cleaning ticket area
df1_ticket_information = preprocessing_tickets_area(
    tickets=df1_tickets,
    purchases=df1_purchases,
    suppliers=df1_suppliers,
    type_ofs=df1_type_ofs,
)

# Re-point the bucket/prefix used by the product-table helpers below.
BUCKET = "bdc2324-data"
directory_path = '1'
# Build the product reference tables and the uniform product dataframe.
products_theme = create_products_table()
events_theme = create_events_table()
representation_theme = create_representations_table()
products_global = uniform_product_df()

# Fusion liée au product: attach product attributes to each ticket line.
df1_products_purchased = pd.merge(
    df1_ticket_information, products_global,
    left_on='product_id', right_on='id_products', how='inner',
)

# Selection des variables d'intérêts
df1_products_purchased_reduced = df1_products_purchased[[
    'ticket_id', 'customer_id', 'purchase_id', 'event_type_id',
    'supplier_name', 'purchase_date', 'type_of_ticket_name', 'amount',
    'children', 'is_full_price', 'name_event_types', 'name_facilities',
    'name_categories', 'name_events', 'name_seasons',
]]

# Exportation of the reduced purchases dataset.
BUCKET_OUT = "projet-bdc2324-team1"
FILE_KEY_OUT_S3 = "0_Temp/Company 1 - Purchases.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    df1_products_purchased_reduced.to_csv(file_out, index=False)

## 2 - Construction of KPIs on a given period

def explanatory_variables(min_date = "2021-09-01", max_date = "2023-09-01",
                          df_campaigns_information = df1_campaigns_information,
                          df_products_purchased_reduced = df1_products_purchased_reduced,
                          df_customerplus_clean = df1_customerplus_clean):
    """Build the per-customer feature table over [min_date, max_date].

    Filters campaigns and purchases to the window, computes campaign and
    ticket KPIs plus socio-demographic indicators, and merges everything on
    customer_id. Returns one row per customer (outer-joined with ticket KPIs).
    """
    # Filtre de cohérence pour la mise en pratique de notre méthode
    max_date = pd.to_datetime(max_date, utc=True, format='ISO8601')
    min_date = pd.to_datetime(min_date, utc=True, format='ISO8601')

    # Filtre de la base df_campaigns_information
    df_campaigns_information = df_campaigns_information[
        (df_campaigns_information['sent_at'] <= max_date)
        & (df_campaigns_information['sent_at'] >= min_date)
    ].copy()
    # BUG FIX: the original chained assignment
    # df['opened_at'][mask] = NaT operated on a slice copy and could silently
    # fail (warnings are globally suppressed). Use .loc for a real assignment.
    df_campaigns_information.loc[
        df_campaigns_information['opened_at'] >= max_date, 'opened_at'
    ] = np.datetime64('NaT')

    # Filtre de la base df_products_purchased_reduced
    df_products_purchased_reduced = df_products_purchased_reduced[
        (df_products_purchased_reduced['purchase_date'] <= max_date)
        & (df_products_purchased_reduced['purchase_date'] >= min_date)
    ]

    print("Data filtering : SUCCESS")

    # Fusion de l'ensemble et creation des KPI

    # KPI sur les campagnes publicitaires
    df_campaigns_kpi = campaigns_kpi_function(campaigns_information=df_campaigns_information)

    # KPI sur le comportement d'achat
    df_tickets_kpi = tickets_kpi_function(tickets_information=df_products_purchased_reduced)

    # KPI sur les données socio-demographique

    ## Le genre: one-hot encode the 0/1/2 gender code.
    df_customerplus_clean["gender_label"] = df_customerplus_clean["gender"].map({
        0: 'female',
        1: 'male',
        2: 'other',
    })
    gender_dummies = pd.get_dummies(df_customerplus_clean["gender_label"], prefix='gender').astype(int)
    df_customerplus_clean = pd.concat([df_customerplus_clean, gender_dummies], axis=1)

    ## Indicatrice si individue vit en France (NaN kept when country unknown)
    df_customerplus_clean["country_fr"] = df_customerplus_clean["country"].apply(
        lambda x: int(x == "fr") if pd.notna(x) else np.nan
    )

    print("KPIs construction : SUCCESS")

    # Fusion avec KPI liés au customer
    df_customer = pd.merge(df_customerplus_clean, df_campaigns_kpi, on='customer_id', how='left')

    # Fill NaN values: customers never targeted by a campaign get 0.
    df_customer[['nb_campaigns', 'nb_campaigns_opened']] = \
        df_customer[['nb_campaigns', 'nb_campaigns_opened']].fillna(0)

    # Fusion avec KPI liés au comportement d'achat
    df_customer_product = pd.merge(df_tickets_kpi, df_customer, on='customer_id', how='outer')

    # Fill NaN values: customers with no purchase in the window get 0.
    purchase_kpi_cols = ['nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers',
                         'vente_internet_max', 'nb_tickets_internet']
    df_customer_product[purchase_kpi_cols] = df_customer_product[purchase_kpi_cols].fillna(0)

    print("Explanatory variable construction : SUCCESS")

    return df_customer_product

# Fonction pour créer les variables expliquée
def explained_variable(min_date = "2023-08-01", max_date = "2023-11-01",
                       df_products_purchased_reduced = df1_products_purchased_reduced):
    """Build the target: customers who purchased in (min_date, max_date].

    Returns unique (customer_id, event_type_id) pairs with y_has_purchased = 1.
    NOTE: a left merge with this table leaves NaN for non-purchasers; callers
    are expected to fill those with 0.
    """
    # Filtrer la base d'achat — .copy() so the flag column below is written on
    # an owned frame, not a view of the filtered slice.
    df_products_purchased_reduced = df_products_purchased_reduced[
        (df_products_purchased_reduced['purchase_date'] <= max_date)
        & (df_products_purchased_reduced['purchase_date'] > min_date)
    ].copy()

    # Indicatrice d'achat
    df_products_purchased_reduced['y_has_purchased'] = 1

    y = df_products_purchased_reduced[['customer_id', 'event_type_id', 'y_has_purchased']].drop_duplicates()

    print("Explained variable construction : SUCCESS")

    return y

## Exportation

# Dossier d'exportation
BUCKET_OUT = "projet-bdc2324-team1/1_Output/Logistique Regression databases - First approach"

# Dataset test: features over 2021-08 → 2023-08, target over the next quarter.
X_test = explanatory_variables(min_date="2021-08-01", max_date="2023-08-01",
                               df_campaigns_information=df1_campaigns_information,
                               df_products_purchased_reduced=df1_products_purchased_reduced,
                               df_customerplus_clean=df1_customerplus_clean)

y_test = explained_variable(min_date="2023-08-01", max_date="2023-11-01",
                            df_products_purchased_reduced=df1_products_purchased_reduced)

dataset_test = pd.merge(X_test, y_test, on=['customer_id', 'event_type_id'], how='left')

# Exportation
FILE_KEY_OUT_S3 = "dataset_test.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    dataset_test.to_csv(file_out, index=False)

print("Exportation dataset test : SUCCESS")

# Dataset train: features over 2021-05 → 2023-05, target over the next quarter.
X_train = explanatory_variables(min_date="2021-05-01", max_date="2023-05-01",
                                df_campaigns_information=df1_campaigns_information,
                                df_products_purchased_reduced=df1_products_purchased_reduced,
                                df_customerplus_clean=df1_customerplus_clean)

y_train = explained_variable(min_date="2023-05-01", max_date="2023-08-01",
                             df_products_purchased_reduced=df1_products_purchased_reduced)

dataset_train = pd.merge(X_train, y_train, on=['customer_id', 'event_type_id'], how='left')

# Exportation
FILE_KEY_OUT_S3 = "dataset_train.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    # BUG FIX: the original wrote dataset_test here, so dataset_train.csv
    # contained the TEST data. Write the train dataset.
    dataset_train.to_csv(file_out, index=False)

print("Exportation dataset train : SUCCESS")

print("FIN DE LA GENERATION DES DATASETS : SUCCESS")