# Business Data Challenge - Team 1
import pandas as pd
|
||
|
import numpy as np
|
||
|
import os
|
||
|
import s3fs
|
||
|
import re
|
||
|
import warnings
|
||
|
|
||
|
# Import cleaning and merge functions.
# NOTE(review): exec() of co-located scripts is fragile (runs in this module's
# global namespace, invisible to tooling) — a package-style `import` would be
# safer, but the call pattern is kept to preserve the existing project layout.
def _exec_script(path):
    """Execute a sibling script in this module's global namespace.

    Uses a context manager so the file handle is closed promptly — the
    original ``exec(open(path).read())`` never closed the file.
    """
    with open(path) as script:
        exec(script.read(), globals())

_exec_script('BDC-team-1/0_Cleaning_and_merge_functions.py')
_exec_script('BDC-team-1/0_KPI_functions.py')

# Create filesystem object for the S3-compatible store; the endpoint host
# comes from the environment (raises KeyError if AWS_S3_ENDPOINT is unset).
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Silence library warnings for this batch run.
warnings.filterwarnings('ignore')
|
||
|
|
||
|
# Data loading: turn every CSV under the client's folder into a module-level
# DataFrame named df<client>_<table> (e.g. df1_customersplus).
BUCKET = "bdc2324-data/1"
liste_database = fs.ls(BUCKET)

# Paths look like "bdc2324-data/1/1<table>.csv"; element [1] of the split is
# the client id (already a str — the original wrapped it in a redundant str()).
client_number = liste_database[0].split("/")[1]
df_prefix = "df" + client_number + "_"

# Compile the name pattern once instead of on every loop iteration.
# group(3) captures the table name between the numeric prefix and ".csv".
_name_pattern = re.compile(r'\/(\d+)\/(\d+)([a-zA-Z_]+)\.csv$')

# Iterate the paths directly instead of indexing via range(len(...)).
for current_path in liste_database:
    match = _name_pattern.search(current_path)
    if match is None:
        # Skip bucket entries that are not client CSVs; the original crashed
        # here with AttributeError on any non-matching path.
        continue
    with fs.open(current_path, mode="rb") as file_in:
        df = pd.read_csv(file_in)
    # Bind as a module-level name following the df1xxx naming pattern.
    globals()[df_prefix + match.group(3)] = df
|
||
|
|
||
|
# Clean each raw data area using the shared preprocessing helpers loaded
# via exec() above.

# Customer area
df1_customerplus_clean = preprocessing_customerplus(df1_customersplus)

# Ticket area: tickets joined with purchases, suppliers and ticket types
df1_ticket_information = preprocessing_tickets_area(
    tickets=df1_tickets,
    purchases=df1_purchases,
    suppliers=df1_suppliers,
    type_ofs=df1_type_ofs,
)

# Target area: targets joined with their types and the customer mappings
df1_target_information = preprocessing_target_area(
    targets=df1_targets,
    target_types=df1_target_types,
    customer_target_mappings=df1_customer_target_mappings,
)

# Campaign area: per-campaign statistics joined with campaign metadata
df1_campaigns_information = preprocessing_campaigns_area(
    campaign_stats=df1_campaign_stats,
    campaigns=df1_campaigns,
)
|
||
|
|
||
|
# Cleaning product area
# NOTE(review): BUCKET is rebased to the bucket root and directory_path set to
# the client folder — presumably the create_*_table() helpers below read these
# module-level globals; verify against 0_Cleaning_and_merge_functions.py.
BUCKET = "bdc2324-data"
directory_path = '1'

# Build the product-side reference tables. The helpers take no arguments, so
# they must rely on the globals above (and on `fs`) for their inputs.
products_theme = create_products_table()
events_theme= create_events_table()
representation_theme = create_representations_table()
# Consolidated product table used as the right side of the merge below.
products_global = uniform_product_df()
|
||
|
|
||
|
# Product-side merge: attach product attributes to every ticket line.
# The inner join keeps only tickets whose product exists in products_global.
df1_products_purchased = df1_ticket_information.merge(
    products_global,
    left_on='product_id',
    right_on='id_products',
    how='inner',
)

# Keep only the variables of interest for the segmentation base.
_columns_of_interest = [
    'ticket_id', 'customer_id', 'purchase_id', 'event_type_id',
    'supplier_name', 'purchase_date', 'type_of_ticket_name', 'amount',
    'children', 'is_full_price', 'name_event_types', 'name_facilities',
    'name_categories', 'name_events', 'name_seasons',
]
df1_products_purchased_reduced = df1_products_purchased[_columns_of_interest]
|
||
|
|
||
|
# Build the KPI tables and merge everything into one row per customer.

# Campaign-level KPIs per customer.
df1_campaigns_kpi = campaigns_kpi_function(
    campaigns_information=df1_campaigns_information
)

# Purchase-behaviour KPIs per customer.
df1_tickets_kpi = tickets_kpi_function(
    tickets_information=df1_products_purchased_reduced
)

# Left join: keep every cleaned customer, with campaign KPIs where available.
df1_customer = df1_customerplus_clean.merge(
    df1_campaigns_kpi, on='customer_id', how='left'
)

# Customers never reached by a campaign get 0 instead of NaN.
_campaign_cols = ['nb_campaigns', 'nb_campaigns_opened']
df1_customer[_campaign_cols] = df1_customer[_campaign_cols].fillna(0)

# Outer join: keep customers that appear on either side (tickets only,
# campaigns only, or both).
df1_customer_product = df1_tickets_kpi.merge(
    df1_customer, on='customer_id', how='outer'
)

# Customers with no purchase history get 0 for every purchase KPI.
_ticket_cols = [
    'nb_tickets', 'nb_purchases', 'total_amount', 'nb_suppliers',
    'vente_internet_max', 'nb_tickets_internet',
]
df1_customer_product[_ticket_cols] = df1_customer_product[_ticket_cols].fillna(0)
|
||
|
|
||
|
## Export

# Write the final segmentation base to the team's output bucket on S3.
BUCKET_OUT = "projet-bdc2324-team1"
FILE_KEY_OUT_S3 = "1_Output/Company 1 - Segmentation base.csv"
FILE_PATH_OUT_S3 = f"{BUCKET_OUT}/{FILE_KEY_OUT_S3}"

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    df1_customer_product.to_csv(file_out, index=False)