Merge pull request 'generalization' (#6) from generalization into main

Reviewed-on: #6
This commit is contained in:
Antoine JOUBREL 2024-02-29 20:26:00 +01:00
commit ccddaf2f12
3 changed files with 88 additions and 3 deletions

View File

@ -122,7 +122,10 @@ def dataset_construction(min_date, end_features_date, max_date, directory_path):
dataset = pd.merge(df_customer_product, y, on = ['customer_id'], how = 'left')
# 0 if there is no purchase
dataset[['y_has_purchased']].fillna(0)
dataset[['y_has_purchased']].fillna(0)
# add id_company prefix to customer_id
dataset['customer_id'] = directory_path + '_' + dataset['customer_id'].astype('str')
return dataset
@ -147,7 +150,7 @@ for company in list_of_comp:
# Exportation
FILE_KEY_OUT_S3 = "dataset_test" + company + ".csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3
FILE_PATH_OUT_S3 = BUCKET_OUT + "/Test_set/" + FILE_KEY_OUT_S3
with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
dataset_test.to_csv(file_out, index = False)
@ -159,7 +162,7 @@ for company in list_of_comp:
max_date = final_date, directory_path = company)
# Export
FILE_KEY_OUT_S3 = "dataset_train" + company + ".csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3
FILE_PATH_OUT_S3 = BUCKET_OUT + "/Train_test/" + FILE_KEY_OUT_S3
with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
dataset_train.to_csv(file_out, index = False)

View File

@ -0,0 +1,68 @@
# Business Data Challenge - Team 1
import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings
from datetime import date, timedelta, datetime
# Create filesystem object pointing at the project's S3-compatible storage.
# The endpoint host comes from the environment (set by the Onyxia/SSP Cloud platform).
S3_ENDPOINT_URL = f"https://{os.environ['AWS_S3_ENDPOINT']}"
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Import KPI construction functions by executing the sibling script in this
# module's globals.
# NOTE(review): exec() of a co-located file injects unknown names into the
# namespace; a regular import would be safer — confirm the file is importable.
# FIX: use a context manager so the file handle is closed (the original
# `exec(open(...).read())` leaked it).
with open('0_KPI_functions.py') as kpi_functions_file:
    exec(kpi_functions_file.read())

# Ignore warnings to keep console output readable.
warnings.filterwarnings('ignore')
# functions
def generate_test_set(type_of_comp):
    """Stack every per-company test CSV of the given company type into one DataFrame.

    Lists all files under the type's ``Test_set`` directory on S3, reads each
    one, and concatenates them row-wise. Returns an empty DataFrame when the
    directory holds no files.
    """
    test_dir = f"projet-bdc2324-team1/Generalization/{type_of_comp}/Test_set"
    frames = []
    for path in fs.ls(test_dir):
        print(path)  # progress trace: one line per file read
        with fs.open(path, mode="rb") as handle:
            frames.append(pd.read_csv(handle, sep=","))
    if not frames:
        return pd.DataFrame()
    return pd.concat(frames, ignore_index=True)
def generate_train_set(type_of_comp):
    """Stack every per-company train CSV of the given company type into one DataFrame.

    Parameters
    ----------
    type_of_comp : str
        Company category (e.g. "sport", "musique", "musee"); selects the
        ``Train_set`` directory on S3.

    Returns
    -------
    pandas.DataFrame
        All train files concatenated row-wise (empty if no files are found).
    """
    file_path_list = fs.ls(f"projet-bdc2324-team1/Generalization/{type_of_comp}/Train_set")
    train_set = pd.DataFrame()
    for file in file_path_list:
        print(file)  # progress trace: one line per file read
        with fs.open(file, mode="rb") as file_in:
            df = pd.read_csv(file_in, sep=",")
        # BUG FIX: the original concatenated onto `test_set` (a copy/paste
        # leftover), silently mixing test rows into the train set at runtime.
        train_set = pd.concat([train_set, df], ignore_index=True)
    return train_set
# Ask the user which company category to aggregate, then derive the S3 prefix.
type_of_comp = input('Choisissez le type de compagnie : sport ? musique ? musee ?')
BUCKET_OUT = f'projet-bdc2324-team1/Generalization/{type_of_comp}/'

# Build the aggregated test and train datasets for that category.
test_set = generate_test_set(type_of_comp)
train_set = generate_train_set(type_of_comp)

# Export both datasets to S3, reporting success for each.
for label, frame in (("Test", test_set), ("Train", train_set)):
    FILE_KEY_OUT_S3 = f"{label}_set.csv"
    FILE_PATH_OUT_S3 = BUCKET_OUT + FILE_KEY_OUT_S3
    with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
        frame.to_csv(file_out, index=False)
    print(f"Exportation dataset {label.lower()} : SUCCESS")

View File

@ -0,0 +1,14 @@
import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings
# Build the S3 filesystem handle from the endpoint configured in the
# environment (set by the hosting platform).
S3_ENDPOINT_URL = f"https://{os.environ['AWS_S3_ENDPOINT']}"
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Silence library warnings to keep script output readable.
warnings.filterwarnings('ignore')