generalization #6
@@ -122,7 +122,10 @@ def dataset_construction(min_date, end_features_date, max_date, directory_path):
     dataset = pd.merge(df_customer_product, y, on = ['customer_id'], how = 'left')
 
     # 0 if there is no purchase
     dataset['y_has_purchased'] = dataset['y_has_purchased'].fillna(0)
 
+    # add id_company prefix to customer_id
+    dataset['customer_id'] = directory_path + '_' + dataset['customer_id'].astype('str')
+
     return dataset
 
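For context, a minimal sketch (with hypothetical toy data) of what the added prefixing does: once datasets from several companies are concatenated downstream, raw per-company IDs would risk colliding, so the company identifier passed as directory_path is prepended to make customer_id globally unique. Note also that pandas' fillna is not in-place, so its result has to be assigned back:

    import pandas as pd

    # hypothetical toy data standing in for the merged dataset
    dataset = pd.DataFrame({'customer_id': [101, 102], 'y_has_purchased': [1.0, None]})
    directory_path = '1'  # company identifier used as the prefix

    dataset['y_has_purchased'] = dataset['y_has_purchased'].fillna(0)
    dataset['customer_id'] = directory_path + '_' + dataset['customer_id'].astype('str')
    print(dataset['customer_id'].tolist())  # ['1_101', '1_102']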
@@ -147,7 +150,7 @@ for company in list_of_comp:
 
     # Exportation
     FILE_KEY_OUT_S3 = "dataset_test" + company + ".csv"
-    FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3
+    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Test_set/" + FILE_KEY_OUT_S3
 
     with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
         dataset_test.to_csv(file_out, index = False)
@@ -159,7 +162,7 @@ for company in list_of_comp:
                                          max_date = final_date, directory_path = company)
     # Export
     FILE_KEY_OUT_S3 = "dataset_train" + company + ".csv"
-    FILE_PATH_OUT_S3 = BUCKET_OUT + "/" + FILE_KEY_OUT_S3
+    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Train_set/" + FILE_KEY_OUT_S3
 
     with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
         dataset_train.to_csv(file_out, index = False)
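Both export hunks move the per-company files into dedicated Test_set/ and Train_set/ subfolders; that layout is what lets the aggregation script below collect them with fs.ls. Assuming type_of_comp = 'sport' and a company id of '1' (hypothetical values), the bucket would then contain:

    projet-bdc2324-team1/Generalization/sport/Test_set/dataset_test1.csv
    projet-bdc2324-team1/Generalization/sport/Train_set/dataset_train1.csv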
0_3_General_modelization_dataset.py (new file, 68 lines)
@@ -0,0 +1,68 @@
# Business Data Challenge - Team 1

import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings
from datetime import date, timedelta, datetime

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Import KPI construction functions
exec(open('0_KPI_functions.py').read())

# Ignore warnings
warnings.filterwarnings('ignore')

# Functions
def generate_test_set(type_of_comp):
    # Concatenate every per-company test CSV for the chosen company type
    file_path_list = fs.ls(f"projet-bdc2324-team1/Generalization/{type_of_comp}/Test_set")
    test_set = pd.DataFrame()
    for file in file_path_list:
        print(file)
        with fs.open(file, mode="rb") as file_in:
            df = pd.read_csv(file_in, sep=",")
        test_set = pd.concat([test_set, df], ignore_index = True)
    return test_set


def generate_train_set(type_of_comp):
    # Concatenate every per-company train CSV for the chosen company type
    file_path_list = fs.ls(f"projet-bdc2324-team1/Generalization/{type_of_comp}/Train_set")
    train_set = pd.DataFrame()
    for file in file_path_list:
        print(file)
        with fs.open(file, mode="rb") as file_in:
            df = pd.read_csv(file_in, sep=",")
        train_set = pd.concat([train_set, df], ignore_index = True)
    return train_set


type_of_comp = input('Choose the company type: sport ? musique ? musee ? ')
BUCKET_OUT = f'projet-bdc2324-team1/Generalization/{type_of_comp}/'

# Build the aggregated test and train datasets
test_set = generate_test_set(type_of_comp)
train_set = generate_train_set(type_of_comp)

# Export test set
FILE_KEY_OUT_S3 = "Test_set.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    test_set.to_csv(file_out, index = False)

print("Test set export: SUCCESS")

# Export train set
FILE_KEY_OUT_S3 = "Train_set.csv"
FILE_PATH_OUT_S3 = BUCKET_OUT + FILE_KEY_OUT_S3

with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
    train_set.to_csv(file_out, index = False)

print("Train set export: SUCCESS")
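The two loaders differ only in the subfolder they read, and that duplication invites copy-paste slips between test_set and train_set. A possible refactor (a sketch, not part of this PR) collapses them into one parametrized function, assuming the fs and pd objects defined at the top of the file are in scope:

    def generate_set(type_of_comp, which):
        # Concatenate all per-company CSVs from either Test_set/ or Train_set/
        assert which in ('Test_set', 'Train_set')
        file_path_list = fs.ls(f"projet-bdc2324-team1/Generalization/{type_of_comp}/{which}")
        parts = []
        for file in file_path_list:
            with fs.open(file, mode="rb") as file_in:
                parts.append(pd.read_csv(file_in, sep=","))
        return pd.concat(parts, ignore_index=True) if parts else pd.DataFrame()

    test_set = generate_set(type_of_comp, 'Test_set')
    train_set = generate_set(type_of_comp, 'Train_set')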
Sport/Descriptive_statistics/generate_dataset_DS.py (new file, 14 lines)
@@ -0,0 +1,14 @@
import pandas as pd
import numpy as np
import os
import s3fs
import re
import warnings

# Create filesystem object
S3_ENDPOINT_URL = "https://" + os.environ["AWS_S3_ENDPOINT"]
fs = s3fs.S3FileSystem(client_kwargs={'endpoint_url': S3_ENDPOINT_URL})

# Ignore warnings
warnings.filterwarnings('ignore')
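The new file stops at this scaffolding; presumably the descriptive-statistics code that follows will load the aggregated sets produced by 0_3_General_modelization_dataset.py. A hypothetical continuation, reusing the fs object just created, with the path assumed from the export step above:

    # Load the aggregated train set for descriptive statistics (path assumed)
    with fs.open("projet-bdc2324-team1/Generalization/sport/Train_set.csv", mode="rb") as file_in:
        train_set = pd.read_csv(file_in)
    print(train_set.describe())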