diff --git a/0_2_Dataset_construction.py b/0_2_Dataset_construction.py
index 917dee9..1c410f5 100644
--- a/0_2_Dataset_construction.py
+++ b/0_2_Dataset_construction.py
@@ -66,6 +66,10 @@ def dataset_construction(min_date, end_features_date, max_date, directory_path):
     df_customerplus_clean_0 = display_databases(directory_path, file_name = "customerplus_cleaned")
     df_campaigns_information = display_databases(directory_path, file_name = "campaigns_information", datetime_col = ['opened_at', 'sent_at', 'campaign_sent_at'])
     df_products_purchased_reduced = display_databases(directory_path, file_name = "products_purchased_reduced", datetime_col = ['purchase_date'])
+
+    # if directory_path == "101":
+    #     df_products_purchased_reduced_1 = display_databases(directory_path, file_name = "products_purchased_reduced_1", datetime_col = ['purchase_date'])
+    #     df_products_purchased_reduced = pd.concat([df_products_purchased_reduced, df_products_purchased_reduced_1])
 
     # Filtre de cohérence pour la mise en pratique de notre méthode
     max_date = pd.to_datetime(max_date, utc = True, format = 'ISO8601')
@@ -131,7 +135,7 @@ def dataset_construction(min_date, end_features_date, max_date, directory_path):
 
 ## Exportation
 
-companies = {'musee' : ['1', '2', '3', '4', '101'],
+companies = {'musee' : ['1', '2', '3', '4'], # , '101'
             'sport': ['5', '6', '7', '8', '9'],
             'musique' : ['10', '11', '12', '13', '14']}
 
@@ -142,12 +146,31 @@
 BUCKET_OUT = f'projet-bdc2324-team1/Generalization/{type_of_comp}'
 
 # Create test dataset and train dataset for sport companies
-start_date, end_of_features, final_date = df_coverage_modelization(list_of_comp, coverage_train = 0.7)
+# start_date, end_of_features, final_date = df_coverage_modelization(list_of_comp, coverage_train = 0.7)
+start_date = "2021-05-01"
+end_of_features = "2022-11-01"
+final_date = "2023-11-01"
+
+anonymous_customer = {'1' : '1_1', '2' : '2_12184', '3' : '3_1', '4' : '4_2', '101' : '101_1',
+                      '5' : '5_191835', '6' : '6_591412', '7' : '7_49632', '8' : '8_1942', '9' : '9_19683'}
 
 for company in list_of_comp:
-    dataset_test = dataset_construction(min_date = start_date, end_features_date = end_of_features,
+    dataset = dataset_construction(min_date = start_date, end_features_date = end_of_features,
                                    max_date = final_date, directory_path = company)
+
+    # Remove the anonymous customer for this company
+    dataset = dataset[dataset['customer_id'] != anonymous_customer[company]]
+    # Train/test split: seed the global RNG so the shuffle below is reproducible
+    np.random.seed(42)
+
+    # Shuffle, then cut at 70% train / 30% test
+    split_ratio = 0.7
+    split_index = int(len(dataset) * split_ratio)
+    dataset = dataset.sample(frac=1).reset_index(drop=True)
+    dataset_train = dataset.iloc[:split_index]
+    dataset_test = dataset.iloc[split_index:]
+
 
     # Exportation
     FILE_KEY_OUT_S3 = "dataset_test" + company + ".csv"
     FILE_PATH_OUT_S3 = BUCKET_OUT + "/Test_set/" + FILE_KEY_OUT_S3
@@ -157,12 +180,11 @@ for company in list_of_comp:
     print("Exportation dataset test : SUCCESS")
 
 
-# Dataset train
-    dataset_train = dataset_construction(min_date = start_date, end_features_date = end_of_features,
-                                   max_date = final_date, directory_path = company)
+    # Dataset train
+
     # Export
     FILE_KEY_OUT_S3 = "dataset_train" + company + ".csv"
-    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Train_test/" + FILE_KEY_OUT_S3
+    FILE_PATH_OUT_S3 = BUCKET_OUT + "/Train_set/" + FILE_KEY_OUT_S3
 
     with fs.open(FILE_PATH_OUT_S3, 'w') as file_out:
         dataset_train.to_csv(file_out, index = False)
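
Note: inside the loop, the patch replaces the df_coverage_modelization call with fixed feature/target dates and a manual 70/30 shuffle split on the constructed dataset. A minimal standalone sketch of that filter-and-split step follows; it assumes customer_id holds "<company>_<n>" strings, matching the quoted values in the anonymous_customer mapping, and the toy frame and all values in it are illustrative, not project data:

    import numpy as np
    import pandas as pd

    # Toy stand-in for the output of dataset_construction() for company '1'
    dataset = pd.DataFrame({
        'customer_id': ['1_1', '1_2', '1_3', '1_4', '1_5'],
        'nb_purchases': [0, 3, 1, 0, 2],
    })

    # Drop the anonymous customer for this company
    dataset = dataset[dataset['customer_id'] != '1_1']

    # Seed numpy's global RNG; DataFrame.sample uses it when random_state is None
    np.random.seed(42)

    # Shuffle, then cut at 70% for train / 30% for test
    split_index = int(len(dataset) * 0.7)
    dataset = dataset.sample(frac=1).reset_index(drop=True)
    dataset_train = dataset.iloc[:split_index]
    dataset_test = dataset.iloc[split_index:]
    print(len(dataset_train), len(dataset_test))  # 2 2 on this 4-row frame

The same cut could be done in one call with sklearn.model_selection.train_test_split(dataset, train_size=0.7, random_state=42), at the cost of an extra dependency; the manual iloc split keeps the script pandas-only.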