# Business Data Challenge - Team 1
|
|
|
|
import pandas as pd
|
|
import numpy as np
|
|
import os
|
|
import s3fs
|
|
import re
|
|
import warnings
|
|
import time
|
|
|
|
# Create filesystem object
# The S3 endpoint host is injected through the AWS_S3_ENDPOINT environment
# variable; a missing variable raises KeyError immediately (fail fast).
S3_ENDPOINT_URL = f"https://{os.environ['AWS_S3_ENDPOINT']}"
fs = s3fs.S3FileSystem(client_kwargs={"endpoint_url": S3_ENDPOINT_URL})
|
|
|
|
# Import cleaning and merge functions
# NOTE(review): exec() of a sibling script injects its definitions
# (preprocessing_customerplus, preprocessing_target_area, ...) into this
# module's globals; a regular `import utils_cleaning_and_merge` would be
# safer and friendlier to linters — kept as-is to avoid changing behavior.
# Fix: the original `exec(open(path).read())` never closed the file handle;
# the context manager guarantees it is closed.
with open('utils_cleaning_and_merge.py') as utils_file:
    exec(utils_file.read())
|
|
|
|
# Output folder
# Destination S3 bucket for every cleaned dataset exported by this script.
BUCKET_OUT = "projet-bdc2324-team1"

# Ignore warning
# Silences ALL warnings for a cleaner console log.
# NOTE(review): this also hides genuinely useful warnings — presumably meant
# for pandas noise; consider narrowing the filter.
warnings.filterwarnings('ignore')

# Wall-clock reference used to report the total runtime at the end of the run.
start_all = time.time()
|
|
|
|
def export_dataset(df, output_name):
    """Write ``df`` as a CSV file to the team S3 bucket under ``output_name``.

    Relies on the module-level ``fs`` (s3fs filesystem) and ``BUCKET_OUT``
    (destination bucket name) being defined before the first call.
    """
    print('Export of dataset :', output_name)
    destination = BUCKET_OUT + "/" + output_name
    # Text-mode handle: pandas streams the CSV straight to S3, no temp file.
    with fs.open(destination, 'w') as file_out:
        df.to_csv(file_out, index=False)
|
|
|
|
## 1 - Cleaning of the datasets
# Tenant "101" is currently excluded from the run (see the trailing comment
# on the list); the dedicated branch below is kept for when it is re-enabled.
for tenant_id in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14"]:  # , "101"

    # Per-company timer
    start = time.time()

    # All exports for one company share the same destination prefix.
    out_prefix = "0_Input/Company_" + tenant_id + "/"

    # Customerplus area: clean then export
    df1_customerplus_clean = preprocessing_customerplus(directory_path=tenant_id)
    export_dataset(df=df1_customerplus_clean, output_name=out_prefix + "customerplus_cleaned.csv")

    # Target area: clean then export
    df1_target_information = preprocessing_target_area(directory_path=tenant_id)
    export_dataset(df=df1_target_information, output_name=out_prefix + "target_information.csv")

    # Campaign area: clean then export
    df1_campaigns_information = preprocessing_campaigns_area(directory_path=tenant_id)
    export_dataset(df=df1_campaigns_information, output_name=out_prefix + "campaigns_information.csv")

    # Product area: tenant 101 yields two frames, every other tenant one.
    if tenant_id == "101":
        products_purchased_reduced, products_purchased_reduced_1 = uniform_product_df(directory_path=tenant_id)
        export_dataset(df=products_purchased_reduced, output_name=out_prefix + "products_purchased_reduced.csv")
        export_dataset(df=products_purchased_reduced_1, output_name=out_prefix + "products_purchased_reduced_1.csv")
    else:
        products_purchased_reduced = uniform_product_df(directory_path=tenant_id)
        export_dataset(df=products_purchased_reduced, output_name=out_prefix + "products_purchased_reduced.csv")

    # Per-company timing report
    print("Time to run the cleaning of company ", tenant_id, " : ", time.time() - start)
    print("\n ------------------------------------------------------------------ \n --------------------- END CLEANING COMPANY " + tenant_id + " --------------------- \n ------------------------------------------------------------------")

print("Time to run the cleaning of all used datasets : ", time.time() - start_all)