Welfab committed
Commit b48fd8e
1 Parent(s): ff93d5d

Upload 4 files

Files changed (4)
  1. app.py +63 -0
  2. model.joblib +3 -0
  3. requirements.txt +1 -0
  4. train.py +68 -0
app.py ADDED
@@ -0,0 +1,63 @@
+ # Import the libraries
+ import json
+ import subprocess
+ import sys
+ import uuid
+ from pathlib import Path
+
+ import gradio as gr
+ import joblib
+ import pandas as pd
+ from huggingface_hub import CommitScheduler
+
+ # Run the training script placed in the same directory as app.py.
+ # The training script will train and persist a ridge regression
+ # model with the filename 'model.joblib'
+ subprocess.run([sys.executable, "train.py"], check=True)
+
+ # Load the freshly trained model from disk
+ model = joblib.load("model.joblib")
+
+ # Prepare the logging functionality
+ log_file = Path("logs/") / f"data_{uuid.uuid4()}.json"
+ log_folder = log_file.parent
+ log_folder.mkdir(parents=True, exist_ok=True)  # make sure the local log folder exists
+
+ scheduler = CommitScheduler(
+     repo_id="-----------",  # provide a name "insurance-charge-mlops-logs" for the repo_id
+     repo_type="dataset",
+     folder_path=log_folder,
+     path_in_repo="data",
+     every=2,  # push the collected logs every 2 minutes
+ )
+
+
+ # Define the predict function which takes the features, converts them to a
+ # dataframe and makes a prediction using the saved model.
+ # The function runs when 'Submit' is clicked or when an API request is made.
+ def predict_insurance_charge(age, bmi, children, sex, smoker, region):
+     sample = pd.DataFrame([{
+         'age': age,
+         'bmi': bmi,
+         'children': children,
+         'sex': sex,
+         'smoker': smoker,
+         'region': region,
+     }])
+     prediction = model.predict(sample)
+
+     # While the prediction is made, log both the inputs and the output to a log file.
+     # While writing to the log file, hold the commit scheduler's lock to avoid
+     # parallel access.
+     with scheduler.lock:
+         with log_file.open("a") as f:
+             f.write(json.dumps(
+                 {
+                     'age': age,
+                     'bmi': bmi,
+                     'children': children,
+                     'sex': sex,
+                     'smoker': smoker,
+                     'region': region,
+                     'prediction': prediction[0]
+                 }
+             ))
+             f.write("\n")
+
+     return prediction[0]
+
+
+ # Set up UI components for input and output
+ # (dropdown choices assume the categories of the insurance dataset)
+ age_input = gr.Number(label="Age")
+ bmi_input = gr.Number(label="BMI")
+ children_input = gr.Number(label="Children")
+ sex_input = gr.Dropdown(["female", "male"], label="Sex")
+ smoker_input = gr.Dropdown(["yes", "no"], label="Smoker")
+ region_input = gr.Dropdown(
+     ["northeast", "northwest", "southeast", "southwest"], label="Region"
+ )
+ charge_output = gr.Number(label="Predicted insurance charge")
+
+ # Create the gradio interface, make title "HealthyLife Insurance Charge Prediction"
+ demo = gr.Interface(
+     fn=predict_insurance_charge,
+     inputs=[age_input, bmi_input, children_input, sex_input, smoker_input, region_input],
+     outputs=charge_output,
+     title="HealthyLife Insurance Charge Prediction",
+ )
+
+ # Queue incoming requests and launch the app
+ demo.queue()
+ demo.launch(share=False)
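
For reference, a minimal sketch of how the JSON-lines logs pushed by the CommitScheduler could be pulled back and inspected. It assumes the dataset repo was created under the suggested name "insurance-charge-mlops-logs"; the "<username>" prefix is a placeholder for the actual repo_id passed to the scheduler.

# Sketch only: read back the logged predictions from the dataset repo.
from pathlib import Path

import pandas as pd
from huggingface_hub import snapshot_download

# "<username>/insurance-charge-mlops-logs" is a placeholder, not the real repo_id
local_dir = snapshot_download(
    repo_id="<username>/insurance-charge-mlops-logs",
    repo_type="dataset",
)

# Each scheduler push adds JSON-lines files under 'data/'
logs = pd.concat(
    pd.read_json(path, lines=True) for path in Path(local_dir).glob("data/*.json")
)
print(logs.tail())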
model.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a389766ae43175dfcf9d32ddadb5925fe5521ca1b5875de9af11fbe4754616df
+ size 3849
requirements.txt ADDED
@@ -0,0 +1 @@
+ scikit-learn==1.3.2
train.py ADDED
@@ -0,0 +1,68 @@
+ import pandas as pd
+ import joblib
+ from sklearn.preprocessing import StandardScaler, OneHotEncoder
+ from sklearn.compose import ColumnTransformer
+ from sklearn.model_selection import train_test_split, RandomizedSearchCV
+ from sklearn.linear_model import Ridge
+ from sklearn.pipeline import Pipeline
+ from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
+
+ # Read the uploaded file
+ df = pd.read_csv('/insurance (1).csv')
+
+ # Define the target variable
+ y = df['charges']
+
+ # Define the feature columns
+ numerical_columns = ['age', 'bmi', 'children']
+ categorical_columns = ['sex', 'smoker', 'region']
+
+ # Define feature matrix X
+ X = df[numerical_columns + categorical_columns]
+
+ # Split the data
+ Xtrain, Xtest, ytrain, ytest = train_test_split(
+     X, y,
+     test_size=0.2,
+     random_state=42
+ )
+
+ # Create a column transformer for preprocessing
+ preprocessor = ColumnTransformer(
+     transformers=[
+         ('num', StandardScaler(), numerical_columns),  # Standard scaling for numerical columns
+         ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_columns)  # One-hot encoding for categorical columns
+     ]
+ )
+
+ # Create a Ridge regression model pipeline
+ ridge_pipeline = Pipeline([
+     ('preprocessor', preprocessor),
+     ('ridge', Ridge())
+ ])
+
+ # Define a parameter distribution for hyperparameter tuning
+ param_distribution = {
+     'ridge__alpha': [0.001, 0.01, 0.1, 0.5, 1, 5, 10]
+ }
+
+ # Perform hyperparameter tuning using RandomizedSearchCV
+ random_search = RandomizedSearchCV(ridge_pipeline, param_distribution, n_iter=5, cv=5)
+ random_search.fit(Xtrain, ytrain)
+
+ # Model evaluation on the testing set
+ y_pred = random_search.best_estimator_.predict(Xtest)
+
+ mae = mean_absolute_error(ytest, y_pred)
+ mse = mean_squared_error(ytest, y_pred)
+ r2 = r2_score(ytest, y_pred)
+
+ print("The model performance for the testing set")
+ print("--------------------------------------")
+ print('MAE is {}'.format(mae))
+ print('MSE is {}'.format(mse))
+ print('R2 score is {}'.format(r2))
+
+ # Save the best model
+ saved_model_path = "model.joblib"
+ joblib.dump(random_search.best_estimator_, saved_model_path)
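
Since the saved artifact is the full preprocessing-plus-Ridge pipeline, it can be sanity-checked on a single record before app.py serves it. A minimal sketch; the input values below are illustrative and assume the usual categories of the insurance dataset.

# Sketch only: load the persisted pipeline and score one illustrative record.
import joblib
import pandas as pd

pipeline = joblib.load("model.joblib")

# Column names must match the training features; the values here are made up
sample = pd.DataFrame([{
    'age': 35,
    'bmi': 27.5,
    'children': 1,
    'sex': 'female',
    'smoker': 'no',
    'region': 'southeast',
}])

print(pipeline.predict(sample)[0])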