diff --git a/Loan_prediction.ipynb b/Loan_prediction/Loan_prediction.ipynb
similarity index 100%
rename from Loan_prediction.ipynb
rename to Loan_prediction/Loan_prediction.ipynb
diff --git a/sepal_petal detection/app.py b/sepal_petal detection/app.py
new file mode 100644
index 0000000..417da12
--- /dev/null
+++ b/sepal_petal detection/app.py
@@ -0,0 +1,36 @@
+import numpy as np
+import pandas as pd
+from flask import Flask, request, jsonify, render_template
+import pickle
+
+app = Flask(__name__)
+
+# Load the model trained and pickled by model.py
+model = pickle.load(open("model.pkl", "rb"))
+
+@app.route("/")
+def home():
+    return render_template("index.html")
+
+# Form-based endpoint kept for reference; the JSON endpoint below is used instead.
+# @app.route("/predict", methods=["POST"])
+# def predict():
+#     float_features = [float(x) for x in request.form.values()]
+#     features = [np.array(float_features)]
+#     prediction = model.predict(features)
+
+#     return render_template("index.html", prediction_text="The flower is {}".format(prediction))
+
+
+@app.route("/predict", methods=["POST"])
+def predict():
+    # Expects JSON records keyed by the four feature columns used in training:
+    # Sepal_Length, Sepal_Width, Petal_Length, Petal_Width
+    json_ = request.json
+    query_df = pd.DataFrame(json_)
+    prediction = model.predict(query_df)
+    return jsonify({"prediction": prediction.tolist()})
+
+
+if __name__ == "__main__":
+    app.run(debug=True)
\ No newline at end of file
diff --git a/sepal_petal detection/iris.csv b/sepal_petal detection/iris.csv
new file mode 100644
index 0000000..71a3bf2
--- /dev/null
+++ b/sepal_petal detection/iris.csv
@@ -0,0 +1,151 @@
+Sepal_Length,Sepal_Width,Petal_Length,Petal_Width,Class
+5.1,3.5,1.4,0.2,Setosa
+4.9,3,1.4,0.2,Setosa
+4.7,3.2,1.3,0.2,Setosa
+4.6,3.1,1.5,0.2,Setosa
+5,3.6,1.4,0.2,Setosa
+5.4,3.9,1.7,0.4,Setosa
+4.6,3.4,1.4,0.3,Setosa
+5,3.4,1.5,0.2,Setosa
+4.4,2.9,1.4,0.2,Setosa
+4.9,3.1,1.5,0.1,Setosa
+5.4,3.7,1.5,0.2,Setosa
+4.8,3.4,1.6,0.2,Setosa
+4.8,3,1.4,0.1,Setosa
+4.3,3,1.1,0.1,Setosa
+5.8,4,1.2,0.2,Setosa
+5.7,4.4,1.5,0.4,Setosa
+5.4,3.9,1.3,0.4,Setosa
+5.1,3.5,1.4,0.3,Setosa
+5.7,3.8,1.7,0.3,Setosa
+5.1,3.8,1.5,0.3,Setosa
+5.4,3.4,1.7,0.2,Setosa
+5.1,3.7,1.5,0.4,Setosa
+4.6,3.6,1,0.2,Setosa
+5.1,3.3,1.7,0.5,Setosa
+4.8,3.4,1.9,0.2,Setosa
+5,3,1.6,0.2,Setosa
+5,3.4,1.6,0.4,Setosa
+5.2,3.5,1.5,0.2,Setosa
+5.2,3.4,1.4,0.2,Setosa
+4.7,3.2,1.6,0.2,Setosa
+4.8,3.1,1.6,0.2,Setosa
+5.4,3.4,1.5,0.4,Setosa
+5.2,4.1,1.5,0.1,Setosa
+5.5,4.2,1.4,0.2,Setosa
+4.9,3.1,1.5,0.2,Setosa
+5,3.2,1.2,0.2,Setosa
+5.5,3.5,1.3,0.2,Setosa
+4.9,3.6,1.4,0.1,Setosa
+4.4,3,1.3,0.2,Setosa
+5.1,3.4,1.5,0.2,Setosa
+5,3.5,1.3,0.3,Setosa
+4.5,2.3,1.3,0.3,Setosa
+4.4,3.2,1.3,0.2,Setosa
+5,3.5,1.6,0.6,Setosa
+5.1,3.8,1.9,0.4,Setosa
+4.8,3,1.4,0.3,Setosa
+5.1,3.8,1.6,0.2,Setosa
+4.6,3.2,1.4,0.2,Setosa
+5.3,3.7,1.5,0.2,Setosa
+5,3.3,1.4,0.2,Setosa
+7,3.2,4.7,1.4,Versicolor
+6.4,3.2,4.5,1.5,Versicolor
+6.9,3.1,4.9,1.5,Versicolor
+5.5,2.3,4,1.3,Versicolor
+6.5,2.8,4.6,1.5,Versicolor
+5.7,2.8,4.5,1.3,Versicolor
+6.3,3.3,4.7,1.6,Versicolor
+4.9,2.4,3.3,1,Versicolor
+6.6,2.9,4.6,1.3,Versicolor
+5.2,2.7,3.9,1.4,Versicolor
+5,2,3.5,1,Versicolor
+5.9,3,4.2,1.5,Versicolor
+6,2.2,4,1,Versicolor
+6.1,2.9,4.7,1.4,Versicolor
+5.6,2.9,3.6,1.3,Versicolor
+6.7,3.1,4.4,1.4,Versicolor
+5.6,3,4.5,1.5,Versicolor
+5.8,2.7,4.1,1,Versicolor
+6.2,2.2,4.5,1.5,Versicolor
+5.6,2.5,3.9,1.1,Versicolor
+5.9,3.2,4.8,1.8,Versicolor
+6.1,2.8,4,1.3,Versicolor
+6.3,2.5,4.9,1.5,Versicolor
+6.1,2.8,4.7,1.2,Versicolor
+6.4,2.9,4.3,1.3,Versicolor
+6.6,3,4.4,1.4,Versicolor
+6.8,2.8,4.8,1.4,Versicolor
+6.7,3,5,1.7,Versicolor
+6,2.9,4.5,1.5,Versicolor
+5.7,2.6,3.5,1,Versicolor
+5.5,2.4,3.8,1.1,Versicolor
+5.5,2.4,3.7,1,Versicolor
+5.8,2.7,3.9,1.2,Versicolor
+6,2.7,5.1,1.6,Versicolor
+5.4,3,4.5,1.5,Versicolor
+6,3.4,4.5,1.6,Versicolor
+6.7,3.1,4.7,1.5,Versicolor
+6.3,2.3,4.4,1.3,Versicolor
+5.6,3,4.1,1.3,Versicolor
+5.5,2.5,4,1.3,Versicolor
+5.5,2.6,4.4,1.2,Versicolor
+6.1,3,4.6,1.4,Versicolor
+5.8,2.6,4,1.2,Versicolor
+5,2.3,3.3,1,Versicolor
+5.6,2.7,4.2,1.3,Versicolor
+5.7,3,4.2,1.2,Versicolor
+5.7,2.9,4.2,1.3,Versicolor
+6.2,2.9,4.3,1.3,Versicolor
+5.1,2.5,3,1.1,Versicolor
+5.7,2.8,4.1,1.3,Versicolor
+6.3,3.3,6,2.5,Virginica
+5.8,2.7,5.1,1.9,Virginica
+7.1,3,5.9,2.1,Virginica
+6.3,2.9,5.6,1.8,Virginica
+6.5,3,5.8,2.2,Virginica
+7.6,3,6.6,2.1,Virginica
+4.9,2.5,4.5,1.7,Virginica
+7.3,2.9,6.3,1.8,Virginica
+6.7,2.5,5.8,1.8,Virginica
+7.2,3.6,6.1,2.5,Virginica
+6.5,3.2,5.1,2,Virginica
+6.4,2.7,5.3,1.9,Virginica
+6.8,3,5.5,2.1,Virginica
+5.7,2.5,5,2,Virginica
+5.8,2.8,5.1,2.4,Virginica
+6.4,3.2,5.3,2.3,Virginica
+6.5,3,5.5,1.8,Virginica
+7.7,3.8,6.7,2.2,Virginica
+7.7,2.6,6.9,2.3,Virginica
+6,2.2,5,1.5,Virginica
+6.9,3.2,5.7,2.3,Virginica
+5.6,2.8,4.9,2,Virginica
+7.7,2.8,6.7,2,Virginica
+6.3,2.7,4.9,1.8,Virginica
+6.7,3.3,5.7,2.1,Virginica
+7.2,3.2,6,1.8,Virginica
+6.2,2.8,4.8,1.8,Virginica
+6.1,3,4.9,1.8,Virginica
+6.4,2.8,5.6,2.1,Virginica
+7.2,3,5.8,1.6,Virginica
+7.4,2.8,6.1,1.9,Virginica
+7.9,3.8,6.4,2,Virginica
+6.4,2.8,5.6,2.2,Virginica
+6.3,2.8,5.1,1.5,Virginica
+6.1,2.6,5.6,1.4,Virginica
+7.7,3,6.1,2.3,Virginica
+6.3,3.4,5.6,2.4,Virginica
+6.4,3.1,5.5,1.8,Virginica
+6,3,4.8,1.8,Virginica
+6.9,3.1,5.4,2.1,Virginica
+6.7,3.1,5.6,2.4,Virginica
+6.9,3.1,5.1,2.3,Virginica
+5.8,2.7,5.1,1.9,Virginica
+6.8,3.2,5.9,2.3,Virginica
+6.7,3.3,5.7,2.5,Virginica
+6.7,3,5.2,2.3,Virginica
+6.3,2.5,5,1.9,Virginica
+6.5,3,5.2,2,Virginica
+6.2,3.4,5.4,2.3,Virginica
+5.9,3,5.1,1.8,Virginica
diff --git a/sepal_petal detection/model.pkl b/sepal_petal detection/model.pkl
new file mode 100644
index 0000000..73b643f
Binary files /dev/null and b/sepal_petal detection/model.pkl differ
diff --git a/sepal_petal detection/model.py b/sepal_petal detection/model.py
new file mode 100644
index 0000000..2768835
--- /dev/null
+++ b/sepal_petal detection/model.py
@@ -0,0 +1,31 @@
+import pandas as pd
+from sklearn.preprocessing import StandardScaler
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.model_selection import train_test_split
+from sklearn.pipeline import make_pipeline
+import pickle
+
+# Load the csv file
+df = pd.read_csv("iris.csv")
+
+print(df.head())
+
+# Select independent and dependent variables
+X = df[["Sepal_Length", "Sepal_Width", "Petal_Length", "Petal_Width"]]
+y = df["Class"]
+
+# Split the dataset into train and test sets
+X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=50)
+
+# Bundle feature scaling and the classifier into a single pipeline so the scaler
+# fitted on the training data is also applied to incoming data at prediction time
+classifier = make_pipeline(StandardScaler(), RandomForestClassifier())
+
+# Fit the model
+classifier.fit(X_train, y_train)
+
+# Report accuracy on the held-out test set
+print("Test accuracy:", classifier.score(X_test, y_test))
+
+# Pickle the fitted pipeline so app.py can load and serve it
+pickle.dump(classifier, open("model.pkl", "wb"))
\ No newline at end of file
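The /predict route in app.py builds a DataFrame straight from the request body, so a client should POST a list of JSON records keyed by the four feature columns from iris.csv. A minimal client sketch (hypothetical, not part of the diff above; assumes the Flask app is running locally on the default port 5000):

import requests

# One record per flower, using the same column names as iris.csv
sample = [
    {"Sepal_Length": 5.1, "Sepal_Width": 3.5, "Petal_Length": 1.4, "Petal_Width": 0.2}
]

response = requests.post("http://127.0.0.1:5000/predict", json=sample)
print(response.json())  # e.g. {"prediction": ["Setosa"]}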