OmarN121 committed
Commit
6c203df
·
1 Parent(s): 30a4da0

Upload app.py

Files changed (1)
  1. app.py +319 -0
app.py ADDED
@@ -0,0 +1,319 @@
# -*- coding: utf-8 -*-
"""Gradio_Demo.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1eXu8deuh8Jl5Mmm0DxX0XlR4-2E6IN0l
"""

!pip install gradio

!git clone https://github.com/omnabill/NLP_Task

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

# text-handling libraries
import nltk
import string
import re

from time import time

from gensim.parsing.preprocessing import STOPWORDS
from gensim.utils import simple_preprocess

from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn import svm
import gradio as gr

import warnings
warnings.filterwarnings('ignore')

"""### First, let's read our data"""

df = pd.read_csv('/content/NLP_Task/flask-app/Job titles and industries.csv')

df.head()

df.tail()

"""### Let's explore the data to understand it"""

df.info()

df.describe()

df_count = df['industry'].value_counts().rename_axis('Category').reset_index(name='Count')

df_count.head()

plt.figure(figsize=(10,7))
sns.barplot(x=df_count.Category, y=df_count.Count/len(df))
plt.show()

df_count.Count/len(df)

"""One class, "Accountancy", makes up less than 5% of the data, so the dataset is imbalanced in a way that can disturb our model.

This is a problem that should be dealt with before passing the data to a model; below we do so by giving each classifier balanced sample weights.

### Let's first complete the EDA

#### Check for duplicated rows
"""

df[df.duplicated()==True]

"""There are many duplicate rows; let's handle them by keeping a single copy of each duplicated job title."""

df.drop_duplicates(subset="job title", inplace=True)

df.info()

df.groupby('industry').count().plot.bar(figsize=(10,6))

df_count = df['industry'].value_counts().rename_axis('Category').reset_index(name='Count')

plt.figure(figsize=(10,7))
sns.barplot(x=df_count.Category, y=df_count.Count/len(df))
plt.show()

df_count.Count/len(df)

"""### Text preprocessing

We clean the job titles through:

1- Removing stop words like 'the', 'and', 'of', etc.

2- Spelling out numbers as words

3- Removing punctuation, so we don't keep different forms of the same word (otherwise "been.", "been," and "been!" would each be treated as a separate token)

4- Lowercasing the text, to reduce the size of the vocabulary

5- Removing extra whitespace
"""

import inflect
p = inflect.engine()

def Text_Cleaner(text):
    """
    text: a string
    return: a cleaned version of the string
    """
    result = ""
    for token in text.split(' '):
        if token not in STOPWORDS and len(token) >= 1:
            if token.isdigit():
                temp = p.number_to_words(token)  # spell out numbers as words
                result += temp + " "
            # skip salary-like tokens such as "£30000"
            elif not re.match(r'£[0-9]+', token):
                result += token.lower() + " "
    translator = str.maketrans('', '', string.punctuation)  # strip punctuation
    return " ".join(result.translate(translator).split())


df['job title'] = df['job title'].map(Text_Cleaner)
df['job title'].head(20)

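"""A quick sanity check of the cleaner on a made-up job title (the example string is only illustrative):"""

# 'of' is dropped as a stop word, '3' is spelled out, and the rest is lowercased
print(Text_Cleaner('Head of Accounts Level 3'))  # should print: head accounts level three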

"""### Dealing with the imbalance and feeding the data to the models

We train more than one model for text classification:

1- SGD classifier, with hyperparameter tuning

2- Multinomial Naive Bayes

3- Logistic Regression

4- SVM (support vector machine classifier)

### Hyperparameter tuning for SGDClassifier
"""

X = df['job title']
y = df['industry']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
sample_weight = compute_sample_weight("balanced", y_train)

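"""The "balanced" mode gives each training example a weight inversely proportional to its class frequency, so rare classes such as Accountancy count more during fitting. As a quick sketch of what that produces, let's print one weight per class:"""

# sample_weight is ordered like y_train, so any example of a class shows its weight
for cls in np.unique(y_train):
    w = sample_weight[(y_train == cls).values][0]
    print('%s: weight %.2f' % (cls, w))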

pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])

# uncommenting more parameters gives better exploring power but
# increases processing time combinatorially
parameters = {
    'vect__max_df': (0.5, 0.75, 1.0),
    'vect__max_features': (None, 5000, 10000, 50000),
    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
    # 'tfidf__use_idf': (True, False),
    # 'tfidf__norm': ('l1', 'l2'),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
    'clf__loss': ['log', 'hinge'],
    'clf__max_iter': (10, 50, 80),
}

# find the best parameters for both the feature extraction and the
# classifier (when run as a script, multiprocessing requires the fork
# to happen in a __main__-protected block)
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)

print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
print(parameters)
t0 = time()
grid_search.fit(X_train, y_train)
print("done in %0.3fs" % (time() - t0))
print()

print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
    print("\t%s: %r" % (param_name, best_parameters[param_name]))

# final SGD pipeline, using the best parameters found above
make_pipline = Pipeline([('vect', CountVectorizer(max_df=0.5, max_features=50000, ngram_range=(1, 1))),
                         ('tfidf', TfidfTransformer()),
                         ('SGD', SGDClassifier(loss='log', penalty='l2', alpha=1e-05, random_state=42, max_iter=80)),])

make_pipline.fit(X_train, y_train, **{'SGD__sample_weight': sample_weight})

y_pred = make_pipline.predict(X_test)
print('accuracy = {}'.format(accuracy_score(y_pred, y_test)))

"""### Naive Bayes"""

make_pipline = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()),
                         ('clf', MultinomialNB()),])

make_pipline.fit(X_train, y_train, **{'clf__sample_weight': sample_weight})

y_pred = make_pipline.predict(X_test)
print('accuracy %s' % accuracy_score(y_pred, y_test))

y_pred

"""### Logistic Regression"""

make_pipline = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()),
                         ('log', LogisticRegression(n_jobs=1, C=1e5)),])

make_pipline.fit(X_train, y_train, **{'log__sample_weight': sample_weight})

y_pred = make_pipline.predict(X_test)
print('accuracy %s' % accuracy_score(y_pred, y_test))

"""### Support Vector Machine"""

make_pipline = Pipeline([('vect', CountVectorizer()),
                         ('tfidf', TfidfTransformer()),
                         ('clf', svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto', probability=True)),])

make_pipline.fit(X_train, y_train, **{'clf__sample_weight': sample_weight})

y_pred = make_pipline.predict(X_test)
print('accuracy %s' % accuracy_score(y_pred, y_test))

X_test.to_csv('out.csv', index=False)

"""### Confusion matrix for the final model used (SVM)"""

# class labels in sorted order, matching scikit-learn's default label ordering
a = np.sort(df['industry'].unique())

mat = confusion_matrix(y_test, y_pred, labels=a)
plt.figure(figsize=(10,8))
ax = sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
                 xticklabels=a, yticklabels=a)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)

plt.title('Confusion Matrix')
plt.xlabel('true label')
plt.ylabel('predicted label');

"""IT is the class with the most misclassified labels, while Marketing and Education appear to have the fewest."""

from sklearn import metrics
print(metrics.classification_report(y_test, y_pred, target_names=a))

"""The metrics confirm what the confusion matrix suggested: Marketing and Education have the highest precision, meaning most examples predicted as Marketing or Education really belong to those classes, while Accountancy has the lowest true positive rate."""

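"""As a cross-check, a small sketch can derive per-class precision and recall directly from the confusion matrix (in `mat`, rows are true labels and columns are predictions):"""

tp = np.diag(mat)                 # correctly classified examples per class
precision = tp / mat.sum(axis=0)  # TP / (TP + FP), per predicted class
recall = tp / mat.sum(axis=1)     # TP / (TP + FN), per true class
for cls, p_, r_ in zip(a, precision, recall):
    print('%s: precision=%.2f, recall=%.2f' % (cls, p_, r_))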

metrics.roc_auc_score(y_test, make_pipline.predict_proba(X_test), multi_class='ovr')

"""The Area Under the Receiver Operating Characteristic curve (AUC) ranges from 0.5 to 1, where 1 denotes outstanding performance and 0.5 the opposite; a score around 0.97 indicates that our model performs very well.
"""
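"""The 'ovr' score averages one AUC per industry. As a sketch of what it aggregates, let's score each class against the rest:"""

from sklearn.preprocessing import label_binarize

classes = make_pipline.classes_
proba = make_pipline.predict_proba(X_test)
y_bin = label_binarize(y_test, classes=classes)  # one binary column per class
for i, cls in enumerate(classes):
    print(cls, metrics.roc_auc_score(y_bin[:, i], proba[:, i]))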

"""### Deploy the model as a Gradio API service"""

def Job_Class(text):
    """Predict the industry for a raw job-title string."""
    text = Text_Cleaner(text)  # apply the same cleaning used on the training data
    prediction = make_pipline.predict([text])
    return prediction[0]

# set up the interface: a small textbox in, the predicted industry out
iface = gr.Interface(
    fn=Job_Class,
    inputs=gr.inputs.Textbox(lines=2, placeholder="Write the job description here ..."),
    outputs="text")
iface.launch(share=True)

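"""A practical sketch (assuming joblib is available, as it is alongside scikit-learn; the file name is arbitrary): persist the fitted pipeline so the app can reload it later without retraining."""

import joblib

joblib.dump(make_pipline, 'job_classifier.joblib')    # save the fitted pipeline
# make_pipline = joblib.load('job_classifier.joblib') # reload it in a fresh session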

# prediction = make_pipline.predict(['senior technical support engineer'])

# type(prediction[0])

!pip install ipython

!pip install nbconvert

!jupyter nbconvert --to script Gradio_Demo.ipynb