import os
import time
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
import keras
import keras.backend as K
#import keras.applications as kapp
#For Analysis of data
import pandas as pd
import numpy as np
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from scipy import stats
#For visualization of our data
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
#%matplotlib inline
import gc
#import os
import string
color = sns.color_palette()
#%matplotlib inline
from plotly import tools
import plotly.offline as py
#py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
#For Preprocessing and modeling of textual data
from sklearn import model_selection, preprocessing, metrics, ensemble, naive_bayes, linear_model
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import lightgbm as lgb
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
## importing the data
df_train = pd.read_csv("drugsComTrain_raw.csv", parse_dates=["date"])
df_test = pd.read_csv("drugsComTest_raw.csv", parse_dates=["date"])
df_all = pd.concat([df_train,df_test])
## word Clouds
#https://www.kaggle.com/sudalairajkumar/simple-exploration-notebook-qiqc kernel
from wordcloud import WordCloud, STOPWORDS
# Thanks : https://www.kaggle.com/aashita/word-clouds-of-various-shapes ##
def plot_wordcloud(text, mask=None, max_words=200, max_font_size=100, figure_size=(24.0, 16.0),
                   title=None, title_size=40, image_color=False):
    stopwords = set(STOPWORDS)
    more_stopwords = {'one', 'br', 'Po', 'th', 'sayi', 'fo', 'Unknown'}
    stopwords = stopwords.union(more_stopwords)
    wordcloud = WordCloud(background_color='white',
                          stopwords=stopwords,
                          max_words=max_words,
                          max_font_size=max_font_size,
                          random_state=42,
                          width=800,
                          height=400,
                          mask=mask)
    wordcloud.generate(str(text))
    # plt.figure(figsize=figure_size)
    # if image_color:
    #     image_colors = ImageColorGenerator(mask)
    #     plt.imshow(wordcloud.recolor(color_func=image_colors), interpolation="bilinear")
    #     plt.title(title, fontdict={'size': title_size, 'verticalalignment': 'bottom'})
    # else:
    #     plt.imshow(wordcloud)
    #     plt.title(title, fontdict={'size': title_size, 'color': 'black', 'verticalalignment': 'bottom'})
    # plt.axis('off')
    # plt.tight_layout()
#plot_wordcloud(df_all["review"], title="Word Cloud of review")
## Splitting ratings into positive (6-10) and negative (1-5) groups
from collections import defaultdict
df_all_6_10 = df_all[df_all["rating"]>5]
df_all_1_5 = df_all[df_all["rating"]<6]
## N-gram helper function
def generate_ngrams(text, n_gram=1):
    tokens = [token for token in text.lower().split(" ") if token != "" if token not in STOPWORDS]
    # zip the shifted token lists together to create the n-grams
    ngrams = zip(*[tokens[i:] for i in range(n_gram)])
    return [" ".join(ngram) for ngram in ngrams]
## Creating 4-gram frequency tables, one for negative and one for positive reviews
freq_dict = defaultdict(int)
for sent in df_all_1_5["review"]:
    for word in generate_ngrams(sent, 4):
        freq_dict[word] += 1
fd_sorted = pd.DataFrame(sorted(freq_dict.items(), key=lambda x: x[1])[::-1])
fd_sorted.columns = ["word", "wordcount"]
freq_dict = defaultdict(int)
for sent in df_all_6_10["review"]:
    for word in generate_ngrams(sent, 4):
        freq_dict[word] += 1
fd_sorted = pd.DataFrame(sorted(freq_dict.items(), key=lambda x: x[1])[::-1])
fd_sorted.columns = ["word", "wordcount"]
## Checking for missing values (rows with missing values are dropped below)
percent = (df_all.isnull().sum()).sort_values(ascending=False)
print(percent)
## data Preprocessing
df_train = df_train.dropna(axis=0)
df_test = df_test.dropna(axis=0)
df_all = pd.concat([df_train,df_test]).reset_index()
del df_all['index']
#percent = (df_all.isnull().sum()).sort_values(ascending=False)
#percent.plot(kind="bar", figsize = (14,6), fontsize = 10, color='green')
#plt.xlabel("Columns", fontsize = 20)
#plt.ylabel("", fontsize = 20)
#plt.title("Total Missing Value ", fontsize = 20)
### Preprocessing the condition texts
all_list = set(df_all.index)
span_list = []
for i, j in enumerate(df_all['condition']):
    if '</span>' in j:
        span_list.append(i)
new_idx = all_list.difference(set(span_list))
df_all = df_all.iloc[list(new_idx)].reset_index()
del df_all['index']
df_condition = df_all.groupby(['condition'])['drugName'].nunique().sort_values(ascending=False)
df_condition = pd.DataFrame(df_condition).reset_index()
#df_condition.tail(20)
df_condition_1 = df_condition[df_condition['drugName']==1].reset_index()
#df_condition_1['condition'][0:10]
all_list = set(df_all.index)
condition_list = []
for i, j in enumerate(df_all['condition']):
    for c in list(df_condition_1['condition']):
        if j == c:
            condition_list.append(i)
new_idx = all_list.difference(set(condition_list))
df_all = df_all.iloc[list(new_idx)].reset_index()
del df_all['index']
### Preprocessing the Review texts
from bs4 import BeautifulSoup
import nltk
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
nltk.download('stopwords')
stops = set(stopwords.words('english'))
#stops
stopwords = set(STOPWORDS)
more_stopwords = {'one', 'br', 'Po', 'th', 'sayi', 'fo', 'Unknown'}
stopwords = stopwords.union(more_stopwords)
not_stop = ["aren't","couldn't","didn't","doesn't","don't","hadn't","hasn't","haven't","isn't","mightn't","mustn't","needn't","no","nor","not","shan't","shouldn't","wasn't","weren't","wouldn't"]
for i in not_stop:
    stops.remove(i)
from sklearn import model_selection, preprocessing, metrics, ensemble, naive_bayes, linear_model
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
import lightgbm as lgb
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
from bs4 import BeautifulSoup
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn import metrics
#stemming
stemmer = SnowballStemmer('english')
import re
def review_to_words(raw_review):
    # 1. Remove HTML tags
    review_text = BeautifulSoup(raw_review, 'html.parser').get_text()
    # 2. Replace non-alphabetic characters with a space
    letters_only = re.sub('[^a-zA-Z]', ' ', review_text)
    # 3. Convert to lower case and split into words
    words = letters_only.lower().split()
    # 4. Remove stopwords
    meaningful_words = [w for w in words if w not in stops]
    # 5. Stem each word
    stemming_words = [stemmer.stem(w) for w in meaningful_words]
    # 6. Join the words back into a single space-separated string
    return ' '.join(stemming_words)
## our cleaned data
df_all['review_clean'] = df_all['review'].apply(review_to_words)
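# Optional sanity-check sketch: compare a raw review with its cleaned, stemmed
# counterpart to confirm review_to_words behaves as intended.
# print(df_all['review'].iloc[0])
# print(df_all['review_clean'].iloc[0])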
# Deep learning and Machine learning Models
## Deep Learning Model Using N-gram
# Make a binary sentiment label from the rating (positive if rating > 5)
df_all['sentiment'] = df_all["rating"].apply(lambda x: 1 if x > 5 else 0)
df_train, df_test = train_test_split(df_all, test_size=0.33, random_state=42)
# pipeline
# https://github.com/corazzon/KaggleStruggle/blob/master/word2vec-nlp-tutorial/tutorial-part-1.ipynb
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
vectorizer = CountVectorizer(analyzer='word',
                             tokenizer=None,
                             preprocessor=None,
                             stop_words=None,
                             min_df=2,  # ignore terms that appear in fewer than 2 documents
                             ngram_range=(4, 4),
                             max_features=20000)
#https://stackoverflow.com/questions/28160335/plot-a-document-tfidf-2d-graph
pipeline = Pipeline([
('vect', vectorizer),
])
train_data_features = pipeline.fit_transform(df_train['review_clean'])
test_data_features = pipeline.transform(df_test['review_clean'])  # transform only, so the test set reuses the vocabulary fitted on the training set
## Neural network (deep learning)
# Binary classification: classify each review as positive or negative sentiment
#from tensorflow.python.keras.models import Sequential
#from tensorflow.python.keras.layers import Dense, Bidirectional, LSTM, BatchNormalization, Dropout
#from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Bidirectional, LSTM, BatchNormalization, Dropout
from keras.preprocessing.sequence import pad_sequences
# Based on sample code from Taeyoung Kim's (김태영) Keras blog
# 0. Packages
## Sentiment analysis: the binary sentiment labels computed above are the training targets
import numpy as np
#import keras
#from keras.models import Sequential
#from keras.layers import Dense
import random
# 1. Dataset
y_train = df_train['sentiment']
y_test = df_test['sentiment']
solution = y_test.copy()
# 2. Model Structure
model = keras.models.Sequential()
model.add(keras.layers.Dense(200, input_shape=(20000,)))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(300))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
# 3. Model compile
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()
# 4. Train model
hist = model.fit(train_data_features, y_train, epochs=1, batch_size=64)
# 5. Training process (plotting code kept for reference, commented out)
#%matplotlib inline
#import matplotlib.pyplot as plt
#fig, loss_ax = plt.subplots()
#acc_ax = loss_ax.twinx()
#loss_ax.set_ylim([0.0, 1.0])
#acc_ax.set_ylim([0.0, 1.0])
#loss_ax.plot(hist.history['loss'], 'y', label='train loss')
#acc_ax.plot(hist.history['acc'], 'b', label='train acc')
#loss_ax.set_xlabel('epoch')
#loss_ax.set_ylabel('loss')
#acc_ax.set_ylabel('accuracy')
#loss_ax.legend(loc='upper left')
#acc_ax.legend(loc='lower left')
#plt.show()
# 6. Evaluation
loss_and_metrics = model.evaluate(test_data_features, y_test, batch_size=32)
print('loss_and_metrics : ' + str(loss_and_metrics))
## DUMP ONE MODEL HERE
sub_preds_deep = model.predict(test_data_features, batch_size=32)
# or, under a more descriptive name:
sentiment_classification_model_result = sub_preds_deep
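# Sketch (assumption: "DUMP ONE MODEL HERE" means persisting the trained network).
# A Keras model can be saved to a single HDF5 file and reloaded later; the file name
# 'sentiment_dnn.h5' is hypothetical.
# model.save('sentiment_dnn.h5')
# model = keras.models.load_model('sentiment_dnn.h5')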
## Improving on the low accuracy using the LightGBM gradient boosting framework
from sklearn.metrics import roc_auc_score, precision_recall_curve, roc_curve, average_precision_score
from sklearn.model_selection import KFold
from lightgbm import LGBMClassifier
from sklearn.metrics import confusion_matrix
#folds = KFold(n_splits=5, shuffle=True, random_state=546789)
target = df_train['sentiment']
feats = ['usefulCount']
sub_preds = np.zeros(df_test.shape[0])
trn_x, val_x, trn_y, val_y = train_test_split(df_train[feats], target, test_size=0.2, random_state=42)
feature_importance_df = pd.DataFrame()
clf = LGBMClassifier(
    n_estimators=2000,
    learning_rate=0.05,
    num_leaves=30,
    # colsample_bytree=.9,
    subsample=.9,
    max_depth=7,
    reg_alpha=.1,
    reg_lambda=.1,
    min_split_gain=.01,
    min_child_weight=2,
    silent=-1,
    verbose=-1,
)
clf.fit(trn_x, trn_y,
        eval_set=[(trn_x, trn_y), (val_x, val_y)],
        verbose=100, early_stopping_rounds=100)
#DUMP ONE CLF HERE
sub_preds = clf.predict(df_test[feats])
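# Sketch (assumption: "DUMP ONE CLF HERE" means persisting the fitted booster).
# joblib serializes scikit-learn style estimators; the file name is hypothetical.
# import joblib
# joblib.dump(clf, 'lgbm_usefulcount.pkl')
# clf = joblib.load('lgbm_usefulcount.pkl')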
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
solution = df_test['sentiment']
confusion_matrix(y_pred=sub_preds, y_true=solution)
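# Optional sketch (an added check, not in the original script): beyond the raw confusion
# matrix, the metrics imported above can summarize how well usefulCount alone separates
# the classes.
# print('accuracy:', metrics.accuracy_score(solution, sub_preds))
# print('ROC-AUC :', roc_auc_score(solution, clf.predict_proba(df_test[feats])[:, 1]))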
## Adding more variables for better accuracy
len_train = df_train.shape[0]
df_all = pd.concat([df_train,df_test])
del df_train, df_test;
gc.collect()
df_all['date'] = pd.to_datetime(df_all['date'])
df_all['day'] = df_all['date'].dt.day
df_all['year'] = df_all['date'].dt.year
df_all['month'] = df_all['date'].dt.month
from textblob import TextBlob
from tqdm import tqdm
reviews = df_all['review_clean']
Predict_Sentiment = []
for review in tqdm(reviews):
blob = TextBlob(review)
Predict_Sentiment += [blob.sentiment.polarity]
df_all["Predict_Sentiment"] = Predict_Sentiment
df_all.head()
## Correlation coefficients
np.corrcoef(df_all["Predict_Sentiment"], df_all["rating"])
np.corrcoef(df_all["Predict_Sentiment"], df_all["sentiment"])
reviews = df_all['review']
Predict_Sentiment = []
for review in tqdm(reviews):
blob = TextBlob(review)
Predict_Sentiment += [blob.sentiment.polarity]
df_all["Predict_Sentiment2"] = Predict_Sentiment
np.corrcoef(df_all["Predict_Sentiment2"], df_all["rating"])
np.corrcoef(df_all["Predict_Sentiment2"], df_all["sentiment"])
## Count-based features
# Sentence count (number of line breaks + 1)
df_all['count_sent']=df_all["review"].apply(lambda x: len(re.findall("\n",str(x)))+1)
# Word count in each review
df_all['count_word']=df_all["review_clean"].apply(lambda x: len(str(x).split()))
# Unique word count
df_all['count_unique_word']=df_all["review_clean"].apply(lambda x: len(set(str(x).split())))
# Letter count (review length)
df_all['count_letters']=df_all["review_clean"].apply(lambda x: len(str(x)))
# Punctuation count
df_all["count_punctuations"] = df_all["review"].apply(lambda x: len([c for c in str(x) if c in string.punctuation]))
# Count of fully upper-case words
df_all["count_words_upper"] = df_all["review"].apply(lambda x: len([w for w in str(x).split() if w.isupper()]))
# Count of title-case words
df_all["count_words_title"] = df_all["review"].apply(lambda x: len([w for w in str(x).split() if w.istitle()]))
# Number of stopwords
df_all["count_stopwords"] = df_all["review"].apply(lambda x: len([w for w in str(x).lower().split() if w in stops]))
# Average word length
df_all["mean_word_len"] = df_all["review_clean"].apply(lambda x: np.mean([len(w) for w in str(x).split()]))
## The season variable: 1 = Mar-May, 2 = Jun-Aug, 3 = Sep-Nov, 4 = Dec-Feb
df_all['season'] = df_all["month"].apply(lambda x: 1 if ((x > 2) & (x < 6)) else (2 if (x > 5) & (x < 9) else (3 if (x > 8) & (x < 12) else 4)))
df_train = df_all[:len_train]
df_test = df_all[len_train:]
from sklearn.metrics import roc_auc_score, precision_recall_curve, roc_curve, average_precision_score
from sklearn.model_selection import KFold
from lightgbm import LGBMClassifier
#folds = KFold(n_splits=5, shuffle=True, random_state=546789)
target = df_train['sentiment']
feats = ['usefulCount','day','year','month','Predict_Sentiment','Predict_Sentiment2', 'count_sent',
'count_word', 'count_unique_word', 'count_letters', 'count_punctuations',
'count_words_upper', 'count_words_title', 'count_stopwords', 'mean_word_len', 'season']
sub_preds = np.zeros(df_test.shape[0])
trn_x, val_x, trn_y, val_y = train_test_split(df_train[feats], target, test_size=0.2, random_state=42)
feature_importance_df = pd.DataFrame()
clf = LGBMClassifier(
    n_estimators=10000,
    learning_rate=0.10,
    num_leaves=30,
    # colsample_bytree=.9,
    subsample=.9,
    max_depth=7,
    reg_alpha=.1,
    reg_lambda=.1,
    min_split_gain=.01,
    min_child_weight=2,
    silent=-1,
    verbose=-1,
)
clf.fit(trn_x, trn_y,
        eval_set=[(trn_x, trn_y), (val_x, val_y)],
        verbose=100, early_stopping_rounds=100)
#DUMP ANOTHER CLF HERE
sub_preds = clf.predict(df_test[feats])
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = feats
fold_importance_df["importance"] = clf.feature_importances_
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
confusion_matrix(y_pred=sub_preds, y_true=solution)
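# Optional sketch: feature_importance_df is built above but never inspected; sorting it
# shows which of the added variables the booster actually relied on.
# print(feature_importance_df.sort_values('importance', ascending=False))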
## Adding the Harvard General Inquirer dictionary
# import dictionary data
word_table = pd.read_csv("inquirerbasic.csv")
## 1. Building the sentiment word lists
# Positiv word list
temp_Positiv = []
Positiv_word_list = []
for i in range(0, len(word_table.Positiv)):
    if word_table.iloc[i, 2] == "Positiv":
        temp = word_table.iloc[i, 0].lower()
        temp1 = re.sub(r'\d+', '', temp)
        temp2 = re.sub('#', '', temp1)
        temp_Positiv.append(temp2)
Positiv_word_list = list(set(temp_Positiv))
len(temp_Positiv)
len(Positiv_word_list) #del temp_Positiv
#Negativ word list
temp_Negativ = []
Negativ_word_list = []
for i in range(0, len(word_table.Negativ)):
    if word_table.iloc[i, 3] == "Negativ":
        temp = word_table.iloc[i, 0].lower()
        temp1 = re.sub(r'\d+', '', temp)
        temp2 = re.sub('#', '', temp1)
        temp_Negativ.append(temp2)
Negativ_word_list = list(set(temp_Negativ))
len(temp_Negativ)
len(Negativ_word_list) #del temp_Negativ
# Count the number of words in review_clean that appear in the dictionary.
## 2. Counting the dictionary words
# This is done only for the test dataframe.
# Vectorize the reviews and turn the counts into arrays that feed a DataFrame.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(vocabulary = Positiv_word_list)
content = df_test['review_clean']
X = vectorizer.fit_transform(content)
f = X.toarray()
f = pd.DataFrame(f)
f.columns=Positiv_word_list
df_test["num_Positiv_word"] = f.sum(axis=1)
vectorizer2 = CountVectorizer(vocabulary = Negativ_word_list)
content = df_test['review_clean']
X2 = vectorizer2.fit_transform(content)
f2 = X2.toarray()
f2 = pd.DataFrame(f2)
f2.columns=Negativ_word_list
df_test["num_Negativ_word"] = f2.sum(axis=1)
## 3. Deciding the sentiment from the ratio of positive to negative dictionary words
df_test["Positiv_ratio"] = df_test["num_Positiv_word"]/(df_test["num_Positiv_word"]+df_test["num_Negativ_word"])
# ratio >= 0.5 -> positive (1), ratio < 0.5 -> negative (0), no dictionary words (NaN ratio) -> neutral (0.5)
df_test["sentiment_by_dic"] = df_test["Positiv_ratio"].apply(lambda x: 1 if (x >= 0.5) else (0 if (x < 0.5) else 0.5))
df_test.head()
def userful_count(data):
    # number of reviews per condition, used below to normalize usefulCount
    grouped = data.groupby(['condition']).size().reset_index(name='user_size')
    data = pd.merge(data, grouped, on='condition', how='left')
    return data
#___________________________________________________________
df_test = userful_count(df_test)
df_test['usefulCount'] = df_test['usefulCount']/df_test['user_size']
## As mentioned earlier, usefulCount is normalized by condition to correct for the bias that
## usefulCount varies strongly with the condition. The three predicted sentiment values are then
## summed and multiplied by the normalized usefulCount to obtain the final predicted value.
## Drugs can now be recommended per condition in order of this final predicted value.
df_test['deep_pred'] = sub_preds_deep
df_test['machine_pred'] = sub_preds
df_test['total_pred'] = (df_test['deep_pred'] + df_test['machine_pred'] + df_test['sentiment_by_dic'])*df_test['usefulCount']
xxx = df_test['total_pred']
print(max(xxx))
print(df_test['deep_pred'].shape)
print(df_test['machine_pred'].shape)
df_test3 = df_test.groupby(['condition','drugName']).agg({'total_pred' : ['mean']})
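# Optional sketch (hypothetical example): total_pred is now aggregated per
# (condition, drugName), so sorting within a condition gives the recommendation order.
# The condition label 'Depression' is only an illustrative value.
# df_test3.loc['Depression'].sort_values(('total_pred', 'mean'), ascending=False).head()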
## Optionally saving and reloading the newly created dataframe
#fully_trained_df = df_test3
#fully_trained_df.to_pickle("./trained_drug_con_dataFrame.pkl")
#df_test3 = pd.read_pickle("./trained_drug_con_dataFrame.pkl")
## Checking the recommendations
def check_drugs(keyword):
    # print up to 5 aggregated (condition, drugName) rows whose text contains the keyword
    count = 0
    for item in range(len(df_test3) - 2):
        if keyword.lower() in str(df_test3.iloc[item:item + 1]).lower() and count < 5:
            print(df_test3.iloc[item:item + 1])
            count += 1
check_drugs('depression')
#check_drugs(str(input()))