-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathget_data.py
60 lines (42 loc) · 1.81 KB
/
get_data.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import string
import pandas as pd
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import STOPWORDS
# Functions to preprocess the data:
def remove_punctuation(text):
    """Return *text* with every ASCII punctuation character removed."""
    return "".join(ch for ch in text if ch not in string.punctuation)
def stopwords_(text):
    """Drop every whitespace-separated token that appears in wordcloud's STOPWORDS.

    The input is coerced to str first (matches the original behavior for
    non-string values such as NaN), and tokens are rejoined with single spaces.
    """
    kept = []
    for token in str(text).split():
        if token not in STOPWORDS:
            kept.append(token)
    return " ".join(kept)
# One shared lemmatizer instance, built once at import time.
lemmatizer = WordNetLemmatizer()
def lemmatizer_(text):
    """Lemmatize each whitespace-separated token of *text* and rejoin with spaces."""
    return " ".join(map(lemmatizer.lemmatize, text.split()))
def preprocess(X, lowercase=True, remove_punct=True, remove_stopwords=True, use_lemmatizer=True):
    """Apply the selected cleaning steps, in order, to a pandas Series of text.

    Parameters:
        X: pandas Series of strings to clean.
        lowercase: lower-case every string first.
        remove_punct: strip ASCII punctuation (see remove_punctuation).
        remove_stopwords: drop wordcloud STOPWORDS tokens (see stopwords_).
        use_lemmatizer: WordNet-lemmatize each token (see lemmatizer_).

    Returns:
        A new Series with the transformations applied; the input Series
        object is not mutated.
    """
    if lowercase:
        X = X.str.lower()
    # Pass the helpers to .apply directly — the original `lambda t: f(t)`
    # wrappers added a needless indirection per element.
    if remove_punct:
        X = X.apply(remove_punctuation)
    if remove_stopwords:
        X = X.apply(stopwords_)
    if use_lemmatizer:
        X = X.apply(lemmatizer_)
    return X
def vectorize(X_train, X_test, min_tdf):
    """Fit a TF-IDF vectorizer on the training texts and transform both splits.

    Parameters:
        X_train: iterable of training documents (vocabulary is fit on these only).
        X_test: iterable of test documents (transformed with the fitted vocabulary).
        min_tdf: forwarded as TfidfVectorizer's min_df — terms occurring in
            fewer documents than this are dropped.

    Returns:
        (train_array, test_array) as dense numpy arrays.
    """
    tfidf = TfidfVectorizer(min_df=min_tdf)
    train_array = tfidf.fit_transform(X_train).toarray()
    test_array = tfidf.transform(X_test).toarray()
    return train_array, test_array
def get_data(min_tdf=3):
    """Load the train/test/submission CSVs, clean the text, and TF-IDF-vectorize it.

    Expects data/train.csv and data/test.csv with a 'text' column,
    data/train.csv and data/submission.csv with a 'target' column.

    Parameters:
        min_tdf: minimum document frequency passed through to vectorize().

    Returns:
        (X_train_tfidf, y_train, X_test_tfidf, y_test) where the X arrays are
        dense TF-IDF matrices and the y values are pandas Series of labels.
    """
    train_df = pd.read_csv('data/train.csv')
    test_features = pd.read_csv('data/test.csv')
    test_labels = pd.read_csv('data/submission.csv')
    y_train = train_df['target']
    y_test = test_labels['target']
    X_train = preprocess(train_df['text'])
    X_test = preprocess(test_features['text'])
    X_train, X_test = vectorize(X_train, X_test, min_tdf)
    return X_train, y_train, X_test, y_test