HW7: Comparing MNB & SVM with Kaggle Sentiment Data

OVERVIEW

Compare Multinomial Naive Bayes and a linear SVM on the Kaggle sentiment data (five sentiment classes, 0-4), using several CountVectorizer and TfidfVectorizer configurations.

VECTORIZERS USED:

CountVectorizer
TfidfVectorizer

MODELS USED:

Multinomial Naive Bayes (MNB)
Support Vector Machines (SVM)


VECTORIZATION PARAMS:

Binary
Stopwords
Unigrams, Bigrams
Min & Max df

TODO:

Stemming?
VADER + TextBlob (a starting sketch follows this list)
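
A possible starting point for the VADER + TextBlob item, assuming nltk's vader_lexicon has been downloaded and the textblob package is installed; the example phrase is made up and nothing here has been run against the Kaggle data:

## Sketch only: score one phrase with both sentiment tools.
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from textblob import TextBlob

sia = SentimentIntensityAnalyzer()
phrase = "This movie was surprisingly good."
print(sia.polarity_scores(phrase))   # VADER: dict of neg/neu/pos/compound scores
print(TextBlob(phrase).sentiment)    # TextBlob: (polarity, subjectivity)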

FUNCTION & PACKAGE PARTY

In [21]:
## =======================================================
## TOKENIZING
## =======================================================
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize, word_tokenize

## =======================================================
## VECTORIZING
## =======================================================
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer

## ----- VECTORIZERS
unigram_bool_cv = CountVectorizer(encoding='latin-1', binary=True, min_df=5, stop_words='english')
unigram_cv = CountVectorizer(encoding='latin-1', binary=False, min_df=5, stop_words='english')
bigram_cv = CountVectorizer(encoding='latin-1', ngram_range=(1,2), min_df=5, stop_words='english')
unigram_tv = TfidfVectorizer(encoding='latin-1', use_idf=True, min_df=5, stop_words='english')
bigram_tv = TfidfVectorizer(encoding='latin-1', use_idf=True, ngram_range=(1,2), min_df=5, stop_words='english')

## =======================================================
## MODELING
## =======================================================
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import BernoulliNB, MultinomialNB


def get_test_train_vec(X, y, vectorizer):
    ## Hold out 40% of the data, then fit the vectorizer on the training split
    ## only and reuse its vocabulary to transform the test split.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)
    X_train_vec = vectorizer.fit_transform(X_train)
    X_test_vec = vectorizer.transform(X_test)
    return X_train_vec, X_test_vec, y_train, y_test

def run_mnb(X_train_vec, X_test_vec, y_train, y_test, labels, target_names):
    ## Multinomial Naive Bayes; prints accuracy on the held-out split.
    mnb_clf = MultinomialNB()
    mnb_clf.fit(X_train_vec, y_train)
    print(mnb_clf.score(X_test_vec, y_test))

def run_svm(X_train_vec, X_test_vec, y_train, y_test, labels, target_names):
    ## Linear SVM with C=1; prints accuracy on the held-out split.
    svm_clf = LinearSVC(C=1)
    svm_clf.fit(X_train_vec, y_train)
    print(svm_clf.score(X_test_vec, y_test))

def run_models(X, y, labels, target_names, vec):
    ## Vectorize once, then fit and score both models on the same split.
    X_train_vec, X_test_vec, y_train, y_test = get_test_train_vec(X, y, vec)
    run_mnb(X_train_vec, X_test_vec, y_train, y_test, labels, target_names)
    run_svm(X_train_vec, X_test_vec, y_train, y_test, labels, target_names)
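
The labels and target_names arguments are not used by the scoring functions above, and classification_report / confusion_matrix are imported but never called. A minimal sketch of how they could be wired in for per-class results on one model (run_svm_report is a hypothetical helper, not used in the runs below):

def run_svm_report(X_train_vec, X_test_vec, y_train, y_test, labels, target_names):
    ## Same linear SVM as run_svm, but report per-class precision/recall/F1
    ## and the confusion matrix instead of a single accuracy number.
    svm_clf = LinearSVC(C=1)
    svm_clf.fit(X_train_vec, y_train)
    y_pred = svm_clf.predict(X_test_vec)
    print(classification_report(y_test, y_pred, labels=labels, target_names=target_names))
    print(confusion_matrix(y_test, y_pred, labels=labels))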
    

DATA GOES HERE:

In [22]:
import pandas as pd

## Kaggle sentiment training data: one phrase per row with a 0-4 sentiment label.
train = pd.read_csv("kaggle-sentiment/train.tsv", delimiter='\t')
y = train['Sentiment'].values
X = train['Phrase'].values
run_models(X, y, [0,1,2,3,4], ['0','1','2','3','4'], unigram_bool_cv)
0.606401384083045
0.6241830065359477
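
The two accuracies above are printed by run_models in order: MNB first, then the linear SVM, both on the 40% held-out split with the boolean unigram CountVectorizer. A minimal sketch of how the remaining vectorizers defined earlier could be pushed through the same comparison (all names come from the cells above; these runs are not shown here):

for name, vec in [('unigram_cv', unigram_cv),
                  ('bigram_cv', bigram_cv),
                  ('unigram_tv', unigram_tv),
                  ('bigram_tv', bigram_tv)]:
    print(name)
    run_models(X, y, [0,1,2,3,4], ['0','1','2','3','4'], vec)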
In [ ]: