## =======================================================
## IMPORTING DATA
## =======================================================
import os
def get_data_from_files(path):
    # Read every file in the given directory and return a list of their contents
    directory = os.listdir(path)
    results = []
    for file in directory:
        with open(os.path.join(path, file)) as f:
            results.append(f.read())
    return results
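# Example usage (hypothetical path, matching the commented-out loads further below):
# reviews = get_data_from_files('../neg_cornell/')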
## =======================================================
## TOKENIZING
## =======================================================
from nltk.tokenize import word_tokenize, sent_tokenize
def get_tokens(sentence):
tokens = word_tokenize(sentence)
clean_tokens = [word.lower() for word in tokens if word.isalpha()]
return clean_tokens
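# Illustrative: get_tokens("The movie was great!") -> ['the', 'movie', 'was', 'great']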
def get_sentence_tokens(review):
return sent_tokenize(review)
## =======================================================
## REMOVING STOPWORDS
## =======================================================
from nltk.corpus import stopwords
stop_words = set(stopwords.words("english"))
def remove_stopwords(sentence):
filtered_text = []
for word in sentence:
if word not in stop_words:
filtered_text.append(word)
return filtered_text
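# Illustrative: remove_stopwords(['the', 'movie', 'was', 'great']) -> ['movie', 'great']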
## =======================================================
## FREQUENCY DISTRIBUTIONS
## =======================================================
from nltk.probability import FreqDist
def get_most_common(tokens):
fdist = FreqDist(tokens)
return fdist.most_common(12)
def get_fdist(tokens):
    return FreqDist(tokens)
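# Illustrative: get_most_common(['a', 'b', 'a']) -> [('a', 2), ('b', 1)]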
## =======================================================
## SENTIMENT ANALYSIS
## =======================================================
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import mark_negation
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()
def get_vader_score(review):
return sid.polarity_scores(review)
def separate_vader_score(vader_score, key):
return vader_score[key]
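# VADER returns proportions for 'neg'/'neu'/'pos' plus a 'compound' score in [-1, 1],
# e.g. (illustrative) {'neg': 0.0, 'neu': 0.4, 'pos': 0.6, 'compound': 0.65}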
## =======================================================
## SUMMARIZER
## =======================================================
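# Extractive summarizer: weight each word's count by the count of the most frequent word,
# score sentences by summing the weights of their words, and keep the top-scoring sentences.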
def get_weighted_freq_dist(review, freq_dist):
    # Normalize each word's frequency by the frequency of the most common word
    try:
        max_freq = max(freq_dist.values())
        for word in freq_dist.keys():
            freq_dist[word] = freq_dist[word] / max_freq
        return freq_dist
    except ValueError:
        # Empty distribution (no words survived filtering): return it unchanged
        return freq_dist
def get_sentence_score(review, freq_dist):
    # Score each sentence by summing the weights of its words; skip sentences
    # of 30+ words so the summary favors concise sentences
    sentence_scores = {}
    for sent in review:
        for word in word_tokenize(sent.lower()):
            if word in freq_dist.keys():
                if len(sent.split(' ')) < 30:
                    if sent not in sentence_scores.keys():
                        sentence_scores[sent] = freq_dist[word]
                    else:
                        sentence_scores[sent] += freq_dist[word]
    return sentence_scores
def get_summary_sentences(sentence_scores):
    sorted_sentences = sorted(sentence_scores.items(), key=lambda kv: kv[1], reverse=True)
    # Join the five highest-scoring sentences with spaces so they do not run together
    return ' '.join(sent[0] for sent in sorted_sentences[:5])
def get_freq_words(freq_dist):
sorted_words = sorted(freq_dist.items(), key=lambda kv: kv[1], reverse=True)
return ' '.join(word[0] for word in sorted_words[:50])
## =======================================================
## MACHINE LEARNING -- NAIVE BAYES
## =======================================================
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report
# def get_NB(small_df, labels):
# x_train, x_test, y_train, y_test = train_test_split(small_df.values, labels, test_size=0.3, random_state = 109)
# gnb = GaussianNB()
# gnb.fit(x_train, y_train)
# y_pred = gnb.predict(x_test)
# print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
def get_NB(small_df, labels, classifier, title):
seeds = [109, 210, 420, 19, 7]
dfs = []
overall = []
print(title)
for seed in seeds:
x_train, x_test, y_train, y_test = train_test_split(small_df.values,
labels, test_size=0.3, random_state = seed)
gnb = classifier
        gnb.fit(x_train, y_train)
y_pred = gnb.predict(x_test)
accuracy = metrics.accuracy_score(y_test, y_pred)
report = metrics.classification_report(y_test, y_pred)
print("Accuracy:", accuracy)
# print(report)
overall.append(accuracy)
cm = confusion_matrix(y_test, y_pred)
# confusion_matrix_graph(cm, accuracy, "NB Multinomial Tokenized")
# tn, fp, fn, tp = cm.ravel()
df = pd.DataFrame(cm.ravel())
dfs.append(df)
print('AVERAGE ACCURACY:', sum(overall)/len(overall))
return dfs
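# Example call (mirrors the runs below): average accuracy over five random train/test splits
# tables = get_NB(small_df, all_df['PoN'], GaussianNB(), 'Vader Scores -- Gaussian')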
from IPython.display import display
def display_NB_tables(dfs):
    for df in dfs:
        display(df)
## =======================================================
## PLOTS
## =======================================================
import seaborn as sns
import matplotlib.pyplot as plt
def bar_plot(df, title):
graph = sns.barplot(y = "count", x = "word", data = df, palette = "husl")
plt.title(title)
plt.xlabel("Word")
plt.ylabel("Count")
sns.set_context("talk")
plt.xticks(rotation = 90)
return plt
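# Note: bar_plot expects a DataFrame with 'word' and 'count' columns.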
from nltk.tokenize import casual_tokenize
from collections import Counter
## =======================================================
## CLEANERS
## =======================================================
import re, string
def diy_cleaner(review):
    # Reviews are expected as "title\nbody"; fall back to an empty title
    # when there is no newline to split on
    try:
        both = review.split('\n')
        title = both[0]
        review = both[1]
    except IndexError:
        title = ''
    review = review.replace("'", "")
    pattern = re.compile(r'[\W_]+')
    review = pattern.sub(' ', review)
    # Repeat the title so its words get extra weight in the bag of words
    cleaned = title + ' ' + title + ' ' + review
    return cleaned.lower()
def pruner(review):
clean_review = ' '.join([word for word in review.split() if len(word) > 3])
return clean_review
sentim_analyzer = SentimentAnalyzer()
def get_nltk_negs(tokens):
all_words_neg = sentim_analyzer.all_words([mark_negation(tokens)])
return all_words_neg
def get_unigram_feats(neg_tokens):
unigram_feats = sentim_analyzer.unigram_word_feats(neg_tokens)
return unigram_feats
def get_bigram_feats(tokens):
ngrams = zip(*[tokens[i:] for i in range(2)])
return ["_".join(ngram) for ngram in ngrams]
## =======================================================
## HELPERS
## =======================================================
def get_bow_from_column(df, column):
all_column_data = ' '.join(df[column].tolist())
all_column_fd = Counter(all_column_data.split())
return all_column_fd
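# Note: get_common_words reads the module-level Counters big_bow_n and big_bow_p
# (negative/positive bags of words); build them (e.g. with get_bow_from_column) before calling.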
def get_common_words(num):
most_common_neg = [word[0] for word in big_bow_n.most_common(num)]
most_common_pos = [word[0] for word in big_bow_p.most_common(num)]
in_both = np.intersect1d(most_common_neg, most_common_pos)
neg_notpos = np.setdiff1d(most_common_neg, most_common_pos)
pos_notneg = np.setdiff1d(most_common_pos, most_common_neg)
return [len(in_both), len(neg_notpos), len(pos_notneg), len(in_both)/num, in_both, neg_notpos, pos_notneg]
def get_only_polarized(tokens, common_words):
return [token for token in tokens if token not in common_words[4]] # 70
## =======================================================
## VISUALS
## =======================================================
import wordcloud
from wordcloud import WordCloud, ImageColorGenerator
from PIL import Image
def create_word_cloud_with_mask(path_of_mask_image, dictionary,
max_num_words, title):
mask = np.array(Image.open(path_of_mask_image))
word_cloud = WordCloud(background_color = "white",
max_words = max_num_words,
mask = mask, max_font_size = 125,
random_state = 1006)
word_cloud.generate_from_frequencies(dictionary)
image_colors = ImageColorGenerator(mask)
plt.figure(figsize = [8,8])
plt.imshow(word_cloud.recolor(color_func = image_colors), interpolation = "bilinear")
plt.title(title)
sns.set_context("poster")
plt.axis("off")
return plt
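# Note: the mask image defines both the cloud's shape and, via ImageColorGenerator, its colors.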
import pandas as pd
import numpy as np
# train=pd.read_csv("../WK7/kaggle-sentiment/train.tsv", delimiter='\t')
# y=train['Sentiment'].values
# X=train['Phrase'].values
# all_df = pd.DataFrame(X)
# all_df['labels'] = y
# all_df
df = pd.read_csv('../death_row_discritized.csv')
from ast import literal_eval
def to_string(tokens):
    # last_statement holds the string form of a token list; parse it and rejoin with spaces
    try:
        return " ".join(literal_eval(tokens))
    except (ValueError, SyntaxError, TypeError):
        return "error"
df['statement_string'] = df.apply(lambda x: to_string(x['last_statement']), axis=1)
# y=df['vic_kid'].values
y=df['prior_record'].values
y_labels = list(set(y))
X=df['statement_string'].values
all_df = pd.DataFrame(X)
all_df['labels'] = y
all_df
# neg_df = pd.DataFrame(neg)
# pos_df = pd.DataFrame(pos)
# pos_df['PoN'] = 'P'
# neg_df['PoN'] = 'N'
# all_df = neg_df.append(pos_df)
# neg = get_data_from_files('../NEG_JK/')
# pos = get_data_from_files('../POS_JK/')
# neg = get_data_from_files('../neg_cornell/')
# pos = get_data_from_files('../pos_cornell/')
# neg = get_data_from_files('../neg_hw4/')
# pos = get_data_from_files('../pos_hw4/')
# neg = get_data_from_files('../hw4_lie_false/')
# pos = get_data_from_files('../hw4_lie_true/')
# pos = get_data_from_files('../hw4_lie_false/')
# neg = get_data_from_files('../hw4_lie_true/')
all_df['tokens'] = all_df.apply(lambda x: get_tokens(x[0]), axis=1)
all_df['num_tokens'] = all_df.apply(lambda x: len(x['tokens']), axis=1)
all_df = all_df.drop(all_df[all_df.num_tokens < 1].index)
all_df['sentences'] = all_df.apply(lambda x: get_sentence_tokens(x[0]), axis=1)
all_df['num_sentences'] = all_df.apply(lambda x: len(x['sentences']), axis=1)
all_df['no_sw'] = all_df.apply(lambda x: remove_stopwords(x['tokens']),axis=1)
all_df['num_no_sw'] = all_df.apply(lambda x: len(x['no_sw']),axis=1)
all_df['topwords_unfil'] = all_df.apply(lambda x: get_most_common(x['tokens']),axis=1)
all_df['topwords_fil'] = all_df.apply(lambda x: get_most_common(x['no_sw']),axis=1)
all_df['freq_dist'] = all_df.apply(lambda x: get_fdist(x['no_sw']),axis=1)
all_df['freq_dist_unfil'] = all_df.apply(lambda x: get_fdist(x['tokens']),axis=1)
all_df['vader_all'] = all_df.apply(lambda x: get_vader_score(x[0]),axis=1)
all_df['v_compound'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'compound'),axis=1)
all_df['v_neg'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'neg'),axis=1)
all_df['v_neu'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'neu'),axis=1)
all_df['v_pos'] = all_df.apply(lambda x: separate_vader_score(x['vader_all'], 'pos'),axis=1)
all_df['weighted_freq_dist'] = all_df.apply(lambda x: get_weighted_freq_dist(x['sentences'], x['freq_dist']),axis=1)
all_df['sentence_scores'] = all_df.apply(lambda x: get_sentence_score(x['sentences'], x['freq_dist']),axis=1)
all_df['summary_sentences'] = all_df.apply(lambda x: get_summary_sentences(x['sentence_scores']), axis=1)
all_df['vader_sum_all'] = all_df.apply(lambda x: get_vader_score(x['summary_sentences']),axis=1)
all_df['v_compound_sum'] = all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'compound'),axis=1)
all_df['v_neg_sum'] = all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'neg'),axis=1)
all_df['v_neu_sum'] = all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'neu'),axis=1)
all_df['v_pos_sum'] = all_df.apply(lambda x: separate_vader_score(x['vader_sum_all'], 'pos'),axis=1)
all_df['v_freq_words'] = all_df.apply(lambda x: get_freq_words(x['freq_dist']), axis=1)
all_df['vader_fq_all'] = all_df.apply(lambda x: get_vader_score(x['v_freq_words']),axis=1)
all_df['v_compound_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'compound'),axis=1)
all_df['v_neg_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'neg'),axis=1)
all_df['v_neu_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'neu'),axis=1)
all_df['v_pos_fd'] = all_df.apply(lambda x: separate_vader_score(x['vader_fq_all'], 'pos'),axis=1)
all_df['bow'] = all_df.apply(lambda x: Counter(x['tokens']), axis=1)
all_df['bow_nosw'] = all_df.apply(lambda x: Counter(x['no_sw']), axis=1)
all_df['diy_cleaner'] = all_df.apply(lambda x: diy_cleaner(x[0]), axis=1)
all_df['pruned'] = all_df.apply(lambda x: pruner(x['diy_cleaner']), axis=1)
all_df['nltk_negs'] = all_df.apply(lambda x: get_nltk_negs(x['tokens']), axis=1)
all_df['unigram_feats'] = all_df.apply(lambda x: get_unigram_feats(x['nltk_negs']), axis=1)
all_df['bigram_feats'] = all_df.apply(lambda x: get_bigram_feats(x['tokens']), axis=1)
all_df['bigram_feats_neg'] = all_df.apply(lambda x: get_bigram_feats(x['nltk_negs']), axis=1)
big_bow = get_bow_from_column(all_df, 'pruned')
big_bow_1 = get_bow_from_column(all_df[all_df['labels'] == 'yes'], 'pruned')
big_bow_2 = get_bow_from_column(all_df[all_df['labels'] == 'no'], 'pruned')
big_bow_3 = get_bow_from_column(all_df[all_df['labels'] == 'unknown'], 'pruned')
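# big_bow_1 / big_bow_2 / big_bow_3 are per-label bags of words for prior_record = yes / no / unknown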
# big_bow_4 = get_bow_from_column(all_df[all_df['labels'] == 4], 'pruned')
# big_bow_5 = get_bow_from_column(all_df[all_df['labels'] == 5], 'pruned')
# most_common_1 = [word[0] for word in big_bow_n.most_common(100)]
# most_common_2 = [word[0] for word in big_bow_p.most_common(100)]
# all_df['no_shared_words'] = all_df.apply(lambda x: get_only_polarized(x['tokens'], get_common_words(500)), axis=1)
all_df['PoN'] = all_df['labels']
all_df[:3]
# all_df.to_csv('hw7_data_sentiment.csv',index=False)
# all_df['PoN'] = all_df['labels']
# all_df
small_df = all_df.filter(['v_compound','v_pos', 'v_neg', 'v_neu'])
tables = get_NB(small_df, all_df['PoN'], GaussianNB(), 'Vader Scores -- Gaussian')
# display_NB_tables(tables)
small_df = all_df.filter(['v_pos','v_neu'])
tables = get_NB(small_df, all_df['PoN'], MultinomialNB(), 'Positive and Neutral Vader Scores -- Multinomial')
small_df = all_df.filter(['v_compound_sum','v_pos_sum', 'v_neg_sum', 'v_neu_sum'])
tables = get_NB(small_df, all_df['PoN'], GaussianNB(), 'Vader Scores from Summary -- Gaussian')
small_df = all_df.filter(['v_compound_sum','v_pos_sum', 'v_neg_sum', 'v_neu_sum',
'v_compound','v_pos', 'v_neg', 'v_neu'])
tables = get_NB(small_df, all_df['PoN'], GaussianNB(), 'Vader Scores (original) and Vader Scores (summary) -- Gaussian')
small_df = all_df.filter(['v_compound_fd','v_pos_fd', 'v_neu_fd', 'v_neg_fd'])
tables = get_NB(small_df, all_df['PoN'], GaussianNB(), 'Vader Scores 50 most frequent filtered words -- Gaussian')
all_df['bow_v1'] = all_df.apply(lambda x: Counter(x['tokens']), axis=1)
all_df
new_df = pd.DataFrame(all_df['bow_v1'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, GaussianNB(), 'Starting point -- Gaussian')
all_df['bow_v1'] = all_df.apply(lambda x: Counter(x['tokens']), axis=1)
new_df = pd.DataFrame(all_df['bow_v1'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, MultinomialNB(), 'Starting point -- Multinomial')
new_df = new_df.astype(bool).astype(int)
tables = get_NB(new_df, new_df.index, BernoulliNB(), 'Starting point -- Bernoulli')
all_df['bow_v2'] = all_df.apply(lambda x: Counter(casual_tokenize(x['diy_cleaner'])), axis=1)
new_df = pd.DataFrame(all_df['bow_v2'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, GaussianNB(), 'DIY Cleaner -- Gaussian')
all_df['bow_v2'] = all_df.apply(lambda x: Counter(casual_tokenize(x['diy_cleaner'])), axis=1)
new_df = pd.DataFrame(all_df['bow_v2'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, MultinomialNB(), 'DIY Cleaner -- Multinomial')
new_df = new_df.astype(bool).astype(int)
tables = get_NB(new_df, new_df.index, BernoulliNB(), 'DIY Cleaner -- Bernoulli')
all_df['bow_v3'] = all_df.apply(lambda x: Counter(casual_tokenize(x['pruned'])), axis=1)
new_df = pd.DataFrame(all_df['bow_v3'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, GaussianNB(), 'Pruned Words -- Gaussian')
all_df['bow_v3'] = all_df.apply(lambda x: Counter(casual_tokenize(x['pruned'])), axis=1)
new_df = pd.DataFrame(all_df['bow_v3'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, MultinomialNB(), 'Pruned Words -- Multinomial')
new_df = new_df.astype(bool).astype(int)
tables = get_NB(new_df, new_df.index, BernoulliNB(), 'Pruned Words -- Bernoulli')
all_df['bow_v4'] = all_df.apply(lambda x: Counter(casual_tokenize(' '.join(x['nltk_negs']))), axis=1)
new_df = pd.DataFrame(all_df['bow_v4'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, GaussianNB(), 'NLTK negs -- Gaussian')
all_df['bow_v4'] = all_df.apply(lambda x: Counter(casual_tokenize(' '.join(x['nltk_negs']))), axis=1)
new_df = pd.DataFrame(all_df['bow_v4'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, MultinomialNB(), 'NLTK negs -- Multinomial')
new_df = new_df.astype(bool).astype(int)
tables = get_NB(new_df, new_df.index, BernoulliNB(), 'NLTK negs -- Bernoulli')
all_df['bow_v5'] = all_df.apply(lambda x: Counter(casual_tokenize(' '.join(x['bigram_feats']))), axis=1)
new_df = pd.DataFrame(all_df['bow_v5'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, GaussianNB(), 'Bigram Feats -- Gaussian')
all_df['bow_v5'] = all_df.apply(lambda x: Counter(casual_tokenize(' '.join(x['bigram_feats']))), axis=1)
new_df = pd.DataFrame(all_df['bow_v5'].tolist(), all_df['PoN'])
new_df = new_df.fillna(0).astype(int)
new_df[:5]
tables = get_NB(new_df, new_df.index, MultinomialNB(), 'Bigram Feats -- Multinomial')
new_df = new_df.astype(bool).astype(int)
tables = get_NB(new_df, new_df.index, BernoulliNB(), 'Bigram Feats -- Bernoulli')
# all_df['bow_v6'] = all_df.apply(lambda x: Counter(casual_tokenize(' '.join(x['no_shared_words']))), axis=1)
# new_df = pd.DataFrame(all_df['bow_v6'].tolist(), all_df['PoN'])
# new_df = new_df.fillna(0).astype(int)
# new_df[:5]
# tables = get_NB(new_df, new_df.index, GaussianNB(), 'No Shared Words -- Gaussian')
# The 'no_shared_words' column is only built by the commented-out get_only_polarized step
# above, so these runs stay disabled until that step is re-enabled.
# all_df['bow_v6'] = all_df.apply(lambda x: Counter(casual_tokenize(' '.join(x['no_shared_words']))), axis=1)
# new_df = pd.DataFrame(all_df['bow_v6'].tolist(), all_df['PoN'])
# new_df = new_df.fillna(0).astype(int)
# new_df[:5]
# tables = get_NB(new_df, new_df.index, MultinomialNB(), 'No Shared Words -- Multinomial')
# new_df = new_df.astype(bool).astype(int)
# tables = get_NB(new_df, new_df.index, BernoulliNB(), 'No Shared Words -- Bernoulli')
create_word_cloud_with_mask('yellow_square.png', big_bow, 750, "Top Words")
# big_bow_n / big_bow_p are not defined for this dataset; use the per-label bows built above
create_word_cloud_with_mask('red_square.png', big_bow_2, 750, "Top Words -- prior_record: no")
create_word_cloud_with_mask('green_square.png', big_bow_1, 750, "Top Words -- prior_record: yes")
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import cross_val_score
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
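# Cross-validated baseline: CountVectorizer builds the bag of words (binary presence/absence
# when boolean=True) and the Naive Bayes classifier is scored with cv-fold cross-validation.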
def runPipeline(classifier, boolean, cv, X, y):
nb_clf_pipe = Pipeline([('vect', CountVectorizer(encoding='latin-1', binary=boolean)),('nb', classifier)])
scores = cross_val_score(nb_clf_pipe, X, y, cv=cv)
avg=sum(scores)/len(scores)
# pretty_line = "{} | Accuracy using {} -- and booleans? {}"
pretty_line = "{} | B? {} | CV: {} | Classifier: {}"
print(pretty_line.format(avg, str(boolean)[0], cv, str(classifier).split('(')[0]))
# X = array of data
# y = array of labels
hw6 = all_df[[0,'PoN']]
X = hw6[0].tolist()
y = hw6['PoN'].tolist()
runPipeline(BernoulliNB(), False, 5, X=X, y=y)
runPipeline(BernoulliNB(), False, 3, X=X, y=y)
runPipeline(MultinomialNB(), False, 5, X=X, y=y)
runPipeline(MultinomialNB(), False, 3, X=X, y=y)
runPipeline(MultinomialNB(), True, 5, X=X, y=y)
runPipeline(MultinomialNB(), True, 3, X=X, y=y)
from tabulate import tabulate
df = hw6
def shorten(long_string):
    # Truncate long statements to 20 characters for a readable tabulate preview
    return long_string if len(long_string) < 21 else long_string[:20]
def df_for_tabulate(df, column):
pretty_df = df.copy()
pretty_df[column] = pretty_df.apply(lambda x: shorten(x[column]), axis = 1)
return pretty_df
tabulate_df = df_for_tabulate(df, 0)
print(tabulate(tabulate_df[:10], tablefmt="simple", headers=tabulate_df.columns))