2_cos_similarity_code.R
library(RSQLite)
library(dplyr)
library(reshape2)
library("tm")
library(stringi)
library(pbapply)
library(parallel)
library(text2vec)
library(tokenizers)
library(XML)
library(httr)
library(rvest)
load("RDatafiles//words_codenames.RData")
#### Choose one of these: ####
#load("RDatafiles//reddit_coments_sample.RData")
load("RDatafiles//wiki_good_articles.RData")
#learning_text <- readLines("RDatafiles//text8", n = 1, warn = FALSE)
#learning_text <- c(readLines("RDatafiles//TheHobbit.txt",encoding="UTF-8"),
# readLines("RDatafiles//LotR_FotR.txt",encoding="UTF-8"),
# readLines("RDatafiles//LotR_TTT.txt",encoding="UTF-8"),
# readLines("RDatafiles//LotR_RotK.txt",encoding="UTF-8"))
#### Learn word vectors from the text ####
# Token iterator; tokenize_words lowercases and strips punctuation by default
it <- learning_text %>%
  itoken(tokenizer = tokenize_words)
# Build the vocabulary, dropping English stopwords and rare terms
vocab <- it %>%
  create_vocabulary(stopwords = stopwords()) %>%
  prune_vocabulary(term_count_min = 5)
# Vectorizer over the pruned vocabulary; skip_grams_window is the
# co-occurrence context size (older text2vec API)
vectorizer <- vocab %>%
  vocab_vectorizer(grow_dtm = FALSE,
                   skip_grams_window = 5)
tcm <- create_tcm(it, vectorizer)
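# tcm is a sparse term-by-term co-occurrence matrix; a quick sanity check
dim(tcm)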
# Fit 100-dimensional GloVe embeddings on the co-occurrence matrix
glove <- GlobalVectors$new(word_vectors_size = 100, vocabulary = vocab, x_max = 10)
glove$fit(tcm, n_iter = 20)
wv <- glove$get_word_vectors()
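# Note: the calls above use an older text2vec API. In newer releases (>= 0.5)
# a rough equivalent sketch would be (assumption, not tested here):
#   glove <- GlobalVectors$new(rank = 100, x_max = 10)
#   wv_main <- glove$fit_transform(tcm, n_iter = 20)
#   wv <- wv_main + t(glove$components)  # sum of main and context vectors
# with skip_grams_window passed to create_tcm() instead of vocab_vectorizer().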
save(wv, file="wordvectors_wiki")
# Test from the word2vec paper by Mikolov et al.:
# Paris is to France as Berlin is to Germany. This is easily quantified with vectors:
# vec(Paris) - vec(France) = vec(Berlin) - vec(Germany)
# Now we can ask the model obtained through text mining:
# given the relationship of Paris to France, what is the equivalent for Germany?
# The answer should be Berlin, and we rearrange the previous equation to
# vec(Paris) - vec(France) + vec(Germany) = vec(Berlin)
cos_sim <- sim2(x = wv,
                y = wv["paris", , drop = FALSE] - wv["france", , drop = FALSE] + wv["germany", , drop = FALSE],
                method = "cosine", norm = "l2")
# We asked for vec(Paris) - vec(France) + vec(Germany); let's look at the results and the best match.
head(sort(cos_sim[,1], decreasing = TRUE), 10)
# It works with the Wikipedia test dataset; I am not so sure whether it works if we use the subreddit data.
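# A hedged sketch of a reusable analogy query: `analogy()` is a hypothetical
# helper (not part of text2vec) that checks the words exist in the embedding
# matrix before indexing, which matters for smaller corpora such as the
# subreddit sample, where rare words are pruned from the vocabulary.
analogy <- function(a, b, c, wv, n = 10) {
  missing <- setdiff(c(a, b, c), rownames(wv))
  if (length(missing) > 0)
    stop("not in vocabulary: ", paste(missing, collapse = ", "))
  target <- wv[a, , drop = FALSE] - wv[b, , drop = FALSE] + wv[c, , drop = FALSE]
  sims <- sim2(x = wv, y = target, method = "cosine", norm = "l2")
  head(sort(sims[, 1], decreasing = TRUE), n)
}
# Example: should rank "berlin" near the top if the corpus covers these words
# analogy("paris", "france", "germany", wv)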