-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathPreproc_Hansard.Rmd
More file actions
191 lines (139 loc) · 6.42 KB
/
Preproc_Hansard.Rmd
File metadata and controls
191 lines (139 loc) · 6.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
---
title: "Corpus preprocessing for translation"
output:
html_document:
theme: paper
---
```{r, message=FALSE, warning=FALSE}
library(data.table)
library(readr)
library(mxnet)
library(stringr)
library(stringi)
library(plotly)
```
# Load raw source data
Source: [University of Haifa](http://cl.haifa.ac.il/projects/translationese/index.shtml).
```{r}
# Load the parallel Hansard corpus: one tokenized sentence per line,
# English and French files aligned row-by-row (same index = same sentence pair).
raw_en <- read_lines("data/hansard/train.en.tok")
raw_fr <- read_lines("data/hansard/train.fr.tok")
```
```{r}
# Preview aligned sentence pairs from the start of the corpus.
# Fix: the French previews previously printed via cat(raw_fr[...]) without
# paste(collapse = "\n"), so the five sentences ran together on one line,
# unlike the English previews; both sides now print one sentence per line.
cat("English:\n")
cat(paste(raw_en[1:5], collapse = "\n"))
cat("\n\nFrench:\n")
cat(paste(raw_fr[1:5], collapse = "\n"))
# ... and from one million lines into the corpus (also add the separating
# blank line that was missing before the second "English:" header)
cat("\n\nEnglish:\n")
cat(paste(raw_en[(1e6+1):(1e6+5)], collapse = "\n"))
cat("\n\nFrench:\n")
cat(paste(raw_fr[(1e6+1):(1e6+5)], collapse = "\n"))
```
### Pre-processing
- Prepare source and target corpus in parallel
- Convert sequences into vectors of words
- Trim out sequences of length outside min-max constraints
- Build dictionary and ignore words below a count threshold
- Assign each word with an index for embedding
```{r}
corpus_pre_process <- function(source, target, min_seq_len=1, max_seq_len=50, word_count_min = 20, dic = NULL) {
  # Pre-process a parallel corpus for sequence-to-sequence training:
  #   1. lowercase and wrap each sequence in <BOS>/<EOS> markers
  #   2. tokenize each sequence into a vector of words
  #   3. drop sequence PAIRS whose length falls outside [min_seq_len, max_seq_len]
  #      on either side
  #   4. build (or reuse) a vocabulary and map every word to an integer id
  #
  # Args:
  #   source, target    character vectors of raw sequences, aligned by position
  #   min_seq_len,
  #   max_seq_len       token-count bounds applied to BOTH sides of a pair
  #   word_count_min    minimum corpus frequency for a word to enter the dictionary
  #   dic               optional pre-built vocabulary; NULL builds one from this
  #                     corpus. When supplied, expected to be a list carrying
  #                     source_dic and target_dic data.tables keyed on "word"
  #                     (i.e. the dictionaries this function returns).
  #
  # Returns: list(source_dt, source_dic, source_seq_length,
  #               target_dt, target_dic, target_seq_length)

  # Deliberately NOT converted to ASCII: accented words would be split
  # incorrectly by the word-boundary tokenizer.
  source <- stringi::stri_trans_tolower(source)
  target <- stringi::stri_trans_tolower(target)

  # Add beginning-/end-of-sequence marker tokens
  source <- paste("<BOS>", source, "<EOS>")
  target <- paste("<BOS>", target, "<EOS>")

  # Split raw sequence vectors into lists of word vectors
  # (one list element per sequence)
  source_word_vec_list <- stri_split_boundaries(source, type = "word",
                                                skip_word_none = TRUE,
                                                skip_word_number = FALSE,
                                                simplify = FALSE)
  target_word_vec_list <- stri_split_boundaries(target, type = "word",
                                                skip_word_none = TRUE,
                                                skip_word_number = FALSE,
                                                simplify = FALSE)

  # Number of tokens per sequence (lengths() is the vectorized base idiom)
  source_seq_length <- lengths(source_word_vec_list)
  target_seq_length <- lengths(target_word_vec_list)

  # NOTE(review): this histogram is built but never printed or returned, so it
  # is currently invisible to the caller — confirm whether it should be
  # returned or printed.
  plot <- plot_ly(x = source_seq_length, type="histogram", name="source") %>%
    add_trace(x = target_seq_length, name = "target")

  # Keep only pairs where BOTH sides meet the length criteria
  seq_filter <- which(source_seq_length >= min_seq_len & target_seq_length >= min_seq_len &
                        source_seq_length <= max_seq_len & target_seq_length <= max_seq_len)
  source_seq_length <- source_seq_length[seq_filter]
  target_seq_length <- target_seq_length[seq_filter]
  source_word_vec_list <- source_word_vec_list[seq_filter]
  target_word_vec_list <- target_word_vec_list[seq_filter]

  # Word position within each sequence: sequence(c(2, 3)) == c(1, 2, 1, 2, 3),
  # equivalent to the previous unlist(sapply(...)) but vectorized
  seq_word_id_source <- sequence(source_seq_length)
  seq_word_id_target <- sequence(target_seq_length)

  # Long format: one row per token, tagged with its sequence id and position
  source_dt <- data.table(word = unlist(source_word_vec_list),
                          seq_id = rep(seq_along(source_seq_length), times = source_seq_length),
                          seq_word_id = seq_word_id_source)
  target_dt <- data.table(word = unlist(target_word_vec_list),
                          seq_id = rep(seq_along(target_seq_length), times = target_seq_length),
                          seq_word_id = seq_word_id_target)
  setkeyv(source_dt, "word")
  setkeyv(target_dt, "word")

  # Free the large intermediate lists before the joins below
  rm(source_word_vec_list)
  rm(target_word_vec_list)
  gc()

  # Build the vocabulary, or reuse the one supplied by the caller
  if (is.null(dic)) {
    # Count occurrences of each word and keep those above the threshold,
    # most frequent first
    source_word_count <- source_dt[, .N, by = word]
    source_dic <- source_word_count[N >= word_count_min][order(-N)]
    target_word_count <- target_dt[, .N, by = word]
    target_dic <- target_word_count[N >= word_count_min][order(-N)]
    # Prepend special tokens; ids are 0-based so <PAD> = 0 and <UNKNOWN> = 1
    source_dic_words <- c("<PAD>", "<UNKNOWN>", source_dic$word)
    source_dic <- data.table(word_id = seq_along(source_dic_words) - 1L, word = source_dic_words)
    setkeyv(source_dic, "word")
    target_dic_words <- c("<PAD>", "<UNKNOWN>", target_dic$word)
    target_dic <- data.table(word_id = seq_along(target_dic_words) - 1L, word = target_dic_words)
    setkeyv(target_dic, "word")
  } else {
    # Bug fix: a supplied dic used to be silently ignored, leaving
    # source_dic/target_dic undefined and crashing the joins below.
    source_dic <- dic$source_dic
    target_dic <- dic$target_dic
  }

  # Index dictionary word_id onto the corpus; words absent from the
  # dictionary get the <UNKNOWN> id (1L)
  source_dt <- source_dic[source_dt][order(seq_id, seq_word_id)]
  source_dt <- setDT(source_dt)[is.na(word_id), word_id := 1L]
  target_dt <- target_dic[target_dt][order(seq_id, seq_word_id)]
  target_dt <- setDT(target_dt)[is.na(word_id), word_id := 1L]

  return(list(source_dt = source_dt,
              source_dic = source_dic,
              source_seq_length = source_seq_length,
              target_dt = target_dt,
              target_dic = target_dic,
              target_seq_length = target_seq_length))
}
```
```{r, eval = FALSE}
# Run the full pre-processing pipeline (English -> French) and cache the result.
# Fix: readr::write_rds deprecated the `path` argument in favour of `file`.
preprocess <- corpus_pre_process(source = raw_en, target = raw_fr,
                                 min_seq_len = 4, max_seq_len = 24,
                                 word_count_min = 20, dic = NULL)
write_rds(preprocess, file = "data/preprocess_en_fr_4_24.rds")
```
### Make bucket data
```{r, eval = FALSE}
# Reload the cached pre-processing output.
# Fix: readr::read_rds deprecated the `path` argument; positional works on
# both old and new readr versions.
preprocess <- read_rds("data/preprocess_en_fr_4_24.rds")

create_buckets <- function(source, target, seq_len = c(225),
                           source_align = "left", target_align = "left",
                           source_dic, target_dic) {
  # Reshape the long-format corpora into [max_seq_length x samples] integer
  # matrices and wrap them as a single bucket keyed by its sequence length.
  # Missing positions are filled with 0, which is the <PAD> word_id.
  #
  # NOTE(review): seq_len, source_align and target_align are currently
  # unused — confirm whether multi-bucket / alignment support is still
  # planned before removing them.
  source <- dcast(data = source, seq_word_id ~ seq_id, value.var = "word_id", fill = 0)
  source <- as.matrix(source[, c("seq_word_id") := NULL])
  target <- dcast(data = target, seq_word_id ~ seq_id, value.var = "word_id", fill = 0)
  target <- as.matrix(target[, c("seq_word_id") := NULL])
  # Name the bucket after the actual max sequence length (number of rows)
  # instead of the previously hard-coded "24", so the function also works
  # for other length limits; for the 4-24 corpus this still yields "24".
  buckets <- list(list(data = source, label = target))
  names(buckets) <- as.character(nrow(source))
  return(list(buckets = buckets,
              source_dic = source_dic,
              target_dic = target_dic))
}
```
```{r, eval = FALSE}
# Bucketize the pre-processed corpus and persist it for model training.
# All arguments are passed by name, so this ordering is equivalent.
buckets <- create_buckets(source = preprocess$source_dt,
                          source_dic = preprocess$source_dic,
                          target = preprocess$target_dt,
                          target_dic = preprocess$target_dic)
write_rds(buckets, "data/buckets_en_fr_4_24.rds")
```