package junk

// see https://en.wikipedia.org/wiki/Naive_Bayes_spam_filtering
// - todo: better html parsing?
// - todo: try reading text in pdf?
// - todo: try to detect language, have words per language? can be in the same dictionary. currently my dictionary is biased towards treating english as spam.

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
	"unicode"

	"go.etcd.io/bbolt"
	"golang.org/x/net/html"

	"github.com/mjl-/mox/message"
)

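// tokenizeMail opens the message at path and returns whether the message file
// could be read, the set of words it contains, and any error from parsing.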
func (f *Filter) tokenizeMail(path string) (bool, map[string]struct{}, error) {
	mf, err := os.Open(path)
	if err != nil {
		return false, nil, err
	}
	defer func() {
		err := mf.Close()
		f.log.Check(err, "closing message file")
	}()
	fi, err := mf.Stat()
	if err != nil {
		return false, nil, err
	}
	p, _ := message.EnsurePart(f.log.Logger, false, mf, fi.Size())
	words, err := f.ParseMessage(p)
	return true, words, err
}

// ParseMessage reads a mail and returns a map with words.
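// Words are lower-cased; words from message headers are prefixed with the
// header field name and a colon.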
func (f *Filter) ParseMessage(p message.Part) (map[string]struct{}, error) {
	metaWords := map[string]struct{}{}
	textWords := map[string]struct{}{}
	htmlWords := map[string]struct{}{}

	hdrs, err := p.Header()
	if err != nil {
		return nil, fmt.Errorf("parsing headers: %v", err)
	}

	// Add words from the header, annotated with <field>+":".
	// todo: add whether header is dkim-verified?
	for k, l := range hdrs {
		switch k {
		case "From", "To", "Cc", "Bcc", "Reply-To", "Subject", "Sender", "Return-Path":
		// case "Subject", "To":
		default:
			continue
		}
		for _, h := range l {
			words := map[string]struct{}{}
			f.tokenizeText(strings.NewReader(h), words)
			for w := range words {
				if len(w) <= 3 {
					continue
				}
				s := k + ":" + w
				if len(s) > bbolt.MaxKeySize {
					// Too long to store as a database key.
					continue
				}
				metaWords[s] = struct{}{}
			}
		}
	}

	if err := f.mailParse(p, metaWords, textWords, htmlWords); err != nil {
		return nil, fmt.Errorf("parsing message: %w", err)
	}

	for w := range metaWords {
		textWords[w] = struct{}{}
	}
	for w := range htmlWords {
		textWords[w] = struct{}{}
	}

	return textWords, nil
}

// mailParse looks through the mail for the first text and html parts, and tokenizes their words.
func (f *Filter) mailParse(p message.Part, metaWords, textWords, htmlWords map[string]struct{}) error {
	ct := p.MediaType + "/" + p.MediaSubType

	if ct == "TEXT/HTML" {
		err := f.tokenizeHTML(p.ReaderUTF8OrBinary(), metaWords, htmlWords)
		// log.Printf("html parsed, words %v", htmlWords)
		return err
	}
	if ct == "" || strings.HasPrefix(ct, "TEXT/") {
		err := f.tokenizeText(p.ReaderUTF8OrBinary(), textWords)
		// log.Printf("text parsed, words %v", textWords)
		return err
	}
	if p.Message != nil {
		// Nested message, happens for forwarding.
		if err := p.SetMessageReaderAt(); err != nil {
			return fmt.Errorf("setting reader on nested message: %w", err)
		}
		return f.mailParse(*p.Message, metaWords, textWords, htmlWords)
	}

	for _, sp := range p.Parts {
		if err := f.mailParse(sp, metaWords, textWords, htmlWords); err != nil {
			return err
		}
	}
	return nil
}

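// looksRandom reports whether s looks like a machine-generated random string
// rather than a natural word, so it can be skipped during tokenization.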
func looksRandom(s string) bool {
	// Random strings, eg 2fvu9stm9yxhnlu. ASCII only and many consonants in a stretch.
	stretch := 0
	const consonants = "bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ23456789" // 0 and 1 may be used as o and l/i
	stretches := 0
	for _, c := range s {
		if c >= 0x80 {
			return false
		}
		if strings.ContainsRune(consonants, c) {
			stretch++
			continue
		}
		// A stretch of 6+ consonants/lookalike digits counts as random-looking.
		if stretch >= 6 {
			stretches++
		}
		stretch = 0
	}
	if stretch >= 6 {
		stretches++
	}
	return stretches > 0
}

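// looksNumeric reports whether s is essentially a number, like a date, size or
// (hexadecimal) identifier, e.g. looksNumeric("20240115") is true.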
func looksNumeric(s string) bool {
	s = strings.TrimPrefix(s, "0x") // Hexadecimal.
	var digits, hex, other, digitstretch, maxdigitstretch int
	for _, c := range s {
		if c >= '0' && c <= '9' {
			digits++
			digitstretch++
			continue
		} else if c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F' {
			hex++
		} else {
			other++
		}
		if digitstretch > maxdigitstretch {
			maxdigitstretch = digitstretch
		}
		digitstretch = 0
	}
	if digitstretch > maxdigitstretch {
		maxdigitstretch = digitstretch
	}
	return maxdigitstretch >= 4 || other == 0 && maxdigitstretch >= 3
}

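// tokenizeText splits the text from r into lower-cased words, adding them to
// words. If Twograms/Threegrams is enabled on the filter, consecutive word
// pairs/triples are added as well.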
func (f *Filter) tokenizeText(r io.Reader, words map[string]struct{}) error {
	b := &strings.Builder{}
	var prev, prev2 string

	wordAdd := func(s string) {
		if len(s) > bbolt.MaxKeySize {
			// Too long to store as a database key.
			return
		}
		words[s] = struct{}{}
	}

	add := func() {
		defer b.Reset()
		s := b.String()
		s = strings.Trim(s, "'")

		// Skip words that are too short or consist only of digits.
		var nondigit bool
		for _, c := range s {
			if !unicode.IsDigit(c) {
				nondigit = true
				break
			}
		}
		if !(nondigit && len(s) > 2) {
			return
		}
		if looksRandom(s) {
			return
		}
		if looksNumeric(s) {
			return
		}

		// todo: do something for URLs, parse them? keep their domain only?

		if f.Threegrams && prev2 != "" && prev != "" {
			wordAdd(prev2 + " " + prev + " " + s)
		}
		if f.Twograms && prev != "" {
			wordAdd(prev + " " + s)
		}
		wordAdd(s)
		prev2 = prev
		prev = s
	}

	br := bufio.NewReader(r)

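	// peekLetter reports whether the next rune is a letter, without consuming
	// it. Used to decide whether an apostrophe is part of a word.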
	peekLetter := func() bool {
		c, _, err := br.ReadRune()
		if err == nil {
			err = br.UnreadRune()
		}
		return err == nil && unicode.IsLetter(c)
	}

	for {
		c, _, err := br.ReadRune()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if !unicode.IsLetter(c) && !unicode.IsDigit(c) && (c != '\'' || b.Len() > 0 && peekLetter()) {
			add()
		} else {
			b.WriteRune(unicode.ToLower(c))
		}
	}
	add()
	return nil
}

// tokenizeHTML parses html, and tokenizes its text into words.
func (f *Filter) tokenizeHTML(r io.Reader, meta, words map[string]struct{}) error {
	htmlReader := &htmlTextReader{
		t:    html.NewTokenizer(r),
		meta: map[string]struct{}{},
	}
	return f.tokenizeText(htmlReader, words)
}

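// htmlTextReader is an io.Reader that returns the text content of an html
// document, skipping tags and the contents of script, style and svg elements.
// Alt attributes of img tags are returned as text as well.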
type htmlTextReader struct {
	t        *html.Tokenizer
	meta     map[string]struct{}
	tagStack []string
	buf      []byte
	err      error
}

func (r *htmlTextReader) Read(buf []byte) (n int, err error) {
	// todo: deal with invalid html better. the tokenizer is just tokenizing, we need to fix up the nesting etc. eg, rules say some elements close certain open elements.
	// todo: deal with inline elements? they shouldn't cause a word break.

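	// give copies as much of nbuf to buf as fits and keeps the remainder in
	// r.buf for the next call to Read.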
	give := func(nbuf []byte) (int, error) {
		n := min(len(buf), len(nbuf))
		copy(buf, nbuf[:n])
		nbuf = nbuf[n:]
		if len(nbuf) < cap(r.buf) {
			r.buf = r.buf[:len(nbuf)]
		} else {
			r.buf = make([]byte, len(nbuf), 3*len(nbuf)/2)
		}
		copy(r.buf, nbuf)
		return n, nil
	}

	if len(r.buf) > 0 {
		return give(r.buf)
	}
	if r.err != nil {
		return 0, r.err
	}

	for {
		switch r.t.Next() {
		case html.ErrorToken:
			r.err = r.t.Err()
			return 0, r.err
		case html.TextToken:
			// Skip text in elements whose contents are not rendered.
			if len(r.tagStack) > 0 {
				switch r.tagStack[len(r.tagStack)-1] {
				case "script", "style", "svg":
					continue
				}
			}
			if text := r.t.Text(); len(text) > 0 {
				return give(text)
			}
		case html.StartTagToken:
			tagBuf, moreAttr := r.t.TagName()
			tag := string(tagBuf)
			//log.Printf("tag %q %v", tag, r.tagStack)

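			// Return alt attributes of img tags as text.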
			if tag == "img" && moreAttr {
				var key, val []byte
				for moreAttr {
					key, val, moreAttr = r.t.TagAttr()
					if string(key) == "alt" && len(val) > 0 {
						return give(val)
					}
				}
			}

			// Empty elements, https://developer.mozilla.org/en-US/docs/Glossary/Empty_element
			switch tag {
			case "area", "base", "br", "col", "embed", "hr", "img", "input", "link", "meta", "param", "source", "track", "wbr":
				continue
			}

			r.tagStack = append(r.tagStack, tag)
		case html.EndTagToken:
			// log.Printf("tag pop %v", r.tagStack)
			if len(r.tagStack) > 0 {
				r.tagStack = r.tagStack[:len(r.tagStack)-1]
			}
		case html.SelfClosingTagToken:
		case html.CommentToken:
		case html.DoctypeToken:
		}
	}
}