R4nc1d

Elasticsearch: mixing NGram with simple query string query

Currently, I am using the ngram tokenizer to do partial matching on employees.

I can match on full name, email address, and employee number.

My current setup looks as follows:

"tokenizer": {
  "my_tokenizer": {
    "type": "ngram",
    "min_gram": 3,
    "max_gram": 3,
    "token_chars": [
      "letter",
      "digit"
    ]
  }
}

The problem I am facing is that an employee number can be a single character long, and because of the min_gram and max_gram settings it can never match. I can't set min_gram to 1 either, because the results then no longer look correct.
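
The _analyze API makes the failure easy to see. With min_gram and max_gram both set to 3, a four-digit number is split into only two grams, and a one- or two-character query produces no tokens at all. For example:

GET index/_analyze
{
  "analyzer": "my_analyzer",
  "text": "7061"
}

This returns just the tokens 706 and 061, so a query for 11 has nothing to match against.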

So I tried mixing the ngram tokenizer with a standard tokenizer, and instead of a multi_match query I am now using a simple_query_string query.

This also seems to work, but only partially.

My question is: how can I partially match on all three fields, bearing in mind that an employee number can be one or two characters long? And how can I get an exact match when I put quotes around a word or number?

In the example below, how can I search for 11 and get back documents 4 and 5? I would also like document 2 to be returned if I search for 706, which is a partial match, but if I search for "7061" in quotes, only document 2 should be returned.

Full Code

PUT index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "english_exact": {
          "tokenizer": "standard",
          "filter": [
            "lowercase"
          ]
        },
        "my_analyzer": {
            "filter": [
              "lowercase",
              "asciifolding"
            ],
            "tokenizer": "my_tokenizer"
          }
      },
       "tokenizer": {
          "my_tokenizer": {
            "type": "ngram",
            "min_gram": 3,
            "max_gram": 3,
            "token_chars": [
              "letter",
              "digit"
            ]
          }
        },
       "normalizer": {
          "lowersort": {
            "type": "custom",
            "filter": [
              "lowercase"
            ]
          }
        }
    }
  },
  "mappings": {
    "properties": {
      "number": {
        "type": "text",
        "analyzer": "english",
        "fields": {
          "exact": {
            "type": "text",
            "analyzer": "english_exact"
          }
        }
      },
       "fullName": {
        "type": "text",
        "fields": {
          "ngram": {
            "type": "text",
            "analyzer": "my_analyzer"
          }
        },
        "analyzer": "standard"
      }
    }
  }
}
PUT index/_doc/1
{
  "number" : 1,
  "fullName": "Brenda eaton"
}

PUT index/_doc/2
{
  "number" : 7061,
  "fullName": "Bruce wayne"
}

PUT index/_doc/3
{
  "number" : 23,
  "fullName": "Bruce Banner"
}

PUT index/_doc/4
{
  "number" : 111,
  "fullName": "Cat woman"
}

PUT index/_doc/5
{
  "number" : 1112,
  "fullName": "0723568521"
}

GET index/_search
{
  "query": {
    "simple_query_string": {
      "fields": [ "fullName.ngram", "number.exact"],
      "query": "11"
    }
  }
}

Answers (1)

Bhavya

You need to change the analyzer of the number.exact field and reduce min_gram to 2. Modify the index mapping as shown below; a working example follows.

Index Mapping:

{
  "settings": {
    "analysis": {
      "analyzer": {
        "english_exact": {
          "tokenizer": "standard",
          "filter": [
            "lowercase"
          ]
        },
        "my_analyzer": {
          "filter": [
            "lowercase",
            "asciifolding"
          ],
          "tokenizer": "my_tokenizer"
        }
      },
      "tokenizer": {
        "my_tokenizer": {
          "type": "ngram",
          "min_gram": 2,
          "max_gram": 3,
          "token_chars": [
            "letter",
            "digit"
          ]
        }
      },
      "normalizer": {
        "lowersort": {
          "type": "custom",
          "filter": [
            "lowercase"
          ]
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "number": {
        "type": "keyword",          // note this
        "fields": {
          "exact": {
            "type": "text",
            "analyzer": "my_analyzer"
          }
        }
      },
      "fullName": {
        "type": "text",
        "fields": {
          "ngram": {
            "type": "text",
            "analyzer": "my_analyzer"
          }
        },
        "analyzer": "standard"
      }
    }
  }
}

Search Query:

{
  "query": {
    "simple_query_string": {
      "fields": [ "fullName.ngram", "number.exact"],
      "query": "11"
    }
  }
}

Search Result:

"hits": [
      {
        "_index": "66311552",
        "_type": "_doc",
        "_id": "4",
        "_score": 0.9929736,
        "_source": {
          "number": 111,
          "fullName": "Cat woman"
        }
      },
      {
        "_index": "66311552",
        "_type": "_doc",
        "_id": "5",
        "_score": 0.8505551,
        "_source": {
          "number": 1112,
          "fullName": "0723568521"
        }
      }
    ]

Update 1:

If you need to search for a single character such as 1, change the data type of the number field from text to keyword, as shown in the index mapping above, and include the number field itself in the query fields.

Search Query:

{
  "query": {
    "simple_query_string": {
      "fields": [ "fullName.ngram", "number.exact","number"],
      "query": "1"
    }
  }
}

Search Result:

"hits": [
      {
        "_index": "66311552",
        "_type": "_doc",
        "_id": "1",
        "_score": 1.3862942,
        "_source": {
          "number": 1,
          "fullName": "Brenda eaton"
        }
      }
    ]
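
The question also asks for an exact match when the search term is wrapped in quotes. In simple_query_string a quoted term is treated as a phrase, and against the keyword number field a single-term phrase behaves like an exact term lookup, so a query along the lines of the sketch below should return only document 2 (an illustration against the mapping above, not a tested output):

GET index/_search
{
  "query": {
    "simple_query_string": {
      "fields": [ "number" ],
      "query": "\"7061\""
    }
  }
}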

Update 2:

You can use two separate ngram analyzers, one for the fullName field and one for the number field. Modify the index mapping as below:

{
  "settings": {
    "analysis": {
      "analyzer": {
        "english_exact": {
          "tokenizer": "standard",
          "filter": [
            "lowercase"
          ]
        },
        "name_analyzer": {
          "filter": [
            "lowercase",
            "asciifolding"
          ],
          "tokenizer": "name_tokenizer"
        },
        "number_analyzer": {
          "filter": [
            "lowercase",
            "asciifolding"
          ],
          "tokenizer": "number_tokenizer"
        }
      },
      "tokenizer": {
        "name_tokenizer": {
          "type": "ngram",
          "min_gram": 3,
          "max_gram": 3,
          "token_chars": [
            "letter",
            "digit"
          ]
        },
         "number_tokenizer": {
          "type": "ngram",
          "min_gram": 2,
          "max_gram": 3,
          "token_chars": [
            "letter",
            "digit"
          ]
        }
      },
      "normalizer": {
        "lowersort": {
          "type": "custom",
          "filter": [
            "lowercase"
          ]
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "number": {
        "type": "keyword",
        "fields": {
          "exact": {
            "type": "text",
            "analyzer": "number_analyzer"
          }
        }
      },
      "fullName": {
        "type": "text",
        "fields": {
          "ngram": {
            "type": "text",
            "analyzer": "name_analyzer"
          }
        },
        "analyzer": "standard"
      }
    }
  }
}
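
To verify the two tokenizers, you can inspect the grams they emit (assuming the index was created with the mapping above):

GET index/_analyze
{
  "analyzer": "number_analyzer",
  "text": "1112"
}

With min_gram 2 and max_gram 3 this should return the grams 11, 111, 11, 112, and 12, which is why a search for 11 now matches documents 4 and 5, while fullName still requires at least three characters for a partial match.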
