Reputation: 448
I am working in an Elasticsearch environment; I have installed Elasticsearch version 5.4.3 on my local machine. I am trying to create an index by defining some settings along with mappings. The following are my settings and mappings:
{
"settings":{
"index":{
"analysis":{
"analyzer":{
"index_analyzer":{
"filter":[
"standard",
"lowercase",
"asciifolding"
],
"tokenizer":"standard"
},
"autocomplete":{
"type":"custom",
"tokenizer":"standard",
"filter":[
"lowercase",
"autocomplete_filter"
]
},
"search_analyzer":{
"filter":[
"standard",
"lowercase",
"asciifolding"
],
"tokenizer":"standard"
},
"sortable":{
"filter":"lowercaseFilter",
"tokenizer":"keyword",
"type":"custom"
}
},
"filter":{
"lowercaseFilter":{
"type":"lowercase"
},
"autocomplete_filter":{
"type":"edge_ngram",
"min_gram":1,
"max_gram":20
}
},
"tokenizer":{
"keyword":{
"type":"keyword"
}
}
}
}
}
}
These are my mappings:
{
"geo_data":{
"_all":{
"enabled":true,
"index_analyzer":"index_analyzer",
"search_analyzer":"search_analyzer"
},
"properties":{
"subscriber_level":{
"analyzer":"index_analyzer,search_analyzer,autocomplete_analyzer",
"type":"text"
},
"att_id":{
"analyzer":"index_analyzer,search_analyzer,autocomplete_analyzer",
"type":"text"
},
"id":{
"include_in_all":false,
"type":"text"
},
"name":{
"analyzer":"index_analyzer,search_analyzer,autocomplete_analyzer",
"type":"text"
},
"state_name":{
"analyzer":"index_analyzer,search_analyzer,autocomplete_analyzer",
"type":"text"
}
}
}
}
What I want to achieve is to apply all of my custom analyzers to a single field. But the above mappings, with multiple analyzers listed on the fields, produce the following exception:
{
"error":{
"root_cause":[
{
"type":"mapper_parsing_exception",
"reason":"analyzer [index_analyzer,search_analyzer,autocomplete_analyzer] not found for field [subscriber_level]"
}
],
"type":"mapper_parsing_exception",
"reason":"analyzer [index_analyzer,search_analyzer,autocomplete_analyzer] not found for field [subscriber_level]"
},
"status":400
}
Can anybody please help me fix this issue? I have been struggling with it.
Upvotes: 3
Views: 4247
Reputation: 4803
It looks like you want to tokenize the same field with multiple analyzers. You can use multi-fields and apply a different analyzer to each sub-field inside the multi-fields definition.
Also, per this GitHub issue, the configuration of the _all field changed in 5.4. If your index already exists, update the mapping as follows:
PUT some_index/_mappings/type_name
{
"_all": {
"enabled": true,
"type": "text",
"analyzer": "index_analyzer"
},
"properties": {
"subscriber_level": {
"type": "keyword",
"fields": {
"index_analyzed": {
"type": "text",
"analyzer": "index_analyzer"
},
"search_analyzed": {
"type": "text",
"analyzer": "search_analyzer"
},
"autocomplete_analyzed": {
"type": "text",
"analyzer": "autocomplete"
}
}
},
"att_id": {
"type": "keyword",
"fields": {
"index_analyzed": {
"type": "text",
"analyzer": "index_analyzer"
},
"search_analyzed": {
"type": "text",
"analyzer": "search_analyzer"
},
"autocomplete_analyzed": {
"type": "text",
"analyzer": "autocomplete"
}
}
},
"id": {
"include_in_all": false,
"type": "text"
},
"name": {
"type": "keyword",
"fields": {
"index_analyzed": {
"type": "text",
"analyzer": "index_analyzer"
},
"search_analyzed": {
"type": "text",
"analyzer": "search_analyzer"
},
"autocomplete_analyzed": {
"type": "text",
"analyzer": "autocomplete"
}
}
},
"state_name": {
"type": "keyword",
"fields": {
"index_analyzed": {
"type": "text",
"analyzer": "index_analyzer"
},
"search_analyzed": {
"type": "text",
"analyzer": "search_analyzer"
},
"autocomplete_analyzed": {
"type": "text",
"analyzer": "autocomplete"
}
}
}
},
"settings": {
"index": {
"analysis": {
"analyzer": {
"index_analyzer": {
"filter": [
"standard",
"lowercase",
"asciifolding"
],
"tokenizer": "standard"
},
"autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"lowercase",
"autocomplete_filter"
]
},
"search_analyzer": {
"filter": [
"standard",
"lowercase",
"asciifolding"
],
"tokenizer": "standard"
},
"sortable": {
"filter": "lowercaseFilter",
"tokenizer": "keyword",
"type": "custom"
}
},
"filter": {
"lowercaseFilter": {
"type": "lowercase"
},
"autocomplete_filter": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 20
}
},
"tokenizer": {
"keyword": {
"type": "keyword"
}
}
}
}
}
}
Now you can query against any of the analyzed sub-fields, like the following:
POST some_index/_search
{
"query": {
"term": {
"state_name.index_analyzed": {
"value": "VALUE"
}
}
}
}
Thanks
Upvotes: 6