From d6fe58f84e8351f2ee131864fa21678cbd3d37d4 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Sat, 25 Nov 2023 21:01:27 +0100 Subject: [PATCH 01/14] fix polygon selection for classtable lookups Polygons should be used preferably with higher address ranks where the areas are smaller. --- nominatim/api/search/db_searches.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nominatim/api/search/db_searches.py b/nominatim/api/search/db_searches.py index 41434f062..63da4c5d5 100644 --- a/nominatim/api/search/db_searches.py +++ b/nominatim/api/search/db_searches.py @@ -296,7 +296,7 @@ async def lookup_category(self, results: nres.SearchResults, sql = sql.join(table, t.c.place_id == table.c.place_id)\ .join(tgeom, table.c.centroid.ST_CoveredBy( - sa.case((sa.and_(tgeom.c.rank_address < 9, + sa.case((sa.and_(tgeom.c.rank_address > 9, tgeom.c.geometry.is_area()), tgeom.c.geometry), else_ = tgeom.c.centroid.ST_Expand(0.05))))\ From 8fcc2bb7f58db967c17a3575f1869c134c60486e Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Sun, 26 Nov 2023 09:50:59 +0100 Subject: [PATCH 02/14] avoid duplicate lines during category search --- nominatim/api/search/db_searches.py | 36 ++++++++++++++++++----------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/nominatim/api/search/db_searches.py b/nominatim/api/search/db_searches.py index 63da4c5d5..ee638bc4d 100644 --- a/nominatim/api/search/db_searches.py +++ b/nominatim/api/search/db_searches.py @@ -279,28 +279,36 @@ async def lookup_category(self, results: nres.SearchResults, """ table = await conn.get_class_table(*category) - t = conn.t.placex tgeom = conn.t.placex.alias('pgeom') - sql = _select_placex(t).where(tgeom.c.place_id.in_(ids))\ - .where(t.c.class_ == category[0])\ - .where(t.c.type == category[1]) - if table is None: # No classtype table available, do a simplified lookup in placex. 
- sql = sql.join(tgeom, t.c.geometry.ST_DWithin(tgeom.c.centroid, 0.01))\ - .order_by(tgeom.c.centroid.ST_Distance(t.c.centroid)) + table = conn.t.placex.alias('inner') + sql = sa.select(table.c.place_id, + sa.func.min(tgeom.c.centroid.ST_Distance(table.c.centroid)) + .label('dist'))\ + .join(tgeom, table.c.geometry.intersects(tgeom.c.centroid.ST_Expand(0.01)))\ + .where(table.c.class_ == category[0])\ + .where(table.c.type == category[1]) else: # Use classtype table. We can afford to use a larger # radius for the lookup. - sql = sql.join(table, t.c.place_id == table.c.place_id)\ - .join(tgeom, - table.c.centroid.ST_CoveredBy( - sa.case((sa.and_(tgeom.c.rank_address > 9, + sql = sa.select(table.c.place_id, + sa.func.min(tgeom.c.centroid.ST_Distance(table.c.centroid)) + .label('dist'))\ + .join(tgeom, + table.c.centroid.ST_CoveredBy( + sa.case((sa.and_(tgeom.c.rank_address > 9, tgeom.c.geometry.is_area()), - tgeom.c.geometry), - else_ = tgeom.c.centroid.ST_Expand(0.05))))\ - .order_by(tgeom.c.centroid.ST_Distance(table.c.centroid)) + tgeom.c.geometry), + else_ = tgeom.c.centroid.ST_Expand(0.05)))) + + inner = sql.where(tgeom.c.place_id.in_(ids))\ + .group_by(table.c.place_id).subquery() + + t = conn.t.placex + sql = _select_placex(t).join(inner, inner.c.place_id == t.c.place_id)\ + .order_by(inner.c.dist) sql = sql.where(no_index(t.c.rank_address).between(MIN_RANK_PARAM, MAX_RANK_PARAM)) if details.countries: From 580a7b032ff549d259fe9ce2db1dd8b9cd901073 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Sun, 26 Nov 2023 16:48:04 +0100 Subject: [PATCH 03/14] order near searches by distance instead of importance --- nominatim/api/search/db_searches.py | 22 ++++++++++------------ nominatim/api/search/geocoder.py | 5 ++++- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/nominatim/api/search/db_searches.py b/nominatim/api/search/db_searches.py index ee638bc4d..10f1e3b44 100644 --- a/nominatim/api/search/db_searches.py +++ 
b/nominatim/api/search/db_searches.py @@ -66,7 +66,7 @@ def _select_placex(t: SaFromClause) -> SaSelect: t.c.class_, t.c.type, t.c.address, t.c.extratags, t.c.housenumber, t.c.postcode, t.c.country_code, - t.c.importance, t.c.wikipedia, + t.c.wikipedia, t.c.parent_place_id, t.c.rank_address, t.c.rank_search, t.c.linked_place_id, t.c.admin_level, t.c.centroid, @@ -158,7 +158,8 @@ async def _get_placex_housenumbers(conn: SearchConnection, place_ids: List[int], details: SearchDetails) -> AsyncIterator[nres.SearchResult]: t = conn.t.placex - sql = _select_placex(t).where(t.c.place_id.in_(place_ids)) + sql = _select_placex(t).add_columns(t.c.importance)\ + .where(t.c.place_id.in_(place_ids)) if details.geometry_output: sql = _add_geometry_columns(sql, t.c.geometry, details) @@ -307,7 +308,8 @@ async def lookup_category(self, results: nres.SearchResults, .group_by(table.c.place_id).subquery() t = conn.t.placex - sql = _select_placex(t).join(inner, inner.c.place_id == t.c.place_id)\ + sql = _select_placex(t).add_columns((-inner.c.dist).label('importance'))\ + .join(inner, inner.c.place_id == t.c.place_id)\ .order_by(inner.c.dist) sql = sql.where(no_index(t.c.rank_address).between(MIN_RANK_PARAM, MAX_RANK_PARAM)) @@ -350,6 +352,8 @@ async def lookup(self, conn: SearchConnection, # simply search in placex table def _base_query() -> SaSelect: return _select_placex(t) \ + .add_columns((-t.c.centroid.ST_Distance(NEAR_PARAM)) + .label('importance'))\ .where(t.c.linked_place_id == None) \ .where(t.c.geometry.ST_DWithin(NEAR_PARAM, NEAR_RADIUS_PARAM)) \ .order_by(t.c.centroid.ST_Distance(NEAR_PARAM)) \ @@ -378,6 +382,7 @@ def _base_query() -> SaSelect: table = await conn.get_class_table(*category) if table is not None: sql = _select_placex(t)\ + .add_columns(t.c.importance)\ .join(table, t.c.place_id == table.c.place_id)\ .where(t.c.class_ == category[0])\ .where(t.c.type == category[1]) @@ -423,6 +428,7 @@ async def lookup(self, conn: SearchConnection, ccodes = 
self.countries.values sql = _select_placex(t)\ + .add_columns(t.c.importance)\ .where(t.c.country_code.in_(ccodes))\ .where(t.c.rank_address == 4) @@ -599,15 +605,7 @@ async def lookup(self, conn: SearchConnection, tsearch = conn.t.search_name sql: SaLambdaSelect = sa.lambda_stmt(lambda: - sa.select(t.c.place_id, t.c.osm_type, t.c.osm_id, t.c.name, - t.c.class_, t.c.type, - t.c.address, t.c.extratags, t.c.admin_level, - t.c.housenumber, t.c.postcode, t.c.country_code, - t.c.wikipedia, - t.c.parent_place_id, t.c.rank_address, t.c.rank_search, - t.c.centroid, - t.c.geometry.ST_Expand(0).label('bbox')) - .where(t.c.place_id == tsearch.c.place_id)) + _select_placex(t).where(t.c.place_id == tsearch.c.place_id)) if details.geometry_output: diff --git a/nominatim/api/search/geocoder.py b/nominatim/api/search/geocoder.py index 7ff3ed08a..91c45b65a 100644 --- a/nominatim/api/search/geocoder.py +++ b/nominatim/api/search/geocoder.py @@ -134,7 +134,10 @@ def rerank_by_query(self, query: QueryStruct, results: SearchResults) -> None: return for result in results: - if not result.display_name: + # Negative importance indicates ordering by distance, which is + # more important than word matching. 
+ if not result.display_name\ + or (result.importance is not None and result.importance < 0): continue distance = 0.0 norm = self.query_analyzer.normalize_text(result.display_name) From 47ca56f21b7da4ac8131c6f4101984a6532e7b39 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Sun, 26 Nov 2023 17:11:15 +0100 Subject: [PATCH 04/14] deduplicate categories/qualifiers --- nominatim/api/search/db_search_builder.py | 15 ++++++++------- nominatim/api/search/db_search_fields.py | 15 +++++++++++---- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py index 905b5c621..7826925ae 100644 --- a/nominatim/api/search/db_search_builder.py +++ b/nominatim/api/search/db_search_builder.py @@ -7,7 +7,7 @@ """ Convertion from token assignment to an abstract DB search. """ -from typing import Optional, List, Tuple, Iterator +from typing import Optional, List, Tuple, Iterator, Dict import heapq from nominatim.api.types import SearchDetails, DataLayer @@ -339,12 +339,13 @@ def get_search_categories(self, Returns None if no category search is requested. 
""" if assignment.category: - tokens = [t for t in self.query.get_tokens(assignment.category, - TokenType.CATEGORY) - if not self.details.categories - or t.get_category() in self.details.categories] - return dbf.WeightedCategories([t.get_category() for t in tokens], - [t.penalty for t in tokens]) + tokens: Dict[Tuple[str, str], float] = {} + for t in self.query.get_tokens(assignment.category, TokenType.CATEGORY): + cat = t.get_category() + if (not self.details.categories or cat in self.details.categories)\ + and t.penalty < tokens.get(cat, 1000.0): + tokens[cat] = t.penalty + return dbf.WeightedCategories(list(tokens.keys()), list(tokens.values())) if self.details.categories: return dbf.WeightedCategories(self.details.categories, diff --git a/nominatim/api/search/db_search_fields.py b/nominatim/api/search/db_search_fields.py index 612e90597..59af82608 100644 --- a/nominatim/api/search/db_search_fields.py +++ b/nominatim/api/search/db_search_fields.py @@ -7,7 +7,7 @@ """ Data structures for more complex fields in abstract search descriptions. """ -from typing import List, Tuple, Iterator, cast +from typing import List, Tuple, Iterator, cast, Dict import dataclasses import sqlalchemy as sa @@ -195,10 +195,17 @@ def set_qualifiers(self, tokens: List[Token]) -> None: """ Set the qulaifier field from the given tokens. 
""" if tokens: - min_penalty = min(t.penalty for t in tokens) + categories: Dict[Tuple[str, str], float] = {} + min_penalty = 1000.0 + for t in tokens: + if t.penalty < min_penalty: + min_penalty = t.penalty + cat = t.get_category() + if t.penalty < categories.get(cat, 1000.0): + categories[cat] = t.penalty self.penalty += min_penalty - self.qualifiers = WeightedCategories([t.get_category() for t in tokens], - [t.penalty - min_penalty for t in tokens]) + self.qualifiers = WeightedCategories(list(categories.keys()), + list(categories.values())) def set_ranking(self, rankings: List[FieldRanking]) -> None: From a8b023e57eda06bac3e5641f85f718e1d3104fe9 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Sun, 26 Nov 2023 17:41:29 +0100 Subject: [PATCH 05/14] restrict base results in near search by rank This avoids in particular that roads or POIs are used as base for the near search when a place result is present. --- nominatim/api/search/db_searches.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/nominatim/api/search/db_searches.py b/nominatim/api/search/db_searches.py index 10f1e3b44..ce5fbc634 100644 --- a/nominatim/api/search/db_searches.py +++ b/nominatim/api/search/db_searches.py @@ -256,9 +256,20 @@ async def lookup(self, conn: SearchConnection, base.sort(key=lambda r: (r.accuracy, r.rank_search)) max_accuracy = base[0].accuracy + 0.5 + if base[0].rank_address == 0: + min_rank = 0 + max_rank = 0 + elif base[0].rank_address < 26: + min_rank = 1 + max_rank = min(25, base[0].rank_address + 4) + else: + min_rank = 26 + max_rank = 30 base = nres.SearchResults(r for r in base if r.source_table == nres.SourceTable.PLACEX and r.accuracy <= max_accuracy - and r.bbox and r.bbox.area < 20) + and r.bbox and r.bbox.area < 20 + and r.rank_address >= min_rank + and r.rank_address <= max_rank) if base: baseids = [b.place_id for b in base[:5] if b.place_id] From a7f5c6c8f54eea4e3d9746141b2e0ac2d5722a4a Mon Sep 17 00:00:00 2001 
From: Sarah Hoffmann <lonvia@denofr.de> Date: Sun, 26 Nov 2023 20:58:50 +0100 Subject: [PATCH 06/14] drop category tokens when they make up a full phrase --- nominatim/api/search/query.py | 14 ++++--- .../api/search/test_api_search_query.py | 37 ++++++++++++++++++- .../api/search/test_db_search_builder.py | 13 +++---- .../api/search/test_token_assignment.py | 16 +++----- 4 files changed, 55 insertions(+), 25 deletions(-) diff --git a/nominatim/api/search/query.py b/nominatim/api/search/query.py index 5d75eb0fb..4bf009a53 100644 --- a/nominatim/api/search/query.py +++ b/nominatim/api/search/query.py @@ -70,14 +70,16 @@ class PhraseType(enum.Enum): COUNTRY = enum.auto() """ Contains the country name or code. """ - def compatible_with(self, ttype: TokenType) -> bool: + def compatible_with(self, ttype: TokenType, + is_full_phrase: bool) -> bool: """ Check if the given token type can be used with the phrase type. """ if self == PhraseType.NONE: - return True + return not is_full_phrase or ttype != TokenType.QUALIFIER if self == PhraseType.AMENITY: - return ttype in (TokenType.WORD, TokenType.PARTIAL, - TokenType.QUALIFIER, TokenType.CATEGORY) + return ttype in (TokenType.WORD, TokenType.PARTIAL)\ + or (is_full_phrase and ttype == TokenType.CATEGORY)\ + or (not is_full_phrase and ttype == TokenType.QUALIFIER) if self == PhraseType.STREET: return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER) if self == PhraseType.POSTCODE: @@ -244,7 +246,9 @@ def add_token(self, trange: TokenRange, ttype: TokenType, token: Token) -> None: be added to, then the token is silently dropped. 
""" snode = self.nodes[trange.start] - if snode.ptype.compatible_with(ttype): + full_phrase = snode.btype in (BreakType.START, BreakType.PHRASE)\ + and self.nodes[trange.end].btype in (BreakType.PHRASE, BreakType.END) + if snode.ptype.compatible_with(ttype, full_phrase): tlist = snode.get_tokens(trange.end, ttype) if tlist is None: snode.starting.append(TokenList(trange.end, ttype, [token])) diff --git a/test/python/api/search/test_api_search_query.py b/test/python/api/search/test_api_search_query.py index f8c9c2dc8..69a17412c 100644 --- a/test/python/api/search/test_api_search_query.py +++ b/test/python/api/search/test_api_search_query.py @@ -28,12 +28,12 @@ def mktoken(tid: int): ('COUNTRY', 'COUNTRY'), ('POSTCODE', 'POSTCODE')]) def test_phrase_compatible(ptype, ttype): - assert query.PhraseType[ptype].compatible_with(query.TokenType[ttype]) + assert query.PhraseType[ptype].compatible_with(query.TokenType[ttype], False) @pytest.mark.parametrize('ptype', ['COUNTRY', 'POSTCODE']) def test_phrase_incompatible(ptype): - assert not query.PhraseType[ptype].compatible_with(query.TokenType.PARTIAL) + assert not query.PhraseType[ptype].compatible_with(query.TokenType.PARTIAL, True) def test_query_node_empty(): @@ -99,3 +99,36 @@ def test_query_struct_incompatible_token(): assert q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL) == [] assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.COUNTRY)) == 1 + + +def test_query_struct_amenity_single_word(): + q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'bar')]) + q.add_node(query.BreakType.END, query.PhraseType.NONE) + + q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1)) + q.add_token(query.TokenRange(0, 1), query.TokenType.CATEGORY, mktoken(2)) + q.add_token(query.TokenRange(0, 1), query.TokenType.QUALIFIER, mktoken(3)) + + assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1 + assert len(q.get_tokens(query.TokenRange(0, 1), 
query.TokenType.CATEGORY)) == 1 + assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 0 + + +def test_query_struct_amenity_two_words(): + q = query.QueryStruct([query.Phrase(query.PhraseType.AMENITY, 'foo bar')]) + q.add_node(query.BreakType.WORD, query.PhraseType.AMENITY) + q.add_node(query.BreakType.END, query.PhraseType.NONE) + + for trange in [(0, 1), (1, 2)]: + q.add_token(query.TokenRange(*trange), query.TokenType.PARTIAL, mktoken(1)) + q.add_token(query.TokenRange(*trange), query.TokenType.CATEGORY, mktoken(2)) + q.add_token(query.TokenRange(*trange), query.TokenType.QUALIFIER, mktoken(3)) + + assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1 + assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.CATEGORY)) == 0 + assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 1 + + assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.PARTIAL)) == 1 + assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.CATEGORY)) == 0 + assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.QUALIFIER)) == 1 + diff --git a/test/python/api/search/test_db_search_builder.py b/test/python/api/search/test_db_search_builder.py index c93b8ead3..c10a6c77f 100644 --- a/test/python/api/search/test_db_search_builder.py +++ b/test/python/api/search/test_db_search_builder.py @@ -21,21 +21,18 @@ def get_category(self): def make_query(*args): - q = None + q = QueryStruct([Phrase(PhraseType.NONE, '')]) - for tlist in args: - if q is None: - q = QueryStruct([Phrase(PhraseType.NONE, '')]) - else: - q.add_node(BreakType.WORD, PhraseType.NONE) + for _ in range(max(inner[0] for tlist in args for inner in tlist)): + q.add_node(BreakType.WORD, PhraseType.NONE) + q.add_node(BreakType.END, PhraseType.NONE) - start = len(q.nodes) - 1 + for start, tlist in enumerate(args): for end, ttype, tinfo in tlist: for tid, word in tinfo: q.add_token(TokenRange(start, end), ttype, MyToken(0.5 if ttype 
== TokenType.PARTIAL else 0.0, tid, 1, word, True)) - q.add_node(BreakType.END, PhraseType.NONE) return q diff --git a/test/python/api/search/test_token_assignment.py b/test/python/api/search/test_token_assignment.py index dc123403a..6dc25b1e7 100644 --- a/test/python/api/search/test_token_assignment.py +++ b/test/python/api/search/test_token_assignment.py @@ -18,21 +18,17 @@ def get_category(self): def make_query(*args): - q = None + q = QueryStruct([Phrase(args[0][1], '')]) dummy = MyToken(3.0, 45, 1, 'foo', True) - for btype, ptype, tlist in args: - if q is None: - q = QueryStruct([Phrase(ptype, '')]) - else: - q.add_node(btype, ptype) + for btype, ptype, _ in args[1:]: + q.add_node(btype, ptype) + q.add_node(BreakType.END, PhraseType.NONE) - start = len(q.nodes) - 1 - for end, ttype in tlist: + for start, t in enumerate(args): + for end, ttype in t[2]: q.add_token(TokenRange(start, end), ttype, dummy) - q.add_node(BreakType.END, PhraseType.NONE) - return q From 70dc4957dc41df7d808ae4acb6c881e80b6348f9 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Tue, 28 Nov 2023 12:01:49 +0100 Subject: [PATCH 07/14] the category parameter in search should result in a qualifier --- nominatim/api/search/db_search_builder.py | 15 +++++++++------ test/python/api/search/test_db_search_builder.py | 4 ++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py index 7826925ae..35d10e782 100644 --- a/nominatim/api/search/db_search_builder.py +++ b/nominatim/api/search/db_search_builder.py @@ -321,8 +321,15 @@ def get_search_data(self, assignment: TokenAssignment) -> Optional[dbf.SearchDat self.query.get_tokens(assignment.postcode, TokenType.POSTCODE)) if assignment.qualifier: - sdata.set_qualifiers(self.query.get_tokens(assignment.qualifier, - TokenType.QUALIFIER)) + tokens = self.query.get_tokens(assignment.qualifier, TokenType.QUALIFIER) + if 
self.details.categories: + tokens = [t for t in tokens if t.get_category() in self.details.categories] + if not tokens: + return None + sdata.set_qualifiers(tokens) + elif self.details.categories: + sdata.qualifiers = dbf.WeightedCategories(self.details.categories, + [0.0] * len(self.details.categories)) if assignment.address: sdata.set_ranking([self.get_addr_ranking(r) for r in assignment.address]) @@ -347,10 +354,6 @@ def get_search_categories(self, tokens[cat] = t.penalty return dbf.WeightedCategories(list(tokens.keys()), list(tokens.values())) - if self.details.categories: - return dbf.WeightedCategories(self.details.categories, - [0.0] * len(self.details.categories)) - return None diff --git a/test/python/api/search/test_db_search_builder.py b/test/python/api/search/test_db_search_builder.py index c10a6c77f..9d877b1db 100644 --- a/test/python/api/search/test_db_search_builder.py +++ b/test/python/api/search/test_db_search_builder.py @@ -309,8 +309,8 @@ def test_name_only_search_with_category(): assert len(searches) == 1 search = searches[0] - assert isinstance(search, dbs.NearSearch) - assert isinstance(search.search, dbs.PlaceSearch) + assert isinstance(search, dbs.PlaceSearch) + assert search.qualifiers.values == [('foo', 'bar')] def test_name_only_search_with_countries(): From 3f72ca4bcab2e0f6f0f6db89c7c2659d06858885 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Tue, 28 Nov 2023 16:27:05 +0100 Subject: [PATCH 08/14] rename use of category as POI search to near_item Use the term category only as a short-cut for "tuple of key and value". 
--- nominatim/api/search/db_search_builder.py | 29 +++++++++---------- nominatim/api/search/icu_tokenizer.py | 4 +-- nominatim/api/search/legacy_tokenizer.py | 8 ++--- nominatim/api/search/query.py | 4 +-- nominatim/api/search/token_assignment.py | 20 ++++++------- .../api/search/test_api_search_query.py | 10 +++---- .../api/search/test_db_search_builder.py | 16 +++++----- .../api/search/test_icu_query_analyzer.py | 6 ++-- .../api/search/test_legacy_query_analyzer.py | 6 ++-- .../api/search/test_token_assignment.py | 22 +++++++------- 10 files changed, 62 insertions(+), 63 deletions(-) diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py index 35d10e782..a0018480d 100644 --- a/nominatim/api/search/db_search_builder.py +++ b/nominatim/api/search/db_search_builder.py @@ -89,12 +89,12 @@ def build(self, assignment: TokenAssignment) -> Iterator[dbs.AbstractSearch]: if sdata is None: return - categories = self.get_search_categories(assignment) + near_items = self.get_near_items(assignment) if assignment.name is None: - if categories and not sdata.postcodes: - sdata.qualifiers = categories - categories = None + if near_items and not sdata.postcodes: + sdata.qualifiers = near_items + near_items = None builder = self.build_poi_search(sdata) elif assignment.housenumber: hnr_tokens = self.query.get_tokens(assignment.housenumber, @@ -102,16 +102,16 @@ def build(self, assignment: TokenAssignment) -> Iterator[dbs.AbstractSearch]: builder = self.build_housenumber_search(sdata, hnr_tokens, assignment.address) else: builder = self.build_special_search(sdata, assignment.address, - bool(categories)) + bool(near_items)) else: builder = self.build_name_search(sdata, assignment.name, assignment.address, - bool(categories)) + bool(near_items)) - if categories: - penalty = min(categories.penalties) - categories.penalties = [p - penalty for p in categories.penalties] + if near_items: + penalty = min(near_items.penalties) + near_items.penalties 
= [p - penalty for p in near_items.penalties] for search in builder: - yield dbs.NearSearch(penalty + assignment.penalty, categories, search) + yield dbs.NearSearch(penalty + assignment.penalty, near_items, search) else: for search in builder: search.penalty += assignment.penalty @@ -339,15 +339,14 @@ def get_search_data(self, assignment: TokenAssignment) -> Optional[dbf.SearchDat return sdata - def get_search_categories(self, - assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]: - """ Collect tokens for category search or use the categories + def get_near_items(self, assignment: TokenAssignment) -> Optional[dbf.WeightedCategories]: + """ Collect tokens for near items search or use the categories requested per parameter. Returns None if no category search is requested. """ - if assignment.category: + if assignment.near_item: tokens: Dict[Tuple[str, str], float] = {} - for t in self.query.get_tokens(assignment.category, TokenType.CATEGORY): + for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM): cat = t.get_category() if (not self.details.categories or cat in self.details.categories)\ and t.penalty < tokens.get(cat, 1000.0): diff --git a/nominatim/api/search/icu_tokenizer.py b/nominatim/api/search/icu_tokenizer.py index 196fde2a8..fceec2df5 100644 --- a/nominatim/api/search/icu_tokenizer.py +++ b/nominatim/api/search/icu_tokenizer.py @@ -184,13 +184,13 @@ async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct: if row.type == 'S': if row.info['op'] in ('in', 'near'): if trange.start == 0: - query.add_token(trange, qmod.TokenType.CATEGORY, token) + query.add_token(trange, qmod.TokenType.NEAR_ITEM, token) else: query.add_token(trange, qmod.TokenType.QUALIFIER, token) if trange.start == 0 or trange.end == query.num_token_slots(): token = copy(token) token.penalty += 0.1 * (query.num_token_slots()) - query.add_token(trange, qmod.TokenType.CATEGORY, token) + query.add_token(trange, qmod.TokenType.NEAR_ITEM, token) 
else: query.add_token(trange, DB_TO_TOKEN_TYPE[row.type], token) diff --git a/nominatim/api/search/legacy_tokenizer.py b/nominatim/api/search/legacy_tokenizer.py index 26e4c126b..e7984ee41 100644 --- a/nominatim/api/search/legacy_tokenizer.py +++ b/nominatim/api/search/legacy_tokenizer.py @@ -107,15 +107,15 @@ async def analyze_query(self, phrases: List[qmod.Phrase]) -> qmod.QueryStruct: for row in await self.lookup_in_db(lookup_words): for trange in words[row.word_token.strip()]: token, ttype = self.make_token(row) - if ttype == qmod.TokenType.CATEGORY: + if ttype == qmod.TokenType.NEAR_ITEM: if trange.start == 0: - query.add_token(trange, qmod.TokenType.CATEGORY, token) + query.add_token(trange, qmod.TokenType.NEAR_ITEM, token) elif ttype == qmod.TokenType.QUALIFIER: query.add_token(trange, qmod.TokenType.QUALIFIER, token) if trange.start == 0 or trange.end == query.num_token_slots(): token = copy(token) token.penalty += 0.1 * (query.num_token_slots()) - query.add_token(trange, qmod.TokenType.CATEGORY, token) + query.add_token(trange, qmod.TokenType.NEAR_ITEM, token) elif ttype != qmod.TokenType.PARTIAL or trange.start + 1 == trange.end: query.add_token(trange, ttype, token) @@ -195,7 +195,7 @@ def make_token(self, row: SaRow) -> Tuple[LegacyToken, qmod.TokenType]: ttype = qmod.TokenType.POSTCODE lookup_word = row.word_token[1:] else: - ttype = qmod.TokenType.CATEGORY if row.operator in ('in', 'near')\ + ttype = qmod.TokenType.NEAR_ITEM if row.operator in ('in', 'near')\ else qmod.TokenType.QUALIFIER lookup_word = row.word elif row.word_token.startswith(' '): diff --git a/nominatim/api/search/query.py b/nominatim/api/search/query.py index 4bf009a53..ad1b69ef5 100644 --- a/nominatim/api/search/query.py +++ b/nominatim/api/search/query.py @@ -46,7 +46,7 @@ class TokenType(enum.Enum): """ Country name or reference. """ QUALIFIER = enum.auto() """ Special term used together with name (e.g. _Hotel_ Bellevue). 
""" - CATEGORY = enum.auto() + NEAR_ITEM = enum.auto() """ Special term used as searchable object(e.g. supermarket in ...). """ @@ -78,7 +78,7 @@ def compatible_with(self, ttype: TokenType, return not is_full_phrase or ttype != TokenType.QUALIFIER if self == PhraseType.AMENITY: return ttype in (TokenType.WORD, TokenType.PARTIAL)\ - or (is_full_phrase and ttype == TokenType.CATEGORY)\ + or (is_full_phrase and ttype == TokenType.NEAR_ITEM)\ or (not is_full_phrase and ttype == TokenType.QUALIFIER) if self == PhraseType.STREET: return ttype in (TokenType.WORD, TokenType.PARTIAL, TokenType.HOUSENUMBER) diff --git a/nominatim/api/search/token_assignment.py b/nominatim/api/search/token_assignment.py index 3f0e737b0..d94d69039 100644 --- a/nominatim/api/search/token_assignment.py +++ b/nominatim/api/search/token_assignment.py @@ -46,7 +46,7 @@ class TokenAssignment: # pylint: disable=too-many-instance-attributes housenumber: Optional[qmod.TokenRange] = None postcode: Optional[qmod.TokenRange] = None country: Optional[qmod.TokenRange] = None - category: Optional[qmod.TokenRange] = None + near_item: Optional[qmod.TokenRange] = None qualifier: Optional[qmod.TokenRange] = None @@ -64,8 +64,8 @@ def from_ranges(ranges: TypedRangeSeq) -> 'TokenAssignment': out.postcode = token.trange elif token.ttype == qmod.TokenType.COUNTRY: out.country = token.trange - elif token.ttype == qmod.TokenType.CATEGORY: - out.category = token.trange + elif token.ttype == qmod.TokenType.NEAR_ITEM: + out.near_item = token.trange elif token.ttype == qmod.TokenType.QUALIFIER: out.qualifier = token.trange return out @@ -109,7 +109,7 @@ def is_final(self) -> bool: """ # Country and category must be the final term for left-to-right return len(self.seq) > 1 and \ - self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.CATEGORY) + self.seq[-1].ttype in (qmod.TokenType.COUNTRY, qmod.TokenType.NEAR_ITEM) def appendable(self, ttype: qmod.TokenType) -> Optional[int]: @@ -165,22 +165,22 @@ def 
appendable(self, ttype: qmod.TokenType) -> Optional[int]: if ttype == qmod.TokenType.COUNTRY: return None if self.direction == -1 else 1 - if ttype == qmod.TokenType.CATEGORY: + if ttype == qmod.TokenType.NEAR_ITEM: return self.direction if ttype == qmod.TokenType.QUALIFIER: if self.direction == 1: if (len(self.seq) == 1 - and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.CATEGORY)) \ + and self.seq[0].ttype in (qmod.TokenType.PARTIAL, qmod.TokenType.NEAR_ITEM)) \ or (len(self.seq) == 2 - and self.seq[0].ttype == qmod.TokenType.CATEGORY + and self.seq[0].ttype == qmod.TokenType.NEAR_ITEM and self.seq[1].ttype == qmod.TokenType.PARTIAL): return 1 return None if self.direction == -1: return -1 - tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.CATEGORY else self.seq + tempseq = self.seq[1:] if self.seq[0].ttype == qmod.TokenType.NEAR_ITEM else self.seq if len(tempseq) == 0: return 1 if len(tempseq) == 1 and self.seq[0].ttype == qmod.TokenType.HOUSENUMBER: @@ -253,7 +253,7 @@ def recheck_sequence(self) -> bool: priors = sum(1 for t in self.seq[hnrpos+1:] if t.ttype == qmod.TokenType.PARTIAL) if not self._adapt_penalty_from_priors(priors, 1): return False - if any(t.ttype == qmod.TokenType.CATEGORY for t in self.seq): + if any(t.ttype == qmod.TokenType.NEAR_ITEM for t in self.seq): self.penalty += 1.0 return True @@ -368,7 +368,7 @@ def get_assignments(self, query: qmod.QueryStruct) -> Iterator[TokenAssignment]: # Postcode or country-only search if not base.address: - if not base.housenumber and (base.postcode or base.country or base.category): + if not base.housenumber and (base.postcode or base.country or base.near_item): log().comment('postcode/country search') yield dataclasses.replace(base, penalty=self.penalty) else: diff --git a/test/python/api/search/test_api_search_query.py b/test/python/api/search/test_api_search_query.py index 69a17412c..fe850ce90 100644 --- a/test/python/api/search/test_api_search_query.py +++ 
b/test/python/api/search/test_api_search_query.py @@ -106,11 +106,11 @@ def test_query_struct_amenity_single_word(): q.add_node(query.BreakType.END, query.PhraseType.NONE) q.add_token(query.TokenRange(0, 1), query.TokenType.PARTIAL, mktoken(1)) - q.add_token(query.TokenRange(0, 1), query.TokenType.CATEGORY, mktoken(2)) + q.add_token(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM, mktoken(2)) q.add_token(query.TokenRange(0, 1), query.TokenType.QUALIFIER, mktoken(3)) assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1 - assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.CATEGORY)) == 1 + assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 1 assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 0 @@ -121,14 +121,14 @@ def test_query_struct_amenity_two_words(): for trange in [(0, 1), (1, 2)]: q.add_token(query.TokenRange(*trange), query.TokenType.PARTIAL, mktoken(1)) - q.add_token(query.TokenRange(*trange), query.TokenType.CATEGORY, mktoken(2)) + q.add_token(query.TokenRange(*trange), query.TokenType.NEAR_ITEM, mktoken(2)) q.add_token(query.TokenRange(*trange), query.TokenType.QUALIFIER, mktoken(3)) assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.PARTIAL)) == 1 - assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.CATEGORY)) == 0 + assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.NEAR_ITEM)) == 0 assert len(q.get_tokens(query.TokenRange(0, 1), query.TokenType.QUALIFIER)) == 1 assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.PARTIAL)) == 1 - assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.CATEGORY)) == 0 + assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.NEAR_ITEM)) == 0 assert len(q.get_tokens(query.TokenRange(1, 2), query.TokenType.QUALIFIER)) == 1 diff --git a/test/python/api/search/test_db_search_builder.py b/test/python/api/search/test_db_search_builder.py index 9d877b1db..e3feff0d3 
100644 --- a/test/python/api/search/test_db_search_builder.py +++ b/test/python/api/search/test_db_search_builder.py @@ -147,11 +147,11 @@ def test_postcode_with_address_with_full_word(): @pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1', 'bounded_viewbox': True}, {'near': '10,10'}]) -def test_category_only(kwargs): - q = make_query([(1, TokenType.CATEGORY, [(2, 'foo')])]) +def test_near_item_only(kwargs): + q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])]) builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs)) - searches = list(builder.build(TokenAssignment(category=TokenRange(0, 1)))) + searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1)))) assert len(searches) == 1 @@ -163,11 +163,11 @@ def test_category_only(kwargs): @pytest.mark.parametrize('kwargs', [{'viewbox': '0,0,1,1'}, {}]) -def test_category_skipped(kwargs): - q = make_query([(1, TokenType.CATEGORY, [(2, 'foo')])]) +def test_near_item_skipped(kwargs): + q = make_query([(1, TokenType.NEAR_ITEM, [(2, 'foo')])]) builder = SearchBuilder(q, SearchDetails.from_kwargs(kwargs)) - searches = list(builder.build(TokenAssignment(category=TokenRange(0, 1)))) + searches = list(builder.build(TokenAssignment(near_item=TokenRange(0, 1)))) assert len(searches) == 0 @@ -284,13 +284,13 @@ def test_name_and_complex_address(): def test_name_only_near_search(): - q = make_query([(1, TokenType.CATEGORY, [(88, 'g')])], + q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])], [(2, TokenType.PARTIAL, [(1, 'a')]), (2, TokenType.WORD, [(100, 'a')])]) builder = SearchBuilder(q, SearchDetails()) searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2), - category=TokenRange(0, 1)))) + near_item=TokenRange(0, 1)))) assert len(searches) == 1 search = searches[0] diff --git a/test/python/api/search/test_icu_query_analyzer.py b/test/python/api/search/test_icu_query_analyzer.py index faf813752..a88ca8b82 100644 --- a/test/python/api/search/test_icu_query_analyzer.py +++ 
b/test/python/api/search/test_icu_query_analyzer.py @@ -134,7 +134,7 @@ async def test_category_words_only_at_beginning(conn): assert query.num_token_slots() == 3 assert len(query.nodes[0].starting) == 1 - assert query.nodes[0].starting[0].ttype == TokenType.CATEGORY + assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM assert not query.nodes[2].starting @@ -148,9 +148,9 @@ async def test_qualifier_words(conn): query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo')) assert query.num_token_slots() == 5 - assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER} + assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER} assert set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER} - assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER} + assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER} @pytest.mark.asyncio diff --git a/test/python/api/search/test_legacy_query_analyzer.py b/test/python/api/search/test_legacy_query_analyzer.py index cdea6ede7..507afaece 100644 --- a/test/python/api/search/test_legacy_query_analyzer.py +++ b/test/python/api/search/test_legacy_query_analyzer.py @@ -212,7 +212,7 @@ async def test_category_words_only_at_beginning(conn): assert query.num_token_slots() == 3 assert len(query.nodes[0].starting) == 1 - assert query.nodes[0].starting[0].ttype == TokenType.CATEGORY + assert query.nodes[0].starting[0].ttype == TokenType.NEAR_ITEM assert not query.nodes[2].starting @@ -226,9 +226,9 @@ async def test_qualifier_words(conn): query = await ana.analyze_query(make_phrase('foo BAR foo BAR foo')) assert query.num_token_slots() == 5 - assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER} + assert set(t.ttype for t in query.nodes[0].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER} assert 
set(t.ttype for t in query.nodes[2].starting) == {TokenType.QUALIFIER} - assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.CATEGORY, TokenType.QUALIFIER} + assert set(t.ttype for t in query.nodes[4].starting) == {TokenType.NEAR_ITEM, TokenType.QUALIFIER} @pytest.mark.asyncio diff --git a/test/python/api/search/test_token_assignment.py b/test/python/api/search/test_token_assignment.py index 6dc25b1e7..2ed55a0f8 100644 --- a/test/python/api/search/test_token_assignment.py +++ b/test/python/api/search/test_token_assignment.py @@ -76,11 +76,11 @@ def test_single_country_name(): def test_single_word_poi_search(): q = make_query((BreakType.START, PhraseType.NONE, - [(1, TokenType.CATEGORY), + [(1, TokenType.NEAR_ITEM), (1, TokenType.QUALIFIER)])) res = list(yield_token_assignments(q)) - assert res == [TokenAssignment(category=TokenRange(0, 1))] + assert res == [TokenAssignment(near_item=TokenRange(0, 1))] @pytest.mark.parametrize('btype', [BreakType.WORD, BreakType.PART, BreakType.TOKEN]) @@ -182,7 +182,7 @@ def test_country_housenumber_postcode(): @pytest.mark.parametrize('ttype', [TokenType.POSTCODE, TokenType.COUNTRY, - TokenType.CATEGORY, TokenType.QUALIFIER]) + TokenType.NEAR_ITEM, TokenType.QUALIFIER]) def test_housenumber_with_only_special_terms(ttype): q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.HOUSENUMBER)]), (BreakType.WORD, PhraseType.NONE, [(2, ttype)])) @@ -266,27 +266,27 @@ def test_postcode_with_designation_backwards(): address=[TokenRange(0, 1)])) -def test_category_at_beginning(): - q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.CATEGORY)]), +def test_near_item_at_beginning(): + q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.NEAR_ITEM)]), (BreakType.WORD, PhraseType.NONE, [(2, TokenType.PARTIAL)])) check_assignments(yield_token_assignments(q), TokenAssignment(penalty=0.1, name=TokenRange(1, 2), - category=TokenRange(0, 1))) + near_item=TokenRange(0, 1))) -def 
test_category_at_end(): +def test_near_item_at_end(): q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), - (BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)])) + (BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)])) check_assignments(yield_token_assignments(q), TokenAssignment(penalty=0.1, name=TokenRange(0, 1), - category=TokenRange(1, 2))) + near_item=TokenRange(1, 2))) -def test_category_in_middle(): +def test_near_item_in_middle(): q = make_query((BreakType.START, PhraseType.NONE, [(1, TokenType.PARTIAL)]), - (BreakType.WORD, PhraseType.NONE, [(2, TokenType.CATEGORY)]), + (BreakType.WORD, PhraseType.NONE, [(2, TokenType.NEAR_ITEM)]), (BreakType.WORD, PhraseType.NONE, [(3, TokenType.PARTIAL)])) check_assignments(yield_token_assignments(q)) From 25279d009a3d6aa0662a49fbde3d937775e26322 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Tue, 28 Nov 2023 16:56:08 +0100 Subject: [PATCH 09/14] add tests for interaction of category parameter with category terms --- nominatim/api/search/db_search_builder.py | 5 ++ .../api/search/test_db_search_builder.py | 58 +++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py index a0018480d..f89d8b62e 100644 --- a/nominatim/api/search/db_search_builder.py +++ b/nominatim/api/search/db_search_builder.py @@ -90,6 +90,8 @@ def build(self, assignment: TokenAssignment) -> Iterator[dbs.AbstractSearch]: return near_items = self.get_near_items(assignment) + if near_items is not None and not near_items: + return # impossible combination of near items and category parameter if assignment.name is None: if near_items and not sdata.postcodes: @@ -348,6 +350,9 @@ def get_near_items(self, assignment: TokenAssignment) -> Optional[dbf.WeightedCa tokens: Dict[Tuple[str, str], float] = {} for t in self.query.get_tokens(assignment.near_item, TokenType.NEAR_ITEM): cat = t.get_category() + #
The category of a near search will be that of near_item. + # Thus, if search is restricted to a category parameter, + # the two sets must intersect. if (not self.details.categories or cat in self.details.categories)\ and t.penalty < tokens.get(cat, 1000.0): tokens[cat] = t.penalty diff --git a/test/python/api/search/test_db_search_builder.py b/test/python/api/search/test_db_search_builder.py index e3feff0d3..87d752615 100644 --- a/test/python/api/search/test_db_search_builder.py +++ b/test/python/api/search/test_db_search_builder.py @@ -313,6 +313,64 @@ def test_name_only_search_with_category(): assert search.qualifiers.values == [('foo', 'bar')] +def test_name_with_near_item_search_with_category_mismatch(): + q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])], + [(2, TokenType.PARTIAL, [(1, 'a')]), + (2, TokenType.WORD, [(100, 'a')])]) + builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]})) + + searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2), + near_item=TokenRange(0, 1)))) + + assert len(searches) == 0 + + +def test_name_with_near_item_search_with_category_match(): + q = make_query([(1, TokenType.NEAR_ITEM, [(88, 'g')])], + [(2, TokenType.PARTIAL, [(1, 'a')]), + (2, TokenType.WORD, [(100, 'a')])]) + builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'), + ('this', 'that')]})) + + searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2), + near_item=TokenRange(0, 1)))) + + assert len(searches) == 1 + search = searches[0] + + assert isinstance(search, dbs.NearSearch) + assert isinstance(search.search, dbs.PlaceSearch) + + +def test_name_with_qualifier_search_with_category_mismatch(): + q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])], + [(2, TokenType.PARTIAL, [(1, 'a')]), + (2, TokenType.WORD, [(100, 'a')])]) + builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar')]})) + + searches = 
list(builder.build(TokenAssignment(name=TokenRange(1, 2), + qualifier=TokenRange(0, 1)))) + + assert len(searches) == 0 + + +def test_name_with_qualifier_search_with_category_match(): + q = make_query([(1, TokenType.QUALIFIER, [(88, 'g')])], + [(2, TokenType.PARTIAL, [(1, 'a')]), + (2, TokenType.WORD, [(100, 'a')])]) + builder = SearchBuilder(q, SearchDetails.from_kwargs({'categories': [('foo', 'bar'), + ('this', 'that')]})) + + searches = list(builder.build(TokenAssignment(name=TokenRange(1, 2), + qualifier=TokenRange(0, 1)))) + + assert len(searches) == 1 + search = searches[0] + + assert isinstance(search, dbs.PlaceSearch) + assert search.qualifiers.values == [('this', 'that')] + + def test_name_only_search_with_countries(): q = make_query([(1, TokenType.PARTIAL, [(1, 'a')]), (1, TokenType.WORD, [(100, 'a')])]) From b2319e52ff8c995eb3350098811cbabc29b32f51 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Tue, 28 Nov 2023 17:53:37 +0100 Subject: [PATCH 10/14] correctly exclude streets with housenumber searches Street results are not subject to the full filtering in the SQL query, so recheck.
--- nominatim/api/search/db_searches.py | 13 +++++--- test/python/api/search/test_search_places.py | 31 ++++++++++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/nominatim/api/search/db_searches.py b/nominatim/api/search/db_searches.py index ce5fbc634..232f816ef 100644 --- a/nominatim/api/search/db_searches.py +++ b/nominatim/api/search/db_searches.py @@ -766,9 +766,6 @@ async def lookup(self, conn: SearchConnection, assert result result.bbox = Bbox.from_wkb(row.bbox) result.accuracy = row.accuracy - if not details.excluded or not result.place_id in details.excluded: - results.append(result) - if self.housenumbers and row.rank_address < 30: if row.placex_hnr: subs = _get_placex_housenumbers(conn, row.placex_hnr, details) @@ -788,6 +785,14 @@ async def lookup(self, conn: SearchConnection, sub.accuracy += 0.6 results.append(sub) - result.accuracy += 1.0 # penalty for missing housenumber + # Only add the street as a result, if it meets all other + # filter conditions. 
+ if (not details.excluded or result.place_id not in details.excluded)\ + and (not self.qualifiers or result.category in self.qualifiers.values)\ + and result.rank_address >= details.min_rank: + result.accuracy += 1.0 # penalty for missing housenumber + results.append(result) + else: + results.append(result) return results diff --git a/test/python/api/search/test_search_places.py b/test/python/api/search/test_search_places.py index 3853439f2..8a363e977 100644 --- a/test/python/api/search/test_search_places.py +++ b/test/python/api/search/test_search_places.py @@ -281,6 +281,37 @@ def test_lookup_exclude_street_placeid(self, apiobj): assert [r.place_id for r in results] == [2, 92, 2000] + def test_lookup_only_house_qualifier(self, apiobj): + lookup = FieldLookup('name_vector', [1,2], 'lookup_all') + ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) + + results = run_search(apiobj, 0.1, [lookup], [ranking], hnrs=['22'], + quals=[('place', 'house')]) + + assert [r.place_id for r in results] == [2, 92] + + + def test_lookup_only_street_qualifier(self, apiobj): + lookup = FieldLookup('name_vector', [1,2], 'lookup_all') + ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) + + results = run_search(apiobj, 0.1, [lookup], [ranking], hnrs=['22'], + quals=[('highway', 'residential')]) + + assert [r.place_id for r in results] == [1000, 2000] + + + @pytest.mark.parametrize('rank,found', [(26, True), (27, False), (30, False)]) + def test_lookup_min_rank(self, apiobj, rank, found): + lookup = FieldLookup('name_vector', [1,2], 'lookup_all') + ranking = FieldRanking('name_vector', 0.3, [RankedTokens(0.0, [10])]) + + results = run_search(apiobj, 0.1, [lookup], [ranking], hnrs=['22'], + details=SearchDetails(min_rank=rank)) + + assert [r.place_id for r in results] == ([2, 92, 1000, 2000] if found else [2, 92]) + + @pytest.mark.parametrize('geom', [napi.GeometryFormat.GEOJSON, napi.GeometryFormat.KML, napi.GeometryFormat.SVG, From 
32e7b59b1f2ceb78a94616c820bef6d0a9c0fd05 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Tue, 28 Nov 2023 20:12:12 +0100 Subject: [PATCH 11/14] NearSearch needs to inherit penalty from inner search --- nominatim/api/search/db_search_builder.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py index f89d8b62e..6d94feaba 100644 --- a/nominatim/api/search/db_search_builder.py +++ b/nominatim/api/search/db_search_builder.py @@ -113,7 +113,10 @@ def build(self, assignment: TokenAssignment) -> Iterator[dbs.AbstractSearch]: penalty = min(near_items.penalties) near_items.penalties = [p - penalty for p in near_items.penalties] for search in builder: - yield dbs.NearSearch(penalty + assignment.penalty, near_items, search) + search_penalty = search.penalty + search.penalty = 0.0 + yield dbs.NearSearch(penalty + assignment.penalty + search_penalty, + near_items, search) else: for search in builder: search.penalty += assignment.penalty From 0c72a434e033c54729294129dd4e37527db19faf Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Sat, 25 Nov 2023 18:39:28 +0100 Subject: [PATCH 12/14] use restrict for housenumber lookups with few numbers --- nominatim/api/search/db_search_builder.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py index 6d94feaba..39c25f6b3 100644 --- a/nominatim/api/search/db_search_builder.py +++ b/nominatim/api/search/db_search_builder.py @@ -163,11 +163,15 @@ def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[Token], housenumber is the main name token. 
""" sdata.lookups = [dbf.FieldLookup('name_vector', [t.token for t in hnrs], 'lookup_any')] + expected_count = sum(t.count for t in hnrs) partials = [t for trange in address for t in self.query.get_partials_list(trange)] - if len(partials) != 1 or partials[0].count < 10000: + if expected_count < 8000: + sdata.lookups.append(dbf.FieldLookup('nameaddress_vector', + [t.token for t in partials], 'restrict')) + elif len(partials) != 1 or partials[0].count < 10000: sdata.lookups.append(dbf.FieldLookup('nameaddress_vector', [t.token for t in partials], 'lookup_all')) else: @@ -178,7 +182,7 @@ def build_housenumber_search(self, sdata: dbf.SearchData, hnrs: List[Token], 'lookup_any')) sdata.housenumbers = dbf.WeightedStrings([], []) - yield dbs.PlaceSearch(0.05, sdata, sum(t.count for t in hnrs)) + yield dbs.PlaceSearch(0.05, sdata, expected_count) def build_name_search(self, sdata: dbf.SearchData, From 3c7a28dab0e7fa9b1d9eb4a5f9bdddfb879d0384 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Wed, 29 Nov 2023 11:19:06 +0100 Subject: [PATCH 13/14] further restrict stop search criterion --- nominatim/api/search/geocoder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nominatim/api/search/geocoder.py b/nominatim/api/search/geocoder.py index 91c45b65a..bb3c6a1c8 100644 --- a/nominatim/api/search/geocoder.py +++ b/nominatim/api/search/geocoder.py @@ -79,7 +79,7 @@ async def execute_searches(self, query: QueryStruct, end_time = dt.datetime.now() + self.timeout - min_ranking = 1000.0 + min_ranking = searches[0].penalty + 2.0 prev_penalty = 0.0 for i, search in enumerate(searches): if search.penalty > prev_penalty and (search.penalty > min_ranking or i > 20): @@ -94,7 +94,7 @@ async def execute_searches(self, query: QueryStruct, prevresult.accuracy = min(prevresult.accuracy, result.accuracy) else: results[rhash] = result - min_ranking = min(min_ranking, result.ranking + 0.5, search.penalty + 0.3) + min_ranking = min(min_ranking, 
result.accuracy * 1.2) log().result_dump('Results', ((r.accuracy, r) for r in lookup_results)) prev_penalty = search.penalty if dt.datetime.now() >= end_time: From 8a2c6067a2fc4fae579a5a9b5747b6985ca09b87 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann <lonvia@denofr.de> Date: Fri, 1 Dec 2023 12:11:58 +0100 Subject: [PATCH 14/14] skip lookup with full names when there are none --- nominatim/api/search/db_search_builder.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/nominatim/api/search/db_search_builder.py b/nominatim/api/search/db_search_builder.py index 39c25f6b3..c755f2a74 100644 --- a/nominatim/api/search/db_search_builder.py +++ b/nominatim/api/search/db_search_builder.py @@ -223,16 +223,17 @@ def yield_lookups(self, name: TokenRange, address: List[TokenRange])\ # Partial term to frequent. Try looking up by rare full names first. name_fulls = self.query.get_tokens(name, TokenType.WORD) - fulls_count = sum(t.count for t in name_fulls) - # At this point drop unindexed partials from the address. - # This might yield wrong results, nothing we can do about that. - if not partials_indexed: - addr_tokens = [t.token for t in addr_partials if t.is_indexed] - penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed) - # Any of the full names applies with all of the partials from the address - yield penalty, fulls_count / (2**len(addr_partials)),\ - dbf.lookup_by_any_name([t.token for t in name_fulls], addr_tokens, - 'restrict' if fulls_count < 10000 else 'lookup_all') + if name_fulls: + fulls_count = sum(t.count for t in name_fulls) + # At this point drop unindexed partials from the address. + # This might yield wrong results, nothing we can do about that. 
+ if not partials_indexed: + addr_tokens = [t.token for t in addr_partials if t.is_indexed] + penalty += 1.2 * sum(t.penalty for t in addr_partials if not t.is_indexed) + # Any of the full names applies with all of the partials from the address + yield penalty, fulls_count / (2**len(addr_partials)),\ + dbf.lookup_by_any_name([t.token for t in name_fulls], addr_tokens, + 'restrict' if fulls_count < 10000 else 'lookup_all') # To catch remaining results, lookup by name and address # We only do this if there is a reasonable number of results expected.