Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[ku] extract gloss and example lists #1003

Merged
merged 2 commits into from
Jan 23, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion src/wiktextract/data/fr/config.json
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
{
"extract_thesaurus_pages": false,
"save_ns_names": ["Main", "Template", "Module", "Conjugaison"]
}
3 changes: 3 additions & 0 deletions src/wiktextract/data/ku/config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"save_ns_names": ["Main", "Template", "Module", "Tewandin"]
}
46 changes: 46 additions & 0 deletions src/wiktextract/extractor/ku/example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
from wikitextprocessor import TemplateNode, WikiNode

from ...page import clean_node
from ...wxr_context import WiktextractContext
from .models import Example, Sense, WordEntry


def extract_example_list_item(
    wxr: WiktextractContext,
    word_entry: WordEntry,
    sense: Sense,
    list_item: WikiNode,
) -> None:
    """Process one example list item, extracting "jêder" citation templates."""
    for child in list_item.children:
        if not isinstance(child, TemplateNode):
            continue
        t_name = child.template_name
        # "jêder" and its "jêder-*" variants all carry quotations.
        if t_name == "jêder" or t_name.startswith("jêder-"):
            extract_jêder_template(wxr, sense, child)


def extract_jêder_template(
    wxr: WiktextractContext,
    sense: Sense,
    t_node: TemplateNode,
) -> None:
    """Extract an example sentence from a "jêder" quotation template.

    https://ku.wiktionary.org/wiki/Şablon:jêder
    """
    # Expand the template so the rendered citation span can be located.
    expanded = wxr.wtp.parse(
        wxr.wtp.node_to_wikitext(t_node), expand_all=True
    )
    params = t_node.template_parameters
    example = Example(
        text=clean_node(wxr, None, params.get("jêgirtin", "")),
        roman=clean_node(wxr, None, params.get("tr", "")),
        translation=clean_node(wxr, None, params.get("werger", "")),
    )
    # The rendered source reference sits in a span with class "jeder",
    # wrapped in a leading dash and parentheses that we trim away.
    for ref_span in expanded.find_html(
        "span", attr_name="class", attr_value="jeder"
    ):
        example.ref = clean_node(wxr, None, ref_span).strip("— ()")
    if example.text != "":
        sense.examples.append(example)
    # Collect category links from the expansion into the sense.
    clean_node(wxr, sense, expanded)
45 changes: 45 additions & 0 deletions src/wiktextract/extractor/ku/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
from pydantic import BaseModel, ConfigDict, Field


class KurdishBaseModel(BaseModel):
    # Shared pydantic configuration for all Kurdish extractor models:
    # reject unknown fields, validate strictly (no type coercion), and
    # re-validate on attribute assignment and for default values.
    model_config = ConfigDict(
        extra="forbid",
        strict=True,
        validate_assignment=True,
        validate_default=True,
    )


class Example(KurdishBaseModel):
    text: str  # the example sentence itself
    translation: str = ""  # translation of the example sentence
    roman: str = Field(
        default="", description="Romanization of the example sentence"
    )
    ref: str = Field(
        default="",
        description="Source of the sentence, like book title and page number",
    )
    tags: list[str] = []  # canonical tags applied to this example
    raw_tags: list[str] = []  # labels not yet mapped to canonical tags


class Sense(KurdishBaseModel):
    # One sense (definition) of a word entry.
    glosses: list[str] = []  # gloss text, outer gloss first for nested lists
    tags: list[str] = []  # canonical tags applied to this sense
    raw_tags: list[str] = []  # labels not yet mapped to canonical tags
    categories: list[str] = []  # category links collected from this sense
    examples: list[Example] = []


class WordEntry(KurdishBaseModel):
    # Top-level extraction result: one part-of-speech section of a page.
    model_config = ConfigDict(title="Kurdish Wiktionary")
    word: str = Field(description="Word string")
    lang_code: str = Field(description="Wiktionary language code")
    lang: str = Field(description="Localized language name")
    pos: str = Field(description="Part of speech type")
    pos_title: str = ""  # localized section title, e.g. "Navdêr"
    senses: list[Sense] = []
    categories: list[str] = []  # page-level category links
    tags: list[str] = []  # canonical tags applied to the whole entry
    raw_tags: list[str] = []  # labels not yet mapped to canonical tags
60 changes: 60 additions & 0 deletions src/wiktextract/extractor/ku/page.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from typing import Any

from wikitextprocessor.parser import LEVEL_KIND_FLAGS, LevelNode, NodeKind

from ...page import clean_node
from ...wxr_context import WiktextractContext
from .models import Sense, WordEntry
from .pos import extract_pos_section
from .section_titles import POS_DATA


def parse_section(
    wxr: WiktextractContext,
    page_data: list[WordEntry],
    base_data: WordEntry,
    level_node: LevelNode,
) -> None:
    """Dispatch one section by its title, then recurse into subsections."""
    section_title = clean_node(wxr, None, level_node.largs)
    wxr.wtp.start_subsection(section_title)
    if section_title in POS_DATA:
        extract_pos_section(
            wxr, page_data, base_data, level_node, section_title
        )

    for child_section in level_node.find_child(LEVEL_KIND_FLAGS):
        parse_section(wxr, page_data, base_data, child_section)


def parse_page(
    wxr: WiktextractContext, page_title: str, page_text: str
) -> list[dict[str, Any]]:
    """Parse one Kurdish Wiktionary page into word-entry dictionaries.

    Page layout:
    https://ku.wiktionary.org/wiki/Wîkîferheng:Normalkirina_gotaran
    https://ku.wiktionary.org/wiki/Alîkarî:Formata_nivîsînê
    """
    wxr.wtp.start_page(page_title)
    root = wxr.wtp.parse(page_text, pre_expand=True)
    page_data: list[WordEntry] = []
    for lang_section in root.find_child(NodeKind.LEVEL2):
        categories = {}
        lang_name = clean_node(wxr, categories, lang_section.largs)
        # The language code is the first argument of the section's
        # language template; fall back to "unknown" when absent.
        lang_code = "unknown"
        for template in lang_section.find_content(NodeKind.TEMPLATE):
            code = clean_node(
                wxr, None, template.template_parameters.get(1, "")
            )
            if code != "":
                lang_code = code
        wxr.wtp.start_section(lang_name)
        base_data = WordEntry(
            word=wxr.wtp.title,
            lang_code=lang_code,
            lang=lang_name,
            pos="unknown",
            categories=categories.get("categories", []),
        )
        for child_section in lang_section.find_child(LEVEL_KIND_FLAGS):
            parse_section(wxr, page_data, base_data, child_section)

    # Mark entries that ended up with no definition at all.
    for entry in page_data:
        if len(entry.senses) == 0:
            entry.senses.append(Sense(tags=["no-gloss"]))
    return [entry.model_dump(exclude_defaults=True) for entry in page_data]
75 changes: 75 additions & 0 deletions src/wiktextract/extractor/ku/pos.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
import re

from wikitextprocessor import (
LevelNode,
NodeKind,
TemplateNode,
WikiNode,
)

from ...page import clean_node
from ...wxr_context import WiktextractContext
from .example import extract_example_list_item
from .models import Sense, WordEntry
from .section_titles import POS_DATA


def extract_pos_section(
    wxr: WiktextractContext,
    page_data: list[WordEntry],
    base_data: WordEntry,
    level_node: LevelNode,
    pos_title: str,
) -> None:
    """Create a new word entry for a POS section and extract its glosses.

    Args:
        wxr: Extraction context.
        page_data: Entries extracted so far; a deep copy of ``base_data``
            is appended and filled in.
        base_data: Language-level data shared by all entries on the page.
        level_node: The POS section node.
        pos_title: Localized section title; must be a key of ``POS_DATA``.
    """
    page_data.append(base_data.model_copy(deep=True))
    page_data[-1].pos_title = pos_title
    pos_data = POS_DATA[pos_title]
    page_data[-1].pos = pos_data["pos"]
    page_data[-1].tags.extend(pos_data.get("tags", []))

    # Gloss lists use "#" markers; the loop-invariant sarg check is done
    # once per list (the original checked it per item and also requested
    # an index from find_child that was never used).
    for list_node in level_node.find_child(NodeKind.LIST):
        if list_node.sarg.startswith("#") and list_node.sarg.endswith("#"):
            for list_item in list_node.find_child(NodeKind.LIST_ITEM):
                extract_gloss_list_item(wxr, page_data[-1], list_item)


def extract_gloss_list_item(
    wxr: WiktextractContext,
    word_entry: WordEntry,
    list_item: WikiNode,
) -> None:
    """Extract one gloss list item and its nested example sub-lists."""
    new_sense = Sense()
    gloss_parts = []
    for child in list_item.children:
        if isinstance(child, TemplateNode) and child.template_name in (
            "f",
            "ferhengok",
        ):
            # Label template at the start of the gloss → raw tags.
            extract_ferhengok_template(wxr, new_sense, child)
        elif isinstance(child, WikiNode) and child.kind == NodeKind.LIST:
            continue  # nested lists are handled below
        else:
            gloss_parts.append(child)

    # Example sub-lists use "#:" or "#*" markers.
    for sub_list in list_item.find_child(NodeKind.LIST):
        sarg = sub_list.sarg
        if sarg.startswith("#") and sarg.endswith((":", "*")):
            for example_item in sub_list.find_child(NodeKind.LIST_ITEM):
                extract_example_list_item(
                    wxr, word_entry, new_sense, example_item
                )

    gloss_text = clean_node(wxr, new_sense, gloss_parts)
    if gloss_text != "":
        new_sense.glosses.append(gloss_text)
        word_entry.senses.append(new_sense)


def extract_ferhengok_template(
    wxr: WiktextractContext,
    sense: Sense,
    t_node: TemplateNode,
) -> None:
    """Extract usage labels from a "ferhengok" (glossary-label) template.

    https://ku.wiktionary.org/wiki/Şablon:ferhengok
    """
    # The template renders a parenthesized label list; split it on the
    # separators "," / " an " / " û " and keep each piece as a raw tag.
    label_text = clean_node(wxr, sense, t_node).strip("() ")
    for piece in re.split(r",| an | û ", label_text):
        piece = piece.strip()
        if piece:
            sense.raw_tags.append(piece)
34 changes: 34 additions & 0 deletions src/wiktextract/extractor/ku/section_titles.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# https://ku.wiktionary.org/wiki/Alîkarî:Cureyên_peyvan
# Map from localized part-of-speech section title to the entry's "pos"
# value plus any tags that apply to the whole entry.
# https://ku.wiktionary.org/wiki/Alîkarî:Cureyên_peyvan
POS_DATA = {
    "Navdêr": {"pos": "noun"},
    "Serenav": {"pos": "name"},
    "Lêker": {"pos": "verb"},
    "Rengdêr": {"pos": "adj"},
    "Hoker": {"pos": "adv"},
    "Cînav": {"pos": "pron"},
    "Baneşan": {"pos": "intj"},
    "Daçek": {"pos": "prep"},
    "Pêşdaçek": {"pos": "prep"},
    "Paşdaçek": {"pos": "postp"},
    "Bazinedaçek": {"pos": "circumpos"},
    "Girêdek": {"pos": "conj"},
    "Artîkel": {"pos": "article"},
    "Pirtik": {"pos": "article"},
    "Navgir": {"pos": "infix", "tags": ["morpheme"]},
    "Paşgir": {"pos": "suffix", "tags": ["morpheme"]},
    "Pêşgir": {"pos": "prefix", "tags": ["morpheme"]},
    "Reh": {"pos": "root", "tags": ["morpheme"]},
    "Rehekî lêkerê": {"pos": "root", "tags": ["morpheme"]},
    "Biwêj": {"pos": "phrase", "tags": ["idiomatic"]},
    "Hevok": {"pos": "phrase"},
    "Gotineke pêşiyan": {"pos": "proverb"},
    "Hejmar": {"pos": "num", "tags": ["number"]},
    "Tîp": {"pos": "character", "tags": ["letter"]},
    "Sembol": {"pos": "symbol"},
    "Kurtenav": {"pos": "abbrev", "tags": ["abbreviation"]},
    # Inflected-form sections ("Formeke ...ê" = "a form of ...").
    "Formeke navdêrê": {"pos": "noun", "tags": ["form-of"]},
    "Formeke lêkerê": {"pos": "verb", "tags": ["form-of"]},
    "Formeke rengdêrê": {"pos": "adj", "tags": ["form-of"]},
    "Formeke cînavê": {"pos": "pron", "tags": ["form-of"]},
    "Formeke hokerê": {"pos": "adv", "tags": ["form-of"]},
}
57 changes: 57 additions & 0 deletions tests/test_ku_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
from unittest import TestCase

from wikitextprocessor import Wtp

from wiktextract.config import WiktionaryConfig
from wiktextract.extractor.ku.page import parse_page
from wiktextract.wxr_context import WiktextractContext


class TestKuExample(TestCase):
    """Tests for example extraction from ku.wiktionary.org pages."""

    maxDiff = None

    def setUp(self) -> None:
        # Fresh parser context per test; capture_language_codes=None
        # keeps all language sections instead of filtering by code.
        self.wxr = WiktextractContext(
            Wtp(lang_code="ku"),
            WiktionaryConfig(
                dump_file_lang_code="ku", capture_language_codes=None
            ),
        )

    def tearDown(self):
        self.wxr.wtp.close_db_conn()

    def test_jêder_example(self):
        # The "jêder" quotation template expands to the example text plus
        # a citation span (class "jeder") carrying source and categories;
        # text, ref, and categories should all be extracted.
        self.wxr.wtp.add_page("Şablon:ziman", 10, "Kurmancî")
        self.wxr.wtp.add_page(
            "Şablon:jêder",
            10,
            """<i><span class="Latn" lang="ku">'''Kûçikên''' li hewşa pêş û paşî, ji hîva rûnixumandî tirsnaktir û ji ecacokê hartir, gez dikin bê.</span></i><span class="jeder">&nbsp;—&nbsp;(<span class="j-pewist">[[w:Îrfan Amîda|Îrfan Amîda]]</span>,&nbsp;<i><span class="j-pewist">Şevek Şîzofren</span></i>,&nbsp;[[w:Weşanên Lîs|Weşanên Lîs]],&nbsp;<span class="j-pewist">2018</span>,&nbsp;r. 6,&nbsp;ISBN 9786058152175[[Kategorî:Jêgirtinên kitêban bi kurmancî]][[Kategorî:Jêgirtinên ji Îrfan Amîda]])</span>[[Kategorî:Jêgirtin bi kurmancî]]<span class="example"><bdi lang="ku"></bdi></span>""",
        )
        page_data = parse_page(
            self.wxr,
            "kûçik",
            """== {{ziman|ku}} ==
=== Navdêr ===
# [[heywan|Heywanek]]
#* {{jêder|ku|jêgirtin='''Kûçikên''' li hewşa pêş û paşî, ji hîva rûnixumandî tirsnaktir û ji ecacokê hartir, gez dikin bê.|{{Jêgirtin/Îrfan Amîda/Şevek Şîzofren|r=6}}}}""",
        )
        self.assertEqual(
            page_data[0]["senses"],
            [
                {
                    "categories": [
                        "Jêgirtinên kitêban bi kurmancî",
                        "Jêgirtinên ji Îrfan Amîda",
                        "Jêgirtin bi kurmancî",
                    ],
                    "glosses": ["Heywanek"],
                    "examples": [
                        {
                            "text": "Kûçikên li hewşa pêş û paşî, ji hîva rûnixumandî tirsnaktir û ji ecacokê hartir, gez dikin bê.",
                            "ref": "Îrfan Amîda, Şevek Şîzofren, Weşanên Lîs, 2018, r. 6, ISBN 9786058152175",
                        }
                    ],
                }
            ],
        )
59 changes: 59 additions & 0 deletions tests/test_ku_gloss.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
from unittest import TestCase

from wikitextprocessor import Wtp

from wiktextract.config import WiktionaryConfig
from wiktextract.extractor.ku.page import parse_page
from wiktextract.wxr_context import WiktextractContext


class TestKuGloss(TestCase):
    """Tests for gloss extraction from ku.wiktionary.org pages."""

    maxDiff = None

    def setUp(self) -> None:
        # Fresh parser context per test; capture_language_codes=None
        # keeps all language sections instead of filtering by code.
        self.wxr = WiktextractContext(
            Wtp(lang_code="ku"),
            WiktionaryConfig(
                dump_file_lang_code="ku", capture_language_codes=None
            ),
        )

    def tearDown(self):
        self.wxr.wtp.close_db_conn()

    def test_f_template(self):
        # The "f" (ferhengok) label template before a gloss should become
        # a raw tag on the sense, not part of the gloss text, and its
        # category link should land on the sense.
        self.wxr.wtp.add_page(
            "Şablon:ziman",
            10,
            """<span class="sectionlangue" id="ku">[[kurmancî|Kurmancî]][[Kategorî:Kurmancî]]</span>""",
        )
        self.wxr.wtp.add_page(
            "Şablon:f",
            10,
            """<i><span class="ib-brac">(</span><span class="ib-content">[[guhandar]][[Category:Guhandar bi kurmancî|A]]</span><span class="ib-brac">)</span></i>""",
        )
        page_data = parse_page(
            self.wxr,
            "kûçik",
            """== {{ziman|ku}} ==
=== Navdêr ===
# {{f|ku|guhandar}} [[heywan|Heywanek]]""",
        )
        self.assertEqual(
            page_data[0],
            {
                "categories": ["Kurmancî"],
                "lang": "Kurmancî",
                "lang_code": "ku",
                "word": "kûçik",
                "pos": "noun",
                "pos_title": "Navdêr",
                "senses": [
                    {
                        "categories": ["Guhandar bi kurmancî"],
                        "glosses": ["Heywanek"],
                        "raw_tags": ["guhandar"],
                    }
                ],
            },
        )
Loading