Merge pull request #37 from lindera/package_name
Fix package name
mosuka authored Dec 6, 2024
2 parents f66cfdb + c9af26e commit ec0ef8a
Showing 11 changed files with 13 additions and 64 deletions.
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files, such as Cargo.lock, are not rendered by default.

5 changes: 3 additions & 2 deletions Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "lindera-py"
version = "0.38.2"
version = "0.38.3"
edition = "2021"
description = "Python binding for Lindera."
documentation = "https://docs.rs/lindera-py"
@@ -12,7 +12,8 @@ categories = ["text-processing"]
license = "MIT"

[lib]
name = "lindera"
name = "lindera_py"
path = "src/lib.rs"
crate-type = ["cdylib"]

[features]
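
A quick way to confirm the effect of the `[lib]` rename, assuming the usual PyO3 build flow in which the cdylib name and the `#[pymodule]` function name together determine the importable module, is a short hypothetical check after building and installing the extension (this script is not part of the repository):

```python
# Hypothetical post-build check: the extension should now import as
# "lindera_py" (matching the new [lib] name) instead of the old "lindera".
import importlib

module = importlib.import_module("lindera_py")
print(module.__name__)  # lindera_py
print(module.__file__)  # path to the compiled cdylib, e.g. lindera_py.*.so
```
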
2 changes: 0 additions & 2 deletions Makefile
@@ -26,8 +26,6 @@ lint:
poetry run isort --check-only --diff ./examples ./tests
poetry run black --check ./examples ./tests
poetry run flake8 ./examples ./tests

typecheck:
poetry run mypy ./examples ./tests

develop:
2 changes: 1 addition & 1 deletion README.md
@@ -46,7 +46,7 @@ This command takes a long time because it builds a library that includes all the
## Example code

```python
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary


def main():
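
For context, a fuller usage sketch with the renamed import might look like the following. Only the import line is taken from this diff; the dictionary kind ("ipadic"), the segmenter mode ("normal"), and the `token.text` attribute are assumptions, not confirmed by this change:

```python
from lindera_py import Segmenter, Tokenizer, load_dictionary


def main():
    # Assumed API shape: the dictionary name, segmentation mode, and
    # token.text attribute are illustrative guesses, not shown in this diff.
    dictionary = load_dictionary("ipadic")
    segmenter = Segmenter("normal", dictionary)
    tokenizer = Tokenizer(segmenter)

    for token in tokenizer.tokenize("関西国際空港限定トートバッグ"):
        print(token.text)


if __name__ == "__main__":
    main()
```
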
2 changes: 1 addition & 1 deletion examples/tokenize.py
@@ -1,4 +1,4 @@
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary


def main():
2 changes: 1 addition & 1 deletion examples/tokenize_with_decompose.py
@@ -1,4 +1,4 @@
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary


def main():
2 changes: 1 addition & 1 deletion examples/tokenize_with_filters.py
@@ -1,4 +1,4 @@
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary


def main():
2 changes: 1 addition & 1 deletion examples/tokenize_with_userdict.py
@@ -1,6 +1,6 @@
from pathlib import Path

-from lindera import Segmenter, Tokenizer, load_dictionary, load_user_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary, load_user_dictionary

project_root = Path(__file__).resolve().parent.parent

4 changes: 2 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "lindera"
version = "0.38.2"
name = "lindera_py"
version = "0.38.3"
description = ""
authors = ["Minoru Osuka <[email protected]>"]
license = "MIT"
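
With the `[tool.poetry]` name now matching the Rust `[lib]` name and the Python import name, a hedged post-install check might look like this (`importlib.metadata` is standard library; the expected version follows the bump to 0.38.3, and distribution-name normalization may vary by Python version):

```python
# Hypothetical check that the installed distribution carries the renamed
# project and the bumped version declared in pyproject.toml.
from importlib import metadata

dist_version = metadata.version("lindera_py")  # may also resolve as "lindera-py"
print(dist_version)  # expected: 0.38.3
```
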
52 changes: 1 addition & 51 deletions src/lib.rs
@@ -1,74 +1,24 @@
-// pub mod character_filter;
pub mod dictionary;
pub mod segmenter;
pub mod token;
-// pub mod token_filter;
pub mod tokenizer;
pub mod util;

use pyo3::prelude::*;

-// use crate::character_filter::japanese_iteration_mark::PyJapaneseIterationMarkCharacterFilter;
-// use crate::character_filter::mapping::PyMappingCharacterFilter;
-// use crate::character_filter::regex::PyRegexCharacterFilter;
-// use crate::character_filter::unicode_normalize::PyUnicodeNormalizeCharacterFilter;
-// use crate::character_filter::PyCharacterFilter;
use crate::dictionary::{load_dictionary, load_user_dictionary, PyDictionary, PyUserDictionary};
use crate::segmenter::PySegmenter;
use crate::token::PyToken;
-// use crate::token_filter::japanese_base_form::PyJapaneseBaseFormTokenFilter;
-// use crate::token_filter::japanese_compound_word::PyJapaneseCompoundWordTokenFilter;
-// use crate::token_filter::japanese_kana::PyJapaneseKanaTokenFilter;
-// use crate::token_filter::japanese_katakana_stem::PyJapaneseKatakanaStemTokenFilter;
-// use crate::token_filter::japanese_keep_tags::PyJapaneseKeepTagsTokenFilter;
-// use crate::token_filter::japanese_number::PyJapaneseNumberTokenFilter;
-// use crate::token_filter::japanese_reading_form::PyJapaneseReadingFormTokenFilter;
-// use crate::token_filter::japanese_stop_tags::PyJapaneseStopTagsTokenFilter;
-// use crate::token_filter::keep_words::PyKeepWordsTokenFilter;
-// use crate::token_filter::korean_keep_tags::PyKoreanKeepTagsTokenFilter;
-// use crate::token_filter::korean_reading_form::PyKoreanReadingFormTokenFilter;
-// use crate::token_filter::korean_stop_tags::PyKoreanStopTagsTokenFilter;
-// use crate::token_filter::length::PyLengthTokenFilter;
-// use crate::token_filter::lowercase::PyLowercaseTokenFilter;
-// use crate::token_filter::mapping::PyMappingTokenFilter;
-// use crate::token_filter::remove_diacritical_mark::PyRemoveDiacriticalMarkTokenFilter;
-// use crate::token_filter::stop_words::PyStopWordsTokenFilter;
-// use crate::token_filter::uppercase::PyUppercaseTokenFilter;
-// use crate::token_filter::PyTokenFilter;
use crate::tokenizer::{PyTokenizer, PyTokenizerBuilder};

#[pymodule]
-fn lindera(module: &Bound<'_, PyModule>) -> PyResult<()> {
+fn lindera_py(module: &Bound<'_, PyModule>) -> PyResult<()> {
module.add_class::<PyToken>()?;
module.add_class::<PyDictionary>()?;
module.add_class::<PyUserDictionary>()?;
module.add_class::<PyTokenizerBuilder>()?;
module.add_class::<PyTokenizer>()?;
module.add_class::<PySegmenter>()?;
-// module.add_class::<PyCharacterFilter>()?;
-// module.add_class::<PyTokenFilter>()?;
-// module.add_class::<PyJapaneseIterationMarkCharacterFilter>()?;
-// module.add_class::<PyMappingCharacterFilter>()?;
-// module.add_class::<PyRegexCharacterFilter>()?;
-// module.add_class::<PyUnicodeNormalizeCharacterFilter>()?;
-// module.add_class::<PyJapaneseBaseFormTokenFilter>()?;
-// module.add_class::<PyJapaneseCompoundWordTokenFilter>()?;
-// module.add_class::<PyJapaneseKanaTokenFilter>()?;
-// module.add_class::<PyJapaneseKatakanaStemTokenFilter>()?;
-// module.add_class::<PyJapaneseKeepTagsTokenFilter>()?;
-// module.add_class::<PyJapaneseNumberTokenFilter>()?;
-// module.add_class::<PyJapaneseReadingFormTokenFilter>()?;
-// module.add_class::<PyJapaneseStopTagsTokenFilter>()?;
-// module.add_class::<PyKeepWordsTokenFilter>()?;
-// module.add_class::<PyKoreanKeepTagsTokenFilter>()?;
-// module.add_class::<PyKoreanReadingFormTokenFilter>()?;
-// module.add_class::<PyKoreanStopTagsTokenFilter>()?;
-// module.add_class::<PyLengthTokenFilter>()?;
-// module.add_class::<PyLowercaseTokenFilter>()?;
-// module.add_class::<PyMappingTokenFilter>()?;
-// module.add_class::<PyRemoveDiacriticalMarkTokenFilter>()?;
-// module.add_class::<PyStopWordsTokenFilter>()?;
-// module.add_class::<PyUppercaseTokenFilter>()?;

module.add_function(wrap_pyfunction!(load_dictionary, module)?)?;
module.add_function(wrap_pyfunction!(load_user_dictionary, module)?)?;
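
The `#[pymodule]` function rename is the other half of the fix: in PyO3 the module function's name becomes the name Python imports, so it has to agree with the new `lindera_py` package name. A minimal smoke-test sketch, assuming the extension is installed, simply imports the names this diff shows being registered (the filter classes stay unregistered):

```python
# Smoke-test sketch (hypothetical, not part of the repository): only names
# registered in src/lib.rs should be importable from the renamed module.
from lindera_py import Segmenter, Tokenizer, load_dictionary, load_user_dictionary

print(Segmenter, Tokenizer, load_dictionary, load_user_dictionary)
```
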
2 changes: 1 addition & 1 deletion tests/test_tokenize_ipadic.py
@@ -1,4 +1,4 @@
-from lindera import Segmenter, Tokenizer, load_dictionary
+from lindera_py import Segmenter, Tokenizer, load_dictionary


def test_tokenize_with_ipadic():
