Rename package/project to langsfer

AnesBenmerzoug committed Oct 29, 2024
1 parent c5eed69 commit a0e0e21
Showing 16 changed files with 35 additions and 38 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -20,7 +20,7 @@ env:
GITHUB_BOT_EMAIL: 41898282+github-actions[bot]@users.noreply.github.com
PY_COLORS: 1
PYTHON_VERSION: '3.11'
PACKAGE_NAME: 'language-transfer'
PACKAGE_NAME: 'langsfer'


defaults:
14 changes: 7 additions & 7 deletions README.md
@@ -19,14 +19,14 @@ The implemented methods are:
To install the latest stable version from PyPI use:

```shell
pip install language-transfer
pip install langsfer
```

To install the latest development version from the repository use:

```shell
git clone git@github.com:AnesBenmerzoug/language-transfer.git
cd language-transfer
git clone git@github.com:AnesBenmerzoug/langsfer.git
cd langsfer
pip install .
```
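
For hacking on the package itself, an editable install is a common alternative (a sketch, not part of the documented workflow) so that local changes are picked up without reinstalling:

```shell
pip install -e .
```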

@@ -38,10 +38,10 @@ without worrying too much about the package's internals.
For example, to instantiate the WECHSEL method, you would use:

```python
from language_transfer.high_level import wechsel
from language_transfer.initialization import WeightedAverageEmbeddingsInitialization
from language_transfer.embeddings import TransformersEmbeddings, FastTextEmbeddings
from language_transfer.utils import download_file
from langsfer.high_level import wechsel
from langsfer.initialization import WeightedAverageEmbeddingsInitialization
from langsfer.embeddings import TransformersEmbeddings, FastTextEmbeddings
from langsfer.utils import download_file
from transformers import AutoTokenizer

source_embeddings = TransformersEmbeddings.from_model_name_or_path("roberta-base")
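# What follows is a hedged sketch of the rest of the setup: the parameter and
# method names, the model IDs, and the dictionary URL are illustrative
# assumptions, not langsfer's confirmed API.
target_tokenizer = AutoTokenizer.from_pretrained("benjamin/roberta-base-wechsel-german")  # assumed model ID
source_auxiliary_embeddings = FastTextEmbeddings.from_model_name_or_path("en")  # assumed constructor
target_auxiliary_embeddings = FastTextEmbeddings.from_model_name_or_path("de")  # assumed constructor
bilingual_dictionary_file = download_file(
    "https://raw.githubusercontent.com/CPJKU/wechsel/main/dicts/data/german.txt",  # assumed URL
    "german.txt",
)

embedding_initializer = wechsel(
    source_embeddings=source_embeddings,
    target_tokenizer=target_tokenizer,
    source_auxiliary_embeddings=source_auxiliary_embeddings,
    target_auxiliary_embeddings=target_auxiliary_embeddings,
    bilingual_dictionary_file=bilingual_dictionary_file,
)
target_embeddings = embedding_initializer.initialize(show_progress=True)
```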
19 changes: 8 additions & 11 deletions pyproject.toml
@@ -3,13 +3,13 @@ requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "language-transfer"
name = "langsfer"
version = "0.0.1-dev0"
description = ""
authors = ["Anes Benmerzoug <[email protected]>"]
license = "LGPL-2.1-only"
readme = "README.md"
repository = "https://github.com/AnesBenmerzoug/language-transfer"
repository = "https://github.com/AnesBenmerzoug/langsfer"
keywords = [""]
classifiers = [
"Development Status :: 4 - Beta",
@@ -24,7 +24,7 @@ classifiers = [
"Intended Audience :: Science/Research",
]
packages = [
{include = "language_transfer", from = "src"}
{include = "langsfer", from = "src"}
]

[tool.bumpversion]
@@ -48,7 +48,7 @@ serialize = [
"{major}.{minor}.{patch}",
]
files = [
{ filename = "src/language_transfer/__init__.py", search = '__version__ = "{current_version}"', replace = '__version__ = "{new_version}"' },
{ filename = "src/langsfer/__init__.py", search = '__version__ = "{current_version}"', replace = '__version__ = "{new_version}"' },
{ filename = "pyproject.toml", search = 'version = "{current_version}"', replace = 'version = "{new_version}"' }
]
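
These `files` entries keep the version string in `src/langsfer/__init__.py` and `pyproject.toml` in sync when a release is cut. A minimal sketch of how such a bump would be run with bump-my-version, assuming it is available in the development environment:

```shell
# Preview which files would be rewritten, then apply the patch-level bump
bump-my-version bump patch --dry-run
bump-my-version bump patch
```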

@@ -77,7 +77,7 @@ hypothesis = {extras = ["numpy"], version = "^6.115.0"}
huggingface-hub = {extras = ["cli"], version = "^0.25.2"}

[tool.pytest.ini_options]
addopts = "-vv --failed-first --durations=10 --profile-svg --cov=language-transfer --cov-report=term-missing --cov-report=xml"
addopts = "-vv --failed-first --durations=10 --profile-svg --cov=langsfer --cov-report=term-missing --cov-report=xml"
testpaths = [
"tests"
]
@@ -88,16 +88,13 @@ log_cli = true
log_level = "INFO"

[tool.coverage.run]
source_pkgs = ["language_transfer", "tests"]
source_pkgs = ["langsfer", "tests"]
branch = true
parallel = true
omit = [
"src/language_transfer/__about__.py",
]

[tool.coverage.paths]
language_transfer = ["src/language_transfer", "*/language-transfer/src/language_transfer"]
tests = ["tests", "*/language-transfer/tests"]
langsfer = ["src/langsfer", "*/langsfer/src/langsfer"]
tests = ["tests", "*/langsfer/tests"]

[tool.coverage.report]
exclude_lines = [
File renamed without changes.
@@ -8,7 +8,7 @@
from numpy.typing import NDArray
from scipy.linalg import orthogonal_procrustes

from language_transfer.embeddings import FastTextEmbeddings
from langsfer.embeddings import FastTextEmbeddings

__all__ = ["IdentityAlignment", "BilingualDictionaryAlignment"]

@@ -5,6 +5,6 @@

HOME_DIR = Path.home()
CACHE_DIR = HOME_DIR / ".cache"
LANGUAGE_TRANSFER_CACHE_DIR = CACHE_DIR / "language_transfer"
MODEL_CACHE_DIR = LANGUAGE_TRANSFER_CACHE_DIR / "models"
LANGSFER_CACHE_DIR = CACHE_DIR / "langsfer"
MODEL_CACHE_DIR = LANGSFER_CACHE_DIR / "models"
MODEL_CACHE_DIR.mkdir(parents=True, exist_ok=True)
@@ -11,7 +11,7 @@
from transformers import AutoModel, AutoTokenizer, PreTrainedTokenizer, PreTrainedModel
from gensim.models.fasttext import FastText, load_facebook_model

from language_transfer.constants import MODEL_CACHE_DIR
from langsfer.constants import MODEL_CACHE_DIR

__all__ = ["FastTextEmbeddings", "TransformersEmbeddings"]

@@ -8,17 +8,17 @@

from transformers import PreTrainedTokenizerBase

from language_transfer.initialization import WeightedAverageEmbeddingsInitialization
from language_transfer.alignment import BilingualDictionaryAlignment, IdentityAlignment
from language_transfer.embeddings import TransformersEmbeddings, FastTextEmbeddings
from language_transfer.similarity import CosineSimilarity
from language_transfer.weight import (
from langsfer.initialization import WeightedAverageEmbeddingsInitialization
from langsfer.alignment import BilingualDictionaryAlignment, IdentityAlignment
from langsfer.embeddings import TransformersEmbeddings, FastTextEmbeddings
from langsfer.similarity import CosineSimilarity
from langsfer.weight import (
IdentityWeights,
SoftmaxWeights,
TopKWeights,
SparsemaxWeights,
)
from language_transfer.token_overlap import (
from langsfer.token_overlap import (
SpecialTokenOverlap,
ExactMatchTokenOverlap,
FuzzyMatchTokenOverlap,
@@ -7,11 +7,11 @@
from transformers import PreTrainedTokenizerBase
from tqdm.auto import tqdm

from language_transfer.alignment import AlignmentStrategy, IdentityAlignment
from language_transfer.embeddings import TransformersEmbeddings, FastTextEmbeddings
from language_transfer.similarity import SimilarityStrategy, CosineSimilarity
from language_transfer.weight import WeightsStrategy, IdentityWeights
from language_transfer.token_overlap import TokenOverlapStrategy, NoTokenOverlap
from langsfer.alignment import AlignmentStrategy, IdentityAlignment
from langsfer.embeddings import TransformersEmbeddings, FastTextEmbeddings
from langsfer.similarity import SimilarityStrategy, CosineSimilarity
from langsfer.weight import WeightsStrategy, IdentityWeights
from langsfer.token_overlap import TokenOverlapStrategy, NoTokenOverlap

__all__ = ["EmbeddingInitializer"]

File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
4 changes: 2 additions & 2 deletions src/language_transfer/weight.py → src/langsfer/weight.py
@@ -61,7 +61,7 @@ class SparsemaxWeights(WeightsStrategy):
The original code is licensed under the [MIT license](https://github.com/AndreasMadsen/course-02456-sparsemax/blob/cd73efc1267b5c3b319fb3dc77774c99c10d5d82/LICENSE.md).
Examples:
>>> from language_transfer.weight import SparsemaxWeights
>>> from langsfer.weight import SparsemaxWeights
>>> import numpy as np
>>> weights_strategy = SparsemaxWeights()
>>> scores = np.array([[0.0, 1.0, 2.0], [10, 20, 30]])
@@ -100,7 +100,7 @@ class TopKWeights(WeightsStrategy):
The original code is licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)
Examples:
>>> from language_transfer.weight import TopKWeights
>>> from langsfer.weight import TopKWeights
>>> import numpy as np
>>> weight_strategy = TopKWeights(k=1)
>>> weight_strategy.apply(np.array([[3, 1, 10]])).tolist()
2 changes: 1 addition & 1 deletion tests/test_weight.py
@@ -4,7 +4,7 @@
from hypothesis.extra import numpy as numpy_st
from numpy.typing import NDArray

from language_transfer.weight import (
from langsfer.weight import (
IdentityWeights,
SoftmaxWeights,
SparsemaxWeights,
2 changes: 1 addition & 1 deletion tests/token_overlap/test_token_overlap.py
@@ -1,7 +1,7 @@
import pytest
from transformers import PreTrainedTokenizerBase, AutoTokenizer

from language_transfer.token_overlap import (
from langsfer.token_overlap import (
NoTokenOverlap,
SpecialTokenOverlap,
ExactMatchTokenOverlap,