Skip to content

Commit

Permalink
Merge pull request #27 from allegro/MINOTAUR-1124
Browse files Browse the repository at this point in the history
MINOTAUR-1124 | Lower the minimum required version of Python to 3.8
  • Loading branch information
megatron6000 authored Jun 26, 2024
2 parents 765e242 + f8bdebc commit 25a1cc8
Show file tree
Hide file tree
Showing 5 changed files with 74 additions and 55 deletions.
4 changes: 2 additions & 2 deletions allms/models/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from typing import Type
from typing import Dict, Type

from allms.domain.enumerables import AvailableModels
from allms.models.abstract import AbstractModel
Expand All @@ -20,7 +20,7 @@
]


def get_available_models() -> dict[str, Type[AbstractModel]]:
def get_available_models() -> Dict[str, Type[AbstractModel]]:
return {
AvailableModels.AZURE_OPENAI_MODEL: AzureOpenAIModel,
AvailableModels.AZURE_LLAMA2_MODEL: AzureLlama2Model,
Expand Down
4 changes: 2 additions & 2 deletions allms/models/abstract.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ async def _build_chat_prompts(
self,
prompt_template_args: dict,
system_prompt: SystemMessagePromptTemplate
) -> list[SystemMessagePromptTemplate | HumanMessagePromptTemplate]:
) -> typing.List[typing.Union[SystemMessagePromptTemplate, HumanMessagePromptTemplate]]:
human_message = HumanMessagePromptTemplate(prompt=PromptTemplate(**prompt_template_args))
if not system_prompt:
return [human_message]
Expand Down Expand Up @@ -330,7 +330,7 @@ def _validate_system_prompt(self, system_prompt: typing.Optional[str] = None) ->
raise ValueError(input_exception_message.get_system_prompt_contains_input_variables())

@staticmethod
def _extract_input_variables_from_prompt(prompt: str) -> set[str]:
def _extract_input_variables_from_prompt(prompt: str) -> typing.Set[str]:
input_variables_pattern = r'(?<!\{)\{([^{}]+)\}(?!\})'
input_variables_set = set(re.findall(input_variables_pattern, prompt))
return input_variables_set
Expand Down
102 changes: 60 additions & 42 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
[tool.poetry]
name = "allms"
version = "1.0.4"
version = "1.0.5"
description = ""
authors = ["Allegro Opensource <[email protected]>"]
readme = "README.md"
packages = [{include = "allms"}]

[tool.poetry.dependencies]
python = "^3.10"
python = ">=3.8.1,<4.0"
fsspec = "^2023.6.0"
google-cloud-aiplatform = "1.38.0"
pydash = "^7.0.6"
Expand Down
15 changes: 8 additions & 7 deletions tests/conftest.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import asyncio
import typing
from contextlib import ExitStack
from dataclasses import dataclass
from unittest.mock import patch

Expand Down Expand Up @@ -36,13 +37,13 @@ def __init__(self, *args, **kwargs):
def models():
event_loop = asyncio.new_event_loop()

with (
patch("allms.models.vertexai_palm.CustomVertexAI", ModelWithoutAsyncRequestsMock),
patch("allms.models.vertexai_gemini.CustomVertexAI", ModelWithoutAsyncRequestsMock),
patch("allms.models.vertexai_gemma.VertexAIModelGardenWrapper", ModelWithoutAsyncRequestsMock),
patch("allms.models.azure_llama2.AzureMLOnlineEndpointAsync", ModelWithoutAsyncRequestsMock),
patch("allms.models.azure_mistral.AzureMLOnlineEndpointAsync", ModelWithoutAsyncRequestsMock)
):
with ExitStack() as stack:
stack.enter_context(patch("allms.models.vertexai_palm.CustomVertexAI", ModelWithoutAsyncRequestsMock))
stack.enter_context(patch("allms.models.vertexai_gemini.CustomVertexAI", ModelWithoutAsyncRequestsMock))
stack.enter_context(patch("allms.models.vertexai_gemma.VertexAIModelGardenWrapper", ModelWithoutAsyncRequestsMock))
stack.enter_context(patch("allms.models.azure_llama2.AzureMLOnlineEndpointAsync", ModelWithoutAsyncRequestsMock))
stack.enter_context(patch("allms.models.azure_mistral.AzureMLOnlineEndpointAsync", ModelWithoutAsyncRequestsMock))

return {
"azure_open_ai": AzureOpenAIModel(
config=AzureOpenAIConfiguration(
Expand Down

0 comments on commit 25a1cc8

Please sign in to comment.