Skip to content

Commit

Permalink
rearranged properties for readability
Browse files Browse the repository at this point in the history
  • Loading branch information
w4ffl35 committed Oct 14, 2024
1 parent 934a328 commit b57265f
Showing 1 changed file with 72 additions and 72 deletions.
144 changes: 72 additions & 72 deletions src/airunner/handlers/llm/agent/base_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,78 @@ def bot_mood(self, value: str):
def bot_personality(self) -> str:
return self.chatbot.bot_personality

@property
def override_parameters(self):
    """Generate-kwargs derived from the LLM generator settings.

    Returns the prepared kwargs when parameter overriding is enabled,
    otherwise an empty dict.
    """
    settings = self.llm_generator_settings
    kwargs = prepare_llm_generate_kwargs(settings)
    if settings.override_parameters:
        return kwargs
    return {}

@property
def system_instructions(self):
    """System instructions configured on the active chatbot."""
    chatbot = self.chatbot
    return chatbot.system_instructions

@property
def generator_settings(self) -> dict:
    """Generate-kwargs built from the chatbot's own settings."""
    chatbot = self.chatbot
    return prepare_llm_generate_kwargs(chatbot)

@property
def device(self):
    """Torch device resolved from the default LLM GPU setting."""
    gpu_index = self.memory_settings.default_gpu_llm
    return get_torch_device(gpu_index)

@property
def target_files(self):
    """File paths of all target files attached to the chatbot."""
    paths = []
    for entry in self.chatbot.target_files:
        paths.append(entry.file_path)
    return paths

@property
def query_instruction(self):
    """Query instruction for the current agent state.

    SEARCH state yields the stored instruction, CHAT state a canned
    chat-history search instruction; any other state yields None.
    """
    state = self.__state
    if state == AgentState.CHAT:
        return "Search through the chat history for anything relevant to the query."
    if state == AgentState.SEARCH:
        return self.__query_instruction

@property
def text_instruction(self):
    """Text instruction for the current agent state.

    SEARCH state yields the stored instruction, CHAT state a canned
    response instruction; any other state yields None.
    """
    state = self.__state
    if state == AgentState.CHAT:
        return "Use the text to respond to the user"
    if state == AgentState.SEARCH:
        return self.__text_instruction

@property
def index(self):
    """Active index for the current agent state.

    SEARCH state uses the search index, CHAT state the chat-history
    index; any other state yields None.
    """
    state = self.__state
    if state == AgentState.CHAT:
        return self.__chat_history_index
    if state == AgentState.SEARCH:
        return self.__index

@property
def llm(self):
    """Lazily-initialized LLM instance.

    When the API backend is enabled the raw model is used directly;
    otherwise the model and tokenizer are wrapped in a HuggingFaceLLM.
    Returns None when loading fails (the failure is logged).
    """
    if self.__llm is None:
        try:
            if self.llm_generator_settings.use_api:
                self.__llm = self.__model
            else:
                self.__llm = HuggingFaceLLM(
                    model=self.__model,
                    tokenizer=self.__tokenizer,
                )
        except Exception as e:
            # logger.exception preserves the full traceback; plain
            # logger.error with str(e) discarded it.
            self.logger.exception(f"Error loading LLM: {str(e)}")
    return self.__llm

@property
def chat_engine(self):
    """The underlying chat engine instance."""
    engine = self.__chat_engine
    return engine

@property
def is_llama_instruct(self):
    """Whether the model is treated as a llama-instruct model.

    Always True in the current implementation.
    """
    return True

@property
def use_cuda(self):
    """Whether CUDA is available on this machine."""
    available = torch.cuda.is_available()
    return available

@property
def cuda_index(self):
    """Index of the CUDA device to use.

    Hard-coded to the first device in the current implementation.
    """
    return 0

def unload(self):
self.unload_rag()
del self.model
Expand Down Expand Up @@ -175,14 +247,6 @@ def do_interrupt_process(self):
interrupt = self.do_interrupt
return interrupt

@property
def use_cuda(self):
    """Whether CUDA is available on this machine."""
    available = torch.cuda.is_available()
    return available

@property
def cuda_index(self):
    """Index of the CUDA device to use.

    Hard-coded to the first device in the current implementation.
    """
    return 0

def mood(self, botname: str, bot_mood: str, use_mood: bool) -> str:
return (
f"{botname} is not a computer or a bot, you are a human with emotions. "
Expand Down Expand Up @@ -424,70 +488,6 @@ def get_rendered_template(
rendered_template = rendered_template.replace("{{ " + key + " }}", value)
return rendered_template

@property
def override_parameters(self):
    """Generate-kwargs derived from the LLM generator settings.

    Returns the prepared kwargs when parameter overriding is enabled,
    otherwise an empty dict.
    """
    settings = self.llm_generator_settings
    kwargs = prepare_llm_generate_kwargs(settings)
    if settings.override_parameters:
        return kwargs
    return {}

@property
def system_instructions(self):
    """System instructions configured on the active chatbot."""
    chatbot = self.chatbot
    return chatbot.system_instructions

@property
def generator_settings(self) -> dict:
    """Generate-kwargs built from the chatbot's own settings."""
    chatbot = self.chatbot
    return prepare_llm_generate_kwargs(chatbot)

@property
def device(self):
    """Torch device resolved from the default LLM GPU setting."""
    gpu_index = self.memory_settings.default_gpu_llm
    return get_torch_device(gpu_index)

@property
def target_files(self):
    """File paths of all target files attached to the chatbot."""
    paths = []
    for entry in self.chatbot.target_files:
        paths.append(entry.file_path)
    return paths

@property
def query_instruction(self):
    """Query instruction for the current agent state.

    SEARCH state yields the stored instruction, CHAT state a canned
    chat-history search instruction; any other state yields None.
    """
    state = self.__state
    if state == AgentState.CHAT:
        return "Search through the chat history for anything relevant to the query."
    if state == AgentState.SEARCH:
        return self.__query_instruction

@property
def text_instruction(self):
    """Text instruction for the current agent state.

    SEARCH state yields the stored instruction, CHAT state a canned
    response instruction; any other state yields None.
    """
    state = self.__state
    if state == AgentState.CHAT:
        return "Use the text to respond to the user"
    if state == AgentState.SEARCH:
        return self.__text_instruction

@property
def index(self):
    """Active index for the current agent state.

    SEARCH state uses the search index, CHAT state the chat-history
    index; any other state yields None.
    """
    state = self.__state
    if state == AgentState.CHAT:
        return self.__chat_history_index
    if state == AgentState.SEARCH:
        return self.__index

@property
def llm(self):
    """Lazily-initialized LLM instance.

    When the API backend is enabled the raw model is used directly;
    otherwise the model and tokenizer are wrapped in a HuggingFaceLLM.
    Returns None when loading fails (the failure is logged).
    """
    if self.__llm is None:
        try:
            if self.llm_generator_settings.use_api:
                self.__llm = self.__model
            else:
                self.__llm = HuggingFaceLLM(
                    model=self.__model,
                    tokenizer=self.__tokenizer,
                )
        except Exception as e:
            # logger.exception preserves the full traceback; plain
            # logger.error with str(e) discarded it.
            self.logger.exception(f"Error loading LLM: {str(e)}")
    return self.__llm

@property
def chat_engine(self):
    """The underlying chat engine instance."""
    engine = self.__chat_engine
    return engine

@property
def is_llama_instruct(self):
    """Whether the model is treated as a llama-instruct model.

    Always True in the current implementation.
    """
    return True

def run(
self,
prompt: str,
Expand Down

0 comments on commit b57265f

Please sign in to comment.