From f7c7a6f5827a0da89834f7ea5a1ed4e5fdfb158e Mon Sep 17 00:00:00 2001
From: diogoazevedo15
Date: Mon, 2 Sep 2024 14:10:23 +0100
Subject: [PATCH] Removed Logging

---
 examples/04_batching.ipynb | 182 ++++++++-----------------------------
 llmstudio/__init__.py      |   2 +-
 llmstudio/llm/__init__.py  |  74 +++-------------
 3 files changed, 51 insertions(+), 207 deletions(-)

diff --git a/examples/04_batching.ipynb b/examples/04_batching.ipynb
index faaff1df..3bc3137a 100644
--- a/examples/04_batching.ipynb
+++ b/examples/04_batching.ipynb
@@ -11,7 +11,16 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Running LLMstudio Engine on http://localhost:50001 Running LLMstudio Tracking on http://localhost:50002 \n",
+      "\n"
+     ]
+    }
+   ],
    "source": [
     "from llmstudio import LLM\n",
     "import nest_asyncio\n",
@@ -29,16 +38,7 @@
    "cell_type": "code",
    "execution_count": 2,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Running LLMstudio Engine on http://localhost:56766 \n",
-      "Running LLMstudio Tracking on http://localhost:56767 \n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "# llm = LLM(\"openai/gpt-3.5-turbo\")\n",
     "llm = LLM(\"vertexai/gemini-1.5-flash\")"
    ]
   },
@@ -53,7 +53,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -90,21 +90,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [
     {
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "Getting chat responses:   0%|          | 0/20 [00:00<?, ?it/s]
[... truncated ...]

diff --git a/llmstudio/llm/__init__.py b/llmstudio/llm/__init__.py
--- a/llmstudio/llm/__init__.py
+++ b/llmstudio/llm/__init__.py
[... truncated ...]
     ) -> List[str]:
         semaphore = DynamicSemaphore(coroutines, len(inputs), max_tokens=max_tokens)
-        if verbose > 0:
-            responses = await asyncio.gather(
-                *[
-                    self.chat_coroutine(
-                        input=input,
-                        semaphore=semaphore,
-                        retries=retries,
-                        error_threshold=error_threshold,
-                        increment=increment,
-                        verbose=verbose,
-                    )
-                    for input in inputs
-                ],
-            )
-            return responses
-        else:
-            responses = await tqdm_asyncio.gather(
-                *[
-                    self.chat_coroutine(
-                        input=input,
-                        semaphore=semaphore,
-                        retries=retries,
-                        error_threshold=error_threshold,
-                        increment=increment,
-                        verbose=verbose,
-                    )
-                    for input in inputs
-                ],
-                desc="Getting chat responses: ",
-            )
-            return responses
+        responses = await tqdm_asyncio.gather(
+            *[
+                self.chat_coroutine(
+                    input=input,
+                    semaphore=semaphore,
+                    retries=retries,
+                    error_threshold=error_threshold,
+                    increment=increment,
+                )
+                for input in inputs
+            ],
+            desc="Getting chat responses: ",
+        )
+        return responses
 
     def batch_chat(
         self,
@@ -192,7 +148,6 @@ def batch_chat(
         error_threshold: int = 5,
         increment: int = 5,
         max_tokens=None,
-        verbose=0,
     ) -> List[str]:
 
         if coroutines > len(inputs):
@@ -208,7 +163,6 @@ def batch_chat(
                 error_threshold,
                 increment,
                 max_tokens,
-                verbose,
             )
         )
         return responses
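
Note for reviewers: after this change there is a single code path for batch progress reporting. The verbose branch that used a bare asyncio.gather is gone, and every batch goes through tqdm_asyncio.gather, which behaves like asyncio.gather but renders one progress bar for the whole batch. Below is a minimal, self-contained sketch of that surviving pattern, illustrative only: a plain asyncio.Semaphore and a stubbed fake_chat stand in for LLMstudio's internal DynamicSemaphore and the real provider call.

import asyncio
from typing import List

from tqdm.asyncio import tqdm_asyncio


async def fake_chat(input: str, semaphore: asyncio.Semaphore) -> str:
    # Stand-in for LLM.chat_coroutine: acquire a permit, then "call" the model.
    async with semaphore:
        await asyncio.sleep(0.1)  # placeholder for the real network request
        return f"response to: {input}"


async def chat_all(inputs: List[str], coroutines: int) -> List[str]:
    # A fixed-size semaphore stands in for DynamicSemaphore here; it caps how
    # many chat coroutines are in flight at once.
    semaphore = asyncio.Semaphore(coroutines)
    # tqdm_asyncio.gather is asyncio.gather plus a single progress bar, i.e.
    # the one code path this patch keeps.
    return await tqdm_asyncio.gather(
        *[fake_chat(input, semaphore) for input in inputs],
        desc="Getting chat responses: ",
    )


if __name__ == "__main__":
    results = asyncio.run(chat_all([f"question {i}" for i in range(20)], coroutines=5))
    print(results[0])

Bounding concurrency with the semaphore keeps at most `coroutines` requests in flight, which is what lets batch_chat accept large input lists without flooding the provider.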
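
The diff also references DynamicSemaphore(coroutines, len(inputs), max_tokens=...) and retries / error_threshold / increment knobs whose definitions sit outside the visible hunks. Purely as a reading aid, here is one hypothetical shape such a semaphore could take (widen the concurrency limit by `increment` while errors stay under `error_threshold`); this is not LLMstudio's actual implementation.

import asyncio


class GrowingSemaphore:
    """Hypothetical illustration only -- not LLMstudio's DynamicSemaphore.

    Starts with `initial` permits and grants `increment` extra permits after
    each completion while the error count stays below `error_threshold`,
    up to `maximum`.
    """

    def __init__(self, initial: int, maximum: int, increment: int = 5, error_threshold: int = 5):
        self._sem = asyncio.Semaphore(initial)
        self._permits = initial
        self._maximum = maximum
        self._increment = increment
        self._error_threshold = error_threshold
        self._errors = 0

    async def __aenter__(self):
        await self._sem.acquire()
        return self

    async def __aexit__(self, exc_type, exc, tb):
        if exc_type is not None:
            self._errors += 1
        self._sem.release()
        # Widen the window only while errors remain under the threshold.
        if self._errors < self._error_threshold and self._permits < self._maximum:
            grant = min(self._increment, self._maximum - self._permits)
            self._permits += grant
            for _ in range(grant):
                self._sem.release()
        return False  # propagate any exception to the caller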
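
On the caller side, the only visible change is the removed `verbose` argument. A notebook-style usage sketch against the patched API follows; the 20-item inputs list is hypothetical (the cell defining it is not in the visible hunks, though the deleted progress bar shows a batch of 20), while the imports and model id come from the updated 04_batching.ipynb cells.

import nest_asyncio
from llmstudio import LLM

# The notebook imports nest_asyncio so batch_chat's internal event loop can
# run inside Jupyter, whose own loop is already running.
nest_asyncio.apply()

llm = LLM("vertexai/gemini-1.5-flash")

# Hypothetical inputs; the original notebook batches 20 prompts.
inputs = [f"Write a one-line greeting #{i}" for i in range(20)]

# Pre-patch callers could pass verbose=...; post-patch the progress bar is
# always shown and the argument no longer exists.
responses = llm.batch_chat(inputs, coroutines=10)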