diff --git a/README.md b/README.md
index 37651c7133..64fdbea7cf 100644
--- a/README.md
+++ b/README.md
@@ -3,11 +3,12 @@
Get early access to the desktop app | Documentation
Get early access to the desktop app | Documentation
+The New Computer Update introduced --os and a new Computer API. Read More →
Let language models run code on your computer.
diff --git a/docs/assets/favicon.ico b/docs/assets/favicon.ico
new file mode 100644
index 0000000000..e3c4d9ec0d
Binary files /dev/null and b/docs/assets/favicon.ico differ
diff --git a/docs/getting-started/setup.mdx b/docs/getting-started/setup.mdx
index 485f94c2f1..b32fb32652 100644
--- a/docs/getting-started/setup.mdx
+++ b/docs/getting-started/setup.mdx
@@ -69,7 +69,7 @@ curl -sL https://raw.githubusercontent.com/KillianLucas/open-interpreter/main/in
```
```powershell Windows
-iex "& {$(irm https://raw.githubusercontent.com/KillianLucas/open-interpreter/main/installers/oi-windows-installer.ps1)}"
+iex "& {$(irm https://raw.githubusercontent.com/KillianLucas/open-interpreter/main/installers/oi-windows-installer-conda.ps1)}"
```
```bash Linux
diff --git a/installers/oi-windows-installer-conda.ps1 b/installers/oi-windows-installer-conda.ps1
new file mode 100644
index 0000000000..03f09ca480
--- /dev/null
+++ b/installers/oi-windows-installer-conda.ps1
@@ -0,0 +1,112 @@
+# Define variables
+$condaInstallerUrl = "https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe"
+$condaInstallerPath = "$env:TEMP\Miniconda3-latest-Windows-x86_64.exe"
+$condaPath = "$env:USERPROFILE\Miniconda3"
+$envName = "oi"
+$pythonVersion = "3.11.7"
+$packageName = "open-interpreter litellm openai"
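+# $packageName is a space-separated list handed straight to "pip install -U" by the shortcut below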
+$desktopPath = [System.IO.Path]::Combine([System.Environment]::GetFolderPath('Desktop'), 'Open Interpreter.lnk')
+$condaExePath = "$condaPath\Scripts\conda.exe"
+
+# URL of the .ico file
+$icoUrl = "https://raw.githubusercontent.com/OpenInterpreter/open-interpreter/main/docs/assets/favicon.ico"
+$icoPath = "$env:TEMP\open-interpreter.ico"
+
+# Function to download a file with progress
+function DownloadFileWithProgress {
+ param (
+ [string]$url,
+ [string]$output
+ )
+
+ $request = [System.Net.HttpWebRequest]::Create($url)
+ $response = $request.GetResponse()
+ $totalLength = $response.ContentLength
+ $readBytes = 0
+ $buffer = New-Object byte[] 1024
+ $percentComplete = 0
+
+ $stream = $response.GetResponseStream()
+ $fileStream = New-Object IO.FileStream ($output, [System.IO.FileMode]::Create)
+
+ try {
+ while (($read = $stream.Read($buffer, 0, $buffer.Length)) -gt 0) {
+ $fileStream.Write($buffer, 0, $read)
+ $readBytes += $read
+ $newPercentComplete = [math]::Round(($readBytes / $totalLength) * 100)
+
+ if ($newPercentComplete -ne $percentComplete) {
+ $percentComplete = $newPercentComplete
+ Write-Progress -Activity "Downloading Miniconda Installer" -Status "$percentComplete% Complete" -PercentComplete $percentComplete
+ }
+ }
+ } finally {
+ $fileStream.Close()
+ $stream.Close()
+ }
+
+ Write-Progress -Activity "Downloading Miniconda Installer" -Completed
+}
+
+# Download the .ico file
+Write-Host "Downloading icon file..."
+DownloadFileWithProgress -url $icoUrl -output $icoPath
+
+# Function to check if Conda is installed
+function Test-CondaInstalled {
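+    # Invoking conda via '&' throws if the command doesn't exist; the catch converts that into $false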
+ try {
+ & conda --version > $null 2>&1
+ return $true
+ } catch {
+ return $false
+ }
+}
+
+# Check if Conda is installed
+if (-Not (Test-CondaInstalled)) {
+ Write-Host "Conda is not installed."
+
+ # Download Miniconda installer if not already downloaded
+ if (-Not (Test-Path $condaInstallerPath)) {
+ DownloadFileWithProgress -url $condaInstallerUrl -output $condaInstallerPath
+ } else {
+ Write-Host "Miniconda installer already downloaded."
+ }
+
+ # Run the Miniconda installer with messages before and after
+ Write-Host "Starting Miniconda installation... (there will be no progress bar)"
+ Start-Process -Wait -FilePath $condaInstallerPath -ArgumentList "/InstallationType=JustMe", "/AddToPath=1", "/RegisterPython=0", "/S", "/D=$condaPath"
+ Write-Host "Miniconda installation complete."
+
+ # Ensure Conda is in the PATH for the current session
+ $env:Path += ";$condaPath\Scripts;$condaPath"
+} else {
+ Write-Host "Conda is already installed."
+}
+
+# Create and activate the Conda environment, and show progress
+Write-Host "Creating Conda environment '$envName'..."
+& $condaExePath create -n $envName python=$pythonVersion -y
+Write-Host "Conda environment '$envName' created."
+
+# Dynamically generate the user's paths for the shortcut
+$userCondaScriptsPath = "$condaPath\Scripts"
+$userEnvName = $envName
+
+# Create a shortcut on the desktop to activate the environment, install OpenInterpreter, and run it
+$targetPath = "$env:SystemRoot\System32\cmd.exe"
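+# cmd.exe /K runs the command chain and then keeps the window open for the interactive interpreter session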
+$arguments = "/K `"$userCondaScriptsPath\activate.bat $userEnvName && echo Updating Open Interpreter && pip install -U $packageName && cls && echo Launching Open Interpreter && interpreter`""
+
+$shell = New-Object -ComObject WScript.Shell
+$shortcut = $shell.CreateShortcut($desktopPath)
+$shortcut.TargetPath = $targetPath
+$shortcut.Arguments = $arguments
+$shortcut.WorkingDirectory = $env:USERPROFILE
+$shortcut.WindowStyle = 1 # Normal window
+$shortcut.IconLocation = $icoPath
+$shortcut.Save()
+
+Write-Host "Shortcut 'Open Interpreter.lnk' has been created on the desktop with the custom icon."
+
+# Open the shortcut
+Start-Process -FilePath $desktopPath
diff --git a/interpreter/core/async_core.py b/interpreter/core/async_core.py
index d0b0747b0e..13504ecd05 100644
--- a/interpreter/core/async_core.py
+++ b/interpreter/core/async_core.py
@@ -122,7 +122,7 @@ def respond(self, run_code=None):
if self.stop_event.is_set():
return
- if self.print or self.debug:
+ if self.print:
if "start" in chunk:
print("\n")
if chunk["type"] in ["code", "console"] and "format" in chunk:
@@ -140,12 +140,15 @@ def respond(self, run_code=None):
)
print(content, end="", flush=True)
+ if self.debug:
+ print("Interpreter produced this chunk:", chunk)
+
self.output_queue.sync_q.put(chunk)
self.output_queue.sync_q.put(complete_message)
if self.print or self.debug:
- print("Server response complete.")
+ print("\nServer response complete.\n")
except Exception as e:
error = traceback.format_exc() + "\n" + str(e)
@@ -464,17 +467,23 @@ async def send_output():
# First, try to send any unsent messages
while async_interpreter.unsent_messages:
output = async_interpreter.unsent_messages[0]
- try:
- await send_message(output)
+ if async_interpreter.debug:
+ print("This was unsent, sending it again:", output)
+
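+                    # Only remove a message from the unsent queue once it has actually been delivered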
+ success = await send_message(output)
+ if success:
async_interpreter.unsent_messages.popleft()
- except Exception:
- # If we can't send, break and try again later
- break
# If we've sent all unsent messages, get a new output
if not async_interpreter.unsent_messages:
output = await async_interpreter.output()
- await send_message(output)
+ success = await send_message(output)
+ if not success:
+ async_interpreter.unsent_messages.append(output)
+ if async_interpreter.debug:
+ print(
+ f"Added message to unsent_messages queue after failed attempts: {output}"
+ )
except Exception as e:
error = traceback.format_exc() + "\n" + str(e)
@@ -506,16 +515,19 @@ async def send_message(output):
# time.sleep(0.5)
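+            # send_message reports success via its return value (True = delivered and, if required, acknowledged) instead of raising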
if websocket.client_state != WebSocketState.CONNECTED:
- break
+ return False
try:
# print("sending:", output)
if isinstance(output, bytes):
await websocket.send_bytes(output)
+ return True # Haven't set up ack for this
else:
if async_interpreter.require_acknowledge:
output["id"] = id
+ if async_interpreter.debug:
+ print("Sending this over the websocket:", output)
await websocket.send_text(json.dumps(output))
if async_interpreter.require_acknowledge:
@@ -524,31 +536,38 @@ async def send_message(output):
if id in async_interpreter.acknowledged_outputs:
async_interpreter.acknowledged_outputs.remove(id)
acknowledged = True
+ if async_interpreter.debug:
+ print("This output was acknowledged:", output)
break
await asyncio.sleep(0.0001)
if acknowledged:
- return
+ return True
else:
- raise Exception("Acknowledgement not received.")
+ if async_interpreter.debug:
+ print("Acknowledgement not received for:", output)
+ return False
else:
- return
+ return True
except Exception as e:
print(
f"Failed to send output on attempt number: {attempt + 1}. Output was: {output}"
)
print(f"Error: {str(e)}")
- await asyncio.sleep(0.05)
+ traceback.print_exc()
+ await asyncio.sleep(0.01)
# If we've reached this point, we've failed to send after 100 attempts
if output not in async_interpreter.unsent_messages:
- async_interpreter.unsent_messages.append(output)
+ print("Failed to send message:", output)
+ else:
print(
- f"Added message to unsent_messages queue after failed attempts: {output}"
+ "Failed to send message, also it was already in unsent queue???:",
+ output,
)
- else:
- print("Why was this already in unsent_messages?", output)
+
+ return False
await asyncio.gather(receive_input(), send_output())
@@ -577,7 +596,8 @@ async def post_input(payload: Dict[str, Any]):
@router.post("/settings")
async def set_settings(payload: Dict[str, Any]):
for key, value in payload.items():
- print(f"Updating settings: {key} = {value}")
+ print("Updating settings...")
+ # print(f"Updating settings: {key} = {value}")
if key in ["llm", "computer"] and isinstance(value, dict):
if key == "auto_run":
return {
diff --git a/interpreter/core/llm/llm.py b/interpreter/core/llm/llm.py
index 9b6997abf4..15f137d83b 100644
--- a/interpreter/core/llm/llm.py
+++ b/interpreter/core/llm/llm.py
@@ -279,8 +279,10 @@ def run(self, messages):
if self.interpreter.verbose:
litellm.set_verbose = True
- if self.interpreter.debug:
- print("\n\n\nOPENAI COMPATIBLE MESSAGES\n\n\n")
+ if (
+ self.interpreter.debug == True
+ ): # debug will equal "server" if we're debugging the server specifically
+ print("\n\n\nOPENAI COMPATIBLE MESSAGES:\n\n\n")
for message in messages:
if len(str(message)) > 5000:
print(str(message)[:200] + "...")
@@ -400,6 +402,8 @@ def fixed_litellm_completions(**params):
attempts = 4
first_error = None
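+    # Disable litellm's internal retries; the loop below already retries up to 4 times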
+ params["num_retries"] = 0
+
for attempt in range(attempts):
try:
yield from litellm.completion(**params)
diff --git a/interpreter/core/llm/utils/convert_to_openai_messages.py b/interpreter/core/llm/utils/convert_to_openai_messages.py
index 3689b0d513..aabe2790f8 100644
--- a/interpreter/core/llm/utils/convert_to_openai_messages.py
+++ b/interpreter/core/llm/utils/convert_to_openai_messages.py
@@ -83,6 +83,10 @@ def convert_to_openai_messages(
new_message["name"] = "execute"
if "content" not in message:
print("What is this??", content)
+ if type(message["content"]) != str:
+ if interpreter.debug:
+ print("\n\n\nStrange chunk found:", message, "\n\n\n")
+ message["content"] = str(message["content"])
if message["content"].strip() == "":
new_message[
"content"
diff --git a/interpreter/core/render_message.py b/interpreter/core/render_message.py
index 874709c4fa..55e2b95439 100644
--- a/interpreter/core/render_message.py
+++ b/interpreter/core/render_message.py
@@ -21,7 +21,12 @@ def render_message(interpreter, message):
)
# Extract the output content
- outputs = (line["content"] for line in output if line.get("format") == "output" and "IGNORE_ALL_ABOVE_THIS_LINE" not in line["content"])
+ outputs = (
+ line["content"]
+ for line in output
+ if line.get("format") == "output"
+ and "IGNORE_ALL_ABOVE_THIS_LINE" not in line["content"]
+ )
# Replace the part with the output
parts[i] = "\n".join(outputs)
@@ -29,7 +34,9 @@ def render_message(interpreter, message):
# Join the parts back into the message
rendered_message = "".join(parts).strip()
- if interpreter.debug:
+ if (
+ interpreter.debug == True
+ ): # debug will equal "server" if we're debugging the server specifically
print("\n\n\nSYSTEM MESSAGE\n\n\n")
print(rendered_message)
print("\n\n\n")
diff --git a/interpreter/terminal_interface/profiles/defaults/obsidian.py b/interpreter/terminal_interface/profiles/defaults/obsidian.py
new file mode 100644
index 0000000000..edfe4df22b
--- /dev/null
+++ b/interpreter/terminal_interface/profiles/defaults/obsidian.py
@@ -0,0 +1,32 @@
+"""
+This is an Open Interpreter profile to control an Obsidian vault.
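+Typically run with: interpreter --profile obsidian.py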
+"""
+
+from interpreter import interpreter
+import os
+
+# You can hardcode the path to the Obsidian vault or use the environment variable
+obsidian_directory = os.environ.get("OBSIDIAN_VAULT_PATH")
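+# Note: if OBSIDIAN_VAULT_PATH is unset this will be None, so set the variable before launching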
+
+# You can update to the model you want to use
+interpreter.llm.model = "groq/llama-3.1-70b-versatile"
+
+interpreter.computer.import_computer_api = False
+
+interpreter.llm.supports_functions = False
+interpreter.llm.supports_vision = False
+interpreter.llm.context_window = 110000
+interpreter.llm.max_tokens = 4096
+interpreter.auto_run = True
+
+interpreter.custom_instructions = f"""
+You are an AI assistant integrated with Obsidian. You love Obsidian and will only focus on Obsidian tasks.
+Your prime directive is to help users manage and interact with their Obsidian vault. You have full control and permission over this vault.
+The root of the Obsidian vault is {obsidian_directory}.
+You can create, read, update, and delete markdown files in this directory.
+You can create new directories as well. Organization is important.
+You are able to get the directory structure of the vault to learn which files exist.
+You are able to print out the contents of a file to help you learn its contents.
+Use markdown syntax for formatting when creating or editing files.
+Every file is markdown.
+"""
diff --git a/pyproject.toml b/pyproject.toml
index 4c3778a4fa..36dbde0b33 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ packages = [
{include = "interpreter"},
{include = "scripts"},
]
-version = "0.3.7" # Use "-rc1", "-rc2", etc. for pre-release versions
+version = "0.3.8" # Use "-rc1", "-rc2", etc. for pre-release versions
description = "Let language models run code"
authors = ["Killian Lucas