diff --git a/interpreter/core/llm/llm.py b/interpreter/core/llm/llm.py
index 980672db58..4b01fc14d9 100644
--- a/interpreter/core/llm/llm.py
+++ b/interpreter/core/llm/llm.py
@@ -26,15 +26,13 @@
 # Create or get the logger
 logger = logging.getLogger("LiteLLM")
 
-
 class SuppressDebugFilter(logging.Filter):
     def filter(self, record):
         # Suppress only the specific message containing the keywords
-        if "cost map" in record.getMessage():
+        if "cost map" in record.getMessage() or "vision support" in record.getMessage():
             return False  # Suppress this log message
         return True  # Allow all other messages
 
-
 class Llm:
     """
     A stateless LMC-style LLM with some helpful properties.
diff --git a/interpreter/terminal_interface/local_setup.py b/interpreter/terminal_interface/local_setup.py
index 95ee192baa..7ba0063722 100644
--- a/interpreter/terminal_interface/local_setup.py
+++ b/interpreter/terminal_interface/local_setup.py
@@ -303,6 +303,7 @@ def download_model(models_dir, models, interpreter):
 
         # Set the model to the selected model
         interpreter.llm.model = f"ollama/{model}"
+        interpreter.llm.supports_functions = False
 
         # Send a ping, which will actually load the model
 
@@ -373,6 +374,7 @@ def download_model(models_dir, models, interpreter):
 
         interpreter.llm.model = jan_model_name
         interpreter.llm.api_key = "dummy"
+        interpreter.llm.supports_functions = False
         interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
         # time.sleep(1)
 
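
Note on the llm.py hunk: the filter now drops LiteLLM's "vision support" startup
message in addition to the existing "cost map" one. A minimal sketch of how a
logging.Filter like this takes effect follows; the addFilter call is assumed to
exist elsewhere in the module and is not part of this hunk.

    import logging

    logger = logging.getLogger("LiteLLM")

    class SuppressDebugFilter(logging.Filter):
        def filter(self, record):
            # Returning False drops the record; True lets it through.
            msg = record.getMessage()
            return not ("cost map" in msg or "vision support" in msg)

    # Attach the filter so records emitted via this logger are screened
    # before any handler sees them.
    logger.addFilter(SuppressDebugFilter())
    logger.warning("loaded cost map")   # suppressed
    logger.warning("model ready")       # passes through as usual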
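
Note on the local_setup.py hunks: models served through Ollama or Jan generally
do not implement OpenAI-style function calling, so the setup flow flags them
with supports_functions = False. A hypothetical sketch of the kind of request
dispatch such a flag would steer; build_request and EXECUTE_SCHEMA are invented
names for illustration, not the project's actual internals.

    # Hypothetical illustration of a supports_functions switch.
    EXECUTE_SCHEMA = {
        "name": "execute",
        "description": "Run a block of code",
        "parameters": {
            "type": "object",
            "properties": {
                "language": {"type": "string"},
                "code": {"type": "string"},
            },
            "required": ["language", "code"],
        },
    }

    def build_request(llm, messages):
        if getattr(llm, "supports_functions", False):
            # Function-calling path: offer a tool schema the model can invoke.
            return {"messages": messages, "functions": [EXECUTE_SCHEMA]}
        # Fallback path: send plain messages and parse fenced code blocks
        # out of the model's text reply on the client side.
        return {"messages": messages}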