Add Ollama support #465

Open · wants to merge 5 commits into main
10 changes: 4 additions & 6 deletions spacy_llm/models/langchain/model.py
@@ -172,12 +172,10 @@ def register_models() -> None:


 @registry.llm_queries("spacy.CallLangChain.v1")
-def query_langchain() -> (
-    Callable[
-        ["langchain_community.llms.BaseLLM", Iterable[Iterable[Any]]],
-        Iterable[Iterable[Any]],
-    ]
-):
+def query_langchain() -> Callable[
+    ["langchain_community.llms.BaseLLM", Iterable[Iterable[Any]]],
+    Iterable[Iterable[Any]],
+]:
     """Returns query Callable for LangChain.
     RETURNS (Callable[["langchain_community.llms.BaseLLM", Iterable[Iterable[Any]]], Iterable[Iterable[Any]]]): Callable
     executing simple prompts on the specified LangChain model.
3 changes: 2 additions & 1 deletion spacy_llm/models/rest/__init__.py
@@ -1,10 +1,11 @@
-from . import anthropic, azure, base, cohere, noop, openai
+from . import anthropic, azure, base, cohere, noop, openai, ollama

 __all__ = [
     "anthropic",
     "azure",
     "base",
     "cohere",
+    "ollama",
     "openai",
     "noop",
 ]
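
With this change in place, the new subpackage is importable alongside the existing REST backends. A quick sanity check, assuming spacy-llm is installed from this branch:

```python
from spacy_llm.models.rest import ollama

# The default endpoints point at a local Ollama server (see model.py below).
print(ollama.Endpoints.GENERATE.value)  # http://localhost:11434/api/generate
```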
170 changes: 170 additions & 0 deletions spacy_llm/models/rest/ollama/__init__.py
@@ -0,0 +1,170 @@
from .model import Endpoints, Ollama
from .registry import (
    ollama_llama3,
    ollama_phi3,
    ollama_wizardlm2,
    ollama_mistral,
    ollama_gemma,
    ollama_mixtral,
    ollama_llama2,
    ollama_codegemma,
    ollama_command_r,
    ollama_command_r_plus,
    ollama_llava,
    ollama_dbrx,
    ollama_codellama,
    ollama_qwen,
    ollama_dolphin_mixtral,
    ollama_llama2_uncensored,
    ollama_mistral_openorca,
    ollama_deepseek_coder,
    ollama_phi,
    ollama_dolphin_mistral,
    ollama_nomic_embed_text,
    ollama_nous_hermes2,
    ollama_orca_mini,
    ollama_llama2_chinese,
    ollama_zephyr,
    ollama_wizard_vicuna_uncensored,
    ollama_openhermes,
    ollama_vicuna,
    ollama_tinyllama,
    ollama_tinydolphin,
    ollama_openchat,
    ollama_starcoder2,
    ollama_wizardcoder,
    ollama_stable_code,
    ollama_starcoder,
    ollama_neural_chat,
    ollama_yi,
    ollama_phind_codellama,
    ollama_starling_lm,
    ollama_wizard_math,
    ollama_falcon,
    ollama_dolphin_phi,
    ollama_orca2,
    ollama_dolphincoder,
    ollama_mxbai_embed_large,
    ollama_nous_hermes,
    ollama_solar,
    ollama_bakllava,
    ollama_sqlcoder,
    ollama_medllama2,
    ollama_nous_hermes2_mixtral,
    ollama_wizardlm_uncensored,
    ollama_dolphin_llama3,
    ollama_codeup,
    ollama_stablelm2,
    ollama_everythinglm,
    ollama_all_minilm,
    ollama_samantha_mistral,
    ollama_yarn_mistral,
    ollama_stable_beluga,
    ollama_meditron,
    ollama_yarn_llama2,
    ollama_deepseek_llm,
    ollama_llama_pro,
    ollama_magicoder,
    ollama_stablelm_zephyr,
    ollama_codebooga,
    ollama_codeqwen,
    ollama_mistrallite,
    ollama_wizard_vicuna,
    ollama_nexusraven,
    ollama_xwinlm,
    ollama_goliath,
    ollama_open_orca_platypus2,
    ollama_wizardlm,
    ollama_notux,
    ollama_megadolphin,
    ollama_duckdb_nsql,
    ollama_alfred,
    ollama_notus,
    ollama_snowflake_arctic_embed,
)

__all__ = [
"Ollama",
"Endpoints",
"ollama_llama3",
"ollama_phi3",
"ollama_wizardlm2",
"ollama_mistral",
"ollama_gemma",
"ollama_mixtral",
"ollama_llama2",
"ollama_codegemma",
"ollama_command_r",
"ollama_command_r_plus",
"ollama_llava",
"ollama_dbrx",
"ollama_codellama",
"ollama_qwen",
"ollama_dolphin_mixtral",
"ollama_llama2_uncensored",
"ollama_mistral_openorca",
"ollama_deepseek_coder",
"ollama_phi",
"ollama_dolphin_mistral",
"ollama_nomic_embed_text",
"ollama_nous_hermes2",
"ollama_orca_mini",
"ollama_llama2_chinese",
"ollama_zephyr",
"ollama_wizard_vicuna_uncensored",
"ollama_openhermes",
"ollama_vicuna",
"ollama_tinyllama",
"ollama_tinydolphin",
"ollama_openchat",
"ollama_starcoder2",
"ollama_wizardcoder",
"ollama_stable_code",
"ollama_starcoder",
"ollama_neural_chat",
"ollama_yi",
"ollama_phind_codellama",
"ollama_starling_lm",
"ollama_wizard_math",
"ollama_falcon",
"ollama_dolphin_phi",
"ollama_orca2",
"ollama_dolphincoder",
"ollama_mxbai_embed_large",
"ollama_nous_hermes",
"ollama_solar",
"ollama_bakllava",
"ollama_sqlcoder",
"ollama_medllama2",
"ollama_nous_hermes2_mixtral",
"ollama_wizardlm_uncensored",
"ollama_dolphin_llama3",
"ollama_codeup",
"ollama_stablelm2",
"ollama_everythinglm",
"ollama_all_minilm",
"ollama_samantha_mistral",
"ollama_yarn_mistral",
"ollama_stable_beluga",
"ollama_meditron",
"ollama_yarn_llama2",
"ollama_deepseek_llm",
"ollama_llama_pro",
"ollama_magicoder",
"ollama_stablelm_zephyr",
"ollama_codebooga",
"ollama_codeqwen",
"ollama_mistrallite",
"ollama_wizard_vicuna",
"ollama_nexusraven",
"ollama_xwinlm",
"ollama_goliath",
"ollama_open_orca_platypus2",
"ollama_wizardlm",
"ollama_notux",
"ollama_megadolphin",
"ollama_duckdb_nsql",
"ollama_alfred",
"ollama_notus",
"ollama_snowflake_arctic_embed",
]
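
The registry functions imported above are defined in spacy_llm/models/rest/ollama/registry.py, which is not part of the excerpt shown here. As a minimal sketch of wiring one of them into a pipeline, assuming the PR registers handles following spacy-llm's usual "spacy.<Provider>.<Model>.v1" convention (the exact handle strings are an assumption; check registry.py for the real names):

```python
import spacy

nlp = spacy.blank("en")
nlp.add_pipe(
    "llm",
    config={
        "task": {"@llm_tasks": "spacy.NER.v3", "labels": ["PERSON", "ORG"]},
        # Assumed handle name; the actual string is defined in registry.py.
        "model": {"@llm_models": "spacy.Ollama.Llama3.v1"},
    },
)
doc = nlp("Ada Lovelace worked with Charles Babbage.")
print([(ent.text, ent.label_) for ent in doc.ents])
```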
170 changes: 170 additions & 0 deletions spacy_llm/models/rest/ollama/model.py
@@ -0,0 +1,170 @@
from enum import Enum
from typing import Any, Dict, Iterable, List, Sized

import requests # type: ignore[import]
from requests import HTTPError

from ..base import REST


class Endpoints(str, Enum):
    GENERATE = "http://localhost:11434/api/generate"
    EMBEDDINGS = "http://localhost:11434/api/embeddings"
    TAGS = "http://localhost:11434/api/tags"


class Ollama(REST):
    @property
    def credentials(self) -> Dict[str, str]:
        # No credentials needed for local Ollama server
        return {}

    def _verify_auth(self) -> None:
        # Healthcheck: Verify connectivity to Ollama server
        try:
            r = requests.get(Endpoints.TAGS.value, timeout=5)
            r.raise_for_status()
        except (requests.exceptions.RequestException, HTTPError) as ex:
            raise ValueError(
                "Failed to connect to the Ollama server. Please ensure that the server is up and running."
            ) from ex

    def __call__(self, prompts: Iterable[Iterable[str]]) -> Iterable[Iterable[str]]:
        headers = {
            "Content-Type": "application/json",
        }
        all_api_responses: List[List[str]] = []

        for prompts_for_doc in prompts:
            api_responses: List[str] = []
            prompts_for_doc = list(prompts_for_doc)

            def _request(json_data: Dict[str, Any]) -> Dict[str, Any]:
                r = self.retry(
                    call_method=requests.post,
                    url=self._endpoint,
                    headers=headers,
                    json={
                        **json_data,
                        **self._config,
                        "model": self._name,
                        "stream": False,
                    },
                    timeout=self._max_request_time,
                )
                try:
                    r.raise_for_status()
                except HTTPError as ex:
                    res_content = r.text
                    # Include specific error message in exception.
                    raise ValueError(
                        f"Request to Ollama API failed: {res_content}"
                    ) from ex

                response = r.json()

                if "error" in response:
                    if self._strict:
                        raise ValueError(f"API call failed: {response['error']}.")
                    else:
                        assert isinstance(prompts_for_doc, Sized)
                        return {"error": [response["error"]] * len(prompts_for_doc)}

                return response

            for prompt in prompts_for_doc:
                responses = _request({"prompt": prompt})
                if "error" in responses:
                    return responses["error"]

                api_responses.append(responses["response"])

            all_api_responses.append(api_responses)

        return all_api_responses

    @staticmethod
    def _get_context_lengths() -> Dict[str, int]:
        return {
            "llama3": 4096,
            "phi3": 4096,
            "wizardlm2": 4096,
            "mistral": 4096,
            "gemma": 4096,
            "mixtral": 47000,
            "llama2": 4096,
            "codegemma": 4096,
            "command-r": 35000,
            "command-r-plus": 35000,
            "llava": 4096,
            "dbrx": 4096,
            "codellama": 4096,
            "qwen": 4096,
            "dolphin-mixtral": 47000,
            "llama2-uncensored": 4096,
            "mistral-openorca": 4096,
            "deepseek-coder": 4096,
            "phi": 4096,
            "dolphin-mistral": 47000,
            "nomic-embed-text": 4096,
            "nous-hermes2": 4096,
            "orca-mini": 4096,
            "llama2-chinese": 4096,
            "zephyr": 4096,
            "wizard-vicuna-uncensored": 4096,
            "openhermes": 4096,
            "vicuna": 4096,
            "tinyllama": 4096,
            "tinydolphin": 4096,
            "openchat": 4096,
            "starcoder2": 4096,
            "wizardcoder": 4096,
            "stable-code": 4096,
            "starcoder": 4096,
            "neural-chat": 4096,
            "yi": 4096,
            "phind-codellama": 4096,
            "starling-lm": 4096,
            "wizard-math": 4096,
            "falcon": 4096,
            "dolphin-phi": 4096,
            "orca2": 4096,
            "dolphincoder": 4096,
            "mxbai-embed-large": 4096,
            "nous-hermes": 4096,
            "solar": 4096,
            "bakllava": 4096,
            "sqlcoder": 4096,
            "medllama2": 4096,
            "nous-hermes2-mixtral": 47000,
            "wizardlm-uncensored": 4096,
            "dolphin-llama3": 4096,
            "codeup": 4096,
            "stablelm2": 4096,
            "everythinglm": 16384,
            "all-minilm": 4096,
            "samantha-mistral": 4096,
            "yarn-mistral": 128000,
            "stable-beluga": 4096,
            "meditron": 4096,
            "yarn-llama2": 128000,
            "deepseek-llm": 4096,
            "llama-pro": 4096,
            "magicoder": 4096,
            "stablelm-zephyr": 4096,
            "codebooga": 4096,
            "codeqwen": 4096,
            "mistrallite": 8192,
            "wizard-vicuna": 4096,
            "nexusraven": 4096,
            "xwinlm": 4096,
            "goliath": 4096,
            "open-orca-platypus2": 4096,
            "wizardlm": 4096,
            "notux": 4096,
            "megadolphin": 4096,
            "duckdb-nsql": 4096,
            "alfred": 4096,
            "notus": 4096,
            "snowflake-arctic-embed": 4096,
        }
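
For context, here is a standalone sketch of the exchange Ollama.__call__ performs for each prompt, assuming a local Ollama server with the llama3 model already pulled (`ollama pull llama3`). It mirrors the fields the wrapper sends and reads ("model", "prompt", "stream", "response", "error"):

```python
import requests

r = requests.post(
    "http://localhost:11434/api/generate",
    headers={"Content-Type": "application/json"},
    json={"model": "llama3", "prompt": "Say hello in one word.", "stream": False},
    timeout=30,
)
r.raise_for_status()
payload = r.json()
if "error" in payload:  # e.g. the requested model has not been pulled yet
    raise ValueError(f"Request to Ollama API failed: {payload['error']}")
print(payload["response"])  # the generated completion text
```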