From: ELPA Syncer
Subject: [elpa] externals/llm e20c47d232 1/5: Add llm-name, for displaying the model or provider name
Date: Fri, 29 Dec 2023 21:58:04 -0500 (EST)

branch: externals/llm
commit e20c47d23281b489057a53d36daac8378b76b6a2
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>

    Add llm-name, for displaying the model or provider name
---
 NEWS.org        |  1 +
 README.org      |  1 +
 llm-fake.el     |  3 +++
 llm-gemini.el   |  4 ++++
 llm-gpt4all.el  |  4 ++++
 llm-llamacpp.el |  5 +++++
 llm-ollama.el   |  3 +++
 llm-openai.el   |  3 +++
 llm-vertex.el   |  3 +++
 llm.el          | 14 ++++++++++++++
 10 files changed, 41 insertions(+)

diff --git a/NEWS.org b/NEWS.org
index 6f267dc5ab..e7396caedb 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -1,5 +1,6 @@
 * Version 0.8
 - Allow users to change the Open AI URL, to allow for proxies and other services that re-use the API.
+- Add =llm-name= and =llm-cancel-request= to the API.
 * Version 0.7
 - Upgrade Google Cloud Vertex to Gemini - previous models are no longer available.
 - Added =gemini= provider, which is an alternate endpoint with alternate (and easier) authentication and setup compared to Cloud Vertex.
diff --git a/README.org b/README.org
index 7959d11616..7a8d362c97 100644
--- a/README.org
+++ b/README.org
@@ -98,6 +98,7 @@ For all callbacks, the callback will be executed in the buffer the function was
 - ~llm-embedding-async provider string vector-callback error-callback~: Same as ~llm-embedding~ but this is processed asynchronously. ~vector-callback~ is called with the vector embedding, and, in case of error, ~error-callback~ is called with the same arguments as in ~llm-chat-async~.
 - ~llm-count-tokens provider string~: Count how many tokens are in ~string~.  This may vary by ~provider~, because some providers implement an API for this, but the result is typically about the same.  This gives an estimate if the provider has no API support.
 - ~llm-cancel-request request~: Cancels the given request, if possible.  The ~request~ object is the return value of async and streaming functions.
+- ~llm-name provider~: Provides a short name of the model or provider, suitable for showing to users.
 
   And the following helper functions:
  - ~llm-make-simple-chat-prompt text~: For the common case of just wanting a simple text prompt without the richness that the ~llm-chat-prompt~ struct provides, use this to turn a string into a ~llm-chat-prompt~ that can be passed to the main functions above.
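
As a quick illustration of the README entry added above (a sketch, not part of the patch): the package's fake provider needs no configuration, so it is the easiest way to see the new generic in action.

;; Sketch: exercising llm-name via the no-setup fake provider.
;; The return value comes from the llm-fake method added below.
(require 'llm)
(require 'llm-fake)

(llm-name (make-llm-fake))  ; => "Fake"
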
diff --git a/llm-fake.el b/llm-fake.el
index 30db7ba9c4..804ccdeb6f 100644
--- a/llm-fake.el
+++ b/llm-fake.el
@@ -115,4 +115,7 @@ message cons. If nil, the response will be a simple vector."
     (t (funcall error-callback (car err) (cdr err))))
   nil)
 
+(cl-defmethod llm-name ((_ llm-fake))
+  "Fake")
+
 (provide 'llm-fake)
diff --git a/llm-gemini.el b/llm-gemini.el
index b8309501a2..f228ece065 100644
--- a/llm-gemini.el
+++ b/llm-gemini.el
@@ -121,6 +121,10 @@ You can get this at https://makersuite.google.com/app/apikey."
                     :data (llm-vertex--to-count-token-request (llm-vertex--chat-request-streaming (llm-make-simple-chat-prompt string))))
    #'llm-vertex--count-tokens-extract-response))
 
+(cl-defmethod llm-name ((_ llm-gemini))
+  "Return the name of PROVIDER."
+  "Gemini")
+
 (provide 'llm-gemini)
 
 ;;; llm-gemini.el ends here
diff --git a/llm-gpt4all.el b/llm-gpt4all.el
index 3b0f811085..6019f08f0a 100644
--- a/llm-gpt4all.el
+++ b/llm-gpt4all.el
@@ -81,6 +81,10 @@ default the default GPT4all port."
  ;; GPT4All does not implement streaming, so instead we just use the async method.
   (llm-chat-async provider prompt response-callback error-callback))
 
+(cl-defmethod llm-name ((provider llm-gpt4all))
+  "Return the name of the provider."
+  (llm-gpt4all-chat-model provider))
+
 (provide 'llm-gpt4all)
 
 ;;; llm-gpt4all.el ends here
diff --git a/llm-llamacpp.el b/llm-llamacpp.el
index d1629468d0..b624713379 100644
--- a/llm-llamacpp.el
+++ b/llm-llamacpp.el
@@ -196,5 +196,10 @@ them from 1 to however many are sent.")
                                    (llm-request-callback-in-buffer
                                     buf error-callback 'error "Unknown error calling llm-llamacpp")))))
 
+(cl-defmethod llm-name ((_ llm-llamacpp))
+  ;; We don't actually know the name of the model, so we have to just name Llama
+  ;; CPP itself.
+  "Llama CPP")
+
 (provide 'llm-llamacpp)
 ;;; llm-llamacpp.el ends here
diff --git a/llm-ollama.el b/llm-ollama.el
index a24d017caa..2ce66eac12 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -218,6 +218,9 @@ STREAMING if non-nil, turn on response streaming."
                   ;; seem to have an error response.
                  (llm-request-callback-in-buffer buf error-callback "Unknown error calling ollama")))))
 
+(cl-defmethod llm-name ((provider llm-ollama))
+  (llm-ollama-chat-model provider))
+
 (provide 'llm-ollama)
 
 ;;; llm-ollama.el ends here
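
Note the contrast with the hosted providers: for Ollama (as for GPT4All above) llm-name reports the configured chat model rather than a fixed provider label. A hypothetical configuration, assuming the make-llm-ollama constructor documented in the README:

;; Sketch: on self-hosted providers llm-name returns the model name.
;; "mistral" is only an illustrative :chat-model value.
(require 'llm-ollama)

(llm-name (make-llm-ollama :chat-model "mistral"))  ; => "mistral"
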
diff --git a/llm-openai.el b/llm-openai.el
index d11163dbc7..18819693db 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -282,6 +282,9 @@ them from 1 to however many are sent.")
                                                         (list (make-llm-chat-prompt-interaction :role 'assistant :content response))))
                                           (llm-request-callback-in-buffer buf response-callback response))))))
 
+(cl-defmethod llm-name ((_ llm-openai))
+  "Open AI")
+
 (provide 'llm-openai)
 
 ;;; llm-openai.el ends here
diff --git a/llm-vertex.el b/llm-vertex.el
index 805d0b33c9..40a4630116 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -309,6 +309,9 @@ MODEL "
                              (llm-make-simple-chat-prompt string))))
    #'llm-vertex--count-tokens-extract-response))
 
+(cl-defmethod llm-name ((_ llm-vertex))
+  "Gemini")
+
 (provide 'llm-vertex)
 
 ;;; llm-vertex.el ends here
diff --git a/llm.el b/llm.el
index 8ae76632da..2daf60599d 100644
--- a/llm.el
+++ b/llm.el
@@ -289,6 +289,20 @@ methods."
             (kill-buffer-query-functions nil))
     (kill-buffer buf)))
 
+(cl-defgeneric llm-name (_)
+  "Return the name of the model in PROVIDER.
+This is expected to be suitable for short labels. For example, if
+the client wants to have a conversation with prefixes of `user> '
+and a similar label for LLM (for example `Mistral> '), this
+string should be short enough to fit that role.
+
+Names are expected to be one word where possible, and
+capitalized when appropriate.
+
+This should be the name of the model, not the provider, where it
+makes sense. This is not expected to be unique per provider."
+  "LLM")
+
 (defun llm-chat-prompt-to-text (prompt)
   "Convert PROMPT `llm-chat-prompt' to a simple text.
 This should only be used for logging or debugging."
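
To make the `Mistral> '-style labeling in the new docstring concrete, a rough sketch; my/llm-chat-label and my/llm-provider are hypothetical names, not part of this patch:

;; Sketch: build the short chat-prefix label the docstring describes.
;; The "LLM" fallback comes from the cl-defgeneric default above.
(defun my/llm-chat-label (provider)
  "Return a short `NAME> ' prefix for PROVIDER, e.g. \"Mistral> \"."
  (format "%s> " (llm-name provider)))

;; (my/llm-chat-label my/llm-provider) ; => e.g. "Mistral> " or "LLM> "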


