/**
 * MIT License
 *
 * Copyright (c) 2025 Matheus C. França
 *
 * Permission is granted to use, modify, and distribute this software
 * under the terms of the MIT License.
 */

/++
 + Module providing a D language binding for the Ollama REST API.
 +
 + This module defines the `OllamaClient` class, which facilitates interaction with an Ollama server
 + for tasks such as text generation, chat interactions, and model management. It supports both
 + native Ollama endpoints and OpenAI-compatible endpoints, using `std.net.curl` for HTTP requests
 + and `std.json` for JSON processing.
 +
 + Examples:
 +     ---
 +     import ollama.client;
 +     import std.stdio;
 +
 +     void main() {
 +         auto client = new OllamaClient();
 +         auto response = client.generate("llama3", "What is the weather like?");
 +         writeln(response["response"].str);
 +     }
 +     ---
 +
 + See_Also:
 +     - $(LINK2 https://github.com/ollama/ollama/blob/main/docs/api.md, Ollama API Documentation)
 +     - $(LINK2 https://github.com/ollama/ollama/blob/main/docs/openai.md, OpenAI Compatibility)
 +/
module ollama.client;

import std;

@safe:

/++
 + Represents a single message in a chat interaction.
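 +
 + Examples:
 +     A small usage sketch:
 +     ---
 +     auto msg = Message("user", "Hello!");
 +     assert(msg.toJson()["role"].str == "user");
 +     assert(msg.toJson()["content"].str == "Hello!");
 +     ---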
 +/
struct Message
{
    string role; /// The role of the message sender (e.g., "user", "assistant").
    string content; /// The text content of the message.

    /++
     + Converts the message to a JSON object.
     +
     + Returns: A `JSONValue` object with "role" and "content" fields.
     +/
    JSONValue toJson() const
    {
        JSONValue j = ["role": JSONValue(role), "content": JSONValue(content)];
        return j;
    }
}

/++
 + A client class for interacting with the Ollama REST API.
 +
 + This class provides methods for text generation, chat interactions, and model management using
 + `std.net.curl` for HTTP requests and `std.json` for JSON handling. Streaming is not fully supported
 + in this version due to limitations in `std.net.curl`.
 +
 + Examples:
 +     ---
 +     auto client = new OllamaClient();
 +     auto chatResponse = client.chat("llama3", [Message("user", "Hi there!")]);
 +     writeln(chatResponse["message"]["content"].str);
 +     ---
 +/
class OllamaClient
{
    private string host; /// The base URL of the Ollama server.
    private Duration timeout = 60.seconds; /// Default timeout for HTTP requests.

    /++
     + Constructs a new Ollama client instance.
     +
     + Params:
     +     host = The base URL of the Ollama server. Defaults to `DEFAULT_HOST` if not specified.
     +/
    this(string host = DEFAULT_HOST)
    {
        this.host = host;
    }

    /++
     + Sets the timeout duration for HTTP requests.
     +
     + Params:
     +     timeout = The duration to wait before timing out HTTP requests.
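     +
     + Examples:
     +     A short sketch; 120 seconds is an arbitrary example value:
     +     ---
     +     auto client = new OllamaClient();
     +     client.setTimeOut(120.seconds); // give slow models more time
     +     ---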
     +/
    void setTimeOut(Duration timeout)
    {
        this.timeout = timeout;
    }

    /++
     + Private helper method for performing HTTP POST requests.
     +
     + Params:
     +     url = The endpoint URL to send the request to.
     +     data = The JSON data to send in the request body.
     +     stream = Whether to request a streaming response (ignored in this implementation).
     +
     + Returns: A `JSONValue` object with the response.
     +/
    private JSONValue post(string url, JSONValue data, bool stream = false) @trusted
    {
        auto client = HTTP();
        client.addRequestHeader("Content-Type", "application/json");
        client.connectTimeout(timeout);
        client.operationTimeout(timeout);

        auto jsonStr = data.toString();
        auto response = std.net.curl.post(url, jsonStr, client);
        auto jsonResponse = parseJSON(response);

        if (const error = "error" in jsonResponse)
        {
            // Native endpoints report errors as a plain string; OpenAI-compatible
            // endpoints wrap them in an object with a "message" field.
            immutable msg = error.type == JSONType.string ? error.str
                : (error.type == JSONType.object && "message" in *error
                        ? (*error)["message"].str : error.toString());
            throw new Exception("HTTP request failed: " ~ msg);
        }
        return jsonResponse;
    }

    /++
     + Private helper method for performing HTTP GET requests.
     +
     + Params:
     +     url = The endpoint URL to send the request to.
     +
     + Returns: A `JSONValue` object with the response.
     +/
    private JSONValue get(string url) @trusted
    {
        auto client = HTTP();
        client.connectTimeout(timeout);
        client.operationTimeout(timeout);

        auto response = std.net.curl.get(url, client);
        auto jsonResponse = parseJSON(response);
        if (const error = "error" in jsonResponse)
        {
            // Same error shapes as in post(): a plain string or an object
            // carrying a "message" field.
            immutable msg = error.type == JSONType.string ? error.str
                : (error.type == JSONType.object && "message" in *error
                        ? (*error)["message"].str : error.toString());
            throw new Exception("HTTP request failed: " ~ msg);
        }
        return jsonResponse;
    }

    /++
     + Generates text based on a prompt using the specified model.
     +
     + Params:
     +     model = The name of the model to use (e.g., "llama3").
     +     prompt = The input text to generate from.
     +     options = Additional generation options (e.g., temperature, top_k).
     +     stream = Whether to stream the response (ignored in this implementation).
     +
     + Returns: A `JSONValue` object containing the generated text and metadata.
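     +
     + Examples:
     +     A minimal sketch, assuming a local server with the "llama3" model pulled:
     +     ---
     +     auto client = new OllamaClient();
     +     JSONValue opts = ["temperature": JSONValue(0.7)];
     +     auto result = client.generate("llama3", "Why is the sky blue?", opts);
     +     writeln(result["response"].str); // generated text lives in "response"
     +     ---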
     +/
    JSONValue generate(string model, string prompt, JSONValue options = JSONValue.init, bool stream = false)
    {
        auto url = host ~ "/api/generate";
        JSONValue data = [
            "model": JSONValue(model),
            "prompt": JSONValue(prompt),
            "options": options,
            "stream": JSONValue(stream)
        ];
        return post(url, data, stream);
    }

    /++
     + Engages in a chat interaction using the specified model and message history.
     +
     + Params:
     +     model = The name of the model to use.
     +     messages = An array of `Message` structs representing the chat history.
     +     options = Additional chat options (e.g., temperature).
     +     stream = Whether to stream the response (ignored in this implementation).
     +
     + Returns: A `JSONValue` object containing the chat response and metadata.
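     +
     + Examples:
     +     A minimal sketch; the reply text is found under "message"/"content":
     +     ---
     +     auto client = new OllamaClient();
     +     Message[] history = [Message("user", "Hi there!")];
     +     auto reply = client.chat("llama3", history);
     +     writeln(reply["message"]["content"].str);
     +     ---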
     +/
    JSONValue chat(string model, Message[] messages, JSONValue options = JSONValue.init, bool stream = false)
    {
        auto url = host ~ "/api/chat";
        JSONValue[] msgArray;
        foreach (msg; messages)
        {
            msgArray ~= msg.toJson();
        }
        JSONValue data = [
            "model": JSONValue(model),
            "messages": JSONValue(msgArray),
            "options": options,
            "stream": JSONValue(stream)
        ];
        return post(url, data, stream);
    }

    /++
     + Retrieves the list of available models from the Ollama server as a formatted JSON string.
     +
     + Returns: A string containing the JSON-formatted list of model details, pretty-printed.
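     +
     + Examples:
     +     ---
     +     auto client = new OllamaClient();
     +     writeln(client.listModels()); // pretty-printed JSON from /api/tags
     +     ---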
     +/
    string listModels()
    {
        auto url = host ~ "/api/tags";
        auto jsonResponse = get(url);
        return jsonResponse.toPrettyString();
    }

    /++
     + Retrieves detailed information about a specific model as a formatted JSON string.
     +
     + Params:
     +     model = The name of the model to query.
     +
     + Returns: A string containing the JSON-formatted model metadata, pretty-printed.
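     +
     + Examples:
     +     ---
     +     auto client = new OllamaClient();
     +     writeln(client.showModel("llama3")); // modelfile, parameters, template, ...
     +     ---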
     +/
    string showModel(string model)
    {
        auto url = host ~ "/api/show";
        JSONValue data = ["name": JSONValue(model)];
        auto jsonResponse = post(url, data);
        return jsonResponse.toPrettyString();
    }

    /++
     + Creates a new model on the Ollama server using a modelfile.
     +
     + Params:
     +     name = The name of the new model.
     +     modelfile = The modelfile content defining the model.
     +
     + Returns: A `JSONValue` object with creation status.
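     +
     + Examples:
     +     A sketch using the Ollama Modelfile syntax ("FROM" picks the base model,
     +     "SYSTEM" sets a system prompt):
     +     ---
     +     auto client = new OllamaClient();
     +     auto status = client.createModel("mario",
     +         "FROM llama3\nSYSTEM You are Mario from Super Mario Bros.");
     +     ---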
     +/
    JSONValue createModel(string name, string modelfile)
    {
        auto url = host ~ "/api/create";
        JSONValue data = [
            "name": JSONValue(name),
            "modelfile": JSONValue(modelfile)
        ];
        return post(url, data);
    }

    /++
     + Copies an existing model to a new name on the Ollama server.
     +
     + Params:
     +     source = The name of the model to copy from (e.g., "llama3.1:8b").
     +     destination = The name of the new model to create (e.g., "llama3.1:8b-copy").
     +
     + Returns: A `JSONValue` object with the copy status.
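     +
     + Examples:
     +     ---
     +     auto client = new OllamaClient();
     +     client.copy("llama3.1:8b", "llama3.1:8b-copy");
     +     ---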
     +/
    JSONValue copy(string source, string destination)
    {
        auto url = host ~ "/api/copy";
        JSONValue data = [
            "source": JSONValue(source),
            "destination": JSONValue(destination)
        ];
        return post(url, data);
    }

    /++
     + Deletes a model from the Ollama server.
     +
     + Params:
     +     name = The name of the model to delete (e.g., "llama3.1:8b-copy").
     +
     + Returns: A `JSONValue` object with the delete status.
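     +
     + Examples:
     +     ---
     +     auto client = new OllamaClient();
     +     client.deleteModel("llama3.1:8b-copy");
     +     ---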
     +/
    JSONValue deleteModel(string name)
    {
        auto url = host ~ "/api/delete";
        JSONValue data = [
            "name": JSONValue(name)
        ];
        return post(url, data);
    }

    /++
     + Pulls a model from the Ollama model registry onto the server.
     +
     + Params:
     +     name = The name of the model to pull (e.g., "llama3").
     +     stream = Whether to stream the response (ignored in this implementation).
     +
     + Returns: A `JSONValue` object with the pull status.
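     +
     + Examples:
     +     A sketch; pulling can take a while, so a longer timeout may be needed:
     +     ---
     +     auto client = new OllamaClient();
     +     client.setTimeOut(300.seconds);
     +     auto status = client.pull("llama3");
     +     writeln(status["status"].str); // "success" once the pull completes
     +     ---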
     +/
    JSONValue pull(string name, bool stream = false)
    {
        auto url = host ~ "/api/pull";
        JSONValue data = [
            "name": JSONValue(name),
            "stream": JSONValue(stream)
        ];
        return post(url, data, stream);
    }

    /++
     + Pushes a model from the server to the Ollama model registry.
     +
     + Params:
     +     name = The name of the model to push, in the form "namespace/model:tag".
     +     stream = Whether to stream the response (ignored in this implementation).
     +
     + Returns: A `JSONValue` object with the push status.
     +/
    JSONValue push(string name, bool stream = false)
    {
        auto url = host ~ "/api/push";
        JSONValue data = [
            "name": JSONValue(name),
            "stream": JSONValue(stream)
        ];
        return post(url, data, stream);
    }

    /++
     + Retrieves the version of the Ollama server.
     +
     + Returns: A string containing the server version.
     +/
    string getVersion()
    {
        auto url = host ~ "/api/version";
        auto jsonResponse = get(url);
        return jsonResponse["version"].str; // returns just the version string
    }

    /++
     + Performs an OpenAI-style chat completion.
     +
     + Params:
     +     model = The name of the model to use.
     +     messages = An array of `Message` structs representing the chat history.
     +     maxTokens = Maximum number of tokens to generate (0 omits the field, leaving the limit to the server).
     +     temperature = Sampling temperature (default: 1.0).
     +     stream = Whether to stream the response (ignored in this implementation).
     +
     + Returns: A `JSONValue` object in OpenAI-compatible format.
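     +
     + Examples:
     +     A sketch of the OpenAI-compatible shape; the reply text is found under
     +     "choices"[0]/"message"/"content":
     +     ---
     +     auto client = new OllamaClient();
     +     auto resp = client.chatCompletions("llama3", [Message("user", "Hello!")], 100, 0.7);
     +     writeln(resp["choices"][0]["message"]["content"].str);
     +     ---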
     +/
    JSONValue chatCompletions(string model, Message[] messages, int maxTokens = 0, float temperature = 1.0, bool stream = false) @trusted
    {
        auto url = host ~ "/v1/chat/completions";
        JSONValue[] msgArray;
        foreach (msg; messages)
        {
            msgArray ~= msg.toJson();
        }
        JSONValue data = [
            "model": JSONValue(model),
            "messages": JSONValue(msgArray),
            "stream": JSONValue(stream)
        ];
        if (maxTokens > 0)
            data.object["max_tokens"] = JSONValue(maxTokens);
        data.object["temperature"] = JSONValue(temperature);
        return post(url, data, stream);
    }

    /++
     + Performs an OpenAI-style text completion.
     +
     + Params:
     +     model = The name of the model to use.
     +     prompt = The input prompt to complete.
     +     maxTokens = Maximum number of tokens to generate (0 omits the field, leaving the limit to the server).
     +     temperature = Sampling temperature (default: 1.0).
     +     stream = Whether to stream the response (ignored in this implementation).
     +
     + Returns: A `JSONValue` object in OpenAI-compatible format.
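     +
     + Examples:
     +     A sketch; the completion text is found under "choices"[0]/"text":
     +     ---
     +     auto client = new OllamaClient();
     +     auto resp = client.completions("llama3", "Once upon a time", 50, 0.8);
     +     writeln(resp["choices"][0]["text"].str);
     +     ---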
     +/
    JSONValue completions(string model, string prompt, int maxTokens = 0, float temperature = 1.0, bool stream = false) @trusted
    {
        auto url = host ~ "/v1/completions";
        JSONValue data = [
            "model": JSONValue(model),
            "prompt": JSONValue(prompt),
            "stream": JSONValue(stream)
        ];
        if (maxTokens > 0)
            data.object["max_tokens"] = JSONValue(maxTokens);
        data.object["temperature"] = JSONValue(temperature);
        return post(url, data, stream);
    }

    /++
     + Lists models in an OpenAI-compatible format.
     +
     + Returns: A string containing the JSON-formatted list of model data, pretty-printed.
     +/
    string getModels()
    {
        auto url = host ~ "/v1/models";
        return get(url).toPrettyString();
    }
}

/// Default host URL for the Ollama server.
enum DEFAULT_HOST = "http://127.0.0.1:11434";