From f19efce9a927d6c59a01a9bc8fdd77855e3c45df Mon Sep 17 00:00:00 2001
From: mikami
Date: Sat, 24 Jan 2026 01:33:58 +0800
Subject: [PATCH 1/4] feat: port core logic to C++17 headers for non-module
 environments

---
 examples/cxx17/basic.cpp      |  69 +++++++
 examples/cxx17/chat.cpp       |  49 +++++
 examples/cxx17/hello_mcpp.cpp |  20 ++
 examples/cxx17/xmake.lua      |  17 ++
 examples/xmake.lua            |   1 +
 include/llmapi.hpp            |  12 ++
 include/llmapi/openai.hpp     | 345 ++++++++++++++++++++++++++++++++++
 include/llmapi/url.hpp        |  22 ++
 xmake.lua                     |   6 +
 9 files changed, 541 insertions(+)
 create mode 100644 examples/cxx17/basic.cpp
 create mode 100644 examples/cxx17/chat.cpp
 create mode 100644 examples/cxx17/hello_mcpp.cpp
 create mode 100644 examples/cxx17/xmake.lua
 create mode 100644 include/llmapi.hpp
 create mode 100644 include/llmapi/openai.hpp
 create mode 100644 include/llmapi/url.hpp

diff --git a/examples/cxx17/basic.cpp b/examples/cxx17/basic.cpp
new file mode 100644
index 0000000..b82ad78
--- /dev/null
+++ b/examples/cxx17/basic.cpp
@@ -0,0 +1,69 @@
+// Basic usage example - demonstrates both streaming and non-streaming modes
+#include "llmapi.hpp"
+#include <iostream>
+#include <cstdlib>
+
+using namespace mcpplibs;
+
+int main() {
+    auto api_key = std::getenv("OPENAI_API_KEY");
+    if (!api_key) {
+        std::cout << "Error: OPENAI_API_KEY not set" << std::endl;
+        return 1;
+    }
+
+    llmapi::Client client(api_key, llmapi::URL::Poe);
+    client.model("gpt-5").system("You are a helpful assistant.");
+
+    std::cout << "=== llmapi Basic Usage Demo ===\n" << std::endl;
+
+    try {
+        // Example 1: Non-streaming request
+        std::cout << "[Example 1] Non-streaming mode:" << std::endl;
+        std::cout << "Question: What is the capital of China?\n" << std::endl;
+
+        client.user("What is the capital of China?");
+        client.request();
+
+        std::cout << "Answer: {}\n" << std::endl;
+
+        // Example 2: Streaming request
+        std::cout << "[Example 2] Streaming mode:" << std::endl;
+        std::cout << "Question: Convince me to use modern C++ (100 words)\n" << std::endl;
+
+        client.user("Convince me to use modern C++ (100 words)");
+        std::cout << "Answer: ";
+
+        client.request([](std::string_view chunk) {
+            std::cout << "{}";
+            std::cout.flush();
+        });
+
+        std::cout << "\n" << std::endl;
+
+        // Verify auto-save: get the last answer
+        auto last_answer = client.getAnswer();
+        std::cout << "[Verification] Last answer length: {} chars\n" << std::endl;
+
+        // Example 3: Translate the previous answer to Chinese
+        std::cout << "[Example 3] Translation (streaming):" << std::endl;
+        std::cout << "Question: 请把上个回答翻译成中文。\n" << std::endl;
+
+        client.user("请把上个回答翻译成中文。");
+        std::cout << "Answer: ";
+
+        client.request([](std::string_view chunk) {
+            std::cout << "{}";
+            std::cout.flush();
+        });
+
+        std::cout << "\n" << std::endl;
+
+    } catch (const std::exception& e) {
+        std::cout << "\nError: {}\n" << std::endl;
+        return 1;
+    }
+
+    std::cout << "=== Demo Complete ===" << std::endl;
+    return 0;
+}
\ No newline at end of file
diff --git a/examples/cxx17/chat.cpp b/examples/cxx17/chat.cpp
new file mode 100644
index 0000000..6495af3
--- /dev/null
+++ b/examples/cxx17/chat.cpp
@@ -0,0 +1,49 @@
+// Simple and elegant AI chat CLI tool using streaming
+#include "llmapi.hpp"
+#include <iostream>
+#include <string>
+
+using namespace mcpplibs;
+
+int main() {
+    auto api_key = std::getenv("OPENAI_API_KEY");
+    if (!api_key) {
+        std::cout << "Error: OPENAI_API_KEY not set" << std::endl;
+        return 1;
+    }
+
+    llmapi::Client client(api_key, llmapi::URL::Poe);
+    client.model("gpt-5").system("You are a helpful assistant.");
+
+    std::cout << "AI Chat CLI - Type 'quit' to exit\n" << std::endl;
+
+    while (true) {
+        std::cout << "You: ";
+        std::string input;
+        std::getline(std::cin, input);
+
+        if (input == "quit" || input == "q") {
+            std::cout << "\nBye!" << std::endl;
+            break;
+        }
+
+        if (input.empty()) continue;
+
+        try {
+            client.user(input);
+            std::cout << "\nAI: ";
+
+            client.request([](std::string_view chunk) {
+                std::cout << "{}";
+                std::cout.flush();
+            });
+
+            std::cout << "\n" << std::endl;
+
+        } catch (const std::exception& e) {
+            std::cout << "\nError: {}\n" << std::endl;
+        }
+    }
+
+    return 0;
+}
\ No newline at end of file
diff --git a/examples/cxx17/hello_mcpp.cpp b/examples/cxx17/hello_mcpp.cpp
new file mode 100644
index 0000000..dbd8223
--- /dev/null
+++ b/examples/cxx17/hello_mcpp.cpp
@@ -0,0 +1,20 @@
+// Minimal example - simplest way to use llmapi
+#include "llmapi.hpp"
+#include <iostream>
+#include <cstdlib>
+
+int main() {
+    using namespace mcpplibs;
+
+    llmapi::Client client(std::getenv("OPENAI_API_KEY"), llmapi::URL::Poe);
+
+    client.model("gpt-5")
+        .system("You are a helpful assistant.")
+        .user("In one sentence, introduce modern C++. 并给出中文翻译")
+        .request([](std::string_view chunk) {
+            std::cout << chunk;
+            std::cout.flush();
+        });
+
+    return 0;
+}
\ No newline at end of file
diff --git a/examples/cxx17/xmake.lua b/examples/cxx17/xmake.lua
new file mode 100644
index 0000000..e0e26d7
--- /dev/null
+++ b/examples/cxx17/xmake.lua
@@ -0,0 +1,17 @@
+target("cxx17_hello_mcpp")
+    set_kind("binary")
+    add_files("hello_mcpp.cpp")
+    set_languages("c++17")
+    add_deps("llmapi_cxx17")
+
+target("cxx17_basic")
+    set_kind("binary")
+    add_files("basic.cpp")
+    set_languages("c++17")
+    add_deps("llmapi_cxx17")
+
+target("cxx17_chat")
+    set_kind("binary")
+    add_files("chat.cpp")
+    set_languages("c++17")
+    add_deps("llmapi_cxx17")
diff --git a/examples/xmake.lua b/examples/xmake.lua
index 991dc33..edd9c43 100644
--- a/examples/xmake.lua
+++ b/examples/xmake.lua
@@ -14,3 +14,4 @@ target("chat")
     add_deps("llmapi")
 
 includes("c")
+includes("cxx17")
\ No newline at end of file
diff --git a/include/llmapi.hpp b/include/llmapi.hpp
new file mode 100644
index 0000000..338147a
--- /dev/null
+++ b/include/llmapi.hpp
@@ -0,0 +1,12 @@
+#pragma once
+
+#include <nlohmann/json.hpp>
+#include "llmapi/url.hpp"
+#include "llmapi/openai.hpp"
+
+namespace mcpplibs::llmapi {
+    using OpenAI = openai::OpenAI;
+    using Client = openai::OpenAI;
+    using URL = llmapi::URL;
+    using Json = nlohmann::json;
+} // namespace mcpplibs::llmapi
\ No newline at end of file
diff --git a/include/llmapi/openai.hpp b/include/llmapi/openai.hpp
new file mode 100644
index 0000000..1651510
--- /dev/null
+++ b/include/llmapi/openai.hpp
@@ -0,0 +1,345 @@
+#pragma once
+
+#include "url.hpp"
+
+#include <curl/curl.h>
+#include <nlohmann/json.hpp>
+
+#include <functional>
+
+namespace mcpplibs::llmapi::openai {
+
+using Json = nlohmann::json;
+
+// Concept to constrain callback type
+// template <typename Callback>
+// concept StreamCallback = std::invocable<Callback, std::string_view> &&
+//     std::same_as<std::invoke_result_t<Callback, std::string_view>, void>;
+
+// SFINAE to constrain callback type
+template <typename Callback>
+struct IsStreamCallback : std::conjunction<
+    std::is_invocable<Callback, std::string_view>,
+    std::is_same<std::invoke_result_t<Callback, std::string_view>, void>
+> {};
+
+template <typename Callback>
+inline constexpr bool IsStreamCallbackV = IsStreamCallback<Callback>::value;
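+
+// Illustrative sketch of what the trait accepts (comment only; callbacks below
+// are hypothetical): a lambda like [](std::string_view) {} satisfies it (void
+// return), while [](std::string_view) { return 1; } is rejected (non-void).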
+
+class OpenAI {
+    std::string mApiKey;
+    std::string mBaseUrl;
+    std::string mModel;
+    std::string mEndpoint;
+    Json mMessages;
+
+public:
+    OpenAI(std::string_view apiKey, std::string_view baseUrl = llmapi::URL::OpenAI)
+        : mApiKey(apiKey),
+          mBaseUrl(baseUrl),
+          mMessages(Json::array())
+    {
+        if (mApiKey.empty()) {
+            throw std::runtime_error("API key cannot be empty");
+        }
+    }
+
+    // Safe overload for const char* (e.g. std::getenv("KEY")); nullptr becomes an empty key
+    OpenAI(const char* apiKey, std::string_view baseUrl = llmapi::URL::OpenAI)
+        : OpenAI(std::string_view(apiKey ? apiKey : ""), baseUrl) { }
+
+    // Rule of five - explicitly defaulted
+    OpenAI(const OpenAI&) = default;
+    OpenAI(OpenAI&&) = default;
+    OpenAI& operator=(const OpenAI&) = default;
+    OpenAI& operator=(OpenAI&&) = default;
+    ~OpenAI() = default;
+
+public: // config methods (chainable)
+
+    OpenAI& model(std::string_view model) {
+        mEndpoint = mBaseUrl + "/chat/completions";
+        mModel = model;
+        return *this;
+    }
+
+public: // Message methods
+
+    // Add messages
+    OpenAI& add_message(std::string_view role, std::string_view content) {
+        mMessages.push_back({
+            {"role", role},
+            {"content", content}
+        });
+        return *this;
+    }
+
+    OpenAI& user(std::string_view content) {
+        return add_message("user", content);
+    }
+
+    OpenAI& system(std::string_view content) {
+        return add_message("system", content);
+    }
+
+    OpenAI& assistant(std::string_view content) {
+        return add_message("assistant", content);
+    }
+
+    // Clear conversation history
+    OpenAI& clear() {
+        mMessages = Json::array();
+        return *this;
+    }
+
+public:
+
+    // Getters
+    std::string_view getApiKey() const { return mApiKey; }
+    std::string_view getBaseUrl() const { return mBaseUrl; }
+    std::string_view getModel() const { return mModel; }
+
+    Json getMessages() const { return mMessages; }
+    int getMessageCount() const { return static_cast<int>(mMessages.size()) / 2; } // user/assistant rounds
+
+    std::string getAnswer() const {
+        if (mMessages.empty()) return "";
+        const auto& lastMessage = mMessages.back();
+        if (lastMessage.contains("role") && lastMessage["role"] == "assistant" &&
+            lastMessage.contains("content")) {
+            return lastMessage["content"].get<std::string>();
+        }
+        return "";
+    }
+
+public: // Request methods
+
+    // Execute request (non-streaming) - auto saves assistant reply
+    Json request() {
+        validate_request();
+        auto response = send_request(build_payload(mMessages, false));
+
+        // Auto-save assistant reply to conversation history
+        if (response.contains("choices") && !response["choices"].empty()) {
+            auto& choice = response["choices"][0];
+            if (choice.contains("message") && choice["message"].contains("content")) {
+                std::string content = choice["message"]["content"];
+                assistant(content);
+            }
+        }
+
+        return response;
+    }
+
+    // One-shot request without building conversation (non-streaming)
+    Json request(const Json& messages) {
+        validate_request();
+        return send_request(build_payload(messages, false));
+    }
+
+    // Execute request with callback (streaming) - auto saves assistant reply
+    template <typename Callback, std::enable_if_t<IsStreamCallbackV<Callback>, int> = 0>
+    void request(Callback&& callback) {
+        validate_request();
+
+        // Wrapper to collect full response
+        std::string full_response;
+        auto wrapper_callback = [&full_response, &callback](std::string_view chunk) {
+            full_response += chunk;
+            callback(chunk);
+        };
+
+        send_stream_request(build_payload(mMessages, true), wrapper_callback);
+
+        // Auto-save assistant reply to conversation history
+        if (!full_response.empty()) {
+            assistant(full_response);
+        }
+    }
+
+private:
+    struct StreamContext {
+        std::function<void(std::string_view)> callback;
+        std::string buffer;
+    };
+
+    // Validate request preconditions
+    void validate_request() const {
+        if (mEndpoint.empty()) {
+            throw std::runtime_error("Endpoint not set. Call model() first.");
+        }
+        if (mModel.empty()) {
+            throw std::runtime_error("Model not set.");
+        }
+    }
+
+    // Build request payload
+    Json build_payload(const Json& messages, bool stream) const {
+        Json payload;
+        payload["model"] = mModel;
+        payload["messages"] = messages;
+        if (stream) {
+            payload["stream"] = true;
+        }
+        return payload;
+    }
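+
+    // For reference, a streaming payload from build_payload() serializes roughly as
+    // (illustrative values; shape per the OpenAI-compatible chat completions API):
+    //   {"model":"gpt-5","messages":[{"role":"user","content":"hi"}],"stream":true}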
Call model() first."); + } + if (mModel.empty()) { + throw std::runtime_error("Model not set."); + } + } + + // Build request payload + Json build_payload(const Json& messages, bool stream) const { + Json payload; + payload["model"] = mModel; + payload["messages"] = messages; + if (stream) { + payload["stream"] = true; + } + return payload; + } + + // Setup common CURL headers + struct curl_slist* setup_headers() const { + struct curl_slist* headers = nullptr; + headers = curl_slist_append(headers, "Content-Type: application/json"); + std::string authHeader = "Authorization: Bearer " + mApiKey; + headers = curl_slist_append(headers, authHeader.c_str()); + return headers; + } + + Json send_request(const Json& payload) { + std::string payloadStr = payload.dump(); + std::string response; + + CURL* curl = curl_easy_init(); + if (!curl) { + throw std::runtime_error("Failed to initialize CURL"); + } + + // Set up headers + struct curl_slist* headers = setup_headers(); + + // Set CURL options + curl_easy_setopt(curl, CURLOPT_URL, mEndpoint.c_str()); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payloadStr.c_str()); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writeCallback); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response); + + // Perform the request + CURLcode res = curl_easy_perform(curl); + + // Cleanup + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + + if (res != CURLE_OK) { + throw std::runtime_error(std::string("CURL error: ") + curl_easy_strerror(res)); + } + + return Json::parse(response); + } + + template, int> = 0> + void send_stream_request(const Json& payload, Callback&& callback) { + std::string payloadStr = payload.dump(); + + CURL* curl = curl_easy_init(); + if (!curl) { + throw std::runtime_error("Failed to initialize CURL"); + } + + StreamContext context; + context.callback = std::forward(callback); + + // Set up headers + struct curl_slist* headers = setup_headers(); + + // Set CURL options + curl_easy_setopt(curl, CURLOPT_URL, mEndpoint.c_str()); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payloadStr.c_str()); + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, streamCallback); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &context); + + // Perform the request + CURLcode res = curl_easy_perform(curl); + + // Cleanup + curl_slist_free_all(headers); + curl_easy_cleanup(curl); + + if (res != CURLE_OK) { + throw std::runtime_error(std::string("CURL error: ") + curl_easy_strerror(res)); + } + } + + static size_t writeCallback(void* contents, size_t size, size_t nmemb, void* userp) { + size_t totalSize = size * nmemb; + std::string* response = static_cast(userp); + response->append(static_cast(contents), totalSize); + return totalSize; + } + + static size_t streamCallback(void* contents, size_t size, size_t nmemb, void* userp) { + size_t totalSize = size * nmemb; + StreamContext* context = static_cast(userp); + + std::string_view data(static_cast(contents), totalSize); + context->buffer.append(data); + + // Process SSE data line by line + size_t pos = 0; + while ((pos = context->buffer.find('\n')) != std::string::npos) { + std::string line = context->buffer.substr(0, pos); + context->buffer.erase(0, pos + 1); + + // Remove \r if present + if (!line.empty() && line.back() == '\r') { + line.pop_back(); + } + + // Skip empty lines + if (line.empty()) { + continue; + } + + // Check for data: prefix + if (line.find("data: ") == 0) { + std::string jsonStr = 
+
+                // Check for [DONE] message
+                if (jsonStr == "[DONE]") {
+                    continue;
+                }
+
+                try {
+                    auto chunk = Json::parse(jsonStr);
+
+                    // Extract content from the chunk
+                    if (chunk.contains("choices") && !chunk["choices"].empty()) {
+                        auto& choice = chunk["choices"][0];
+
+                        // For chat completions streaming
+                        if (choice.contains("delta") && choice["delta"].contains("content")) {
+                            std::string content = choice["delta"]["content"];
+                            context->callback(content);
+                        }
+                        // For responses endpoint streaming
+                        else if (choice.contains("message") && choice["message"].contains("content")) {
+                            std::string content = choice["message"]["content"];
+                            context->callback(content);
+                        }
+                    }
+                } catch (const Json::exception& e) {
+                    // Silently ignore JSON parsing errors in streaming
+                }
+            }
+        }
+
+        return totalSize;
+    }
+};
+
+} // namespace mcpplibs::llmapi::openai
\ No newline at end of file
diff --git a/include/llmapi/url.hpp b/include/llmapi/url.hpp
new file mode 100644
index 0000000..4d3cf32
--- /dev/null
+++ b/include/llmapi/url.hpp
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <string_view>
+
+namespace mcpplibs::llmapi {
+
+struct URL {
+    // Model vendors
+    inline static constexpr std::string_view OpenAI { "https://api.openai.com/v1" };
+    inline static constexpr std::string_view Anthropic { "https://api.anthropic.com/v1" };
+    inline static constexpr std::string_view DeepSeek { "https://api.deepseek.com/v1" };
+
+    // Aggregator providers
+    inline static constexpr std::string_view OpenRouter { "https://openrouter.ai/api/v1" };
+    inline static constexpr std::string_view Poe { "https://api.poe.com/v1" };
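+
+    // Any OpenAI-compatible base URL can also be passed straight to the client
+    // constructor, e.g. a self-hosted endpoint such as "http://localhost:8000/v1"
+    // (hypothetical address).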
"; client.request([](std::string_view chunk) { - std::cout << "{}"; + std::cout << chunk; std::cout.flush(); }); @@ -43,7 +43,7 @@ int main() { // Verify auto-save: get the last answer auto last_answer = client.getAnswer(); - std::cout << "[Verification] Last answer length: {} chars\n" << std::endl; + std::cout << "[Verification] Last answer length: " << last_answer.size() << " chars\n" << std::endl; // Example 3: Translate the story to Chinese std::cout << "[Example 3] Translation (streaming):" << std::endl; @@ -53,14 +53,14 @@ int main() { std::cout << "Answer: "; client.request([](std::string_view chunk) { - std::cout << "{}"; + std::cout << chunk; std::cout.flush(); }); std::cout << "\n" << std::endl; } catch (const std::exception& e) { - std::cout << "\nError: {}\n" << std::endl; + std::cout << "\nError: " << e.what() << std::endl; return 1; } diff --git a/examples/cxx17/chat.cpp b/examples/cxx17/chat.cpp index 6495af3..e8c736d 100644 --- a/examples/cxx17/chat.cpp +++ b/examples/cxx17/chat.cpp @@ -34,14 +34,14 @@ int main() { std::cout << "\nAI: "; client.request([](std::string_view chunk) { - std::cout << "{}"; + std::cout << chunk; std::cout.flush(); }); std::cout << "\n" << std::endl; } catch (const std::exception& e) { - std::cout << "\nError: {}\n" << std::endl; + std::cout << "\nError: " << e.what() << std::endl; } } From fa081b4462417c2ea8ef0c4965f7bd4e1d15e981 Mon Sep 17 00:00:00 2001 From: mikami Date: Sat, 24 Jan 2026 03:15:32 +0800 Subject: [PATCH 4/4] feat: add C++17 header-only support to documentation --- README.md | 2 ++ README.zh.hant.md | 2 ++ README.zh.md | 2 ++ 3 files changed, 6 insertions(+) diff --git a/README.md b/README.md index f61a5b6..0919da0 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ > Modern C++ LLM API client with openai-compatible support [![C++23](https://img.shields.io/badge/C%2B%2B-23-blue.svg)](https://en.cppreference.com/w/cpp/23) +[![C++17](https://img.shields.io/badge/C%2B%2B-17-blue.svg)](https://en.cppreference.com/w/cpp/17) [![C API](https://img.shields.io/badge/C_API-ok-green.svg)](https://en.cppreference.com/w/cpp/23) [![Module](https://img.shields.io/badge/module-ok-green.svg)](https://en.cppreference.com/w/cpp/language/modules) [![License](https://img.shields.io/badge/license-Apache_2.0-blue.svg)](LICENSE) @@ -22,6 +23,7 @@ Clean, type-safe LLM API client using C++23 modules. 
 
 - **Fluent Interface** - Chainable methods
 - **C API** - Full C language support with OOP style
 - **Provider Agnostic** - OpenAI, Poe, and compatible endpoints
+- **C++17 Header-only** - Native support via `#include <llmapi.hpp>` (API identical to C++23)
 
 ## Quick Start
diff --git a/README.zh.hant.md b/README.zh.hant.md
index 18817a8..2afed66 100644
--- a/README.zh.hant.md
+++ b/README.zh.hant.md
@@ -3,6 +3,7 @@
 > Modern C++ LLM API client with openai-compatible support
 
 [![C++23](https://img.shields.io/badge/C%2B%2B-23-blue.svg)](https://en.cppreference.com/w/cpp/23)
+[![C++17](https://img.shields.io/badge/C%2B%2B-17-blue.svg)](https://en.cppreference.com/w/cpp/17)
 [![C API](https://img.shields.io/badge/C_API-ok-green.svg)](https://en.cppreference.com/w/cpp/23)
 [![Module](https://img.shields.io/badge/module-ok-green.svg)](https://en.cppreference.com/w/cpp/language/modules)
 [![License](https://img.shields.io/badge/license-Apache_2.0-blue.svg)](LICENSE)
@@ -22,6 +23,7 @@
 
 - **流式介面** - 可鏈式呼叫的方法
 - **C 語言 API** - 完整的 C 語言支援,物件導向風格
 - **提供商無關** - OpenAI、Poe 及相容端點
+- **C++17 純標頭檔** - 原生支援 C++17,僅需 `#include <llmapi.hpp>`(API 與 C++23 一致)
 
 ## 快速開始
diff --git a/README.zh.md b/README.zh.md
index 9164400..296a1a3 100644
--- a/README.zh.md
+++ b/README.zh.md
@@ -3,6 +3,7 @@
 > Modern C++ LLM API client with openai-compatible support
 
 [![C++23](https://img.shields.io/badge/C%2B%2B-23-blue.svg)](https://en.cppreference.com/w/cpp/23)
+[![C++17](https://img.shields.io/badge/C%2B%2B-17-blue.svg)](https://en.cppreference.com/w/cpp/17)
 [![C API](https://img.shields.io/badge/C_API-ok-green.svg)](https://en.cppreference.com/w/cpp/23)
 [![Module](https://img.shields.io/badge/module-ok-green.svg)](https://en.cppreference.com/w/cpp/language/modules)
 [![License](https://img.shields.io/badge/license-Apache_2.0-blue.svg)](LICENSE)
@@ -22,6 +23,7 @@
 
 - **流式接口** - 可链式调用的方法
 - **C 语言 API** - 完整的 C 语言支持,面向对象风格
 - **提供商无关** - OpenAI、Poe 及兼容端点
+- **C++17 纯头文件** - 原生支持 C++17,仅需 `#include <llmapi.hpp>`(API 与 C++23 一致)
 
 ## 快速开始