From 681dcd684c3f9c3d10ba0b0f52ce03d2d29dac98 Mon Sep 17 00:00:00 2001
From: Marcus Hellberg
Date: Thu, 16 May 2024 15:09:48 +0200
Subject: [PATCH 1/2] Update dependencies, improve ChatView functionality, and
 optimize document loading

- Upgraded `vaadin`, `langchain`, and Spring Boot versions in `pom.xml`.
- Enhanced the `ChatView` with real-time message updates, scrolling, and additional styling.
- Refined document loading to recurse through directories using `loadDocumentsRecursively`.
---
 pom.xml                                      |  6 +-
 src/main/java/com/vaadin/demo/AIConfig.java  |  7 +-
 .../java/com/vaadin/demo/views/ChatView.java | 91 +++++++++++--------
 src/main/resources/application.properties    | 12 ++-
 4 files changed, 66 insertions(+), 50 deletions(-)

diff --git a/pom.xml b/pom.xml
index 9122f98..2697180 100644
--- a/pom.xml
+++ b/pom.xml
@@ -11,14 +11,14 @@
         17
-        24.4.0.beta1
-        0.30.0
+        24.8.6
+        1.3.0-beta9
 
         org.springframework.boot
         spring-boot-starter-parent
-        3.2.5
+        3.5.4
diff --git a/src/main/java/com/vaadin/demo/AIConfig.java b/src/main/java/com/vaadin/demo/AIConfig.java
index dd3109e..727fb54 100644
--- a/src/main/java/com/vaadin/demo/AIConfig.java
+++ b/src/main/java/com/vaadin/demo/AIConfig.java
@@ -4,9 +4,6 @@
 import dev.langchain4j.data.segment.TextSegment;
 import dev.langchain4j.memory.chat.ChatMemoryProvider;
 import dev.langchain4j.memory.chat.MessageWindowChatMemory;
-import dev.langchain4j.memory.chat.TokenWindowChatMemory;
-import dev.langchain4j.model.Tokenizer;
-import dev.langchain4j.model.embedding.EmbeddingModel;
 import dev.langchain4j.rag.content.retriever.ContentRetriever;
 import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
 import dev.langchain4j.store.embedding.EmbeddingStore;
@@ -25,7 +22,7 @@ public class AIConfig {
 
     private static final Logger log = LoggerFactory.getLogger(AIConfig.class);
 
-    @Value("${docs.location}")
+    @Value("${ai.docs.location}")
     private String docsLocation;
@@ -64,7 +61,7 @@ ApplicationRunner docImporter(EmbeddingStore embeddingStore) {
                 return;
             }
             log.info("Importing documents from {}", docsLocation);
-            var docs = FileSystemDocumentLoader.loadDocuments(docsLocation);
+            var docs = FileSystemDocumentLoader.loadDocumentsRecursively(docsLocation);
             EmbeddingStoreIngestor.ingest(docs, embeddingStore);
             log.info("Imported {} documents", docs.size());
         };
diff --git a/src/main/java/com/vaadin/demo/views/ChatView.java b/src/main/java/com/vaadin/demo/views/ChatView.java
index ce5456a..c4d9950 100644
--- a/src/main/java/com/vaadin/demo/views/ChatView.java
+++ b/src/main/java/com/vaadin/demo/views/ChatView.java
@@ -3,12 +3,13 @@
 import com.vaadin.demo.AiAssistant;
 import com.vaadin.flow.component.button.Button;
 import com.vaadin.flow.component.messages.MessageInput;
+import com.vaadin.flow.component.messages.MessageList;
+import com.vaadin.flow.component.messages.MessageListItem;
 import com.vaadin.flow.component.orderedlayout.Scroller;
 import com.vaadin.flow.component.orderedlayout.VerticalLayout;
 import com.vaadin.flow.router.PageTitle;
 import com.vaadin.flow.router.Route;
 import com.vaadin.flow.theme.lumo.LumoUtility;
-import org.vaadin.firitin.components.messagelist.MarkdownMessage;
 
 import java.util.UUID;
@@ -16,56 +17,72 @@
 @Route(value = "", layout = MainLayout.class)
 public class ChatView extends VerticalLayout {
 
+    private final AiAssistant aiAssistant;
+
+    private final MessageList messageList = new MessageList();
+    private final MessageInput messageInput = new MessageInput();
+    private final Scroller scroller = new Scroller(messageList);
+
     private String chatId = UUID.randomUUID().toString();
-    private MessageInput messageInput = new MessageInput();
 
     public ChatView(AiAssistant aiAssistant) {
-        var newChatButton = new Button("New Chat");
-        var messageList = new VerticalLayout();
-        focusMessageInput();
-
-        setPadding(false);
-        setSpacing(false);
-        messageList.setSpacing(true);
-        messageList.addClassNames(LumoUtility.Padding.Horizontal.SMALL, LumoUtility.Margin.Horizontal.AUTO,
-                LumoUtility.MaxWidth.SCREEN_MEDIUM);
+        this.aiAssistant = aiAssistant;
 
+        var newChatButton = new Button("New Chat");
         newChatButton.addClassName("new-chat-button");
         newChatButton.addClickListener(e -> {
             chatId = UUID.randomUUID().toString();
-            messageList.removeAll();
-            focusMessageInput();
+            messageList.getItems().clear();
+            messageInput.focus();
         });
+        add(newChatButton);
 
-        messageInput.setWidthFull();
-        messageInput.addClassNames(LumoUtility.Padding.Horizontal.LARGE, LumoUtility.Padding.Vertical.MEDIUM,
-                LumoUtility.Margin.Horizontal.AUTO, LumoUtility.MaxWidth.SCREEN_MEDIUM);
-        messageInput.addSubmitListener(e -> {
-            var questionText = e.getValue();
-            var question = new MarkdownMessage(questionText, "You");
-            question.addClassName("you");
-            var answer = new MarkdownMessage("Assistant");
-            answer.getElement().executeJs("this.scrollIntoView()");
-
-            messageList.add(question);
-            messageList.add(answer);
+        messageList.addClassNames(LumoUtility.MaxWidth.SCREEN_MEDIUM);
+        messageList.setMarkdown(true);
 
-            aiAssistant.chat(chatId, questionText)
-                    .onNext(answer::appendMarkdownAsync)
-                    .onError(err -> System.err.println("ooops" + e))
-                    .start();
-        });
-
-        add(newChatButton);
-        var scroller = new Scroller(messageList);
+        scroller.addClassNames(LumoUtility.AlignContent.END, LumoUtility.MaxWidth.SCREEN_MEDIUM);
         scroller.setWidthFull();
-        scroller.addClassName(LumoUtility.AlignContent.END);
         addAndExpand(scroller);
+
+        messageInput.addClassNames(LumoUtility.MaxWidth.SCREEN_MEDIUM);
+        messageInput.setWidthFull();
+        messageInput.addSubmitListener(this::onSubmit);
+        messageInput.focus();
         add(messageInput);
-    }
 
-    private void focusMessageInput() {
-        messageInput.getElement().executeJs("requestAnimationFrame(() => this.querySelector('vaadin-text-area').focus() )");
+        setAlignItems(Alignment.CENTER);
+        setPadding(false);
+        setSpacing(false);
     }
 
+    private void onSubmit(MessageInput.SubmitEvent submitEvent) {
+
+        var uiOptional = submitEvent.getSource().getUI();
+
+        var questionText = submitEvent.getValue();
+        var question = new MessageListItem(questionText, "You");
+        question.setUserColorIndex(1);
+        question.addClassNames("you");
+
+        var answer = new MessageListItem("...", "Assistant");
+        answer.setUserColorIndex(0);
+
+        messageList.addItem(question);
+        messageList.addItem(answer);
+
+        aiAssistant.chat(chatId, questionText)
+                .onPartialResponse(answerToken ->
+                        uiOptional.ifPresent(ui ->
+                                ui.access(() -> {
+                                    if (answer.getText().equals("..."))
+                                        answer.setText("");
+                                    answer.appendText(answerToken);
+                                    scroller.scrollToBottom();
+                                })
+                        )
+                )
+                .onError(err -> System.err.println("ooops" + err.toString()))
+                .start();
+
+    }
 }
diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties
index d5fd88b..b5dbe94 100644
--- a/src/main/resources/application.properties
+++ b/src/main/resources/application.properties
@@ -10,14 +10,15 @@ spring.sql.init.mode=always
 
 ### DocsChat config
 # Filesystem path to your documentation folder
-docs.location=/Users/mhellber/Desktop/docs
+ai.docs.location=/Users/mhellber/Desktop/docs
 
 ## NOTE: Use ONLY ONE of the two options below at a time.
 
 # OpenAI API
 # Better quality, requires sending data to OpenAI
 langchain4j.open-ai.streaming-chat-model.api-key=${OPENAI_API_KEY}
-langchain4j.open-ai.streaming-chat-model.model-name=gpt-4-turbo
+langchain4j.open-ai.streaming-chat-model.model-name=gpt-5
+langchain4j.open-ai.streaming-chat-model.temperature=1
 
 # Local OpenAI compatible API (ollama)
 # Not as performant, but your data does not leave your computer
@@ -26,6 +27,7 @@
 #langchain4j.open-ai.streaming-chat-model.base-url=http://localhost:11434/v1
 #langchain4j.open-ai.streaming-chat-model.model-name=llama3:latest
 # Debug logging to print requests
-logging.level.dev.langchain4j=DEBUG
-logging.level.dev.ai4j.openai4j=DEBUG
-langchain4j.open-ai.streaming-chat-model.log-requests=true
\ No newline at end of file
+#logging.level.dev.langchain4j=DEBUG
+#logging.level.dev.ai4j.openai4j=DEBUG
+#langchain4j.open-ai.streaming-chat-model.log-requests=true
+#langchain4j.open-ai.streaming-chat-model.log-responses=true

From 9ac56dd8b39f5ccf538e39cf25f2c74f6cf5c557 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Ku=CC=88hnau?=
Date: Tue, 19 Aug 2025 16:07:13 +0200
Subject: [PATCH 2/2] Update OpenAI model configuration

- Changed model name from `gpt-4o` to `gpt-5`.
- Added temperature configuration to enable tunable response diversity.
---
 src/main/resources/application.properties | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties
index a4061fe..b5dbe94 100644
--- a/src/main/resources/application.properties
+++ b/src/main/resources/application.properties
@@ -17,7 +17,8 @@ ai.docs.location=/Users/mhellber/Desktop/docs
 # OpenAI API
 # Better quality, requires sending data to OpenAI
 langchain4j.open-ai.streaming-chat-model.api-key=${OPENAI_API_KEY}
-langchain4j.open-ai.streaming-chat-model.model-name=gpt-4o
+langchain4j.open-ai.streaming-chat-model.model-name=gpt-5
+langchain4j.open-ai.streaming-chat-model.temperature=1
 
 # Local OpenAI compatible API (ollama)
 # Not as performant, but your data does not leave your computer