diff --git a/README.md b/README.md
index 17fd332..3bacc7e 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
# Lingput - AI-Powered Comprehensible Input for Language Learning
-[](https://github.com/mark-mdev/lingput/actions/workflows/pr-tests.yml)
-[](https://github.com/mark-mdev/lingput/actions/workflows/deploy.yml)
+[](https://github.com/markmdev/lingput/actions/workflows/pr-tests.yml)
+[](https://github.com/markmdev/lingput/actions/workflows/deploy.yml)
**Demo:**
https://lingput.dev/
@@ -18,15 +18,15 @@ Unlike generic flashcard apps, Lingput adapts to your vocabulary and provides **
## Architectural & Technical Highlights
-This project was built to production-grade standards, demonstrating expertise in full-stack development, system design, and scalability. Here are the key technical features:
-
- **Scalable Background Processing:** Utilizes a robust **Job Queue System (BullMQ & Redis)** to handle complex, long-running AI tasks (story and audio generation) asynchronously. This ensures the API remains fast and responsive, providing a seamless user experience with real-time progress updates on the frontend.
- **Clean Backend Architecture:** The Express.js backend is built on a **testable, multi-layered architecture** (Controller, Service, Repository) with **Dependency Injection** for loose coupling and maintainability.
-- **Robust Caching Strategy:** Leverages **Redis** for caching frequently accessed data (like stories and word lists), significantly reducing database load and improving API response times.
+- **Robust Caching Strategy:** Leverages **Redis** for caching frequently accessed data (like stories and word lists), significantly **reducing database load** and improving API response times.
- **Secure Authentication:** Implements a secure, modern authentication system using **HTTP-only cookies** with access and refresh tokens to protect against XSS attacks.
- **Advanced Frontend State Management:** The Next.js frontend features a **custom React hook (`handleJob`)** to intelligently manage the lifecycle of background jobs, abstracting away the complexity of polling and providing optimistic UI updates.
- **Containerized for Production:** The entire application is containerized using **Docker and Docker Compose**, ensuring consistent, reproducible deployments for all services (backend, frontend, workers, NGINX).
+Full tech stack: [Tech Stack](#tech-stack)
+
---
## CI/CD
@@ -42,7 +42,7 @@ This repo ships with a simple, reliable pipeline built around **Docker**, **GitH
On every **Pull Request** and on **pushes to `main`**, GitHub Actions runs:
-- **ESLint** for the codebase.
+- **ESLint**.
- **Unit/Integration tests**.
- Dependency caching to keep CI fast.
@@ -71,7 +71,6 @@ On every **Pull Request** and on **pushes to `main`**, GitHub Actions runs:
- [Features](#features)
- [Tech Stack](#tech-stack)
- [Quickstart](#quickstart)
-- [Production Deploy](#production-deploy)
- [Roadmap](#roadmap)
- [Contributing](#contributing)
- [License](#license)
@@ -88,14 +87,14 @@ On every **Pull Request** and on **pushes to `main`**, GitHub Actions runs:
## Features
-- 🔐 **Auth with secure cookies** - register/login with HTTP-only tokens, refresh flow included.
-- 📊 **Vocabulary assessment** - quick test estimates your vocab size using a frequency list.
-- 📚 **Personalized story generation** - AI generates stories with your known words (plus a few new).
-- 🌍 **Chunked translation** - story is split into chunks with translations for easier comprehension.
-- 🎧 **Audio generation** - full audio track (story + translations with pauses), stored in Supabase.
-- 📝 **Smart word tracking** - The app doesn't just show translations, it saves words with examples and helps you track your progress.
-- ⚡ **Background jobs** - BullMQ workers handle long-running tasks with progress updates.
-- 🚀 **Caching** - Redis caches stories and word lists for fast responses.
+- **Auth with secure cookies** - register/login with HTTP-only tokens, refresh flow included.
+- **Vocabulary assessment** - quick test estimates your vocab size using a frequency list.
+- **Personalized story generation** - AI generates stories with your known words (plus a few new).
+- **Chunked translation** - story is split into chunks with translations for easier comprehension.
+- **Audio generation** - full audio track (story + translations with pauses), stored in Supabase.
+- **Smart word tracking** - The app doesn't just show translations; it saves words with examples and helps you track your progress.
+- **Background jobs** - BullMQ workers handle long-running tasks with progress updates.
+- **Caching** - Redis caches stories and word lists for fast responses.
---
@@ -151,7 +150,7 @@ On every **Pull Request** and on **pushes to `main`**, GitHub Actions runs:
```bash
# Clone the repository
-git clone https://github.com/mark-mdev/lingput
+git clone https://github.com/markmdev/lingput
```
Create `.env` files for backend and frontend:
@@ -185,25 +184,6 @@ App: [http://localhost:3050](http://localhost:3050)
---
-## Production Deploy
-
-Use `docker-compose.yml` with prebuilt images:
-
-- `markmdev/lingput-backend`
-- `markmdev/lingput-worker`
-- `markmdev/lingput-frontend`
-- `markmdev/lingput-lemma`
-- `markmdev/lingput-nginx`
-
-Steps:
-
-1. Build & push images to your registry.
-2. Update image names in `docker-compose.yml`.
-3. Provide production env vars (`OPENAI_API_KEY`, `SUPABASE_URL`, `SUPABASE_SERVICE_API_KEY`, `DATABASE_URL`, `REDIS_HOST`, `REDIS_PORT`, `JWT_SECRET`, etc).
-4. Expose NGINX (`80` by default).
-
----
-
## Contributing
Contributions welcome!
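
For context on the "Scalable Background Processing" item in the README above, here is a minimal BullMQ sketch of the enqueue/worker pattern it describes. The queue name, payload shape, and progress values are illustrative assumptions, not the project's actual code.

```typescript
import { Queue, Worker, Job } from "bullmq";

// Shared Redis connection settings (host/port assumed from the env defaults below).
const connection = {
  host: process.env.REDIS_HOST ?? "localhost",
  port: Number(process.env.REDIS_PORT ?? 6379),
};

// API side: enqueue a long-running story-generation job and return its id immediately,
// so the HTTP request stays fast while the heavy AI work happens elsewhere.
const storyQueue = new Queue("story-generation", { connection });

export async function enqueueStoryJob(userId: number): Promise<string | undefined> {
  const job = await storyQueue.add("generate", { userId });
  return job.id; // the frontend polls job progress/status by this id
}

// Worker process: picks jobs off the queue and reports progress as it goes,
// which is what drives the real-time progress updates mentioned in the README.
new Worker(
  "story-generation",
  async (job: Job<{ userId: number }>) => {
    await job.updateProgress(25); // e.g. story text generated
    await job.updateProgress(75); // e.g. audio generated
    return { status: "done", userId: job.data.userId };
  },
  { connection },
);
```

The `handleJob` hook described in the README would then poll a status endpoint keyed by that job id until the worker finishes.
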
diff --git a/apps/backend/.env.defaults b/apps/backend/.env.defaults
index 9dd52a5..20e922e 100644
--- a/apps/backend/.env.defaults
+++ b/apps/backend/.env.defaults
@@ -2,4 +2,4 @@ REDIS_HOST=redis
REDIS_PORT=6379
DATABASE_URL=postgresql://lingput:cmpinputpswd@postgres:5432/lingput?schema=public
APP_PORT=4000
-LEMMA_SERVICE_URL=http://lemma:8000
\ No newline at end of file
+LEMMA_SERVICE_URL=http://lemmas:8000
\ No newline at end of file
diff --git a/apps/backend/src/modules/story/services/lemmaAssembler/lemmatizationService.ts b/apps/backend/src/modules/story/services/lemmaAssembler/lemmatizationService.ts
index 5b7707c..4a82fb7 100644
--- a/apps/backend/src/modules/story/services/lemmaAssembler/lemmatizationService.ts
+++ b/apps/backend/src/modules/story/services/lemmaAssembler/lemmatizationService.ts
@@ -38,8 +38,8 @@ export class LemmatizationService {
let response: OpenAIResponse;
try {
response = await this.openai.responses.create({
- model: "gpt-5-mini",
- reasoning: { effort: "low" },
+ model: "gpt-5-nano",
+ reasoning: { effort: "minimal" },
input: [
{
role: "system",
diff --git a/apps/backend/src/modules/story/services/storyAssembler/storyGeneratorService.ts b/apps/backend/src/modules/story/services/storyAssembler/storyGeneratorService.ts
index e358fe3..9141d1b 100644
--- a/apps/backend/src/modules/story/services/storyAssembler/storyGeneratorService.ts
+++ b/apps/backend/src/modules/story/services/storyAssembler/storyGeneratorService.ts
@@ -13,8 +13,8 @@ export class StoryGeneratorService {
let response: OpenAIResponse;
try {
response = await this.openai.responses.create({
- model: "gpt-5-mini",
- reasoning: { effort: "low" },
+ model: "gpt-5",
+ reasoning: { effort: "minimal" },
input: [
{
role: "system",
diff --git a/apps/backend/src/modules/story/services/storyAssembler/translationService.ts b/apps/backend/src/modules/story/services/storyAssembler/translationService.ts
index dcf70be..c59d53e 100644
--- a/apps/backend/src/modules/story/services/storyAssembler/translationService.ts
+++ b/apps/backend/src/modules/story/services/storyAssembler/translationService.ts
@@ -21,8 +21,8 @@ export class TranslationService {
let response: OpenAIResponse;
try {
response = await this.openai.responses.create({
- model: "gpt-5-mini",
- reasoning: { effort: "low" },
+ model: "gpt-5",
+ reasoning: { effort: "minimal" },
input: [
{
role: "system",
diff --git a/apps/backend/src/modules/vocabAssessment/vocabAssessmentController.ts b/apps/backend/src/modules/vocabAssessment/vocabAssessmentController.ts
index 8fe4e51..fa0cfac 100644
--- a/apps/backend/src/modules/vocabAssessment/vocabAssessmentController.ts
+++ b/apps/backend/src/modules/vocabAssessment/vocabAssessmentController.ts
@@ -13,6 +13,13 @@ const answerSchema = z.object({
export class VocabAssessmentController {
constructor(private vocabAssessmentService: VocabAssessmentService) {}
+ skip = async (req: AuthedRequest, res: Response) => {
+ const user = req.user;
+
+ await this.vocabAssessmentService.skipAssessment(user.userId, "en", "de");
+ res.status(200).json(formatResponse({ success: true }));
+ };
+
start = async (req: AuthedRequest, res: Response) => {
const user = req.user;
diff --git a/apps/backend/src/modules/vocabAssessment/vocabAssessmentRoutes.ts b/apps/backend/src/modules/vocabAssessment/vocabAssessmentRoutes.ts
index 35ee5db..0915f2a 100644
--- a/apps/backend/src/modules/vocabAssessment/vocabAssessmentRoutes.ts
+++ b/apps/backend/src/modules/vocabAssessment/vocabAssessmentRoutes.ts
@@ -9,6 +9,7 @@ export function buildVocabAssessmentRouter(
const router = Router();
router.get("/start", authMiddleware, asyncHandler(controller.start));
+ router.post("/skip", authMiddleware, asyncHandler(controller.skip));
router.post("/answer", authMiddleware, asyncHandler(controller.answer));
return router;
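
As a usage sketch for the new route, a client could call it like this. The `/api/vocab-assessment` prefix is an assumption (it depends on where this router is mounted), and `credentials: "include"` is needed because auth relies on HTTP-only cookies.

```typescript
// Hypothetical client helper for the new skip endpoint; the mount path is assumed.
export async function skipVocabAssessment(): Promise<void> {
  const res = await fetch("/api/vocab-assessment/skip", {
    method: "POST",
    credentials: "include", // send the HTTP-only auth cookies set at login
  });
  if (!res.ok) {
    throw new Error(`Skip assessment failed with status ${res.status}`);
  }
}
```
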
diff --git a/apps/backend/src/modules/vocabAssessment/vocabAssessmentService.ts b/apps/backend/src/modules/vocabAssessment/vocabAssessmentService.ts
index 33f9b2a..453f235 100644
--- a/apps/backend/src/modules/vocabAssessment/vocabAssessmentService.ts
+++ b/apps/backend/src/modules/vocabAssessment/vocabAssessmentService.ts
@@ -31,6 +31,21 @@ export class VocabAssessmentService {
private redisWordsCache: RedisWordsCache,
) {}
+ async skipAssessment(
+ userId: number,
+ sourceLanguage: string,
+ targetLanguage: string,
+ ) {
+ const words = await this.getWordRanking(sourceLanguage, targetLanguage);
+ const knownVocabulary = words.slice(0, 50);
+ const vocabularyDTO: UserVocabularyDTO[] = knownVocabulary.map((word) => ({
+ word: word.word,
+ translation: word.translation,
+ article: null,
+ }));
+ await this.vocabularyService.saveManyWords(vocabularyDTO, userId);
+ }
+
async startAssessment(
userId: number,
sourceLanguage: string,
@@ -113,23 +128,7 @@ export class VocabAssessmentService {
};
}
- let words: WordRanking[] | null;
- words = await this.redisWordsCache.getWords(sourceLanguage, targetLanguage);
- if (!words) {
- words = await this.vocabAssessmentRepository.getWords(
- sourceLanguage,
- targetLanguage,
- );
- try {
- await this.redisWordsCache.saveWords(
- sourceLanguage,
- targetLanguage,
- words,
- );
- } catch (error) {
- logger.error("[cache] Failed to save words in Redis", error);
- }
- }
+ const words = await this.getWordRanking(sourceLanguage, targetLanguage);
const wordsToReview = state.wordsToReview;
const result = this.checkAnswer(answer, wordsToReview);
@@ -191,6 +190,31 @@ export class VocabAssessmentService {
};
}
+ private async getWordRanking(
+ sourceLanguage: string,
+ targetLanguage: string,
+ ): Promise<WordRanking[]> {
+ let words: WordRanking[] | null;
+ words = await this.redisWordsCache.getWords(sourceLanguage, targetLanguage);
+ if (!words) {
+ words = await this.vocabAssessmentRepository.getWords(
+ sourceLanguage,
+ targetLanguage,
+ );
+ try {
+ await this.redisWordsCache.saveWords(
+ sourceLanguage,
+ targetLanguage,
+ words,
+ );
+ } catch (error) {
+ logger.error("[cache] Failed to save words in Redis", error);
+ }
+ }
+ return words;
+ }

[Frontend hunk, file path lost: the vocab assessment UI adds the note "You can skip it for the demo purposes." and a "Skip assessment" button next to the "Step {apiResponse?.step}" / "(Last)" indicator, alongside its loading state.]

diff --git a/apps/frontend/src/features/dashboard/components/RightPanel.tsx b/apps/frontend/src/features/dashboard/components/RightPanel.tsx
index d22ceec..788ff3a 100644
--- a/apps/frontend/src/features/dashboard/components/RightPanel.tsx
+++ b/apps/frontend/src/features/dashboard/components/RightPanel.tsx
@@ -7,7 +7,7 @@ export default function RightPanel({
}) {
return (