import hashlib
import logging
import os
import random
import time
from collections import defaultdict

from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from groq import Groq
from pydantic import BaseModel
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
|
|
|
|
# FastAPI application instance.
app = FastAPI()

# CORS middleware: accept requests from any origin, with any method/header.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is a
# very permissive setup (Starlette falls back to echoing the request origin
# for credentialed requests) — confirm this is intended before public deploy.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Root logger: INFO level, timestamped messages.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
|
|
|
|
# Groq API client.
# SECURITY FIX: the API key was previously hard-coded in source (a leaked
# credential that must be rotated). Read it from the environment instead and
# fail fast with a clear error if it is missing.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise RuntimeError("GROQ_API_KEY environment variable is not set")
client = Groq(api_key=GROQ_API_KEY)
|
|
|
|
# Rate limiting middleware: sliding-window request limit per client IP.
class RateLimitMiddleware(BaseHTTPMiddleware):
    """Reject a client IP that exceeds ``max_requests`` within ``window_seconds``.

    Keeps an in-memory list of request timestamps per IP. State is
    process-local, so limits apply per worker process, not globally.
    The per-IP map grows with the number of distinct client IPs seen.
    """

    def __init__(self, app, max_requests: int = 5, window_seconds: int = 60):
        super().__init__(app)
        self.max_requests = max_requests
        self.window = window_seconds
        # ip -> timestamps (seconds since epoch) of recent requests
        self.ip_timestamps = defaultdict(list)

    async def dispatch(self, request: Request, call_next):
        # FIX: request.client may be None (e.g. some test clients / unusual
        # transports); fall back to a sentinel key instead of raising
        # AttributeError on .host.
        ip = request.client.host if request.client else "unknown"
        now = time.time()

        # Drop timestamps that have fallen outside the sliding window.
        recent = [t for t in self.ip_timestamps[ip] if now - t < self.window]

        if len(recent) >= self.max_requests:
            self.ip_timestamps[ip] = recent
            return JSONResponse(
                status_code=429,
                content={"detail": "Terlalu banyak permintaan. Silakan coba lagi beberapa saat."},
            )

        recent.append(now)
        self.ip_timestamps[ip] = recent
        return await call_next(request)
|
|
|
|
# Register the rate-limit middleware on the application.
app.add_middleware(RateLimitMiddleware)
|
|
|
|
# Source texts (Indonesian folk stories, a pantun, and a poem) used as the
# material for prompt building in /generate/.
data_sources = {
    "cerita": {
        "Malin Kundang": "Pada zaman dahulu, hiduplah seorang anak bernama Malin Kundang...",
        "Bawang Merah Bawang Putih": "Bawang Merah selalu iri dengan kebaikan Bawang Putih...",
        "Timun Mas": "Timun Mas adalah seorang gadis pemberian raksasa kepada seorang petani..."
    },
    "pantun": {
        "Pantun Pendidikan": "Belajar pagi membaca buku,\nSiang datang janganlah lesu,\nMenuntut ilmu jangan jemu,\nAgar sukses di masa depanmu."
    },
    "puisi": {
        "Puisi Alam": "Langit biru terbentang luas,\nGunung tinggi menjulang tegas,\nHijau daun menari bebas,\nAlam indah ciptaan yang cerdas."
    }
}
|
|
|
|
@app.post("/generate/")
async def generate_text():
    """Generate reading-comprehension exercises for grade-3 students.

    Randomly picks up to three stories, one pantun, and one poem from
    ``data_sources``, builds one Indonesian-language prompt, and asks the Groq
    LLM for one multiple-choice and one fill-in question per text.

    Returns:
        dict: the selected source texts plus the raw generated questions.

    Raises:
        HTTPException: 500 on any failure while building the prompt or
            calling the Groq API.
    """
    try:
        # FIX: random.sample raises ValueError when k exceeds the population
        # size — clamp to the number of available stories.
        stories = list(data_sources["cerita"])
        selected_stories = random.sample(stories, min(3, len(stories)))
        selected_pantun = random.choice(list(data_sources["pantun"].keys()))
        selected_puisi = random.choice(list(data_sources["puisi"].keys()))

        story_prompts = "\n\n".join(
            f"Judul: {story}\nIsi:\n{data_sources['cerita'][story]}" for story in selected_stories
        )
        pantun_prompt = f"Judul: {selected_pantun}\nIsi:\n{data_sources['pantun'][selected_pantun]}"
        puisi_prompt = f"Judul: {selected_puisi}\nIsi:\n{data_sources['puisi'][selected_puisi]}"

        full_prompt = f"""
Kamu adalah asisten pengajar untuk siswa SD kelas 3. Berdasarkan teks di bawah ini, buat soal untuk latihan siswa.

**Instruksi:**
1. Untuk setiap teks (cerita, pantun, puisi), tampilkan:
- Judul
- Isi lengkap teks
- 1 soal pilihan ganda (A-D) beserta jawaban benar dalam format: Jawaban Benar: X
- 1 soal isian beserta jawaban ideal dalam format: Jawaban Ideal: [isi jawaban])
2. Gunakan bahasa sederhana dan mudah dipahami siswa SD kelas 3.
3. Gunakan format seperti contoh ini:

---

Judul: [judul]
Isi:
[isi teks]

**Soal Pilihan Ganda:**
1. [pertanyaan]
A. ...
B. ...
C. ...
D. ...
Jawaban Benar: X

**Soal Isian:**
[pertanyaan]
Jawaban Ideal: [isi jawaban]

---

**TEKS CERITA:**
{story_prompts}

**TEKS PANTUN:**
{pantun_prompt}

**TEKS PUISI:**
{puisi_prompt}

Jangan gabungkan semua soal jadi satu bagian. Setiap teks harus punya blok tersendiri seperti format di atas.
"""

        logging.info("Mengirim prompt ke Groq...")

        completion = client.chat.completions.create(
            model="llama-3.1-8b-instant",
            messages=[{"role": "user", "content": full_prompt}],
            temperature=0.7,
            max_tokens=1000
        )

        generated_text = completion.choices[0].message.content.strip()

        return {
            "selected_stories": [{"title": s, "content": data_sources["cerita"][s]} for s in selected_stories],
            "selected_pantun": {"title": selected_pantun, "content": data_sources["pantun"][selected_pantun]},
            "selected_puisi": {"title": selected_puisi, "content": data_sources["puisi"][selected_puisi]},
            "generated_questions": generated_text
        }

    except Exception as e:
        # FIX: logging.exception records the full traceback, not just str(e).
        logging.exception("Error saat generate")
        raise HTTPException(status_code=500, detail=f"Terjadi kesalahan: {str(e)}")
|
|
|
|
# In-memory cache of generated feedback, keyed by a SHA-256 hash of the
# (user answer, expected answer) pair.
# NOTE(review): unbounded — grows for the life of the process.
feedback_cache = {}
|
|
|
|
class FeedbackRequest(BaseModel):
    """Request body for /generate-feedback/: the student's answer and the ideal answer."""

    user_answer: str
    expected_answer: str
|
|
|
|
@app.post("/generate-feedback/")
async def generate_feedback(request: FeedbackRequest):
    """Generate short, constructive feedback on a student's fill-in answer.

    Results are memoized in ``feedback_cache`` (keyed by SHA-256 of the
    answer pair) so identical repeated requests skip the LLM call.

    Args:
        request: the student's answer and the expected (ideal) answer.

    Returns:
        dict: ``{"feedback": <text>}``.

    Raises:
        HTTPException: 500 on any failure while calling the Groq API.
    """
    try:
        user_answer = request.user_answer.strip()
        expected_answer = request.expected_answer.strip()

        # Cache key: hash of both answers, joined unambiguously.
        prompt_hash = hashlib.sha256(f"{user_answer}|{expected_answer}".encode()).hexdigest()
        cached = feedback_cache.get(prompt_hash)
        if cached is not None:
            logging.info("Feedback dari cache.")
            return {"feedback": cached}

        prompt = f"""
Kamu adalah asisten pengajar untuk siswa SD kelas 3. Siswa memberikan jawaban berikut untuk soal isian.

**Jawaban Siswa:** {user_answer}
**Jawaban Ideal:** {expected_answer}

Beri feedback singkat dan membangun, maksimal 2 kalimat. Gunakan bahasa yang mudah dimengerti oleh siswa SD. Jika jawaban siswa salah, berikan petunjuk atau koreksi yang membantu.
"""

        logging.info("Mengirim prompt feedback ke Groq...")

        completion = client.chat.completions.create(
            model="llama-3.1-8b-instant",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=150
        )

        feedback = completion.choices[0].message.content.strip()

        # FIX: bound the cache so it cannot grow without limit over the life
        # of the process (simple wholesale reset once it gets large).
        if len(feedback_cache) >= 1024:
            feedback_cache.clear()
        feedback_cache[prompt_hash] = feedback

        return {"feedback": feedback}

    except Exception as e:
        # FIX: logging.exception records the full traceback, not just str(e).
        logging.exception("Error saat generate feedback")
        raise HTTPException(status_code=500, detail=f"Terjadi kesalahan: {str(e)}")
|