# gradeai / app.py
# Source: Hugging Face Space by atz21 — "Update app.py", commit 14a6aca (verified)
import os
import gradio as gr
import google.generativeai as genai
from markdown_pdf import MarkdownPdf, Section
# ---------- PROMPTS ----------
# System prompts for the two-step pipeline driven by align_and_grade():
#   1. ALIGNMENT_PROMPT — merges Question Paper (QP), Markscheme (MS) and
#      Answer Sheet (AS) into one per-question Markdown document.
#   2. GRADING_PROMPT   — grades that aligned document and emits a Markdown
#      mark table plus a summary.
# Each value mimics a chat-message dict ({"role", "content"}), but only the
# "content" string is actually passed to model.generate_content().
PROMPTS = {
# Step 1: produce the aligned QP | MS | AS representation.
"ALIGNMENT_PROMPT": {
"role": "system",
"content": """Your Role: You are an expert examiner and transcription specialist.
Your task is to **align three sources**:
- Question Paper (QP)
- Markscheme (MS)
- Student Answer Sheet (AS)
### Instructions
1. Parse all documents carefully and align them **per question and sub-question**.
2. For each question/sub-question, produce a structured block:
---
## Question X (and sub-question if applicable)
**QP:** [Insert the exact question text]
**MS:** [Insert the relevant part of the markscheme]
**AS:** [Insert the student's final cleaned answer transcription]
---
3. Formatting Rules:
- Use `##` for main questions and `###` for sub-questions.
- Write **QP | MS | AS** exactly in that order.
- Preserve all mathematical expressions inside fenced code blocks.
- Do not re-create diagrams/graphs. Write `[Graph omitted]`.
- If part of the student's answer is unreadable, write `[illegible]`.
- If a student skipped a question, write `[No response]`.
- Keep MS annotations (M1, A1, R1, etc.) exactly as in the original.
4. Output must be **clean, deterministic, and consistent** — so that another model can grade directly using this aligned representation.
### Example
## Question 1
**QP:** Expand `(1+x)^3`
**MS:** M1 for binomial expansion, A1 for coefficients, A1 for final form
**AS:**
"""
},
# Step 2: grade the aligned document produced by step 1.
"GRADING_PROMPT": {
"role": "system",
"content": """You are an official examiner. Use the following grading rules strictly.
Abbreviations:
- M: Marks awarded for attempting to use a correct Method.
- A: Marks awarded for an Answer or for Accuracy; often dependent on preceding M marks.
- R: Marks awarded for clear Reasoning.
- AG: Answer given in the question and so no marks are awarded.
- FT: Follow through. The practice of awarding marks, despite candidate errors in previous parts, for their correct methods/answers using incorrect results.
--------------------------------------------
## 1. General
Award marks using the annotations as noted in the markscheme (e.g., M1, A2).
## 2. Method and Answer/Accuracy marks
- Do not automatically award full marks for a correct answer; all working must be checked.
- It is generally not possible to award M0 followed by A1.
- Where M and A marks are noted on the same line (M1A1), M is for method, A is for accuracy.
- Multiple A marks can be independent.
## 3. Implied marks
Implied marks (M1) can only be awarded if correct work is seen or implied.
## 4. Follow through (FT) marks
- Award FT if an earlier wrong answer is used consistently later.
- Do not award FT if the result contradicts the question (e.g., probability > 1).
## 5. Mis-read (MR)
- Penalize once if the candidate misreads a value.
- Award other marks as appropriate.
## 6. Alternative methods
- Accept valid alternatives unless "Hence" forbids it.
## 7. Alternative forms
- Accept equivalent numeric/algebraic forms unless specified otherwise.
## 8. Format and accuracy of answers
- Use correct accuracy (3 s.f. if not specified).
- Arithmetic and algebra should be simplified.
## 9. Presentation of candidate work
- Ignore crossed-out work unless indicated.
- Mark only the first solution unless candidate specifies otherwise.
## 10. Graph/Diagram Questions
- If a question requires drawing or interpreting a graph/diagram, assume the student has done it correctly and award full marks for that part.
--------------------------------------------
### OUTPUT FORMAT
Produce a GitHub-flavored Markdown table with 3 columns:
| Student wrote | Marks Awarded | Reason |
|---------------|---------------|--------|
Special Formatting Rule:
- Whenever a mark is lost (M0, A0, R0 etc.), wrap it in red using: `<span style="color:red">M0</span>`.
- Also wrap the corresponding Reason in red color.
- Keep awarded marks (M1, A1, etc.) in plain text.
- If mixed (e.g., M1A0A1), only highlight the lost marks (`A0`) and its reason.
After the table, provide:
### Summary & Final Mark
- Total marks obtained vs total available
- Any FT (follow-through) applied
- Classification of errors (Conceptual, Silly mistake, Misread, etc.)
"""
}
}
# -------------------- CONFIG --------------------
# Read the Gemini API key from the environment at import time.
# NOTE(review): if GEMINI_API_KEY is unset this passes None — requests will
# fail later at call time rather than here; confirm that is the desired mode.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
# ---------- HELPER: Save to PDF ----------
def save_as_pdf(text, filename="output.pdf"):
    """Render *text* (Markdown) into a single-section PDF.

    Args:
        text: Markdown string to render.
        filename: Output path for the PDF (defaults to "output.pdf").

    Returns:
        The path the PDF was written to (same as *filename*).
    """
    document = MarkdownPdf()
    document.add_section(Section(text, toc=False))
    document.save(filename)
    return filename
# ---------- HELPER: Create Model with Fallback ----------
def create_model(primary="gemini-2.5-pro", fallback="gemini-2.5-flash"):
    """Return a deterministic (temperature=0) Gemini model handle.

    Tries *primary* first and falls back to *fallback* if instantiation
    raises.  The defaults preserve the original behavior; the parameters are
    a backward-compatible generalization.

    NOTE(review): GenerativeModel() is a local constructor and does not
    contact the API, so the except branch is unlikely to fire — real model
    availability errors surface later in generate_content(). Confirm whether
    the fallback should instead wrap the generation call.
    """
    # temperature=0 keeps outputs as deterministic as the API allows,
    # which the alignment prompt explicitly relies on.
    config = {"temperature": 0}
    try:
        print(f"⚡ Using {primary} model")
        return genai.GenerativeModel(primary, generation_config=config)
    except Exception as exc:
        # Include the reason so fallback events are diagnosable from logs.
        print(f"⚡ Falling back to {fallback} model ({exc})")
        return genai.GenerativeModel(fallback, generation_config=config)
# ---------- PIPELINE: ALIGN + GRADE ----------
def _response_text(response):
    """Best-effort extraction of the text from a Gemini response.

    The ``response.text`` property raises ``ValueError`` when the response
    was blocked or has no candidates — and ``getattr(resp, "text", None)``
    only suppresses ``AttributeError``, so the original fallback was dead
    code. Catch both and dig the first candidate part out manually.
    """
    try:
        text = response.text
    except (AttributeError, ValueError):
        text = None
    if not text and getattr(response, "candidates", None):
        text = response.candidates[0].content.parts[0].text
    return text


def align_and_grade(qp_file, ms_file, ans_file):
    """Align QP/MS/AS PDFs, then grade the aligned document.

    Args:
        qp_file: Path to the Question Paper PDF.
        ms_file: Path to the Markscheme PDF.
        ans_file: Path to the Student Answer Sheet PDF.

    Returns:
        Tuple of (aligned_text, aligned_pdf_path, grading_text,
        grading_pdf_path). On failure, the first element carries an error
        message and the rest are None, matching the Gradio output wiring.
    """
    try:
        # Upload the three source PDFs to the Gemini Files API.
        qp_uploaded = genai.upload_file(path=qp_file, display_name="Question Paper")
        ms_uploaded = genai.upload_file(path=ms_file, display_name="Markscheme")
        ans_uploaded = genai.upload_file(path=ans_file, display_name="Answer Sheet")
        model = create_model()

        # Step 1: align QP | MS | AS into one Markdown document.
        resp = model.generate_content([
            PROMPTS["ALIGNMENT_PROMPT"]["content"],
            qp_uploaded,
            ms_uploaded,
            ans_uploaded,
        ])
        aligned_text = _response_text(resp)
        if not aligned_text:
            # Guard: previously a None here crashed save_as_pdf().
            return "❌ Error: model returned no alignment text", None, None, None
        aligned_pdf_path = save_as_pdf(aligned_text, "aligned_qp_ms_as.pdf")

        # Step 2: grade the aligned document (automatic).
        response = model.generate_content([
            PROMPTS["GRADING_PROMPT"]["content"],
            aligned_text,
        ])
        grading = _response_text(response)
        if not grading:
            return aligned_text, aligned_pdf_path, "❌ Error: model returned no grading text", None

        # Name the grading report after the student's answer file.
        base_name = os.path.splitext(os.path.basename(ans_file))[0]
        grading_pdf_path = save_as_pdf(grading, f"{base_name}_graded.pdf")
        return aligned_text, aligned_pdf_path, grading, grading_pdf_path
    except Exception as e:
        # Surface any failure in the first output box instead of crashing the UI.
        return f"❌ Error: {e}", None, None, None
# ---------- GRADIO APP ----------
# UI layout: three upload slots, one trigger button, and paired
# (textbox, downloadable-PDF) outputs for the aligned document and the
# grading report. Wiring mirrors align_and_grade's 4-tuple return.
with gr.Blocks(title="LeadIB AI Grading (Alignment + Auto-Grading)") as demo:
    gr.Markdown("## LeadIB AI Grading\nUpload Question Paper, Markscheme, and Student Answer Sheet.\nThe system will align and grade automatically.")

    # One upload slot per source document.
    with gr.Row():
        qp_file = gr.File(label="Upload Question Paper (PDF)", type="filepath")
        ms_file = gr.File(label="Upload Markscheme (PDF)", type="filepath")
        ans_file = gr.File(label="Upload Student Answer Sheet (PDF)", type="filepath")

    start_btn = gr.Button("Start Alignment + Auto-Grading")

    # Aligned output pair.
    with gr.Row():
        aligned_out = gr.Textbox(label="📄 Aligned QP | MS | AS", lines=20)
        aligned_pdf = gr.File(label="⬇️ Download Aligned (PDF)")

    # Grading output pair.
    with gr.Row():
        grading_out = gr.Textbox(label="✅ Grading Report", lines=20)
        grading_pdf = gr.File(label="⬇️ Download Grading Report (PDF)")

    start_btn.click(
        fn=align_and_grade,
        inputs=[qp_file, ms_file, ans_file],
        outputs=[aligned_out, aligned_pdf, grading_out, grading_pdf],
        show_progress=True,
    )

if __name__ == "__main__":
    demo.launch()