# models.py
from pydantic import BaseModel, Field
from typing import List, Union, Optional, Tuple


class MultipleChoiceQuestion(BaseModel):
    question: str
    options: list[str]
    answer: str


class ShortResponseQuestion(BaseModel):
    question: str


class ResponseFormat(BaseModel):
    multiple_choice_questions: list[MultipleChoiceQuestion]
    short_response_questions: list[ShortResponseQuestion]


class QuestionEvaluation(BaseModel):
    understanding_level: str
    key_points: list[str]
    misconceptions: list[str]
    score: int


class FinalEvaluation(BaseModel):
    question_evaluations: list[QuestionEvaluation]
    overall_understanding: str
    strengths: list[str]
    weaknesses: list[str]
    recommendations: list[str]
    composite_score: int
    predicted_success: str


class BloomQuestion(BaseModel):
    question: str
    personalDifficulty: int


class ResponseTypeBloom(BaseModel):
    questions: list[BloomQuestion]


class ScoreBloom(BaseModel):
    correct: bool
    feedback: str
    correct_answer: str = "n/a"  # Default value for when the answer is correct
    error_step: str = "n/a"      # The step in the guide where the error occurred
    new_step: str = "n/a"        # The rewritten step
    subpoints: list[str] = []    # How-to style subpoints
    mistake: str = "n/a"         # The mistake made on that step


class PracticeQuestion(BaseModel):
    question: str
    difficulty: str


class ResponseTypeNB(BaseModel):
    topic: str
    notes: list[str]
    practice_questions: list[PracticeQuestion]


class Explanation(BaseModel):
    style: str
    text: str
    score: int = 0


class ExplanationResponse(BaseModel):
    explanations: List[Explanation]


class FillInBlankQuestion(BaseModel):
    text: str
    blanks: List[str]
    blank_positions: List[int]
    explanation: str


class MatchingQuestion(BaseModel):
    terms: List[str]
    definitions: List[str]
    correct_pairs: List[Tuple[int, int]]
    explanation: str


class OrderingQuestion(BaseModel):
    items: List[str]
    correct_order: List[int]
    explanation: str


# Note: redefines the MultipleChoiceQuestion declared above with a different
# schema (correct_index/explanation instead of answer).
class MultipleChoiceQuestion(BaseModel):
    question: str
    options: List[str]
    correct_index: int
    explanation: str


class EquationQuestion(BaseModel):
    problem: str
    steps: List[str]
    final_answer: str
    latex: bool = True
    explanation: str


class Question(BaseModel):
    type: str
    content: Union[FillInBlankQuestion, MatchingQuestion, OrderingQuestion, MultipleChoiceQuestion, EquationQuestion]
    context: str
    difficulty: int


class ThoughtProcess(BaseModel):
    steps: List[str]
    misconceptions: List[str]
    comparison: Optional[List[Tuple[str, str]]]


class DeriveQuestion(BaseModel):
    question: str
    expected_answer: str
    category: str


class DeriveQuestions(BaseModel):
    questions: List[DeriveQuestion]


class DeriveResponse(BaseModel):
    status: str
    newLine: Optional[str] = None
    simplifiedQuestion: Optional[str] = None


class ConceptMapNode(BaseModel):
    id: str
    label: str
    description: str
    prerequisites: List[str]
    starter_prompts: List[str]


class ConceptMap(BaseModel):
    classID: int
    unit: str
    nodes: List[ConceptMapNode]
    edges: List[Tuple[str, str]]  # (from_node_id, to_node_id)
    created_on: str
    updated_on: str


class UserNodeProgress(BaseModel):
    status: str  # "derived", "pending", "in_progress"
    date_derived: Optional[str]
    chat_history: List[str]
    user_notes: str
    mistake_history: List[dict]


class UserConceptMapProgress(BaseModel):
    OSIS: int
    classID: int
    unit: str
    node_progress: dict[str, UserNodeProgress]  # node_id -> progress
    last_accessed: str


class ProblemMapping(BaseModel):
    problem_id: str
    required_concepts: list[str]


class ProblemMappingResponse(BaseModel):
    problem_mappings: list[ProblemMapping]


class EvaluationResponse(BaseModel):
    score: float = Field(..., description="Score between 0 and 1 indicating level of understanding")
    correct_concepts: List[str] = Field(..., description="List of correctly understood concepts")
    misconceptions: List[str] = Field(..., description="List of misconceptions or areas needing improvement")
    suggestions: List[str] = Field(..., description="Specific suggestions for improvement")
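

# --- Illustrative usage sketch ---
# A minimal example of how one of these Pydantic models can validate a
# structured payload (e.g. JSON parsed from a model response). The sample
# data below is hypothetical and only demonstrates field validation.
if __name__ == "__main__":
    sample = {
        "score": 0.8,
        "correct_concepts": ["Newton's second law"],
        "misconceptions": ["treats mass and weight as interchangeable"],
        "suggestions": ["practice free-body diagrams with explicit units"],
    }
    evaluation = EvaluationResponse(**sample)
    print(evaluation)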