fix: add step param to assess view
jansenk committed Dec 7, 2023
1 parent aa135ae commit 00acbdc
Showing 4 changed files with 35 additions and 25 deletions.
@@ -232,10 +232,11 @@ class AssessmentSubmitRequestSerializer(MfeAssessmentDataSerializer):
             ...
         ],
         overallFeedback: (String / Empty)
+        step: (String): The step for which we are submitting an assessment
     }
     """
 
     continueGrading = BooleanField(required=False, default=False)
+    step = CharField()
 
     def to_legacy_format(self, xblock):
         """
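For reference, a request body the updated serializer accepts might look like the sketch below. It is illustrative only: the criteria entries are abbreviated, the feedback strings are invented, and only overallFeedback, step, and continueGrading correspond to fields shown above (step is a plain DRF CharField, so it is required by default).

# Illustrative payload only; values are hypothetical and `criteria` is abbreviated.
assessment_submit_payload = {
    "criteria": [
        # per-criterion options/feedback elided
    ],
    "overallFeedback": "Clear thesis; cite more sources",
    "step": "peer",            # new field: 'peer', 'self', or 'studentTraining'
    "continueGrading": False,  # optional; defaults to False
}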
12 changes: 8 additions & 4 deletions openassessment/xblock/ui_mixins/mfe/mixin.py
@@ -290,9 +290,12 @@ def _assessment_submit_handler(self, data):
         if not serializer.is_valid():
             raise OraApiException(400, error_codes.INCORRECT_PARAMETERS, serializer.errors)
         assessment_data = serializer.to_legacy_format(self)
-        peer_data = self.peer_assessment_data(serializer.data['continueGrading'])
+        requested_step = serializer.data['step']
+        peer_data = self.peer_assessment_data()
         try:
-            if peer_data.continue_grading or self.workflow_data.is_peer:
+            if self.workflow_data.is_cancelled:
+                raise InvalidStateToAssess()
+            if requested_step == 'peer':
                 peer_assess(
                     assessment_data['options_selected'],
                     assessment_data['feedback'],
@@ -301,7 +304,7 @@
                     self.workflow_data,
                     peer_data,
                 )
-            elif self.workflow_data.is_self:
+            elif requested_step == 'self':
                 self_assess(
                     assessment_data['options_selected'],
                     assessment_data['criterion_feedback'],
@@ -310,7 +313,7 @@
                     self.workflow_data,
                     self.self_data
                 )
-            elif self.workflow_data.is_training:
+            elif requested_step == 'studentTraining':
                 corrections = training_assess(
                     assessment_data['options_selected'],
                     self.config_data,
@@ -324,6 +327,7 @@
             # This catches the error we explicitly raise, as well as any that may be raised from within
             # the assessment logic itself
             context = {
+                'requested_step': requested_step,
                 'student_item': self.config_data.student_item_dict,
                 'workflow': self.workflow_data.workflow,
             }
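The tests below import MFE_STEP_TO_WORKFLOW_MAPPINGS from this module. Its definition sits outside the visible hunks, but from how the tests use it, looking up an MFE step name to get the matching workflow status and the prefix for mock_assess_functions kwargs, a plausible sketch is:

# Hypothetical sketch; the real definition is not shown in this diff.
# Inferred from the tests, which do MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
# and then build kwarg names like workflow_step + '_kwargs'.
MFE_STEP_TO_WORKFLOW_MAPPINGS = {
    'peer': 'peer',
    'self': 'self',
    'studentTraining': 'training',
}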
@@ -468,9 +468,9 @@ def test_assessment_data(self):
                 }
             ],
             "overallFeedback": "Foo",
-            "continueGrading": True,
+            "step": "Wham"
         }).data
 
         self.assertEqual(assessment_submit_request_data["overallFeedback"], "Foo")
         self.assertEqual(len(assessment_submit_request_data["criteria"]), 1)
-        self.assertTrue(assessment_submit_request_data["continueGrading"])
+        self.assertEqual(assessment_submit_request_data["step"], "Wham")
41 changes: 23 additions & 18 deletions openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
@@ -38,6 +38,7 @@
 from openassessment.xblock.test.test_staff_area import NullUserService, UserStateService
 from openassessment.xblock.test.test_submission import COURSE_ID, setup_mock_team
 from openassessment.xblock.test.test_team import MOCK_TEAM_ID, MockTeamsService
+from openassessment.xblock.ui_mixins.mfe.mixin import MFE_STEP_TO_WORKFLOW_MAPPINGS
 from openassessment.xblock.ui_mixins.mfe.constants import error_codes, handler_suffixes
 from openassessment.xblock.ui_mixins.mfe.submission_serializers import DraftResponseSerializer, SubmissionSerializer
 
@@ -150,9 +151,11 @@ def request_file_callback(self, xblock, payload):
             response_format='response'
         )
 
-    def request_assessment_submit(self, xblock, payload=None):
+    def request_assessment_submit(self, xblock, step=None, payload=None):
         if payload is None:
             payload = self.DEFAULT_ASSESSMENT_SUBMIT_VALUE
+        if step is not None:
+            payload['step'] = step
         return super().request(
             xblock,
             'assessment',
@@ -936,6 +939,7 @@ def mock_assess_functions(self, self_kwargs=None, training_kwargs=None, peer_kwargs=None):
         {
             'criterionFeedback': {},
             'overallFeedback': '',
+            'step': 'peer'
         },
         {
             'optionsSelected': ['this is a list'],
@@ -949,58 +953,59 @@ def test_incorrect_params(self, xblock, payload):
         assert resp.status_code == 400
         assert resp.json['error']['errorCode'] == error_codes.INCORRECT_PARAMETERS
 
-    @ddt.data(None, 'cancelled', 'done', 'ai')
+    @ddt.data("self", "peer", "studentTraining")
     @scenario("data/basic_scenario.xml")
-    def test_not_allowed_step_error(self, xblock, status):
-        with self.mock_workflow_status(status):
-            resp = self.request_assessment_submit(xblock)
+    def test_not_allowed_to_assess_when_cancelled(self, xblock, step):
+        with self.mock_workflow_status("cancelled"):
+            resp = self.request_assessment_submit(xblock, step=step)
         assert resp.status_code == 400
         assert resp.json['error']['errorCode'] == error_codes.INVALID_STATE_TO_ASSESS
 
     @ddt.unpack
     @ddt.data(
         ('self', True, False, False),
-        ('training', False, True, False),
+        ('studentTraining', False, True, False),
         ('peer', False, False, True)
     )
     @scenario("data/basic_scenario.xml")
-    def test_assess(self, xblock, step, expect_self, expect_training, expect_peer):
-        with self.mock_workflow_status(step):
+    def test_assess(self, xblock, mfe_step, expect_self, expect_training, expect_peer):
+        workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+        with self.mock_workflow_status(workflow_step):
             with self.mock_assess_functions() as assess_mocks:
-                resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step=mfe_step)
         assert resp.status_code == 200
         assert assess_mocks.self.called == expect_self
         assert assess_mocks.training.called == expect_training
         assert assess_mocks.peer.called == expect_peer
 
     @ddt.data(None, 'cancelled', 'waiting', 'self', 'training', 'done')
     @scenario("data/basic_scenario.xml")
-    def test_continue_grading(self, xblock, step):
+    def test_peer_assess_when_not_in_peer(self, xblock, step):
         with self.mock_assess_functions() as assess_mocks:
             with self.mock_workflow_status(step):
-                with self.mock_continue_grading(True):
-                    resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step="peer")
 
         assert resp.status_code == 200
         assess_mocks.self.assert_not_called()
         assess_mocks.training.assert_not_called()
         assess_mocks.peer.assert_called()
 
-    @ddt.data('self', 'training', 'peer')
+    @ddt.data('self', 'studentTraining', 'peer')
     @scenario("data/basic_scenario.xml")
-    def test_assess_error(self, xblock, step):
+    def test_assess_error(self, xblock, mfe_step):
         error = AssessmentError("there was a problem")
-        with self.mock_workflow_status(step):
-            with self.mock_assess_functions(**{step + '_kwargs': {'side_effect': error}}):
-                resp = self.request_assessment_submit(xblock)
+        workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+        with self.mock_workflow_status(workflow_step):
+            with self.mock_assess_functions(**{workflow_step + '_kwargs': {'side_effect': error}}):
+                resp = self.request_assessment_submit(xblock, step=mfe_step)
         assert_error_response(resp, 500, error_codes.INTERNAL_EXCEPTION, str(error))
 
     @scenario("data/basic_scenario.xml")
     def test_training_assess_corrections(self, xblock):
         corrections = {'ferocity': 'sublime', 'element': 'hydrogen'}
         with self.mock_workflow_status('training'):
             with self.mock_assess_functions(training_kwargs={'return_value': corrections}):
-                resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step='studentTraining')
 
         assert_error_response(resp, 400, error_codes.TRAINING_ANSWER_INCORRECT, corrections)
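Read together with the handler change, these tests pin down the new contract: a cancelled workflow always rejects the request, and otherwise the client-supplied step, not the current workflow status, selects the assess function. A condensed restatement, for orientation only (the handling of unrecognized steps is assumed, since that branch falls outside the visible hunks):

# Condensed sketch of the dispatch in mixin.py above; not code from the commit.
def dispatch_assess(requested_step, workflow_data):
    if workflow_data.is_cancelled:
        raise InvalidStateToAssess()
    if requested_step == 'peer':
        return peer_assess
    elif requested_step == 'self':
        return self_assess
    elif requested_step == 'studentTraining':
        return training_assess
    raise InvalidStateToAssess()  # assumed: steps outside the mapping are rejected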
