fix: add step param #2135

Merged: 5 commits, Dec 7, 2023
2 changes: 1 addition & 1 deletion openassessment/__init__.py
@@ -2,4 +2,4 @@
Initialization Information for Open Assessment Module
"""

-__version__ = '6.0.11'
+__version__ = '6.0.12'
4 changes: 2 additions & 2 deletions openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
@@ -9,7 +9,6 @@
SerializerMethodField,
URLField,
Serializer,
-BooleanField,
)
from openassessment.data import OraSubmissionAnswerFactory
from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField
@@ -224,10 +223,11 @@ class AssessmentSubmitRequestSerializer(MfeAssessmentDataSerializer):
...
],
overallFeedback: (String / Empty)
+step: (String): The step for which we are submitting an assessment
}
"""

-continueGrading = BooleanField(required=False, default=False)
+step = CharField()

def to_legacy_format(self, xblock):
"""
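For reference, a request body in the shape this serializer now accepts might look like the sketch below; the per-criterion contents are elided as in the docstring, and the only field added by this PR is step.

# Illustrative request body for the MFE assessment-submit handler (values made up).
example_body = {
    "criteria": [
        # ... per-criterion assessment data, unchanged by this PR ...
    ],
    "overallFeedback": "",   # String / Empty
    "step": "peer",          # new: "peer", "self", or "studentTraining"
}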
13 changes: 8 additions & 5 deletions openassessment/xblock/ui_mixins/mfe/mixin.py
@@ -290,18 +290,20 @@ def _assessment_submit_handler(self, data):
if not serializer.is_valid():
raise OraApiException(400, error_codes.INCORRECT_PARAMETERS, serializer.errors)
assessment_data = serializer.to_legacy_format(self)
-peer_data = self.peer_assessment_data(serializer.data['continueGrading'])
+requested_step = serializer.data['step']
try:
-if peer_data.continue_grading or self.workflow_data.is_peer:
+if self.workflow_data.is_cancelled:
+raise InvalidStateToAssess()
+if requested_step == 'peer':
peer_assess(
assessment_data['options_selected'],
assessment_data['feedback'],
assessment_data['criterion_feedback'],
self.config_data,
self.workflow_data,
-peer_data,
+self.peer_assessment_data(),
)
-elif self.workflow_data.is_self:
+elif requested_step == 'self':
self_assess(
assessment_data['options_selected'],
assessment_data['criterion_feedback'],
@@ -310,7 +312,7 @@
self.workflow_data,
self.self_data
)
-elif self.workflow_data.is_training:
+elif requested_step == 'studentTraining':
corrections = training_assess(
assessment_data['options_selected'],
self.config_data,
@@ -324,6 +326,7 @@
# This catches the error we explicitly raise, as well as any that may be raised from within
# the assessment logic itself
context = {
+'requested_step': requested_step,
'student_item': self.config_data.student_item_dict,
'workflow': self.workflow_data.workflow,
}
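The test file changes further below import MFE_STEP_TO_WORKFLOW_MAPPINGS from this module to translate MFE-facing step names into workflow step names. Its definition is not shown in this diff; a plausible sketch, inferred only from the step names used in the handler and tests, is:

# Hypothetical sketch -- the real definition lives in mixin.py but is not part of this diff.
# MFE step names (as sent in the 'step' request field) map to workflow step names.
MFE_STEP_TO_WORKFLOW_MAPPINGS = {
    "peer": "peer",
    "self": "self",
    "studentTraining": "training",
}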
@@ -476,9 +476,9 @@ def test_assessment_data(self):
}
],
"overallFeedback": "Foo",
"continueGrading": True,
"step": "Wham"
}).data

self.assertEqual(assessment_submit_request_data["overallFeedback"], "Foo")
self.assertEqual(len(assessment_submit_request_data["criteria"]), 1)
-self.assertTrue(assessment_submit_request_data["continueGrading"])
+self.assertEqual(assessment_submit_request_data["step"], "Wham")
50 changes: 31 additions & 19 deletions openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
@@ -37,6 +37,7 @@
from openassessment.xblock.test.test_staff_area import NullUserService, UserStateService
from openassessment.xblock.test.test_submission import COURSE_ID, setup_mock_team
from openassessment.xblock.test.test_team import MOCK_TEAM_ID, MockTeamsService
+from openassessment.xblock.ui_mixins.mfe.mixin import MFE_STEP_TO_WORKFLOW_MAPPINGS
from openassessment.xblock.ui_mixins.mfe.constants import error_codes, handler_suffixes
from openassessment.xblock.ui_mixins.mfe.submission_serializers import DraftResponseSerializer, SubmissionSerializer

@@ -149,9 +150,11 @@ def request_file_callback(self, xblock, payload):
response_format='response'
)

-def request_assessment_submit(self, xblock, payload=None):
+def request_assessment_submit(self, xblock, step=None, payload=None):
if payload is None:
payload = self.DEFAULT_ASSESSMENT_SUBMIT_VALUE
+if step is not None:
+payload['step'] = step
return super().request(
xblock,
'assessment',
@@ -926,6 +929,7 @@ def mock_assess_functions(self, self_kwargs=None, training_kwargs=None, peer_kwa
{
'criterionFeedback': {},
'overallFeedback': '',
+'step': 'peer'
},
{
'optionsSelected': ['this is a list'],
@@ -939,57 +943,65 @@ def test_incorrect_params(self, xblock, payload):
assert resp.status_code == 400
assert resp.json['error']['errorCode'] == error_codes.INCORRECT_PARAMETERS

-@ddt.data(None, 'cancelled', 'done', 'ai')
+@ddt.data("self", "peer", "studentTraining")
@scenario("data/basic_scenario.xml")
-def test_not_allowed_step_error(self, xblock, status):
-with self.mock_workflow_status(status):
-resp = self.request_assessment_submit(xblock)
+def test_not_allowed_to_assess_when_cancelled(self, xblock, step):
+with self.mock_workflow_status("cancelled"):
+resp = self.request_assessment_submit(xblock, step=step)
assert resp.status_code == 400
assert resp.json['error']['errorCode'] == error_codes.INVALID_STATE_TO_ASSESS

@ddt.unpack
@ddt.data(
('self', True, False, False),
-('training', False, True, False),
+('studentTraining', False, True, False),
('peer', False, False, True)
)
@scenario("data/basic_scenario.xml")
-def test_assess(self, xblock, step, expect_self, expect_training, expect_peer):
-with self.mock_workflow_status(step):
+def test_assess(self, xblock, mfe_step, expect_self, expect_training, expect_peer):
+workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+with self.mock_workflow_status(workflow_step):
with self.mock_assess_functions() as assess_mocks:
-resp = self.request_assessment_submit(xblock)
+resp = self.request_assessment_submit(xblock, step=mfe_step)
assert resp.status_code == 200
assert assess_mocks.self.called == expect_self
assert assess_mocks.training.called == expect_training
assert assess_mocks.peer.called == expect_peer

-@ddt.data(None, 'cancelled', 'waiting', 'self', 'training', 'done')
+@ddt.data(None, 'waiting', 'self', 'training', 'done')
@scenario("data/basic_scenario.xml")
-def test_continue_grading(self, xblock, step):
+def test_peer_assess_when_not_in_peer(self, xblock, step):
with self.mock_assess_functions() as assess_mocks:
with self.mock_workflow_status(step):
with self.mock_continue_grading(True):
-resp = self.request_assessment_submit(xblock)
+resp = self.request_assessment_submit(xblock, step="peer")

assert resp.status_code == 200
assess_mocks.self.assert_not_called()
assess_mocks.training.assert_not_called()
assess_mocks.peer.assert_called()

-@ddt.data('self', 'training', 'peer')
+@ddt.data('self', 'studentTraining', 'peer')
@scenario("data/basic_scenario.xml")
-def test_assess_error(self, xblock, step):
+def test_assess_error(self, xblock, mfe_step):
error = AssessmentError("there was a problem")
-with self.mock_workflow_status(step):
-with self.mock_assess_functions(**{step + '_kwargs': {'side_effect': error}}):
-resp = self.request_assessment_submit(xblock)
+workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+with self.mock_workflow_status(workflow_step):
+with self.mock_assess_functions(**{workflow_step + '_kwargs': {'side_effect': error}}):
+resp = self.request_assessment_submit(xblock, step=mfe_step)
assert_error_response(resp, 500, error_codes.INTERNAL_EXCEPTION, str(error))

@scenario("data/basic_scenario.xml")
def test_cant_submit_when_cancelled(self, xblock):
with self.mock_workflow_status('cancelled'):
resp = self.request_assessment_submit(xblock, step="peer")
assert resp.status_code == 400
assert resp.json['error']['errorCode'] == error_codes.INVALID_STATE_TO_ASSESS

@scenario("data/basic_scenario.xml")
def test_training_assess_corrections(self, xblock):
corrections = {'ferocity': 'sublime', 'element': 'hydrogen'}
with self.mock_workflow_status('training'):
with self.mock_assess_functions(training_kwargs={'return_value': corrections}):
-resp = self.request_assessment_submit(xblock)
+resp = self.request_assessment_submit(xblock, step='studentTraining')

assert_error_response(resp, 400, error_codes.TRAINING_ANSWER_INCORRECT, corrections)
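With the updated helper, tests pass the MFE step name explicitly. A minimal usage sketch (not part of this PR, shown only to illustrate the helper's new signature) would sit alongside the tests above:

# Illustrative test only; mirrors the patterns used in the tests above.
@scenario("data/basic_scenario.xml")
def test_self_assess_uses_self_step(self, xblock):
    with self.mock_workflow_status("self"):
        with self.mock_assess_functions() as assess_mocks:
            resp = self.request_assessment_submit(xblock, step="self")
    assert resp.status_code == 200
    assess_mocks.self.assert_called_once()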
2 changes: 1 addition & 1 deletion package.json
@@ -1,6 +1,6 @@
{
"name": "edx-ora2",
"version": "6.0.11",
"version": "6.0.12",
"repository": "https://github.com/openedx/edx-ora2.git",
"dependencies": {
"@edx/frontend-build": "8.0.6",