From 14809646631e2d8134d7921d39e0332a6d2754bc Mon Sep 17 00:00:00 2001
From: jansenk
Date: Thu, 7 Dec 2023 13:27:15 -0500
Subject: [PATCH 1/5] fix: add step param to assess view

---
 .../ui_mixins/mfe/assessment_serializers.py   |  3 +-
 openassessment/xblock/ui_mixins/mfe/mixin.py  | 12 ++++--
 .../mfe/test_assessment_serializers.py        |  4 +-
 .../xblock/ui_mixins/mfe/test_mfe_mixin.py    | 41 +++++++++++--------
 4 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
index 77969f7d54..529830054e 100644
--- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
@@ -224,10 +224,11 @@ class AssessmentSubmitRequestSerializer(MfeAssessmentDataSerializer):
             ...
         ],
         overallFeedback: (String / Empty)
+        step: (String) The step for which we are submitting an assessment
     }
     """
 
-    continueGrading = BooleanField(required=False, default=False)
+    step = CharField()
 
     def to_legacy_format(self, xblock):
         """
diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py
index 2356e8bef2..b7aeaceaff 100644
--- a/openassessment/xblock/ui_mixins/mfe/mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/mixin.py
@@ -290,9 +290,12 @@ def _assessment_submit_handler(self, data):
         if not serializer.is_valid():
             raise OraApiException(400, error_codes.INCORRECT_PARAMETERS, serializer.errors)
         assessment_data = serializer.to_legacy_format(self)
-        peer_data = self.peer_assessment_data(serializer.data['continueGrading'])
+        requested_step = serializer.data['step']
+        peer_data = self.peer_assessment_data()
         try:
-            if peer_data.continue_grading or self.workflow_data.is_peer:
+            if self.workflow_data.is_cancelled:
+                raise InvalidStateToAssess()
+            if requested_step == 'peer':
                 peer_assess(
                     assessment_data['options_selected'],
                     assessment_data['feedback'],
@@ -301,7 +304,7 @@ def _assessment_submit_handler(self, data):
                 self.workflow_data,
                 peer_data,
             )
-        elif self.workflow_data.is_self:
+        elif requested_step == 'self':
             self_assess(
                 assessment_data['options_selected'],
                 assessment_data['criterion_feedback'],
@@ -310,7 +313,7 @@ def _assessment_submit_handler(self, data):
                 self.workflow_data,
                 self.self_data
             )
-        elif self.workflow_data.is_training:
+        elif requested_step == 'studentTraining':
             corrections = training_assess(
                 assessment_data['options_selected'],
                 self.config_data,
@@ -324,6 +327,7 @@ def _assessment_submit_handler(self, data):
         # This catches the error we explicitly raise, as well as any that may be raised from within
         # the assessment logic itself
         context = {
+            'requested_step': requested_step,
             'student_item': self.config_data.student_item_dict,
             'workflow': self.workflow_data.workflow,
         }
diff --git a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
index db697821d0..81f1f14ee7 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_assessment_serializers.py
@@ -476,9 +476,9 @@ def test_assessment_data(self):
                 }
             ],
             "overallFeedback": "Foo",
-            "continueGrading": True,
+            "step": "Wham"
         }).data
         self.assertEqual(assessment_submit_request_data["overallFeedback"], "Foo")
         self.assertEqual(len(assessment_submit_request_data["criteria"]), 1)
-        self.assertTrue(assessment_submit_request_data["continueGrading"])
+        self.assertEqual(assessment_submit_request_data["step"], "Wham")
diff --git a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
index 837b649114..bd2a5963ba 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
@@ -37,6 +37,7 @@
 from openassessment.xblock.test.test_staff_area import NullUserService, UserStateService
 from openassessment.xblock.test.test_submission import COURSE_ID, setup_mock_team
 from openassessment.xblock.test.test_team import MOCK_TEAM_ID, MockTeamsService
+from openassessment.xblock.ui_mixins.mfe.mixin import MFE_STEP_TO_WORKFLOW_MAPPINGS
 from openassessment.xblock.ui_mixins.mfe.constants import error_codes, handler_suffixes
 from openassessment.xblock.ui_mixins.mfe.submission_serializers import DraftResponseSerializer, SubmissionSerializer
 
@@ -149,9 +150,11 @@ def request_file_callback(self, xblock, payload):
             response_format='response'
         )
 
-    def request_assessment_submit(self, xblock, payload=None):
+    def request_assessment_submit(self, xblock, step=None, payload=None):
         if payload is None:
             payload = self.DEFAULT_ASSESSMENT_SUBMIT_VALUE
+        if step is not None:
+            payload['step'] = step
         return super().request(
             xblock,
             'assessment',
@@ -926,6 +929,7 @@ def mock_assess_functions(self, self_kwargs=None, training_kwargs=None, peer_kwa
         {
             'criterionFeedback': {},
             'overallFeedback': '',
+            'step': 'peer'
         },
         {
             'optionsSelected': ['this is a list'],
@@ -939,25 +943,26 @@ def test_incorrect_params(self, xblock, payload):
         assert resp.status_code == 400
         assert resp.json['error']['errorCode'] == error_codes.INCORRECT_PARAMETERS
 
-    @ddt.data(None, 'cancelled', 'done', 'ai')
+    @ddt.data("self", "peer", "studentTraining")
     @scenario("data/basic_scenario.xml")
-    def test_not_allowed_step_error(self, xblock, status):
-        with self.mock_workflow_status(status):
-            resp = self.request_assessment_submit(xblock)
+    def test_not_allowed_to_assess_when_cancelled(self, xblock, step):
+        with self.mock_workflow_status("cancelled"):
+            resp = self.request_assessment_submit(xblock, step=step)
         assert resp.status_code == 400
         assert resp.json['error']['errorCode'] == error_codes.INVALID_STATE_TO_ASSESS
 
     @ddt.unpack
     @ddt.data(
         ('self', True, False, False),
-        ('training', False, True, False),
+        ('studentTraining', False, True, False),
         ('peer', False, False, True)
     )
     @scenario("data/basic_scenario.xml")
-    def test_assess(self, xblock, step, expect_self, expect_training, expect_peer):
-        with self.mock_workflow_status(step):
+    def test_assess(self, xblock, mfe_step, expect_self, expect_training, expect_peer):
+        workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+        with self.mock_workflow_status(workflow_step):
             with self.mock_assess_functions() as assess_mocks:
-                resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step=mfe_step)
         assert resp.status_code == 200
         assert assess_mocks.self.called == expect_self
         assert assess_mocks.training.called == expect_training
         assert assess_mocks.peer.called == expect_peer
 
     @ddt.data(None, 'cancelled', 'waiting', 'self', 'training', 'done')
     @scenario("data/basic_scenario.xml")
-    def test_continue_grading(self, xblock, step):
+    def test_peer_assess_when_not_in_peer(self, xblock, step):
         with self.mock_assess_functions() as assess_mocks:
             with self.mock_workflow_status(step):
-                with self.mock_continue_grading(True):
-                    resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step="peer")
         assert resp.status_code == 200
         assess_mocks.self.assert_not_called()
         assess_mocks.training.assert_not_called()
         assess_mocks.peer.assert_called()
 
-    @ddt.data('self', 'training', 'peer')
+    @ddt.data('self', 'learnerTraining', 'peer')
     @scenario("data/basic_scenario.xml")
-    def test_assess_error(self, xblock, step):
+    def test_assess_error(self, xblock, mfe_step):
         error = AssessmentError("there was a problem")
-        with self.mock_workflow_status(step):
-            with self.mock_assess_functions(**{step + '_kwargs': {'side_effect': error}}):
-                resp = self.request_assessment_submit(xblock)
+        workflow_step = MFE_STEP_TO_WORKFLOW_MAPPINGS[mfe_step]
+        with self.mock_workflow_status(workflow_step):
+            with self.mock_assess_functions(**{workflow_step + '_kwargs': {'side_effect': error}}):
+                resp = self.request_assessment_submit(xblock, step=mfe_step)
         assert_error_response(resp, 500, error_codes.INTERNAL_EXCEPTION, str(error))
 
     @scenario("data/basic_scenario.xml")
@@ -990,6 +995,6 @@ def test_training_assess_corrections(self, xblock):
         corrections = {'ferocity': 'sublime', 'element': 'hydrogen'}
         with self.mock_workflow_status('training'):
            with self.mock_assess_functions(training_kwargs={'return_value': corrections}):
-                resp = self.request_assessment_submit(xblock)
+                resp = self.request_assessment_submit(xblock, step='studentTraining')
         assert_error_response(resp, 400, error_codes.TRAINING_ANSWER_INCORRECT, corrections)
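Note: the tests in patch 1 import MFE_STEP_TO_WORKFLOW_MAPPINGS from mixin.py, but its
definition is not part of this diff. A minimal sketch of the mapping and of the new
request payload, inferred from the mfe_step / workflow_step names used above (the
mapping contents are an assumption, not something this patch series shows):

    # Sketch only: inferred from the step names the tests pass around; the real
    # definition lives in mixin.py and is not shown in this patch series.
    MFE_STEP_TO_WORKFLOW_MAPPINGS = {
        'peer': 'peer',
        'self': 'self',
        'studentTraining': 'training',
    }

    # Example body for the 'assessment' handler after this change; the explicit
    # 'step' field replaces the old 'continueGrading' flag.
    payload = {
        'optionsSelected': ['this is a list'],
        'criterionFeedback': {},
        'overallFeedback': '',
        'step': 'peer',
    }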
From 890b299b399ee747087568fbba24b16768e092d9 Mon Sep 17 00:00:00 2001
From: Jansen Kantor
Date: Thu, 7 Dec 2023 18:35:57 +0000
Subject: [PATCH 2/5] style: remove unused import

---
 openassessment/xblock/ui_mixins/mfe/assessment_serializers.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
index 529830054e..e7ed9aece0 100644
--- a/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
+++ b/openassessment/xblock/ui_mixins/mfe/assessment_serializers.py
@@ -9,7 +9,6 @@
     SerializerMethodField,
     URLField,
     Serializer,
-    BooleanField,
 )
 from openassessment.data import OraSubmissionAnswerFactory
 from openassessment.xblock.ui_mixins.mfe.serializer_utils import NullField

From 95c356daa43df960bc779a8ee27bdf15751100e0 Mon Sep 17 00:00:00 2001
From: Jansen Kantor
Date: Thu, 7 Dec 2023 18:39:16 +0000
Subject: [PATCH 3/5] test: typo

---
 openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
index bd2a5963ba..d997c1219c 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
@@ -980,7 +980,7 @@ def test_peer_assess_when_not_in_peer(self, xblock, step):
         assess_mocks.training.assert_not_called()
         assess_mocks.peer.assert_called()
 
-    @ddt.data('self', 'learnerTraining', 'peer')
+    @ddt.data('self', 'studentTraining', 'peer')
     @scenario("data/basic_scenario.xml")
     def test_assess_error(self, xblock, mfe_step):
From 4944d60a190f4bfdcf7a8b7da17a53e89f07380b Mon Sep 17 00:00:00 2001
From: jansenk
Date: Thu, 7 Dec 2023 14:46:18 -0500
Subject: [PATCH 4/5] test: fix test failures

---
 openassessment/xblock/ui_mixins/mfe/mixin.py          | 3 +--
 openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py | 9 ++++++++-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/openassessment/xblock/ui_mixins/mfe/mixin.py b/openassessment/xblock/ui_mixins/mfe/mixin.py
index b7aeaceaff..37b45bcf65 100644
--- a/openassessment/xblock/ui_mixins/mfe/mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/mixin.py
@@ -291,7 +291,6 @@ def _assessment_submit_handler(self, data):
             raise OraApiException(400, error_codes.INCORRECT_PARAMETERS, serializer.errors)
         assessment_data = serializer.to_legacy_format(self)
         requested_step = serializer.data['step']
-        peer_data = self.peer_assessment_data()
         try:
             if self.workflow_data.is_cancelled:
                 raise InvalidStateToAssess()
@@ -302,7 +301,7 @@ def _assessment_submit_handler(self, data):
                 assessment_data['criterion_feedback'],
                 self.config_data,
                 self.workflow_data,
-                peer_data,
+                self.peer_assessment_data(),
             )
         elif requested_step == 'self':
             self_assess(
diff --git a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
index d997c1219c..c8034cf76d 100644
--- a/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
+++ b/openassessment/xblock/ui_mixins/mfe/test_mfe_mixin.py
@@ -968,7 +968,7 @@ def test_assess(self, xblock, mfe_step, expect_self, expect_training, expect_pee
         assert assess_mocks.training.called == expect_training
         assert assess_mocks.peer.called == expect_peer
 
-    @ddt.data(None, 'cancelled', 'waiting', 'self', 'training', 'done')
+    @ddt.data(None, 'waiting', 'self', 'training', 'done')
     @scenario("data/basic_scenario.xml")
     def test_peer_assess_when_not_in_peer(self, xblock, step):
         with self.mock_assess_functions() as assess_mocks:
@@ -990,6 +990,13 @@ def test_assess_error(self, xblock, mfe_step):
             resp = self.request_assessment_submit(xblock, step=mfe_step)
         assert_error_response(resp, 500, error_codes.INTERNAL_EXCEPTION, str(error))
 
+    @scenario("data/basic_scenario.xml")
+    def test_cant_submit_when_cancelled(self, xblock):
+        with self.mock_workflow_status('cancelled'):
+            resp = self.request_assessment_submit(xblock, step="peer")
+        assert resp.status_code == 400
+        assert resp.json['error']['errorCode'] == error_codes.INVALID_STATE_TO_ASSESS
+
     @scenario("data/basic_scenario.xml")
     def test_training_assess_corrections(self, xblock):
         corrections = {'ferocity': 'sublime', 'element': 'hydrogen'}
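Note: with patches 1 and 4 combined, the submit handler no longer infers the target
step from workflow state; it checks the cancelled guard and then dispatches on the
client-supplied step. A self-contained sketch of that dispatch pattern (names and
signatures are illustrative, not the real handler's):

    # Illustrative only: the real handler calls peer_assess / self_assess /
    # training_assess with full argument lists and maps errors to OraApiException.
    class InvalidStateToAssess(Exception):
        """Submitting an assessment is not valid in the current workflow state."""

    def dispatch_assessment(requested_step, is_cancelled, handlers):
        # Cancelled submissions can never be assessed, regardless of step.
        if is_cancelled:
            raise InvalidStateToAssess()
        # Steps without a handler are treated as an invalid state as well.
        if requested_step not in handlers:
            raise InvalidStateToAssess()
        return handlers[requested_step]()

    # Usage, keyed by the MFE step names the tests exercise:
    dispatch_assessment('peer', False, {
        'peer': lambda: 'peer_assess',
        'self': lambda: 'self_assess',
        'studentTraining': lambda: 'training_assess',
    })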
From 7d38565c9c21e7320889ee72f1116d401fac0d9b Mon Sep 17 00:00:00 2001
From: jansenk
Date: Thu, 7 Dec 2023 14:51:20 -0500
Subject: [PATCH 5/5] chore: version

---
 openassessment/__init__.py | 2 +-
 package.json               | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/openassessment/__init__.py b/openassessment/__init__.py
index 9026118672..6ae6ec99d7 100644
--- a/openassessment/__init__.py
+++ b/openassessment/__init__.py
@@ -2,4 +2,4 @@
 Initialization Information for Open Assessment Module
 """
 
-__version__ = '6.0.11'
+__version__ = '6.0.12'
diff --git a/package.json b/package.json
index 7f89968c71..260dfe248d 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "edx-ora2",
-  "version": "6.0.11",
+  "version": "6.0.12",
   "repository": "https://github.com/openedx/edx-ora2.git",
   "dependencies": {
     "@edx/frontend-build": "8.0.6",