From dcf43162a679802c2291d1b8afa4fb6d8e608b9b Mon Sep 17 00:00:00 2001 From: Jeff Ericson Date: Wed, 4 Dec 2013 08:45:35 -0800 Subject: [PATCH 1/3] Added feature to act as timer between quiz attempts --- CHANGELOG.rst | 7 +++++ common/lib/xmodule/xmodule/capa_module.py | 34 +++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a62b0fb0091e..3a837cf22cf8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -118,6 +118,13 @@ client error are correctly passed through to the client. LMS: Improve performance of page load and thread list load for discussion tab +Studio: Added feature to allow instructors to specify wait time between attempts +of the same quiz. In a problem's settings, instructors can specify how many +seconds students are locked out of submitting another attempt of the same quiz. +The timer starts as soon as they submit an attempt for grading. Note that this +does not prevent a student from starting to work on another quiz attempt. It only +prevents students from submitting many attempts in rapid succession. + LMS: The wiki markup cheatsheet dialog is now accessible to screen readers. 
(LMS-1303) diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index b78e2a4a5019..e9c2ca9c0ffb 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -141,6 +141,11 @@ class CapaFields(object): student_answers = Dict(help="Dictionary with the current student responses", scope=Scope.user_state) done = Boolean(help="Whether the student has answered the problem", scope=Scope.user_state) seed = Integer(help="Random seed for this student", scope=Scope.user_state) + + last_submission_time = Date(help="Last submission time", scope=Scope.user_state) + submission_wait_seconds = Integer(display_name="Seconds Between Submissions", help="Seconds to wait between submissions", + scope=Scope.settings, default=0) + weight = Float( display_name="Problem Weight", help=("Defines the number of points each problem is worth. " @@ -303,6 +308,12 @@ def set_state_from_lcp(self): self.student_answers = lcp_state['student_answers'] self.seed = lcp_state['seed'] + def set_last_submission_time(self): + """ + Set the module's last submission time (when the problem was checked) + """ + self.last_submission_time = datetime.datetime.now(UTC()) + def get_score(self): """ Access the problem's score @@ -925,17 +936,28 @@ def check_problem(self, data): if self.lcp.is_queued(): current_time = datetime.datetime.now(UTC()) prev_submit_time = self.lcp.get_recentmost_queuetime() + waittime_between_requests = self.system.xqueue['waittime'] if (current_time - prev_submit_time).total_seconds() < waittime_between_requests: msg = u'You must wait at least {wait} seconds between submissions'.format( wait=waittime_between_requests) return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback + # Wait time between resets + current_time = datetime.datetime.now(UTC()) + if self.last_submission_time is not None: + if (current_time - self.last_submission_time).total_seconds() < 
self.submission_wait_seconds: + seconds_left = int(self.submission_wait_seconds - (current_time - self.last_submission_time).total_seconds()) + 1 + msg = u'You must wait at least {w} between submissions. {s} remaining.'.format( + w=self.pretty_print_seconds(self.submission_wait_seconds), s=self.pretty_print_seconds(seconds_left)) + return {'success': msg, 'html': ''} # Prompts a modal dialog in ajax callback + try: correct_map = self.lcp.grade_answers(answers) self.attempts = self.attempts + 1 self.lcp.done = True self.set_state_from_lcp() + self.set_last_submission_time() except (StudentInputError, ResponseError, LoncapaProblemError) as inst: log.warning("StudentInputError in capa_module:problem_check", @@ -994,6 +1016,18 @@ def check_problem(self, data): 'contents': html, } + def pretty_print_seconds(self, num_seconds): + """ + Returns time formatted nicely. + """ + if(num_seconds < 60): + plural = "s" if num_seconds > 1 else "" + return "%i second%s" % (num_seconds, plural) + elif(num_seconds < 60*60): + return "%i min, %i sec" % (int(num_seconds / 60), num_seconds % 60) + else: + return "%i hrs, %i min, %i sec" % (int(num_seconds / 3600), int((num_seconds % 3600) / 60), (num_seconds % 60)) + def rescore_problem(self): """ Checks whether the existing answers to a problem are correct. 
From 3f8c5593de6af32f81b82f4d519f73782215e954 Mon Sep 17 00:00:00 2001 From: Jeff Ericson Date: Wed, 4 Dec 2013 18:36:39 -0800 Subject: [PATCH 2/3] Added user setting specification to a file in hopes of passing tests now --- cms/djangoapps/contentstore/features/problem-editor.py | 2 ++ common/lib/xmodule/xmodule/capa_module.py | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/cms/djangoapps/contentstore/features/problem-editor.py b/cms/djangoapps/contentstore/features/problem-editor.py index 2265b5010e4c..6f4233ee57c5 100644 --- a/cms/djangoapps/contentstore/features/problem-editor.py +++ b/cms/djangoapps/contentstore/features/problem-editor.py @@ -14,6 +14,7 @@ PROBLEM_WEIGHT = "Problem Weight" RANDOMIZATION = 'Randomization' SHOW_ANSWER = "Show Answer" +TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts" @step('I have created a Blank Common Problem$') @@ -45,6 +46,7 @@ def i_see_advanced_settings_with_values(step): [PROBLEM_WEIGHT, "", False], [RANDOMIZATION, "Never", False], [SHOW_ANSWER, "Finished", False], + [TIMER_BETWEEN_ATTEMPTS, "0", False] ]) diff --git a/common/lib/xmodule/xmodule/capa_module.py b/common/lib/xmodule/xmodule/capa_module.py index e9c2ca9c0ffb..9e9b4fed3278 100644 --- a/common/lib/xmodule/xmodule/capa_module.py +++ b/common/lib/xmodule/xmodule/capa_module.py @@ -143,8 +143,11 @@ class CapaFields(object): seed = Integer(help="Random seed for this student", scope=Scope.user_state) last_submission_time = Date(help="Last submission time", scope=Scope.user_state) - submission_wait_seconds = Integer(display_name="Seconds Between Submissions", help="Seconds to wait between submissions", - scope=Scope.settings, default=0) + submission_wait_seconds = Integer( + display_name="Timer Between Attempts", + help="Seconds a student must wait between submissions for a problem with multiple attempts.", + scope=Scope.settings, + default=0) weight = Float( display_name="Problem Weight", From ba299b59ee09b302b942e79d6c7a32ac4c43f01e Mon 
Sep 17 00:00:00 2001 From: Jeff Ericson Date: Wed, 4 Dec 2013 19:08:06 -0800 Subject: [PATCH 3/3] Added test file to be filled in later --- .../tests/test_delay_between_attempts.py | 733 ++++++++++++++++++ 1 file changed, 733 insertions(+) create mode 100644 common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py diff --git a/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py new file mode 100644 index 000000000000..69079be5daff --- /dev/null +++ b/common/lib/xmodule/xmodule/tests/test_delay_between_attempts.py @@ -0,0 +1,733 @@ +""" +Tests the logic of problems with a delay between attempt submissions +""" + +import unittest +import textwrap +import datetime +import json +import random +import os +import textwrap +import unittest + +from mock import Mock, patch +import webob +from webob.multidict import MultiDict + +import xmodule +from xmodule.tests import DATA_DIR +from capa.responsetypes import (StudentInputError, LoncapaProblemError, + ResponseError) +from capa.xqueue_interface import XQueueInterface +from xmodule.capa_module import CapaModule, ComplexEncoder +from xmodule.modulestore import Location +from xblock.field_data import DictFieldData +from xblock.fields import ScopeIds + +from . import get_test_system +from pytz import UTC +from capa.correctmap import CorrectMap + + +class CapaFactory(object): + """ + A helper class to create problem modules with various parameters for testing. + """ + + sample_problem_xml = textwrap.dedent("""\ + + + +

What is pi, to two decimal places?

+
+ + + +
+ """) + + num = 0 + + @classmethod + def next_num(cls): + cls.num += 1 + return cls.num + + @classmethod + def input_key(cls, input_num=2): + """ + Return the input key to use when passing GET parameters + """ + return ("input_" + cls.answer_key(input_num)) + + @classmethod + def answer_key(cls, input_num=2): + """ + Return the key stored in the capa problem answer dict + """ + return ( + "%s_%d_1" % ( + "-".join(['i4x', 'edX', 'capa_test', 'problem', 'SampleProblem%d' % cls.num]), + input_num, + ) + ) + + @classmethod + def create(cls, + graceperiod=None, + due=None, + max_attempts=None, + showanswer=None, + rerandomize=None, + force_save_button=None, + attempts=None, + problem_state=None, + correct=False, + done=None, + text_customization=None + ): + """ + All parameters are optional, and are added to the created problem if specified. + + Arguments: + graceperiod: + due: + max_attempts: + showanswer: + force_save_button: + rerandomize: all strings, as specified in the policy for the problem + + problem_state: a dict to to be serialized into the instance_state of the + module. + + attempts: also added to instance state. Will be converted to an int. 
+ """ + location = Location(["i4x", "edX", "capa_test", "problem", + "SampleProblem{0}".format(cls.next_num())]) + field_data = {'data': cls.sample_problem_xml} + + if graceperiod is not None: + field_data['graceperiod'] = graceperiod + if due is not None: + field_data['due'] = due + if max_attempts is not None: + field_data['max_attempts'] = max_attempts + if showanswer is not None: + field_data['showanswer'] = showanswer + if force_save_button is not None: + field_data['force_save_button'] = force_save_button + if rerandomize is not None: + field_data['rerandomize'] = rerandomize + if done is not None: + field_data['done'] = done + if text_customization is not None: + field_data['text_customization'] = text_customization + + descriptor = Mock(weight="1") + if problem_state is not None: + field_data.update(problem_state) + if attempts is not None: + # converting to int here because I keep putting "0" and "1" in the tests + # since everything else is a string. + field_data['attempts'] = int(attempts) + + system = get_test_system() + system.render_template = Mock(return_value="
Test Template HTML
") + module = CapaModule( + descriptor, + system, + DictFieldData(field_data), + ScopeIds(None, None, location, location), + ) + + if correct: + # TODO: probably better to actually set the internal state properly, but... + module.get_score = lambda: {'score': 1, 'total': 1} + else: + module.get_score = lambda: {'score': 0, 'total': 1} + + return module + +class XModuleQuizAttemptsDelayTest(unittest.TestCase): + ''' + Testing class + ''' + + def setUp(self): + now = datetime.datetime.now(UTC) + day_delta = datetime.timedelta(days=1) + self.yesterday_str = str(now - day_delta) + self.today_str = str(now) + self.tomorrow_str = str(now + day_delta) + + # in the capa grace period format, not in time delta format + self.two_day_delta_str = "2 days" + + def test_check_problem_resubmitted_with_randomize(self): + rerandomize_values = ['always', 'true'] + + for rerandomize in rerandomize_values: + # Randomize turned on + module = CapaFactory.create(rerandomize=rerandomize, attempts=0) + + # Simulate that the problem is completed + module.done = True + + # Expect that we cannot submit + with self.assertRaises(xmodule.exceptions.NotFoundError): + get_request_dict = {CapaFactory.input_key(): '3.14'} + module.check_problem(get_request_dict) + + # Expect that number of attempts NOT incremented + self.assertEqual(module.attempts, 0) + + # def test_reset_problem(self): + # module = CapaFactory.create(done=True) + # module.new_lcp = Mock(wraps=module.new_lcp) + # module.choose_new_seed = Mock(wraps=module.choose_new_seed) + + # # Stub out HTML rendering + # with patch('xmodule.capa_module.CapaModule.get_problem_html') as mock_html: + # mock_html.return_value = "
Test HTML
" + + # # Reset the problem + # get_request_dict = {} + # result = module.reset_problem(get_request_dict) + + # # Expect that the request was successful + # self.assertTrue('success' in result and result['success']) + + # # Expect that the problem HTML is retrieved + # self.assertTrue('html' in result) + # self.assertEqual(result['html'], "
Test HTML
") + + # # Expect that the problem was reset + # module.new_lcp.assert_called_once_with(None) + + # def test_targeted_feedback_not_finished(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 2nd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + #
+ + # + #
+ #

Explanation

+ #

This is the solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertRegexpMatches(without_new_lines, r"
.*'wrong-1'.*'wrong-2'.*'correct-1'.*'wrong-3'.*
") + # self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedback3|feedbackC") + + # def test_targeted_feedback_student_answer1(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 2nd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + #
+ + # + #
+ #

Explanation

+ #

This is the solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + # problem.done = True + # problem.student_answers = {'1_2_1': 'choice_3'} + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertRegexpMatches(without_new_lines, r".*3rd WRONG solution") + # self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback2|feedbackC") + + # def test_targeted_feedback_student_answer2(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 2nd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + #
+ + # + #
+ #

Explanation

+ #

This is the solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + # problem.done = True + # problem.student_answers = {'1_2_1': 'choice_0'} + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # self.assertRegexpMatches(without_new_lines, r"
\{.*'1_solution_1'.*\}
") + # self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC") + + # def test_targeted_feedback_show_solution_explanation(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 2nd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + #
+ + # + #
+ #

Explanation

+ #

This is the solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + # problem.done = True + # problem.student_answers = {'1_2_1': 'choice_0'} + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}") + # self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3") + + # def test_targeted_feedback_no_show_solution_explanation(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 2nd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + #
+ + # + #
+ #

Explanation

+ #

This is the solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + # problem.done = True + # problem.student_answers = {'1_2_1': 'choice_0'} + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # self.assertNotRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}") + # self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3|feedbackC") + + # def test_targeted_feedback_with_solutionset_explanation(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # correct-2 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 2nd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on the other solution...

+ #
+ #
+ + #
+ + # + # + #
+ #

Explanation

+ #

This is the other solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + # problem.done = True + # problem.student_answers = {'1_2_1': 'choice_0'} + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertRegexpMatches(without_new_lines, r".*1st WRONG solution") + # self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}") + # self.assertNotRegexpMatches(without_new_lines, r"feedback2|feedback3") + + # def test_targeted_feedback_no_feedback_for_selected_choice1(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + #
+ + # + # + #
+ #

Explanation

+ #

This is the solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + # problem.done = True + # problem.student_answers = {'1_2_1': 'choice_1'} + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}") + # self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3") + + # def test_targeted_feedback_no_feedback_for_selected_choice2(self): + # xml_str = textwrap.dedent(""" + # + #

What is the correct answer?

+ # + # + # wrong-1 + # wrong-2 + # correct-1 + # wrong-3 + # + # + + # + # + #
+ #

Targeted Feedback

+ #

This is the 1st WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

This is the 3rd WRONG solution

+ #
+ #
+ + # + #
+ #

Targeted Feedback

+ #

Feedback on your correct solution...

+ #
+ #
+ + #
+ + # + # + #
+ #

Explanation

+ #

This is the solution explanation

+ #

Not much to explain here, sorry!

+ #
+ #
+ #
+ #
+ + # """) + + # problem = new_loncapa_problem(xml_str) + # problem.done = True + # problem.student_answers = {'1_2_1': 'choice_1'} + + # the_html = problem.get_html() + # without_new_lines = the_html.replace("\n", "") + + # self.assertNotRegexpMatches(without_new_lines, r"\{.*'1_solution_1'.*\}") + # self.assertNotRegexpMatches(without_new_lines, r"feedback1|feedback3|feedbackC") \ No newline at end of file