diff --git a/common/lib/capa/capa/responsetypes.py b/common/lib/capa/capa/responsetypes.py
index 731230ecc1fd..b53f38fd9018 100644
--- a/common/lib/capa/capa/responsetypes.py
+++ b/common/lib/capa/capa/responsetypes.py
@@ -915,7 +915,26 @@ def get_score(self, student_answers):
else:
return CorrectMap(self.answer_id, 'incorrect')
- # TODO: add check_hint_condition(self, hxml_set, student_answers)
+ def compare_answer(self, ans1, ans2):
+ """
+ Outside-facing function that lets us compare two numerical answers,
+ with this problem's tolerance.
+ """
+ return compare_with_tolerance(
+ evaluator({}, {}, ans1),
+ evaluator({}, {}, ans2),
+ self.tolerance
+ )
+
+ def validate_answer(self, answer):
+ """
+ Returns whether this answer is in a valid form.
+ """
+ try:
+ evaluator(dict(), dict(), answer)
+ return True
+ except (StudentInputError, UndefinedVariable):
+ return False
def get_answers(self):
return {self.answer_id: self.correct_answer}
@@ -1778,46 +1797,24 @@ def get_score(self, student_answers):
self.correct_answer, given, self.samples)
return CorrectMap(self.answer_id, correctness)
- def check_formula(self, expected, given, samples):
- variables = samples.split('@')[0].split(',')
- numsamples = int(samples.split('@')[1].split('#')[1])
- sranges = zip(*map(lambda x: map(float, x.split(",")),
- samples.split('@')[1].split('#')[0].split(':')))
-
- ranges = dict(zip(variables, sranges))
- for _ in range(numsamples):
- instructor_variables = self.strip_dict(dict(self.context))
- student_variables = {}
- # ranges give numerical ranges for testing
- for var in ranges:
- # TODO: allow specified ranges (i.e. integers and complex numbers) for random variables
- value = random.uniform(*ranges[var])
- instructor_variables[str(var)] = value
- student_variables[str(var)] = value
- # log.debug('formula: instructor_vars=%s, expected=%s' %
- # (instructor_variables,expected))
-
- # Call `evaluator` on the instructor's answer and get a number
- instructor_result = evaluator(
- instructor_variables, {},
- expected, case_sensitive=self.case_sensitive
- )
+ def tupleize_answers(self, answer, var_dict_list):
+ """
+ Takes in an answer and a list of dictionaries mapping variables to values.
+ Each dictionary represents a test case for the answer.
+ Returns a tuple of formula evaluation results.
+ """
+ out = []
+ for var_dict in var_dict_list:
try:
- # log.debug('formula: student_vars=%s, given=%s' %
- # (student_variables,given))
-
- # Call `evaluator` on the student's answer; look for exceptions
- student_result = evaluator(
- student_variables,
- {},
- given,
- case_sensitive=self.case_sensitive
- )
+ out.append(evaluator(
+ var_dict,
+ dict(),
+ answer,
+ case_sensitive=self.case_sensitive,
+ ))
except UndefinedVariable as uv:
log.debug(
- 'formularesponse: undefined variable in given=%s',
- given
- )
+ 'formularesponse: undefined variable in formula=%s', answer)
raise StudentInputError(
"Invalid input: " + uv.message + " not permitted in answer"
)
@@ -1840,17 +1837,70 @@ def check_formula(self, expected, given, samples):
# If non-factorial related ValueError thrown, handle it the same as any other Exception
log.debug('formularesponse: error {0} in formula'.format(ve))
raise StudentInputError("Invalid input: Could not parse '%s' as a formula" %
- cgi.escape(given))
+ cgi.escape(answer))
except Exception as err:
# traceback.print_exc()
log.debug('formularesponse: error %s in formula', err)
raise StudentInputError("Invalid input: Could not parse '%s' as a formula" %
- cgi.escape(given))
+ cgi.escape(answer))
+ return out
- # No errors in student's response--actually test for correctness
- if not compare_with_tolerance(student_result, instructor_result, self.tolerance):
- return "incorrect"
- return "correct"
+ def randomize_variables(self, samples):
+ """
+ Returns a list of dictionaries mapping variables to random values in range,
+ as expected by tupleize_answers.
+ """
+ variables = samples.split('@')[0].split(',')
+ numsamples = int(samples.split('@')[1].split('#')[1])
+ sranges = zip(*map(lambda x: map(float, x.split(",")),
+ samples.split('@')[1].split('#')[0].split(':')))
+ ranges = dict(zip(variables, sranges))
+
+ out = []
+ for _ in range(numsamples):
+ var_dict = {}
+ # ranges give numerical ranges for testing
+ for var in ranges:
+ # TODO: allow specified ranges (i.e. integers and complex numbers) for random variables
+ value = random.uniform(*ranges[var])
+ var_dict[str(var)] = value
+ out.append(var_dict)
+ return out
+
+ def check_formula(self, expected, given, samples):
+ """
+ Given an expected answer string, a given (student-produced) answer
+ string, and a samples string, return whether the given answer is
+ "correct" or "incorrect".
+ """
+ var_dict_list = self.randomize_variables(samples)
+ student_result = self.tupleize_answers(given, var_dict_list)
+ instructor_result = self.tupleize_answers(expected, var_dict_list)
+
+ correct = all(compare_with_tolerance(student, instructor, self.tolerance)
+ for student, instructor in zip(student_result, instructor_result))
+ if correct:
+ return "correct"
+ else:
+ return "incorrect"
+
+ def compare_answer(self, ans1, ans2):
+ """
+ An external interface for comparing whether ans1 and ans2 are equal.
+ """
+ internal_result = self.check_formula(ans1, ans2, self.samples)
+ return internal_result == "correct"
+
+ def validate_answer(self, answer):
+ """
+ Returns whether this answer is in a valid form.
+ """
+ var_dict_list = self.randomize_variables(self.samples)
+ try:
+ self.tupleize_answers(answer, var_dict_list)
+ return True
+ except StudentInputError:
+ return False
def strip_dict(self, d):
''' Takes a dict. Returns an identical dict, with all non-word
diff --git a/common/lib/capa/capa/tests/test_responsetypes.py b/common/lib/capa/capa/tests/test_responsetypes.py
index fd056f884e5f..84c52669ea0b 100644
--- a/common/lib/capa/capa/tests/test_responsetypes.py
+++ b/common/lib/capa/capa/tests/test_responsetypes.py
@@ -496,6 +496,20 @@ def test_raises_zero_division_err(self):
input_dict = {'1_2_1': '1/0'}
self.assertRaises(StudentInputError, problem.grade_answers, input_dict)
+ def test_validate_answer(self):
+ """
+ Makes sure that validate_answer works.
+ """
+ sample_dict = {'x': (1, 2)}
+ problem = self.build_problem(
+ sample_dict=sample_dict,
+ num_samples=10,
+ tolerance="1%",
+ answer="x"
+ )
+ self.assertTrue(problem.responders.values()[0].validate_answer('14*x'))
+ self.assertFalse(problem.responders.values()[0].validate_answer('3*y+2*x'))
+
class StringResponseTest(ResponseTest):
from capa.tests.response_xml_factory import StringResponseXMLFactory
@@ -915,6 +929,20 @@ def evaluator_side_effect(_, __, math_string):
with self.assertRaisesRegexp(StudentInputError, msg_regex):
problem.grade_answers({'1_2_1': 'foobar'})
+ def test_compare_answer(self):
+ """Tests the answer compare function."""
+ problem = self.build_problem(answer="42")
+ responder = problem.responders.values()[0]
+ self.assertTrue(responder.compare_answer('48', '8*6'))
+ self.assertFalse(responder.compare_answer('48', '9*5'))
+
+ def test_validate_answer(self):
+ """Tests the answer validation function."""
+ problem = self.build_problem(answer="42")
+ responder = problem.responders.values()[0]
+ self.assertTrue(responder.validate_answer('23.5'))
+ self.assertFalse(responder.validate_answer('fish'))
+
class CustomResponseTest(ResponseTest):
from capa.tests.response_xml_factory import CustomResponseXMLFactory
diff --git a/common/lib/xmodule/xmodule/crowdsource_hinter.py b/common/lib/xmodule/xmodule/crowdsource_hinter.py
index 7c7f94a26bb9..7e538efa24fe 100644
--- a/common/lib/xmodule/xmodule/crowdsource_hinter.py
+++ b/common/lib/xmodule/xmodule/crowdsource_hinter.py
@@ -7,15 +7,18 @@
import logging
import json
import random
+import copy
from pkg_resources import resource_string
from lxml import etree
from xmodule.x_module import XModule
-from xmodule.xml_module import XmlDescriptor
+from xmodule.raw_module import RawDescriptor
from xblock.core import Scope, String, Integer, Boolean, Dict, List
+from capa.responsetypes import FormulaResponse
+
from django.utils.html import escape
log = logging.getLogger(__name__)
@@ -37,10 +40,15 @@ class CrowdsourceHinterFields(object):
mod_queue = Dict(help='A dictionary containing hints still awaiting approval', scope=Scope.content,
default={})
hint_pk = Integer(help='Used to index hints.', scope=Scope.content, default=0)
- # A list of previous answers this student made to this problem.
- # Of the form [answer, [hint_pk_1, hint_pk_2, hint_pk_3]] for each problem. hint_pk's are
- # None if the hint was not given.
- previous_answers = List(help='A list of previous submissions.', scope=Scope.user_state, default=[])
+
+ # A list of previous hints that a student viewed.
+ # Of the form [answer, [hint_pk_1, ...]] for each problem.
+ # Sorry about the variable name - I know it's confusing.
+ previous_answers = List(help='A list of hints viewed.', scope=Scope.user_state, default=[])
+
+ # user_submissions actually contains a list of previous answers submitted.
+ # (Originally, previous_answers did this job, hence the name confusion.)
+ user_submissions = List(help='A list of previous submissions', scope=Scope.user_state, default=[])
user_voted = Boolean(help='Specifies if the user has voted on this problem or not.',
scope=Scope.user_state, default=False)
@@ -68,6 +76,26 @@ class CrowdsourceHinterModule(CrowdsourceHinterFields, XModule):
def __init__(self, *args, **kwargs):
XModule.__init__(self, *args, **kwargs)
+ # We need to know whether we are working with a FormulaResponse problem.
+ try:
+ responder = self.get_display_items()[0].lcp.responders.values()[0]
+ except (IndexError, AttributeError):
+ log.exception('Unable to find a capa problem child.')
+ return
+
+ self.is_formula = isinstance(responder, FormulaResponse)
+ if self.is_formula:
+ self.answer_to_str = self.formula_answer_to_str
+ else:
+ self.answer_to_str = self.numerical_answer_to_str
+ # compare_answer is expected to return whether its two inputs are close enough
+ # to be equal, or raise a StudentInputError if one of the inputs is malformatted.
+ if hasattr(responder, 'compare_answer') and hasattr(responder, 'validate_answer'):
+ self.compare_answer = responder.compare_answer
+ self.validate_answer = responder.validate_answer
+ else:
+ # This response type is not supported!
+ log.exception('Response type not supported for hinting: ' + str(responder))
def get_html(self):
"""
@@ -98,14 +126,29 @@ def get_html(self):
return out
- def capa_answer_to_str(self, answer):
+ def numerical_answer_to_str(self, answer):
"""
- Converts capa answer format to a string representation
+ Converts capa numerical answer format to a string representation
of the answer.
-Lon-capa dependent.
-Assumes that the problem only has one part.
"""
- return str(float(answer.values()[0]))
+ return str(answer.values()[0])
+
+ def formula_answer_to_str(self, answer):
+ """
+ Converts capa formula answer into a string.
+ -Lon-capa dependent.
+ -Assumes that the problem only has one part.
+ """
+ return str(answer.values()[0])
+
+ def get_matching_answers(self, answer):
+ """
+ Look in self.hints, and find all answer keys that are "equal with tolerance"
+ to the input answer.
+ """
+ return [key for key in self.hints if self.compare_answer(key, answer)]
def handle_ajax(self, dispatch, data):
"""
@@ -124,6 +167,9 @@ def handle_ajax(self, dispatch, data):
if out is None:
out = {'op': 'empty'}
+ elif 'error' in out:
+ # Error in processing.
+ out.update({'op': 'error'})
else:
out.update({'op': dispatch})
return json.dumps({'contents': self.system.render_template('hinter_display.html', out)})
@@ -134,51 +180,67 @@ def get_hint(self, data):
Called by hinter javascript after a problem is graded as incorrect.
Args:
- `data` -- must be interpretable by capa_answer_to_str.
+ `data` -- must be interpretable by answer_to_str.
Output keys:
- - 'best_hint' is the hint text with the most votes.
- - 'rand_hint_1' and 'rand_hint_2' are two random hints to the answer in `data`.
+ - 'hints' is a list of hint strings to show to the user.
- 'answer' is the parsed answer that was submitted.
+ Will record the user's wrong answer in user_submissions, and the hints shown
+ in previous_answers.
"""
+ # First, validate our inputs.
try:
- answer = self.capa_answer_to_str(data)
- except ValueError:
+ answer = self.answer_to_str(data)
+ except (ValueError, AttributeError):
# Sometimes, we get an answer that's just not parsable. Do nothing.
log.exception('Answer not parsable: ' + str(data))
return
- # Look for a hint to give.
- # Make a local copy of self.hints - this means we only need to do one json unpacking.
- # (This is because xblocks storage makes the following command a deep copy.)
- local_hints = self.hints
- if (answer not in local_hints) or (len(local_hints[answer]) == 0):
+ if not self.validate_answer(answer):
+ # Answer is not in the right form.
+ log.exception('Answer not valid: ' + str(answer))
+ return
+ if answer not in self.user_submissions:
+ self.user_submissions += [answer]
+
+ # For all answers similar enough to our own, accumulate all hints together.
+ # Also track the original answer of each hint.
+ matching_answers = self.get_matching_answers(answer)
+ matching_hints = {}
+ for matching_answer in matching_answers:
+ temp_dict = copy.deepcopy(self.hints[matching_answer])
+ for key, value in temp_dict.items():
+ # Each value now has hint, votes, matching_answer.
+ temp_dict[key] = value + [matching_answer]
+ matching_hints.update(temp_dict)
+ # matching_hints now maps pk's to lists of [hint, votes, matching_answer]
+
+ # Finally, randomly choose a subset of matching_hints to actually show.
+ if not matching_hints:
# No hints to give. Return.
- self.previous_answers += [[answer, [None, None, None]]]
return
# Get the top hint, plus two random hints.
- n_hints = len(local_hints[answer])
- best_hint_index = max(local_hints[answer], key=lambda key: local_hints[answer][key][1])
- best_hint = local_hints[answer][best_hint_index][0]
- if len(local_hints[answer]) == 1:
- rand_hint_1 = ''
- rand_hint_2 = ''
- self.previous_answers += [[answer, [best_hint_index, None, None]]]
- elif n_hints == 2:
- best_hint = local_hints[answer].values()[0][0]
- best_hint_index = local_hints[answer].keys()[0]
- rand_hint_1 = local_hints[answer].values()[1][0]
- hint_index_1 = local_hints[answer].keys()[1]
- rand_hint_2 = ''
- self.previous_answers += [[answer, [best_hint_index, hint_index_1, None]]]
- else:
- (hint_index_1, rand_hint_1), (hint_index_2, rand_hint_2) =\
- random.sample(local_hints[answer].items(), 2)
- rand_hint_1 = rand_hint_1[0]
- rand_hint_2 = rand_hint_2[0]
- self.previous_answers += [[answer, [best_hint_index, hint_index_1, hint_index_2]]]
-
- return {'best_hint': best_hint,
- 'rand_hint_1': rand_hint_1,
- 'rand_hint_2': rand_hint_2,
+ n_hints = len(matching_hints)
+ hints = []
+ # max(dict) returns the maximum key in dict.
+ # The key function takes each pk, and returns the number of votes for the
+ # hint with that pk.
+ best_hint_index = max(matching_hints, key=lambda pk: matching_hints[pk][1])
+ hints.append(matching_hints[best_hint_index][0])
+ best_hint_answer = matching_hints[best_hint_index][2]
+ # The brackets surrounding the index are for backwards compatibility purposes.
+ # (It used to be that each answer was paired with multiple hints in a list.)
+ self.previous_answers += [[best_hint_answer, [best_hint_index]]]
+ for _ in xrange(min(2, n_hints - 1)):
+ # Keep making random hints until we hit a target, or run out.
+ while True:
+ # random.choice randomly chooses an element from its input list.
+ # (We then unpack the item, in this case data for a hint.)
+ (hint_index, (rand_hint, _, hint_answer)) =\
+ random.choice(matching_hints.items())
+ if rand_hint not in hints:
+ break
+ hints.append(rand_hint)
+ self.previous_answers += [[hint_answer, [hint_index]]]
+ return {'hints': hints,
'answer': answer}
def get_feedback(self, data):
@@ -188,38 +250,37 @@ def get_feedback(self, data):
Args:
`data` -- not actually used. (It is assumed that the answer is correct.)
Output keys:
- - 'index_to_hints' maps previous answer indices to hints that the user saw earlier.
- - 'index_to_answer' maps previous answer indices to the actual answer submitted.
+ - 'answer_to_hints': a nested dictionary.
+ answer_to_hints[answer][hint_pk] returns the text of the hint.
+ - 'user_submissions': the same thing as self.user_submissions. A list of
+ the answers that the user previously submitted.
"""
# The student got it right.
# Did he submit at least one wrong answer?
- if len(self.previous_answers) == 0:
+ if len(self.user_submissions) == 0:
# No. Nothing to do here.
return
# Make a hint-voting interface for each wrong answer. The student will only
# be allowed to make one vote / submission, but he can choose which wrong answer
# he wants to look at.
- # index_to_hints[previous answer #] = [(hint text, hint pk), + ]
- index_to_hints = {}
- # index_to_answer[previous answer #] = answer text
- index_to_answer = {}
+ answer_to_hints = {} # answer_to_hints[answer text][hint pk] -> hint text
# Go through each previous answer, and populate index_to_hints and index_to_answer.
for i in xrange(len(self.previous_answers)):
answer, hints_offered = self.previous_answers[i]
- index_to_hints[i] = []
- index_to_answer[i] = answer
+ if answer not in answer_to_hints:
+ answer_to_hints[answer] = {}
if answer in self.hints:
# Go through each hint, and add to index_to_hints
for hint_id in hints_offered:
- if hint_id is not None:
+ if (hint_id is not None) and (hint_id not in answer_to_hints[answer]):
try:
- index_to_hints[i].append((self.hints[answer][str(hint_id)][0], hint_id))
+ answer_to_hints[answer][hint_id] = self.hints[answer][str(hint_id)][0]
except KeyError:
# Sometimes, the hint that a user saw will have been deleted by the instructor.
continue
-
- return {'index_to_hints': index_to_hints, 'index_to_answer': index_to_answer}
+ return {'answer_to_hints': answer_to_hints,
+ 'user_submissions': self.user_submissions}
def tally_vote(self, data):
"""
@@ -227,31 +288,51 @@ def tally_vote(self, data):
Args:
`data` -- expected to have the following keys:
- 'answer': ans_no (index in previous_answers)
+ 'answer': text of answer we're voting on
'hint': hint_pk
+ 'pk_list': A list of [answer, pk] pairs, each of which representing a hint.
+ We will return a list of how many votes each hint in the list has so far.
+ It's up to the browser to specify which hints to return vote counts for.
+
Returns key 'hint_and_votes', a list of (hint_text, #votes) pairs.
"""
if self.user_voted:
- return {}
- ans_no = int(data['answer'])
- hint_no = str(data['hint'])
- answer = self.previous_answers[ans_no][0]
+ return {'error': 'Sorry, but you have already voted!'}
+ ans = data['answer']
+ if not self.validate_answer(ans):
+ # Uh oh. Invalid answer.
+ log.exception('Failure in hinter tally_vote: Unable to parse answer: {ans}'.format(ans=ans))
+ return {'error': 'Failure in voting!'}
+ hint_pk = str(data['hint'])
# We use temp_dict because we need to do a direct write for the database to update.
temp_dict = self.hints
- temp_dict[answer][hint_no][1] += 1
+ try:
+ temp_dict[ans][hint_pk][1] += 1
+ except KeyError:
+ log.exception('''Failure in hinter tally_vote: User voted for non-existant hint:
+ Answer={ans} pk={hint_pk}'''.format(ans=ans, hint_pk=hint_pk))
+ return {'error': 'Failure in voting!'}
self.hints = temp_dict
# Don't let the user vote again!
self.user_voted = True
# Return a list of how many votes each hint got.
+ pk_list = json.loads(data['pk_list'])
hint_and_votes = []
- for hint_no in self.previous_answers[ans_no][1]:
- if hint_no is None:
+ for answer, vote_pk in pk_list:
+ if not self.validate_answer(answer):
+ log.exception('In hinter tally_vote, couldn\'t parse {ans}'.format(ans=answer))
continue
- hint_and_votes.append(temp_dict[answer][str(hint_no)])
+ try:
+ hint_and_votes.append(temp_dict[answer][str(vote_pk)])
+ except KeyError:
+ log.exception('In hinter tally_vote, couldn\'t find: {ans}, {vote_pk}'.format(
+ ans=answer, vote_pk=str(vote_pk)))
- # Reset self.previous_answers.
+ hint_and_votes.sort(key=lambda pair: pair[1], reverse=True)
+ # Reset self.previous_answers and user_submissions.
self.previous_answers = []
+ self.user_submissions = []
return {'hint_and_votes': hint_and_votes}
def submit_hint(self, data):
@@ -260,13 +341,17 @@ def submit_hint(self, data):
Args:
`data` -- expected to have the following keys:
- 'answer': answer index in previous_answers
+ 'answer': text of answer
'hint': text of the new hint that the user is adding
Returns a thank-you message.
"""
# Do html escaping. Perhaps in the future do profanity filtering, etc. as well.
hint = escape(data['hint'])
- answer = self.previous_answers[int(data['answer'])][0]
+ answer = data['answer']
+ if not self.validate_answer(answer):
+ log.exception('Failure in hinter submit_hint: Unable to parse answer: {ans}'.format(
+ ans=answer))
+ return {'error': 'Could not submit answer'}
# Only allow a student to vote or submit a hint once.
if self.user_voted:
return {'message': 'Sorry, but you have already voted!'}
@@ -277,9 +362,9 @@ def submit_hint(self, data):
else:
temp_dict = self.hints
if answer in temp_dict:
- temp_dict[answer][self.hint_pk] = [hint, 1] # With one vote (the user himself).
+ temp_dict[answer][str(self.hint_pk)] = [hint, 1] # With one vote (the user himself).
else:
- temp_dict[answer] = {self.hint_pk: [hint, 1]}
+ temp_dict[answer] = {str(self.hint_pk): [hint, 1]}
self.hint_pk += 1
if self.moderate == 'True':
self.mod_queue = temp_dict
@@ -288,10 +373,11 @@ def submit_hint(self, data):
# Mark the user has having voted; reset previous_answers
self.user_voted = True
self.previous_answers = []
+ self.user_submissions = []
return {'message': 'Thank you for your hint!'}
-class CrowdsourceHinterDescriptor(CrowdsourceHinterFields, XmlDescriptor):
+class CrowdsourceHinterDescriptor(CrowdsourceHinterFields, RawDescriptor):
module_class = CrowdsourceHinterModule
stores_state = True
diff --git a/common/lib/xmodule/xmodule/css/crowdsource_hinter/display.scss b/common/lib/xmodule/xmodule/css/crowdsource_hinter/display.scss
index c1d7b1f04895..dab94ed2f7e5 100644
--- a/common/lib/xmodule/xmodule/css/crowdsource_hinter/display.scss
+++ b/common/lib/xmodule/xmodule/css/crowdsource_hinter/display.scss
@@ -7,52 +7,6 @@
background: rgb(253, 248, 235);
}
-#answer-tabs {
- background: #FFFFFF;
- border: none;
- margin-bottom: 20px;
- padding-bottom: 20px;
-}
-
-#answer-tabs .ui-widget-header {
- border-bottom: 1px solid #DCDCDC;
- background: #FDF8EB;
-}
-
-#answer-tabs .ui-tabs-nav .ui-state-default {
- border: 1px solid #DCDCDC;
- background: #E6E6E3;
- margin-bottom: 0px;
-}
-
-#answer-tabs .ui-tabs-nav .ui-state-default:hover {
- background: #FFFFFF;
-}
-
-#answer-tabs .ui-tabs-nav .ui-state-active:hover {
- background: #FFFFFF;
-}
-
-#answer-tabs .ui-tabs-nav .ui-state-active {
- border: 1px solid #DCDCDC;
- background: #FFFFFF;
-}
-
-#answer-tabs .ui-tabs-nav .ui-state-active a {
- color: #222222;
- background: #FFFFFF;
-}
-
-#answer-tabs .ui-tabs-nav .ui-state-default a:hover {
- color: #222222;
- background: #FFFFFF;
-}
-
-#answer-tabs .custom-hint {
- height: 100px;
- width: 100%;
-}
-
.hint-inner-container {
padding-left: 15px;
padding-right: 15px;
@@ -63,3 +17,24 @@
padding-top: 0px !important;
padding-bottom: 0px !important;
}
+
+.wizard-view {
+ float: left;
+ width: 790px;
+ margin-right: 10px;
+}
+
+.wizard-container {
+ width: 3000px;
+
+ -webkit-transition:all 1.0s ease-in-out;
+ -moz-transition:all 1.0s ease-in-out;
+ -o-transition:all 1.0s ease-in-out;
+ transition:all 1.0s ease-in-out;
+}
+
+.wizard-viewbox {
+ width: 800px;
+ overflow: hidden;
+ position: relative;
+}
diff --git a/common/lib/xmodule/xmodule/js/fixtures/crowdsource_hinter.html b/common/lib/xmodule/xmodule/js/fixtures/crowdsource_hinter.html
new file mode 100644
index 000000000000..02122017bdf2
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/fixtures/crowdsource_hinter.html
@@ -0,0 +1,52 @@
+
+
+
+
+
+
+
+
+
+
+
+ Numerical Input
+
+
+ (1/1 points)
+
+
+
The answer is 2*x^2*y + 5
+
Answer =
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee
index 33d74e233571..111174a0b49c 100644
--- a/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee
+++ b/common/lib/xmodule/xmodule/js/spec/capa/display_spec.coffee
@@ -125,9 +125,10 @@ describe 'Problem', ->
expect(@problem.bind).toHaveBeenCalled()
describe 'check_fd', ->
- xit 'should have specs written for this functionality', ->
+ xit 'should have more specs written for this functionality', ->
expect(false)
+
describe 'check', ->
beforeEach ->
@problem = new Problem($('.xmodule_display'))
@@ -137,6 +138,15 @@ describe 'Problem', ->
@problem.check()
expect(Logger.log).toHaveBeenCalledWith 'problem_check', 'foo=1&bar=2'
+ it 'log the problem_graded event, after the problem is done grading.', ->
+ spyOn($, 'postWithPrefix').andCallFake (url, answers, callback) ->
+ response =
+ success: 'correct'
+ contents: 'mock grader response'
+ callback(response)
+ @problem.check()
+ expect(Logger.log).toHaveBeenCalledWith 'problem_graded', ['foo=1&bar=2', 'mock grader response'], @problem.url
+
it 'submit the answer for check', ->
spyOn $, 'postWithPrefix'
@problem.check()
diff --git a/common/lib/xmodule/xmodule/js/spec/crowdsource_hinter/display_spec.coffee b/common/lib/xmodule/xmodule/js/spec/crowdsource_hinter/display_spec.coffee
new file mode 100644
index 000000000000..917741f8af48
--- /dev/null
+++ b/common/lib/xmodule/xmodule/js/spec/crowdsource_hinter/display_spec.coffee
@@ -0,0 +1,54 @@
+describe 'Crowdsourced hinter', ->
+ beforeEach ->
+ window.update_schematics = ->
+ jasmine.stubRequests()
+ # note that the fixturesPath is set in spec/helper.coffee
+ loadFixtures 'crowdsource_hinter.html'
+ @hinter = new Hinter($('#hinter-root'))
+
+ describe 'high-level integration tests', ->
+ # High-level, happy-path tests for integration with capa problems.
+ beforeEach ->
+ # Make a more thorough $.postWithPrefix mock.
+ spyOn($, 'postWithPrefix').andCallFake( ->
+ last_argument = arguments[arguments.length - 1]
+ if typeof last_argument == 'function'
+ response =
+ success: 'incorrect'
+ contents: 'mock grader response'
+ last_argument(response)
+ )
+ @problem = new Problem($('#problem'))
+ @problem.bind()
+
+ it 'knows when a capa problem is graded, using check.', ->
+ @problem.answers = 'test answer'
+ @problem.check()
+ expect($.postWithPrefix).toHaveBeenCalledWith("#{@hinter.url}/get_hint", 'test answer', jasmine.any(Function))
+
+ it 'knows when a capa problem is graded usig check_fd.', ->
+ spyOn($, 'ajaxWithPrefix').andCallFake((url, settings) ->
+ response =
+ success: 'incorrect'
+ contents: 'mock grader response'
+ settings.success(response)
+ )
+ @problem.answers = 'test answer'
+ @problem.check_fd()
+ expect($.postWithPrefix).toHaveBeenCalledWith("#{@hinter.url}/get_hint", 'test answer', jasmine.any(Function))
+
+ describe 'capture_problem', ->
+ beforeEach ->
+ spyOn($, 'postWithPrefix').andReturn(null)
+
+ it 'gets hints for an incorrect answer', ->
+ data = ['some answers', '']
+ @hinter.capture_problem('problem_graded', data, 'fake element')
+ expect($.postWithPrefix).toHaveBeenCalledWith("#{@hinter.url}/get_hint", 'some answers', jasmine.any(Function))
+
+ it 'gets feedback for a correct answer', ->
+ data = ['some answers', '']
+ @hinter.capture_problem('problem_graded', data, 'fake element')
+ expect($.postWithPrefix).toHaveBeenCalledWith("#{@hinter.url}/get_feedback", 'some answers', jasmine.any(Function))
+
+
diff --git a/common/lib/xmodule/xmodule/js/src/capa/display.coffee b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
index 7263dde1f01a..61df101d08ec 100644
--- a/common/lib/xmodule/xmodule/js/src/capa/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/capa/display.coffee
@@ -19,7 +19,6 @@ class @Problem
problem_prefix = @element_id.replace(/problem_/,'')
@inputs = @$("[id^=input_#{problem_prefix}_]")
-
@$('section.action input:button').click @refreshAnswers
@$('section.action input.check').click @check_fd
@$('section.action input.reset').click @reset
@@ -247,6 +246,7 @@ class @Problem
@updateProgress response
else
@gentle_alert response.success
+ Logger.log 'problem_graded', [@answers, response.contents], @url
if not abort_submission
$.ajaxWithPrefix("#{@url}/problem_check", settings)
diff --git a/common/lib/xmodule/xmodule/js/src/crowdsource_hinter/display.coffee b/common/lib/xmodule/xmodule/js/src/crowdsource_hinter/display.coffee
index 72522f5b03ca..f9f3e438262b 100644
--- a/common/lib/xmodule/xmodule/js/src/crowdsource_hinter/display.coffee
+++ b/common/lib/xmodule/xmodule/js/src/crowdsource_hinter/display.coffee
@@ -29,42 +29,73 @@ class @Hinter
$(selector, @el)
bind: =>
- window.update_schematics()
@$('input.vote').click @vote
@$('input.submit-hint').click @submit_hint
@$('.custom-hint').click @clear_default_text
- @$('#answer-tabs').tabs({active: 0})
- @$('.expand-goodhint').click @expand_goodhint
+ @$('.expand').click @expand
+ @$('.wizard-link').click @wizard_link_handle
+ @$('.answer-choice').click @answer_choice_handle
- expand_goodhint: =>
- if @$('.goodhint').css('display') == 'none'
- @$('.goodhint').css('display', 'block')
+ expand: (eventObj) =>
+ # Expand a hidden div.
+ target = @$('#' + @$(eventObj.currentTarget).data('target'))
+ if @$(target).css('display') == 'none'
+ @$(target).css('display', 'block')
else
- @$('.goodhint').css('display', 'none')
+ @$(target).css('display', 'none')
+ # Fix positioning errors with the bottom class.
+ @set_bottom_links()
vote: (eventObj) =>
+ # Make an ajax request with the user's vote.
target = @$(eventObj.currentTarget)
- post_json = {'answer': target.data('answer'), 'hint': target.data('hintno')}
+ all_pks = @$('#pk-list').attr('data-pk-list')
+ post_json = {'answer': target.attr('data-answer'), 'hint': target.data('hintno'), 'pk_list': all_pks}
$.postWithPrefix "#{@url}/vote", post_json, (response) =>
@render(response.contents)
submit_hint: (eventObj) =>
- target = @$(eventObj.currentTarget)
- textarea_id = '#custom-hint-' + target.data('answer')
- post_json = {'answer': target.data('answer'), 'hint': @$(textarea_id).val()}
+ # Make an ajax request with the user's new hint.
+ textarea = $('.custom-hint')
+ if @answer == ''
+ # The user didn't choose an answer, somehow. Do nothing.
+ return
+ post_json = {'answer': @answer, 'hint': textarea.val()}
$.postWithPrefix "#{@url}/submit_hint",post_json, (response) =>
@render(response.contents)
clear_default_text: (eventObj) =>
+ # Remove placeholder text in the hint submission textbox.
target = @$(eventObj.currentTarget)
if target.data('cleared') == undefined
target.val('')
target.data('cleared', true)
+ wizard_link_handle: (eventObj) =>
+ # Move to another wizard view, based on the link that the user clicked.
+ target = @$(eventObj.currentTarget)
+ @go_to(target.attr('dest'))
+
+ answer_choice_handle: (eventObj) =>
+ # A special case of wizard_link_handle - we need to track a state variable,
+ # the answer that the user chose.
+ @answer = @$(eventObj.target).attr('value')
+ @$('#blank-answer').html(@answer)
+ @go_to('p3')
+
+ set_bottom_links: =>
+ # Makes each .bottom class stick to the bottom of .wizard-viewbox
+ @$('.bottom').css('margin-top', '0px')
+ viewbox_height = parseInt(@$('.wizard-viewbox').css('height'), 10)
+ @$('.bottom').each((index, obj) ->
+ view_height = parseInt($(obj).parent().css('height'), 10)
+ $(obj).css('margin-top', (viewbox_height - view_height) + 'px')
+ )
+
render: (content) ->
if content
# Trim leading and trailing whitespace
- content = content.replace /^\s+|\s+$/g, ""
+ content = content.trim()
if content
@el.html(content)
@@ -74,3 +105,37 @@ class @Hinter
@$('#previous-answer-0').css('display', 'inline')
else
@el.hide()
+ # Initialize the answer choice - remembers which answer the user picked on
+ # p2 when they submit a hint on p3.
+ @answer = ''
+ # Determine whether the browser supports CSS3 transforms.
+ styles = document.body.style
+ if styles.WebkitTransform == '' or styles.transform == ''
+ @go_to = @transform_go_to
+ else
+ @go_to = @legacy_go_to
+
+ # Make the correct wizard view show up.
+ hints_exist = @$('#hints-exist').html() == 'True'
+ if hints_exist
+ @go_to('p1')
+ else
+ @go_to('p2')
+
+ transform_go_to: (view_id) ->
+ # Switch wizard views using sliding transitions.
+ id_to_index = {
+ 'p1': 0,
+ 'p2': 1,
+ 'p3': 2,
+ }
+ translate_string = 'translateX(' +id_to_index[view_id] * -1 * parseInt($('#' + view_id).css('width'), 10) + 'px)'
+ @$('.wizard-container').css('transform', translate_string)
+ @$('.wizard-container').css('-webkit-transform', translate_string)
+ @set_bottom_links()
+
+ legacy_go_to: (view_id) ->
+ # For older browsers - switch wizard views by changing the screen.
+ @$('.wizard-view').css('display', 'none')
+ @$('#' + view_id).css('display', 'block')
+ @set_bottom_links()
\ No newline at end of file
diff --git a/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py b/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
index 19e156a0f3f7..8347b71076ed 100644
--- a/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
+++ b/common/lib/xmodule/xmodule/tests/test_crowdsource_hinter.py
@@ -2,7 +2,7 @@
Tests the crowdsourced hinter xmodule.
"""
-from mock import Mock
+from mock import Mock, MagicMock
import unittest
import copy
@@ -53,6 +53,7 @@ def next_num():
@staticmethod
def create(hints=None,
previous_answers=None,
+ user_submissions=None,
user_voted=None,
moderate=None,
mod_queue=None):
@@ -85,17 +86,59 @@ def create(hints=None,
else:
model_data['previous_answers'] = [
['24.0', [0, 3, 4]],
- ['29.0', [None, None, None]]
+ ['29.0', []]
]
+ if user_submissions is not None:
+ model_data['user_submissions'] = user_submissions
+ else:
+ model_data['user_submissions'] = ['24.0', '29.0']
+
if user_voted is not None:
model_data['user_voted'] = user_voted
if moderate is not None:
model_data['moderate'] = moderate
- descriptor = Mock(weight="1")
+ descriptor = Mock(weight='1')
+ # Make the descriptor have a capa problem child.
+ capa_descriptor = MagicMock()
+ capa_descriptor.name = 'capa'
+ descriptor.get_children = lambda: [capa_descriptor]
+
+ # Make a fake capa module.
+ capa_module = MagicMock()
+ capa_module.lcp = MagicMock()
+ responder = MagicMock()
+
+ def validate_answer(answer):
+ """A mock answer validator - simulates a numerical response."""
+ try:
+ float(answer)
+ return True
+ except ValueError:
+ return False
+ responder.validate_answer = validate_answer
+
+ def compare_answer(ans1, ans2):
+ """ A fake answer comparer """
+ return ans1 == ans2
+ responder.compare_answer = compare_answer
+
+ capa_module.lcp.responders = {'responder0': responder}
+ capa_module.displayable_items = lambda: [capa_module]
+
system = get_test_system()
+ # Make the system have a marginally-functional get_module
+
+ def fake_get_module(descriptor):
+ """
+ A fake module-maker.
+ """
+ if descriptor.name == 'capa':
+ return capa_module
+ system.get_module = fake_get_module
+
module = CrowdsourceHinterModule(system, descriptor, model_data)
return module
@@ -146,11 +189,13 @@ class VerticalWithModulesFactory(object):
@staticmethod
def next_num():
+ """Increments a global counter for naming."""
CHModuleFactory.num += 1
return CHModuleFactory.num
@staticmethod
def create():
+ """Make a vertical."""
model_data = {'data': VerticalWithModulesFactory.sample_problem_xml}
system = get_test_system()
descriptor = VerticalDescriptor.from_xml(VerticalWithModulesFactory.sample_problem_xml, system)
@@ -226,6 +271,24 @@ def test_gethtml_multiple(self):
self.assertTrue('Test numerical problem.' in out_html)
self.assertTrue('Another test numerical problem.' in out_html)
+ def test_numerical_answer_to_str(self):
+ """
+ Tests the get request to string converter for numerical responses.
+ """
+ mock_module = CHModuleFactory.create()
+ get = {'response1': '4'}
+ parsed = mock_module.numerical_answer_to_str(get)
+ self.assertTrue(parsed == '4')
+
+ def test_formula_answer_to_str(self):
+ """
+ Tests the get request to string converter for formula responses.
+ """
+ mock_module = CHModuleFactory.create()
+ get = {'response1': 'x*y^2'}
+ parsed = mock_module.formula_answer_to_str(get)
+ self.assertTrue(parsed == 'x*y^2')
+
def test_gethint_0hint(self):
"""
Someone asks for a hint, when there's no hint to give.
@@ -235,21 +298,36 @@ def test_gethint_0hint(self):
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '26.0'}
out = mock_module.get_hint(json_in)
+ print mock_module.previous_answers
self.assertTrue(out is None)
- self.assertTrue(['26.0', [None, None, None]] in mock_module.previous_answers)
+ self.assertTrue('26.0' in mock_module.user_submissions)
def test_gethint_unparsable(self):
"""
- Someone submits a hint that cannot be parsed into a float.
+ Someone submits an answer that is in the wrong format.
- The answer should not be added to previous_answers.
"""
mock_module = CHModuleFactory.create()
old_answers = copy.deepcopy(mock_module.previous_answers)
- json_in = {'problem_name': 'fish'}
+ json_in = 'blah'
out = mock_module.get_hint(json_in)
self.assertTrue(out is None)
self.assertTrue(mock_module.previous_answers == old_answers)
+ def test_gethint_signature_error(self):
+ """
+ Someone submits an answer that cannot be calculated as a float.
+ Nothing should change.
+ """
+ mock_module = CHModuleFactory.create()
+ old_answers = copy.deepcopy(mock_module.previous_answers)
+ old_user_submissions = copy.deepcopy(mock_module.user_submissions)
+ json_in = {'problem1': 'fish'}
+ out = mock_module.get_hint(json_in)
+ self.assertTrue(out is None)
+ self.assertTrue(mock_module.previous_answers == old_answers)
+ self.assertTrue(mock_module.user_submissions == old_user_submissions)
+
def test_gethint_1hint(self):
"""
Someone asks for a hint, with exactly one hint in the database.
@@ -258,7 +336,11 @@ def test_gethint_1hint(self):
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '25.0'}
out = mock_module.get_hint(json_in)
- self.assertTrue(out['best_hint'] == 'Really popular hint')
+ self.assertTrue('Really popular hint' in out['hints'])
+ # Also make sure that the input gets added to user_submissions,
+ # and that the hint is logged in previous_answers.
+ self.assertTrue('25.0' in mock_module.user_submissions)
+ self.assertTrue(['25.0', ['1']] in mock_module.previous_answers)
def test_gethint_manyhints(self):
"""
@@ -271,18 +353,18 @@ def test_gethint_manyhints(self):
mock_module = CHModuleFactory.create()
json_in = {'problem_name': '24.0'}
out = mock_module.get_hint(json_in)
- self.assertTrue(out['best_hint'] == 'Best hint')
- self.assertTrue('rand_hint_1' in out)
- self.assertTrue('rand_hint_2' in out)
+ self.assertTrue('Best hint' in out['hints'])
+ self.assertTrue(len(out['hints']) == 3)
def test_getfeedback_0wronganswers(self):
"""
Someone has gotten the problem correct on the first try.
Output should be empty.
"""
- mock_module = CHModuleFactory.create(previous_answers=[])
+ mock_module = CHModuleFactory.create(previous_answers=[], user_submissions=[])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
+ print out
self.assertTrue(out is None)
def test_getfeedback_1wronganswer_nohints(self):
@@ -294,9 +376,7 @@ def test_getfeedback_1wronganswer_nohints(self):
mock_module = CHModuleFactory.create(previous_answers=[['26.0', [None, None, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
- print out['index_to_answer']
- self.assertTrue(out['index_to_hints'][0] == [])
- self.assertTrue(out['index_to_answer'][0] == '26.0')
+ self.assertTrue(out['answer_to_hints'] == {'26.0': {}})
def test_getfeedback_1wronganswer_withhints(self):
"""
@@ -307,8 +387,7 @@ def test_getfeedback_1wronganswer_withhints(self):
mock_module = CHModuleFactory.create(previous_answers=[['24.0', [0, 3, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
- print out['index_to_hints']
- self.assertTrue(len(out['index_to_hints'][0]) == 2)
+ self.assertTrue(len(out['answer_to_hints']['24.0']) == 2)
def test_getfeedback_missingkey(self):
"""
@@ -319,7 +398,7 @@ def test_getfeedback_missingkey(self):
previous_answers=[['24.0', [0, 100, None]]])
json_in = {'problem_name': '42.5'}
out = mock_module.get_feedback(json_in)
- self.assertTrue(len(out['index_to_hints'][0]) == 1)
+ self.assertTrue(len(out['answer_to_hints']['24.0']) == 1)
def test_vote_nopermission(self):
"""
@@ -327,7 +406,7 @@ def test_vote_nopermission(self):
Should not change any vote tallies.
"""
mock_module = CHModuleFactory.create(user_voted=True)
- json_in = {'answer': 0, 'hint': 1}
+ json_in = {'answer': '24.0', 'hint': 1, 'pk_list': json.dumps([['24.0', 1], ['24.0', 3]])}
old_hints = copy.deepcopy(mock_module.hints)
mock_module.tally_vote(json_in)
self.assertTrue(mock_module.hints == old_hints)
@@ -339,19 +418,56 @@ def test_vote_withpermission(self):
"""
mock_module = CHModuleFactory.create(
previous_answers=[['24.0', [0, 3, None]]])
- json_in = {'answer': 0, 'hint': 3}
+ json_in = {'answer': '24.0', 'hint': 3, 'pk_list': json.dumps([['24.0', 0], ['24.0', 3]])}
dict_out = mock_module.tally_vote(json_in)
self.assertTrue(mock_module.hints['24.0']['0'][1] == 40)
self.assertTrue(mock_module.hints['24.0']['3'][1] == 31)
self.assertTrue(['Best hint', 40] in dict_out['hint_and_votes'])
self.assertTrue(['Another hint', 31] in dict_out['hint_and_votes'])
+ def test_vote_unparsable(self):
+ """
+ A user somehow votes for an unparsable answer.
+ Should return a friendly error.
+ (This is an unusual exception path - I don't know how it occurs,
+ except if you manually make a post request. But, it seems to happen
+ occasionally.)
+ """
+ mock_module = CHModuleFactory.create()
+ # None means that the answer couldn't be parsed.
+ mock_module.answer_signature = lambda text: None
+ json_in = {'answer': 'fish', 'hint': 3, 'pk_list': '[]'}
+ dict_out = mock_module.tally_vote(json_in)
+ print dict_out
+ self.assertTrue(dict_out == {'error': 'Failure in voting!'})
+
+ def test_vote_nohint(self):
+ """
+ A user somehow votes for a hint that doesn't exist.
+ Should return a friendly error.
+ """
+ mock_module = CHModuleFactory.create()
+ json_in = {'answer': '24.0', 'hint': '25', 'pk_list': '[]'}
+ dict_out = mock_module.tally_vote(json_in)
+ self.assertTrue(dict_out == {'error': 'Failure in voting!'})
+
+ def test_vote_badpklist(self):
+ """
+ Some of the pk's specified in pk_list are invalid.
+ Should just skip those.
+ """
+ mock_module = CHModuleFactory.create()
+ json_in = {'answer': '24.0', 'hint': '0', 'pk_list': json.dumps([['24.0', 0], ['24.0', 12]])}
+ hint_and_votes = mock_module.tally_vote(json_in)['hint_and_votes']
+ self.assertTrue(['Best hint', 41] in hint_and_votes)
+ self.assertTrue(len(hint_and_votes) == 1)
+
def test_submithint_nopermission(self):
"""
A user tries to submit a hint, but he has already voted.
"""
mock_module = CHModuleFactory.create(user_voted=True)
- json_in = {'answer': 1, 'hint': 'This is a new hint.'}
+ json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
print mock_module.user_voted
mock_module.submit_hint(json_in)
print mock_module.hints
@@ -363,7 +479,7 @@ def test_submithint_withpermission_new(self):
exist yet.
"""
mock_module = CHModuleFactory.create()
- json_in = {'answer': 1, 'hint': 'This is a new hint.'}
+ json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
self.assertTrue('29.0' in mock_module.hints)
@@ -373,13 +489,12 @@ def test_submithint_withpermission_existing(self):
already.
"""
mock_module = CHModuleFactory.create(previous_answers=[['25.0', [1, None, None]]])
- json_in = {'answer': 0, 'hint': 'This is a new hint.'}
+ json_in = {'answer': '25.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
# Make a hint request.
json_in = {'problem name': '25.0'}
out = mock_module.get_hint(json_in)
- self.assertTrue((out['best_hint'] == 'This is a new hint.')
- or (out['rand_hint_1'] == 'This is a new hint.'))
+ self.assertTrue('This is a new hint.' in out['hints'])
def test_submithint_moderate(self):
"""
@@ -388,7 +503,7 @@ def test_submithint_moderate(self):
dict.
"""
mock_module = CHModuleFactory.create(moderate='True')
- json_in = {'answer': 1, 'hint': 'This is a new hint.'}
+ json_in = {'answer': '29.0', 'hint': 'This is a new hint.'}
mock_module.submit_hint(json_in)
self.assertTrue('29.0' not in mock_module.hints)
self.assertTrue('29.0' in mock_module.mod_queue)
@@ -398,10 +513,20 @@ def test_submithint_escape(self):
Make sure that hints are being html-escaped.
"""
mock_module = CHModuleFactory.create()
- json_in = {'answer': 1, 'hint': ''}
+ json_in = {'answer': '29.0', 'hint': ''}
mock_module.submit_hint(json_in)
+ self.assertTrue(mock_module.hints['29.0']['0'][0] == u'<script> alert("Trololo"); </script>')
+
+ def test_submithint_unparsable(self):
+ mock_module = CHModuleFactory.create()
+ mock_module.answer_signature = lambda text: None
+ json_in = {'answer': 'fish', 'hint': 'A hint'}
+ dict_out = mock_module.submit_hint(json_in)
+ print dict_out
print mock_module.hints
- self.assertTrue(mock_module.hints['29.0'][0][0] == u'<script> alert("Trololo"); </script>')
+ self.assertTrue('error' in dict_out)
+ self.assertTrue(None not in mock_module.hints)
+ self.assertTrue('fish' not in mock_module.hints)
def test_template_gethint(self):
"""
@@ -409,7 +534,7 @@ def test_template_gethint(self):
"""
mock_module = CHModuleFactory.create()
- def fake_get_hint(get):
+ def fake_get_hint(_):
"""
Creates a rendering dictionary, with which we can test
the templates.
diff --git a/common/templates/hinter_display.html b/common/templates/hinter_display.html
index 6f5d6f37fb50..12f6b9f2f48a 100644
--- a/common/templates/hinter_display.html
+++ b/common/templates/hinter_display.html
@@ -3,94 +3,137 @@
<%def name="get_hint()">
- % if best_hint != '':
+ % if len(hints) > 0:
Hints from students who made similar mistakes:
-
${best_hint}
- % endif
- % if rand_hint_1 != '':
-
${rand_hint_1}
- % endif
- % if rand_hint_2 != '':
-
${rand_hint_2}
+ % for hint in hints:
+
${hint}
+ % endfor
+
% endif
-
%def>
<%def name="get_feedback()">
-
Participation in the hinting system is strictly optional, and will not influence your grade.
-
- Help your classmates by writing hints for this problem. Start by picking one of your previous incorrect answers from below:
-
+ <%
+ def unspace(in_str):
+ """
+ HTML id's can't have spaces in them. This little function
+ removes spaces.
+ """
+ return ''.join(in_str.split())
+
+ # Make a list of all hints shown. (This is fed back to the site as pk_list.)
+ # At the same time, determine whether any hints were shown at all.
+ # If the user never saw hints, don't ask him to vote.
+ import json
+ hints_exist = False
+ pk_list = []
+ for answer, pk_dict in answer_to_hints.items():
+ if len(pk_dict) > 0:
+ hints_exist = True
+ for pk, hint_text in pk_dict.items():
+ pk_list.append([answer, pk])
+ json_pk_list = json.dumps(pk_list)
+ %>
+
+
+ ${hints_exist}
+
+
+
+ Optional. Help us improve our hints! Which hint was most helpful to you?
+
-
-
- % for index, answer in index_to_answer.items():
-
It depends on the type of problem you ran into. For stupid errors --
- an arithmetic error or similar -- simply letting the student you'll be
- helping to check their signs is sufficient.
+
-
For deeper errors of understanding, the best hints allow students to
- discover a contradiction in how they are thinking about the
- problem. An example that clearly demonstrates inconsistency or
- cognitive dissonace
- is ideal, although in most cases, not possible.
+
- Good hints either:
-
-
Point out the specific misunderstanding your classmate might have
-
Point to concepts or theories where your classmates might have a
- misunderstanding
-
Show simpler, analogous examples.
-
Provide references to relevant parts of the text
-
+ Write a hint for other students who get the wrong answer of .
+
It depends on the type of problem you ran into. For stupid errors --
+ an arithmetic error or similar -- simply letting the student you'll be
+ helping know to check their signs is sufficient.
+
+
For deeper errors of understanding, the best hints allow students to
+ discover a contradiction in how they are thinking about the
+ problem. An example that clearly demonstrates inconsistency or
+ cognitive dissonance
+ is ideal, although in most cases, not possible.
+
+
+ Good hints either:
+
+
Point out the specific misunderstanding your classmate might have
+
Point to concepts or theories where your classmates might have a
+ misunderstanding
+
Show simpler, analogous examples.
+
Provide references to relevant parts of the text
+
+
+
+
Still, remember even a crude hint -- virtually anything short of
+ giving away the answer -- is better than no hint.