
E0d/fix conflicts #1955

Closed
wants to merge 11 commits
13 changes: 5 additions & 8 deletions cms/djangoapps/auth/authz.py
@@ -63,7 +63,7 @@ def get_all_course_role_groupnames(location, role, use_filter=True):
    # filter to the ones which exist
    default = groupnames[0]
    if use_filter:
-        groupnames = [group for group in groupnames if Group.objects.filter(name=group).exists()]
+        groupnames = [group.name for group in Group.objects.filter(name__in=groupnames)]
    return groupnames, default
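The rewrite above (and the two hunks below, which apply the same pattern) trades one `exists()` query per candidate name for a single `name__in` lookup. A minimal sketch of the difference, assuming Django's stock `django.contrib.auth.models.Group` and a hypothetical `names` list:

```python
from django.contrib.auth.models import Group

names = ["staff_foo", "instructor_foo"]  # hypothetical group names

# Old approach: one EXISTS query per name; result preserves the order of `names`.
existing = [n for n in names if Group.objects.filter(name=n).exists()]

# New approach: a single query with WHERE name IN (...); results come back in
# database order rather than the order of `names`.
existing = [g.name for g in Group.objects.filter(name__in=names)]
```

The ordering difference is harmless here, since `default` is taken from `groupnames[0]` before the filter runs.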


@@ -203,12 +203,9 @@ def remove_user_from_course_group(caller, user, location, role):

    # see if the user is actually in that role, if not then we don't have to do anything
    groupnames, _ = get_all_course_role_groupnames(location, role)
-    for groupname in groupnames:
-        groups = user.groups.filter(name=groupname)
-        if groups:
-            # will only be one with that name
-            user.groups.remove(groups[0])
-            user.save()
+    for group in user.groups.filter(name__in=groupnames):
+        user.groups.remove(group)
+    user.save()
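A possible further simplification, since Django's related managers accept multiple objects per call; a sketch with the same effect, not part of this PR:

```python
# One call removes every matched group; M2M changes apply immediately.
user.groups.remove(*user.groups.filter(name__in=groupnames))
user.save()
```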


def remove_user_from_creator_group(caller, user):
@@ -243,7 +240,7 @@ def is_user_in_course_group_role(user, location, role, check_staff=True):
        if check_staff and user.is_staff:
            return True
        groupnames, _ = get_all_course_role_groupnames(location, role)
-        return any(user.groups.filter(name=groupname).exists() for groupname in groupnames)
+        return user.groups.filter(name__in=groupnames).exists()

    return False

@@ -258,8 +258,23 @@ def states_sort_key(self, idx_task_states):
        if not task_states:
            return (0, 0, state_values[OpenEndedChild.INITIAL], idx)

-        final_child_state = json.loads(task_states[-1])
-        scores = [attempt.get('score', 0) for attempt in final_child_state.get('child_history', [])]
+        final_task_xml = self.task_xml[-1]
+        final_child_state_json = task_states[-1]
+        final_child_state = json.loads(final_child_state_json)
+
+        tag_name = self.get_tag_name(final_task_xml)
+        children = self.child_modules()
+        task_descriptor = children['descriptors'][tag_name](self.system)
+        task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(final_task_xml), self.system)
+        task = children['modules'][tag_name](
+            self.system,
+            self.location,
+            task_parsed_xml,
+            task_descriptor,
+            self.static_data,
+            instance_state=final_child_state_json,
+        )
+        scores = task.all_scores()
        if scores:
            best_score = max(scores)
        else:
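The point of the extra machinery in this hunk: the raw `attempt['score']` stored in `child_history` can be stale for ML-graded attempts, so the sort key now rebuilds the final task module and asks it for `all_scores()`, which re-derives ML scores from rubric data (see the `score_for_attempt` helper added below). A toy illustration with hypothetical attempt data, in the parsed shape the helper expects:

```python
# Hypothetical data: the stored score disagrees with the ML rubric.
attempt = {'score': 1}
post_assessment_data = {'grader_types': ['ML'], 'rubric_scores': [[3]]}

# The stored value reads 1, but the rubric sum is 3; for ML grading the
# new helpers report the rubric sum instead of the stored score.
assert attempt.get('score') == 1
assert sum(post_assessment_data['rubric_scores'][0]) == 3
```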
@@ -679,7 +679,7 @@ def save_answer(self, data, system):
        return {
            'success': success,
            'error': error_message,
-            'student_response': data['student_answer'].replace("\n","<br/>")
+            'student_response': data['student_answer'].replace("\n", "<br/>")
        }

    def update_score(self, data, system):
@@ -738,6 +738,44 @@ def get_html(self, system):
        html = system.render_template('{0}/open_ended.html'.format(self.TEMPLATE_DIR), context)
        return html

+    def latest_score(self):
+        """None if not available"""
+        if not self.child_history:
+            return None
+        return self.score_for_attempt(-1)
+
+    def all_scores(self):
+        """None if not available"""
+        if not self.child_history:
+            return None
+        return [self.score_for_attempt(index) for index in xrange(0, len(self.child_history))]
+
+    def score_for_attempt(self, index):
+        """
+        Return the sum of rubric scores for ML grading; otherwise return attempt["score"].
+        """
+        attempt = self.child_history[index]
+        score = attempt.get('score')
+        post_assessment_data = self._parse_score_msg(attempt.get('post_assessment'), self.system)
+        grader_types = post_assessment_data.get('grader_types')
+
+        # According to _parse_score_msg, in ML grading there should be only one grader type.
+        if len(grader_types) == 1 and grader_types[0] == 'ML':
+            rubric_scores = post_assessment_data.get("rubric_scores")
+
+            # Similarly, there should be only one list of rubric scores.
+            if len(rubric_scores) == 1:
+                rubric_scores_sum = sum(rubric_scores[0])
+                log.debug("""Score normalized for location={loc}, old_score={old_score},
+                new_score={new_score}, rubric_score={rubric_score}""".format(
+                    loc=self.location_string,
+                    old_score=score,
+                    new_score=rubric_scores_sum,
+                    rubric_score=rubric_scores
+                ))
+                return rubric_scores_sum
+        return score


class OpenEndedDescriptor():
"""
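Tying the new helpers to the tests that follow: for an ML-graded attempt, `score_for_attempt` returns the rubric sum rather than the stored score, and the test docstrings below state that the combined module's `get_score` reports that sum scaled by 5 under the tests' configuration. A rough sketch of the `sum([3])*5` arithmetic the docstrings refer to (the factor of 5 comes from the module's weight/max-score handling, which is outside this diff):

```python
rubric_scores = [[3]]                  # one ML grader, one list of rubric scores
attempt_score = sum(rubric_scores[0])  # score_for_attempt -> 3
assert attempt_score * 5 == 15         # matches assertEqual(score_dict['score'], 15.0)
```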
182 changes: 177 additions & 5 deletions common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
@@ -27,7 +27,9 @@
from xmodule.tests.test_util_open_ended import (
    DummyModulestore, TEST_STATE_SA_IN,
    MOCK_INSTANCE_STATE, TEST_STATE_SA, TEST_STATE_AI, TEST_STATE_AI2, TEST_STATE_AI2_INVALID,
-    TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile
+    TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile, INSTANCE_INCONSISTENT_STATE,
+    INSTANCE_INCONSISTENT_STATE2, INSTANCE_INCONSISTENT_STATE3, INSTANCE_INCONSISTENT_STATE4,
+    INSTANCE_INCONSISTENT_STATE5
)

from xblock.field_data import DictFieldData
@@ -358,7 +360,7 @@ def test_open_ended_display(self):

        # Create a module with no state yet. Important that this start off as a blank slate.
        test_module = OpenEndedModule(self.test_system, self.location,
-            self.definition, self.descriptor, self.static_data, self.metadata)
+                                      self.definition, self.descriptor, self.static_data, self.metadata)

        saved_response = "Saved response."
        submitted_response = "Submitted response."
@@ -369,7 +371,7 @@ def test_open_ended_display(self):
self.assertEqual(test_module.get_display_answer(), "")

# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer' : saved_response}, get_test_system())
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
@@ -387,6 +389,7 @@ def test_open_ended_display(self):
        # Confirm that the answer is stored properly.
        self.assertEqual(test_module.latest_answer(), submitted_response)

+
class CombinedOpenEndedModuleTest(unittest.TestCase):
    """
    Unit tests for the combined open ended xmodule
@@ -610,7 +613,6 @@ def test_alternate_orderings(self):
                                                    metadata=self.metadata,
                                                    instance_state={'task_states': TEST_STATE_SA_IN})

-
    def test_get_score_realistic(self):
        """
        Try to parse the correct score from a json instance state
@@ -717,6 +719,175 @@ def test_state_pe_single(self):
        self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2])


+class CombinedOpenEndedModuleConsistencyTest(unittest.TestCase):
+    """
+    Unit tests for the combined open ended xmodule rubric scores consistency.
+    """
+
+    # location, definition_template, prompt, rubric, max_score, metadata, oeparam, task_xml1, task_xml2
+    # All these variables are used to construct the xmodule descriptor.
+    location = Location(["i4x", "edX", "open_ended", "combinedopenended",
+                         "SampleQuestion"])
+    definition_template = """
+        <combinedopenended attempts="10000">
+            {rubric}
+            {prompt}
+            <task>
+                {task1}
+            </task>
+            <task>
+                {task2}
+            </task>
+        </combinedopenended>
+    """
+    prompt = "<prompt>This is a question prompt</prompt>"
+    rubric = '''<rubric><rubric>
+        <category>
+            <description>Response Quality</description>
+            <option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
+            <option>Second option</option>
+        </category>
+    </rubric></rubric>'''
+    max_score = 10
+
+    metadata = {'attempts': '10', 'max_score': max_score}
+
+    oeparam = etree.XML('''
+        <openendedparam>
+            <initial_display>Enter essay here.</initial_display>
+            <answer_display>This is the answer.</answer_display>
+            <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
+        </openendedparam>
+    ''')
+
+    task_xml1 = '''
+        <selfassessment>
+            <hintprompt>
+                What hint about this problem would you give to someone?
+            </hintprompt>
+            <submitmessage>
+                Save Successful. Thanks for participating!
+            </submitmessage>
+        </selfassessment>
+    '''
+    task_xml2 = '''
+        <openended min_score_to_attempt="1" max_score_to_attempt="10">
+            <openendedparam>
+                <initial_display>Enter essay here.</initial_display>
+                <answer_display>This is the answer.</answer_display>
+                <grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
+            </openendedparam>
+        </openended>'''
+
+    static_data = {
+        'max_attempts': 20,
+        'prompt': prompt,
+        'rubric': rubric,
+        'max_score': max_score,
+        'display_name': 'Name',
+        'accept_file_upload': False,
+        'close_date': "",
+        's3_interface': test_util_open_ended.S3_INTERFACE,
+        'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
+        'skip_basic_checks': False,
+        'graded': True,
+    }
+
+    definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
+    full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
+    descriptor = Mock(data=full_definition)
+    test_system = get_test_system()
+    test_system.open_ended_grading_interface = None
+    combinedoe_container = CombinedOpenEndedModule(
+        descriptor=descriptor,
+        runtime=test_system,
+        field_data=DictFieldData({
+            'data': full_definition,
+            'weight': '1',
+        }),
+        scope_ids=ScopeIds(None, None, None, None),
+    )
+
+    def setUp(self):
+        self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
+                                                    self.location,
+                                                    self.definition,
+                                                    self.descriptor,
+                                                    static_data=self.static_data,
+                                                    metadata=self.metadata,
+                                                    instance_state=json.loads(INSTANCE_INCONSISTENT_STATE))
+
+    def test_get_score(self):
+        """
+        If grader type is ML, the score should be updated from rubric scores. Aggregate rubric scores = sum([3])*5.
+        """
+        score_dict = self.combinedoe.get_score()
+        self.assertEqual(score_dict['score'], 15.0)
+        self.assertEqual(score_dict['total'], 5.0)
+
+    def test_get_score_with_pe_grader(self):
+        """
+        If grader type is PE, the score should not be updated from rubric scores. Aggregate rubric scores = sum([3])*5.
+        """
+        combinedoe = CombinedOpenEndedV1Module(self.test_system,
+                                               self.location,
+                                               self.definition,
+                                               self.descriptor,
+                                               static_data=self.static_data,
+                                               metadata=self.metadata,
+                                               instance_state=json.loads(INSTANCE_INCONSISTENT_STATE2))
+        score_dict = combinedoe.get_score()
+        self.assertNotEqual(score_dict['score'], 15.0)
+
+    def test_get_score_with_different_score_value_in_rubric(self):
+        """
+        If grader type is ML, the score should be updated from rubric scores. Aggregate rubric scores = sum([5])*5.
+        """
+        combinedoe = CombinedOpenEndedV1Module(self.test_system,
+                                               self.location,
+                                               self.definition,
+                                               self.descriptor,
+                                               static_data=self.static_data,
+                                               metadata=self.metadata,
+                                               instance_state=json.loads(INSTANCE_INCONSISTENT_STATE3))
+        score_dict = combinedoe.get_score()
+        self.assertEqual(score_dict['score'], 25.0)
+        self.assertEqual(score_dict['total'], 5.0)
+
+    def test_get_score_with_old_task_states(self):
+        """
+        If grader type is ML and old_task_states are present in the inconsistent instance state, the score
+        should be updated from rubric scores. Aggregate rubric scores = sum([3])*5.
+        """
+        combinedoe = CombinedOpenEndedV1Module(self.test_system,
+                                               self.location,
+                                               self.definition,
+                                               self.descriptor,
+                                               static_data=self.static_data,
+                                               metadata=self.metadata,
+                                               instance_state=json.loads(INSTANCE_INCONSISTENT_STATE4))
+        score_dict = combinedoe.get_score()
+        self.assertEqual(score_dict['score'], 15.0)
+        self.assertEqual(score_dict['total'], 5.0)
+
+    def test_get_score_with_score_missing(self):
+        """
+        If grader type is ML and the score field is missing from the inconsistent instance state, the score
+        should be updated from rubric scores. Aggregate rubric scores = sum([3])*5.
+        """
+        combinedoe = CombinedOpenEndedV1Module(self.test_system,
+                                               self.location,
+                                               self.definition,
+                                               self.descriptor,
+                                               static_data=self.static_data,
+                                               metadata=self.metadata,
+                                               instance_state=json.loads(INSTANCE_INCONSISTENT_STATE5))
+        score_dict = combinedoe.get_score()
+        self.assertEqual(score_dict['score'], 15.0)
+        self.assertEqual(score_dict['total'], 5.0)


class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
    """
    Test the student flow in the combined open ended xmodule
@@ -948,6 +1119,7 @@ def test_reset_fail(self):
        reset_data = json.loads(self._handle_ajax("reset", {}))
        self.assertEqual(reset_data['success'], False)

+
class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore):
    """
    Test if student is able to upload images properly.
@@ -1018,7 +1190,7 @@ def test_link_submission_success(self):
        # Simulate a student saving an answer with a link.
        response = module.handle_ajax("save_answer", {
            "student_answer": "{0} {1}".format(self.answer_text, self.answer_link)
-            })
+        })

        response = json.loads(response)
