diff --git a/bin/deploy-docker.sh b/bin/deploy-docker.sh
index 65c5e0d724..43bbddf307 100755
--- a/bin/deploy-docker.sh
+++ b/bin/deploy-docker.sh
@@ -11,6 +11,7 @@ Usage:
$cmdname [-h] [-b] [-n]
-h Show this help message
-b Backup current database before attempting update
+ -f Ignore recent activity check before attempting restart
-n Do not pull docker images prior to starting
Docker deployment script
diff --git a/manage.py b/manage.py
index 52b38ba675..13cadc19d7 100644
--- a/manage.py
+++ b/manage.py
@@ -44,7 +44,11 @@
QuestionnaireBank,
add_static_questionnaire_bank,
)
-from portal.models.questionnaire_response import QuestionnaireResponse
+from portal.models.questionnaire_response import (
+ QuestionnaireResponse,
+ capture_patient_state,
+ present_before_after_state,
+)
from portal.models.relationship import add_static_relationships
from portal.models.research_study import (
BASE_RS_ID,
@@ -611,94 +615,6 @@ def update_qnr(qnr_id, link_id, actor, noop, replacement):
click.echo(message)
-@click.option('--subject_id', type=int, multiple=True, help="Subject user ID", required=True)
-@click.option(
- '--actor',
- default="__system__",
- required=False,
- help='email address of user taking this action, for audit trail'
-)
-@app.cli.command()
-def remove_post_withdrawn_qnrs(subject_id, actor):
- """Remove QNRs posted beyond subject's withdrawal date"""
- from sqlalchemy.types import DateTime
- from portal.cache import cache
- from portal.models.questionnaire_bank import trigger_date
-
- rs_id = 0 # only base study till need arises
- acting_user = get_actor(actor, require_admin=True)
-
- for subject_id in subject_id:
- # Confirm user has withdrawn
- subject = get_target(id=subject_id)
- study_id = subject.external_study_id
-
- # Make sure we're not working w/ stale timeline data
- QuestionnaireResponse.purge_qb_relationship(
- subject_id=subject_id,
- research_study_id=rs_id,
- acting_user_id=acting_user.id)
- cache.delete_memoized(trigger_date)
- update_users_QBT(
- subject_id,
- research_study_id=rs_id,
- invalidate_existing=True)
-
- deceased_date = None if not subject.deceased else subject.deceased.timestamp
- withdrawn_visit = QBT.withdrawn_qbd(subject_id, rs_id)
- if not withdrawn_visit:
- raise ValueError("Only applicable to withdrawn users")
-
- # Obtain all QNRs submitted beyond withdrawal date
- query = QuestionnaireResponse.query.filter(
- QuestionnaireResponse.document["authored"].astext.cast(DateTime) >
- withdrawn_visit.relative_start
- ).filter(
- QuestionnaireResponse.subject_id == subject_id).with_entities(
- QuestionnaireResponse.id,
- QuestionnaireResponse.questionnaire_bank_id,
- QuestionnaireResponse.qb_iteration,
- QuestionnaireResponse.document["questionnaire"]["reference"].
- label("instrument"),
- QuestionnaireResponse.document["authored"].
- label("authored")
- ).order_by(QuestionnaireResponse.document["authored"])
-
- for qnr in query:
- # match format in bug report for easy diff
- sub_padding = " "*(11 - len(str(subject_id)))
- stdy_padding = " "*(12 - len(study_id))
- out = (
- f"{sub_padding}{subject_id} | "
- f"{study_id}{stdy_padding}| "
- f"{withdrawn_visit.relative_start.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]} | "
- f"{qnr.authored} | ")
-
- # do not include any belonging to the last active visit, unless
- # they came in after deceased date
- if (
- qnr.questionnaire_bank_id == withdrawn_visit.qb_id and
- qnr.qb_iteration == withdrawn_visit.iteration and
- (not deceased_date or FHIR_datetime.parse(
- qnr.authored) < deceased_date)):
- print(f"{out}keep")
- continue
- if "irondemog" in qnr.instrument:
- print(f"{out}keep (indefinite)")
- continue
- print(f"{out}delete")
- db.session.delete(QuestionnaireResponse.query.get(qnr.id))
- auditable_event(
- message=(
- "deleted questionnaire response submitted beyond "
- "withdrawal visit as per request by PCCTC"),
- context="assessment",
- user_id=acting_user.id,
- subject_id=subject_id)
- db.session.commit()
- return
-
-
@click.option('--src_id', type=int, help="Source Patient ID (WILL BE DELETED!)")
@click.option('--tgt_id', type=int, help="Target Patient ID")
@click.option(
@@ -804,71 +720,6 @@ def merge_users(src_id, tgt_id, actor):
print(message)
-def capture_patient_state(patient_id):
- """Call to capture QBT and QNR state for patient, used for before/after"""
- qnrs = QuestionnaireResponse.qnr_state(patient_id)
- tl = QBT.timeline_state(patient_id)
- return {'qnrs': qnrs, 'timeline': tl}
-
-
-def present_before_after_state(user_id, external_study_id, before_state):
- from portal.dict_tools import dict_compare
- after_qnrs = QuestionnaireResponse.qnr_state(user_id)
- after_timeline = QBT.timeline_state(user_id)
- qnrs_lost_reference = []
-
- def visit_from_timeline(qb_name, qb_iteration, timeline_results):
- """timeline results have computed visit name - quick lookup"""
- for visit, name, iteration in timeline_results.values():
- if qb_name == name and qb_iteration == iteration:
- return visit
- raise ValueError(f"no visit found for {qb_name}, {qb_iteration}")
-
- # Compare results
- added_q, removed_q, modified_q, same = dict_compare(
- after_qnrs, before_state['qnrs'])
- assert not added_q
- assert not removed_q
-
- added_t, removed_t, modified_t, same = dict_compare(
- after_timeline, before_state['timeline'])
-
- if any((added_t, removed_t, modified_t, modified_q)):
- print(f"\nPatient {user_id} ({external_study_id}):")
- if modified_q:
- print("\tModified QNRs (old, new)")
- for mod in sorted(modified_q):
- print(f"\t\t{mod} {modified_q[mod][1]} ==>"
- f" {modified_q[mod][0]}")
- # If the QNR previously had a reference QB and since
- # lost it, capture for reporting.
- if (
- modified_q[mod][1][0] != "none of the above" and
- modified_q[mod][0][0] == "none of the above"):
- visit_name = visit_from_timeline(
- modified_q[mod][1][0],
- modified_q[mod][1][1],
- before_state["timeline"])
- qnrs_lost_reference.append((visit_name, modified_q[mod][1][2]))
- if added_t:
- print("\tAdditional timeline rows:")
- for item in sorted(added_t):
- print(f"\t\t{item} {after_timeline[item]}")
- if removed_t:
- print("\tRemoved timeline rows:")
- for item in sorted(removed_t):
- print(
- f"\t\t{item} "
- f"{before_state['timeline'][item]}")
- if modified_t:
- print(f"\tModified timeline rows: (old, new)")
- for item in sorted(modified_t):
- print(f"\t\t{item}")
- print(f"\t\t\t{modified_t[item][1]} ==> {modified_t[item][0]}")
-
- return after_qnrs, after_timeline, qnrs_lost_reference
-
-
@app.cli.command()
@click.option(
'--correct_overlaps', '-c', default=False, is_flag=True,
@@ -989,7 +840,7 @@ def preview_site_update(org_id, retired):
)
update_users_QBT(
patient.id, research_study_id=0, invalidate_existing=True)
- after_qnrs, after_timeline, qnrs_lost_reference = present_before_after_state(
+ after_qnrs, after_timeline, qnrs_lost_reference, _ = present_before_after_state(
patient.id, patient.external_study_id, patient_state[patient.id])
total_qnrs += len(patient_state[patient.id]['qnrs'])
total_qbs_completed_b4 += len(
diff --git a/portal/config/config.py b/portal/config/config.py
index 0631e4795e..d3fecfc076 100644
--- a/portal/config/config.py
+++ b/portal/config/config.py
@@ -1,8 +1,7 @@
"""Configuration"""
import os
-import redis
-
+from portal.factories.redis import create_redis
from portal.models.role import ROLE
SITE_CFG = 'site.cfg'
@@ -152,7 +151,7 @@ class BaseConfig(object):
REDIS_URL
)
- SESSION_REDIS = redis.from_url(SESSION_REDIS_URL)
+ SESSION_REDIS = create_redis(SESSION_REDIS_URL)
UPDATE_PATIENT_TASK_BATCH_SIZE = int(
os.environ.get('UPDATE_PATIENT_TASK_BATCH_SIZE', 16)
diff --git a/portal/config/eproms/Organization.json b/portal/config/eproms/Organization.json
index f60757572d..2a1998ceb6 100644
--- a/portal/config/eproms/Organization.json
+++ b/portal/config/eproms/Organization.json
@@ -3189,7 +3189,8 @@
},
{
"research_protocols": [
- {"name": "IRONMAN v3"}
+ {"name": "IRONMAN v3", "retired_as_of": "2024-02-12T12:00:00Z"},
+ {"name": "IRONMAN v5"}
],
"url": "http://us.truenth.org/identity-codes/research-protocol"
}
@@ -3217,7 +3218,8 @@
},
{
"research_protocols": [
- {"name": "IRONMAN v3"}
+ {"name": "IRONMAN v3", "retired_as_of": "2024-02-12T12:00:00Z"},
+ {"name": "IRONMAN v5"}
],
"url": "http://us.truenth.org/identity-codes/research-protocol"
}
diff --git a/portal/config/eproms/Questionnaire.json b/portal/config/eproms/Questionnaire.json
index 9a1cc079dc..2ace521d25 100644
--- a/portal/config/eproms/Questionnaire.json
+++ b/portal/config/eproms/Questionnaire.json
@@ -5851,6 +5851,42 @@
"display": "Other",
"code": "irondemog_v3.26.8"
}
+ },
+ {
+ "valueCoding": {
+ "display": "African",
+ "code": "irondemog_v3.26.9"
+ }
+ },
+ {
+ "valueCoding": {
+ "display": "Black",
+ "code": "irondemog_v3.26.10"
+ }
+ },
+ {
+ "valueCoding": {
+ "display": "Coloured",
+ "code": "irondemog_v3.26.11"
+ }
+ },
+ {
+ "valueCoding": {
+ "display": "Indian",
+ "code": "irondemog_v3.26.12"
+ }
+ },
+ {
+ "valueCoding": {
+ "display": "White / Caucasian",
+ "code": "irondemog_v3.26.13"
+ }
+ },
+ {
+ "valueCoding": {
+ "display": "Other",
+ "code": "irondemog_v3.26.14"
+ }
}
]
},
diff --git a/portal/factories/redis.py b/portal/factories/redis.py
new file mode 100644
index 0000000000..d5debfb579
--- /dev/null
+++ b/portal/factories/redis.py
@@ -0,0 +1,4 @@
+import redis
+
+
+def create_redis(url):
+    """Single factory for all of the app's redis connections"""
+    return redis.Redis.from_url(url)
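
The new factory gives every redis consumer in the tree (sessions, qb_timeline,
tasks, timeout_lock, and the healthcheck views below) a single construction
point. A minimal sketch of what that seam buys, assuming the fakeredis package
purely for illustration (it is not a dependency of this change, and the exact
patch target depends on how the caller imports the factory):

    # sketch: one place to swap the backend, e.g. for tests
    from unittest import mock
    import fakeredis  # illustrative assumption

    with mock.patch(
            'portal.factories.redis.create_redis',
            return_value=fakeredis.FakeStrictRedis()):
        ...  # code under test now sees an in-memory redis
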
diff --git a/portal/migrations/versions/3c871e710277_.py b/portal/migrations/versions/3c871e710277_.py
new file mode 100644
index 0000000000..85aed5bdfb
--- /dev/null
+++ b/portal/migrations/versions/3c871e710277_.py
@@ -0,0 +1,225 @@
+"""Correct user_consent regression issues raised by PR #4343
+
+Revision ID: 3c871e710277
+Revises: edb52362d013
+Create Date: 2024-01-25 20:04:48.109980
+
+"""
+from alembic import op
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.sql.functions import func
+
+from portal.cache import cache
+from portal.models.adherence_data import AdherenceData
+from portal.models.research_study import BASE_RS_ID, EMPRO_RS_ID
+from portal.models.qb_timeline import QBT, update_users_QBT
+from portal.models.questionnaire_bank import trigger_date
+from portal.models.questionnaire_response import (
+ QuestionnaireResponse,
+ capture_patient_state,
+ present_before_after_state,
+)
+from portal.models.user import User
+from portal.models.user_consent import UserConsent, consent_withdrawal_dates
+from portal.timeout_lock import ADHERENCE_DATA_KEY, CacheModeration
+
+Session = sessionmaker()
+
+
+# revision identifiers, used by Alembic.
+revision = '3c871e710277'
+down_revision = 'edb52362d013'
+
+
+# csv values direct from attachment in #IRONN-210, used to verify
+verified_user_consent_dates = (
+ {
+ 101: ("13-Dec-17", "13-Dec-17"),
+ 1073: ("16-Nov-18", "19-Sep-23"),
+ 1113: ("24-Oct-18", "27-Oct-21"),
+ 1186: ("19-Dec-18", "19-Dec-18"),
+ 1229: ("14-Jan-19", "10-Jan-24"),
+ 145: ("11-Jan-18", "17-Oct-18"),
+ 1524: ("12-Mar-19", "28-Oct-21"),
+ 1608: ("2-Apr-19", "7-Jun-21"),
+ 164: ("8-Jan-18", "7-Mar-18"),
+ 184: ("2-Feb-18", "2-May-19"),
+ 2049: ("4-Jul-19", "1-Jun-22"),
+ 209: ("22-Feb-18", "14-Dec-20"),
+ 224: ("28-Feb-18", "9-Mar-18"),
+ 2425: ("18-Sep-19", "26-May-21"),
+ 2547: ("25-Sep-19", "4-Aug-21"),
+ 2748: ("19-Nov-19", "22-Oct-22"),
+ 2845: ("23-Aug-19", "23-Sep-21"),
+ 2911: ("27-Nov-19", "9-Sep-23"),
+ 310: ("12-Apr-18", "16-Aug-18"),
+ 3251: ("16-Mar-20", "19-Jan-22"),
+ 3256: ("19-Mar-20", "5-May-22"),
+ 3427: ("26-May-20", "2-Sep-22"),
+ 3430: ("16-Jun-20", "15-May-21"),
+ 3455: ("4-Jun-20", "7-May-21"),
+ 3826: ("11-Nov-20", "30-Nov-20"),
+ 4316: ("19-Apr-21", "27-Apr-22"),
+ 4806: ("17-Feb-22", "13-Oct-22"),
+ 482: ("8-Aug-17", "28-Jul-20"),
+ 4861: ("28-Sep-21", "27-Feb-22"),
+ 4868: ("3-Mar-22", "18-Aug-22"),
+ 5004: ("5-Oct-21", "24-Sep-23"),
+ 5336: ("31-Jan-22", "7-Nov-23"),
+ 5852: ("5-Jul-22", "15-Apr-23"),
+ 5853: ("5-Jul-22", "20-Apr-23"),
+ 5959: ("26-Jul-22", "17-Aug-22"),
+ 6204: ("17-Sep-22", "25-Oct-23"),
+ 6218: ("27-Sep-22", "29-Oct-23"),
+ 641: ("7-Aug-18", "29-Dec-20"),
+ 653: ("9-Jul-18", "10-Sep-18"),
+ 6686: ("29-Jan-23", "12-Jun-23"),
+ # 719: ("29-May-18", "27-Aug-18"), as per story, leave alone
+ # 723: ("16-May-18", "25-Aug-23"), as per story, leave alone
+ 774: ("24-Oct-17", "9-Nov-17"),
+ 833: ("12-Sep-18", "11-Sep-23"),
+ 892: ("18-Sep-18", "5-Jan-20"),
+ 98: ("13-Dec-17", "22-Mar-18"),
+ 986: ("6-Sep-18", "22-Jun-23"),
+ 987: ("26-Jul-18", "14-Oct-19"),
+ },
+ {
+ 563: ("10-Nov-22", "16-Dec-22"),
+ 3591: ("1-Oct-22", "1-Oct-23"),
+ 5596: ("12-Jul-22", "12-Oct-22"),
+ 5747: ("6-Jun-22", "10-Jun-23"),
+ 5849: ("5-Jul-22", "12-Oct-22"),
+ 6026: ("4-Nov-22", "4-Nov-23"),
+ }
+)
+
+
+def upgrade():
+ """Correct UserConsents for any negatively affected patients
+
+    Prior to the release of 23.10.12.1, moving withdrawal dates wasn't
+    allowed. That made lookups for the last valid user_consent *prior*
+    to the withdrawal date reliable: user_consents land in the table in
+    insertion order, and the most recently deleted row prior to
+    withdrawal would have been the one in use.
+
+    The implementation of IRONN-210 enabled moving withdrawal dates,
+    and incorrectly assumed lookups of the previous valid consent could
+    simply work further back in the stack of user_consents. That held
+    only for the few patients tested, who didn't have multiple
+    user_consents on file prior to withdrawal.
+
+ To enable moving withdrawal dates, user_consents now allow multiple
+ of status "suspended", with the most recent by id taking precedence.
+ To determine the valid consent in use prior to withdrawal, look back
+ by insertion order (id) for the first deleted user consent prior to
+ status "suspended".
+
+    With the code changes in place, this migration need only locate any
+    patients potentially affected since the error was introduced and
+    recalculate their timelines.
+ """
+    # turns out, we have no reliable mechanism to determine which patients
+    # may have been affected, as the acceptance date was simply changed on
+    # the already withdrawn user_consent, and no audit of the modification
+    # was recorded. need to attempt a recalc and persist any changes for
+    # every user with a suspended user_consent and more than two rows (the
+    # original valid consent plus the suspended one) on any given research
+    # study.
+ bind = op.get_bind()
+ session = Session(bind=bind)
+
+ for study_id in (BASE_RS_ID, EMPRO_RS_ID):
+ # due to changes in adherence report for withdrawn users
+ # this query is now simply any withdrawn patient who isn't
+ # deleted from the system.
+ subquery = session.query(User.id).filter(
+ User.deleted_id.is_(None)).subquery()
+ query = session.query(UserConsent.user_id.distinct()).filter(
+ UserConsent.research_study_id == study_id).filter(
+ UserConsent.status == "suspended").filter(
+ UserConsent.user_id.in_(subquery))
+
+ delay_timeline_updates_till_after_migration = True
+ slow_report_details = False
+ delete_adh_ids = []
+ for row in query:
+ patient_id = row[0]
+ if patient_id in (719, 1186, 1305):
+ # special cases best left alone
+ continue
+ user = User.query.get(patient_id)
+ consent_date, withdrawal_date = consent_withdrawal_dates(
+ user, study_id)
+ if withdrawal_date is None:
+            # scenario happens when a withdrawn patient re-starts,
+            # i.e. the withdrawal was entered in error;
+            # no change needed in this situation
+ continue
+
+ if slow_report_details:
+ # report if dates don't match spreadsheet in IRONN-210
+ cd_str = '{dt.day}-{dt:%b}-{dt:%y}'.format(dt=consent_date)
+ wd_str = '{dt.day}-{dt:%b}-{dt:%y}'.format(dt=withdrawal_date)
+ try:
+ match = verified_user_consent_dates[study_id][patient_id]
+ if (cd_str, wd_str) != match:
+ print(f"user_id {patient_id} \t {cd_str} \t {wd_str}")
+ print(" vs expected:")
+ print(f"\t\t {match[0]} \t {match[1]}")
+ except KeyError:
+                # patient not in the IRONN-210 spreadsheet; nothing to compare
+ pass
+
+        # fake an adherence cache run to avoid unnecessary work and, more
+        # importantly, to prevent locking out the subsequent update needed
+        # after recognizing a real change below
+ adherence_cache_moderation = CacheModeration(key=ADHERENCE_DATA_KEY.format(
+ patient_id=patient_id,
+ research_study_id=study_id))
+ adherence_cache_moderation.run_now()
+
+ b4_state = capture_patient_state(patient_id)
+ update_users_QBT(
+ patient_id,
+ research_study_id=study_id,
+ invalidate_existing=True)
+ _, _, _, any_changes = present_before_after_state(
+ patient_id, study_id, b4_state)
+ if not any_changes:
+ continue
+
+ print(f"{patient_id} changed, purge old adherence data and relationships")
+ adherence_cache_moderation.reset()
+
+ QuestionnaireResponse.purge_qb_relationship(
+ subject_id=patient_id,
+ research_study_id=study_id,
+ acting_user_id=patient_id)
+ cache.delete_memoized(trigger_date)
+
+ if delay_timeline_updates_till_after_migration:
+ session.query(QBT).filter(QBT.user_id == patient_id).filter(
+ QBT.research_study_id == study_id).delete()
+ adh_ids = session.query(AdherenceData.id).filter(
+ AdherenceData.patient_id == patient_id).filter(
+ AdherenceData.rs_id_visit.like(f"{study_id}:%")
+ )
+ for ad_id in adh_ids:
+ delete_adh_ids.append(ad_id)
+ else:
+ update_users_QBT(
+ patient_id,
+ research_study_id=study_id,
+ invalidate_existing=True)
+
+    # SQLAlchemy can't combine the `like` expression with a bulk delete
+    # op; remove the collected adherence rows one at a time.
+    for ad_id in delete_adh_ids:
+ session.query(AdherenceData).filter(
+ AdherenceData.id == ad_id).delete()
+
+
+def downgrade():
+ """no downgrade available"""
+ pass
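
For reference on the bulk-delete limitation noted above: with the default
synchronize_session='evaluate', SQLAlchemy's Query.delete() refuses criteria
it can't evaluate in Python, such as the `like` used to collect the ids. A
sketch of the usual single-statement form, which simply disables that
synchronization (untested against this codebase's SQLAlchemy version):

    # sketch: single-statement alternative to the per-id loop
    session.query(AdherenceData).filter(
        AdherenceData.id.in_(delete_adh_ids)).delete(
        synchronize_session=False)
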
diff --git a/portal/migrations/versions/66368e673005_.py b/portal/migrations/versions/66368e673005_.py
new file mode 100644
index 0000000000..cd021f96a6
--- /dev/null
+++ b/portal/migrations/versions/66368e673005_.py
@@ -0,0 +1,73 @@
+"""IRONN-225 update adherence data for expired EMPRO users
+
+Revision ID: 66368e673005
+Revises: d1f3ed8d16ef
+Create Date: 2023-12-11 16:56:10.427854
+
+"""
+from alembic import op
+from datetime import datetime
+from sqlalchemy.orm import sessionmaker
+
+from portal.models.user import User
+from portal.models.user_consent import consent_withdrawal_dates
+
+
+# revision identifiers, used by Alembic.
+revision = '66368e673005'
+down_revision = 'd1f3ed8d16ef'
+
+Session = sessionmaker()
+
+
+def upgrade():
+    # IRONN-225 noted expired EMPRO users' adherence data showed
+    # `not yet available`. Code corrected; need to force renewal
+    # for those affected.
+
+ bind = op.get_bind()
+ session = Session(bind=bind)
+
+ now = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+ patient_ids = []
+    # get list of non-deleted users whose 12th-month expiration
+    # has already passed (month 12 = baseline + zero-indexed iteration 10)
+ for patient_id in session.execute(
+ "SELECT DISTINCT(user_id) FROM qb_timeline JOIN users"
+ " ON users.id = user_id WHERE deleted_id IS NULL"
+ " AND research_study_id = 1 AND qb_iteration = 10"
+ f" AND status = 'expired' AND at < '{now}'"):
+ patient_ids.append(patient_id[0])
+
+ # purge their respective rows from adherence cache, IFF status
+ # shows IRONN-225 symptom.
+ rs_visit = "1:Month 12"
+ for patient_id in patient_ids:
+ status = session.execute(
+ "SELECT data->>'status' FROM adherence_data WHERE"
+ f" patient_id = {patient_id} AND"
+ f" rs_id_visit = '{rs_visit}'"
+ ).first()
+ if status and status[0] != "Not Yet Available":
+ continue
+
+        # if the patient is withdrawn, skip over; they'll get picked
+        # up in migration 3c871e710277, going out in the same release
+ patient = User.query.get(patient_id)
+ _, withdrawal_date = consent_withdrawal_dates(
+ patient, 1)
+ if withdrawal_date:
+ continue
+
+ # purge the user's EMPRO adherence rows to force refresh
+ session.execute(
+ "DELETE FROM adherence_data WHERE"
+ f" patient_id = {patient_id} AND"
+ f" rs_id_visit like '1:%'"
+ )
+
+
+def downgrade():
+ # No reasonable downgrade
+ pass
+
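
The raw statements above interpolate patient_id and dates directly into the
SQL, which is workable in a migration over trusted internal values; the same
lookup can also be written with bound parameters, sketched here:

    from sqlalchemy import text

    # sketch: the status lookup above, with bound parameters
    status = session.execute(
        text(
            "SELECT data->>'status' FROM adherence_data"
            " WHERE patient_id = :pid AND rs_id_visit = :visit"),
        {"pid": patient_id, "visit": rs_visit},
    ).first()
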
diff --git a/portal/migrations/versions/edb52362d013_.py b/portal/migrations/versions/edb52362d013_.py
new file mode 100644
index 0000000000..6dced10eae
--- /dev/null
+++ b/portal/migrations/versions/edb52362d013_.py
@@ -0,0 +1,144 @@
+"""Remove/correct bogus user_consents as per IRONN-210
+
+Revision ID: edb52362d013
+Revises: 66368e673005
+Create Date: 2024-01-11 16:23:34.961937
+
+"""
+from alembic import op
+from datetime import datetime
+from flask import current_app
+from sqlalchemy.orm import sessionmaker
+
+from portal.models.user_consent import UserConsent
+
+# revision identifiers, used by Alembic.
+revision = 'edb52362d013'
+down_revision = '66368e673005'
+
+Session = sessionmaker()
+
+
+def user_consent_manual_cleanup(session):
+    # this turned into a detailed effort of obtaining original dates
+    # from MR and correcting a number of bogus rows in the user_consent
+    # table. The hand-curated list below comes from attachments in
+    # https://movember.atlassian.net/browse/IRONN-210
+    # run these first, then confirm everything looks clean.
+ now = datetime.utcnow()
+ version = current_app.config.metadata['version']
+
+ admin_id = session.execute(
+ "SELECT id FROM users WHERE email = '__system__'"
+    ).first()[0]
+
+ def audit_insert(subject_id, user_consent_id, acceptance_date=None):
+ msg = f"remove bogus user_consent {user_consent_id} per IRONN-210"
+ if acceptance_date:
+ msg = f"corrected user_consent {user_consent_id} to {acceptance_date} per IRONN-210"
+ print(msg)
+ insert = (
+ "INSERT INTO AUDIT"
+ " (user_id, subject_id, context, timestamp, version, comment)"
+ " VALUES"
+ f"({admin_id}, {subject_id}, 'consent',"
+ f" '{now}', '{version}', '{msg}')")
+ session.execute(insert)
+
+ def delete_user_consent(user_id, user_consent_id):
+ return UserConsent.query.filter(
+ UserConsent.id == user_consent_id).filter(
+ UserConsent.user_id == user_id).delete()
+
+ def update_user_consent(user_id, user_consent_id, acceptance_date):
+ uc = UserConsent.query.filter(
+ UserConsent.id == user_consent_id).filter(
+ UserConsent.user_id == user_id).first()
+ if uc:
+ uc.acceptance_date = acceptance_date
+ return True
+
+ bogus_values = (
+ {'user_id': 101, 'user_consent_id': 219},
+ {'user_id': 145, 'user_consent_id': 1238},
+ {'user_id': 164, 'user_consent_id': 218},
+ {'user_id': 224, 'user_consent_id': 211},
+ {'user_id': 310, 'user_consent_id': 1198},
+ {'user_id': 310, 'user_consent_id': 1199},
+ {'user_id': 310, 'user_consent_id': 1200},
+ {'user_id': 4316, 'user_consent_id': 5033},
+ {'user_id': 4316, 'user_consent_id': 5032},
+ {'user_id': 98, 'user_consent_id': 339},
+ {'user_id': 774, 'user_consent_id': 897},
+ {'user_id': 723, 'user_consent_id': 551},
+ {'user_id': 653, 'user_consent_id': 820},
+ {'user_id': 563, 'user_consent_id': 5896},
+ {'user_id': 6686, 'user_consent_id': 6117},
+ )
+
+ correct_values = (
+ {'user_id': 986, 'user_consent_id': 7434, 'acceptance_date': '2023/06/22 18:00:00'},
+ )
+ for row in correct_values:
+ if update_user_consent(
+ user_id=row['user_id'],
+ user_consent_id=row['user_consent_id'],
+ acceptance_date=row['acceptance_date']):
+ audit_insert(
+ subject_id=row['user_id'],
+ user_consent_id=row['user_consent_id'],
+ acceptance_date=row['acceptance_date'])
+ session.commit()
+
+ for row in bogus_values:
+ if delete_user_consent(
+ user_id=row['user_id'],
+ user_consent_id=row['user_consent_id']):
+ audit_insert(
+ subject_id=row['user_id'],
+ user_consent_id=row['user_consent_id'])
+ session.commit()
+
+
+def upgrade():
+ """Correct UserConsents for any negatively affected patients
+
+    Prior to the release of 23.10.12.1, moving withdrawal dates wasn't
+    allowed. That made lookups for the last valid user_consent *prior*
+    to the withdrawal date reliable: user_consents land in the table in
+    insertion order, and the most recently deleted row prior to
+    withdrawal would have been the one in use.
+
+    The implementation of IRONN-210 enabled moving withdrawal dates,
+    and incorrectly assumed lookups of the previous valid consent could
+    simply work further back in the stack of user_consents. That held
+    only for the few patients tested, who didn't have multiple
+    user_consents on file prior to withdrawal.
+
+ To enable moving withdrawal dates, user_consents now allow multiple
+ of status "suspended", with the most recent by id taking precedence.
+ To determine the valid consent in use prior to withdrawal, look back
+ by insertion order (id) for the first deleted user consent prior to
+ status "suspended".
+
+    With the code changes in place, this migration need only locate any
+    patients potentially affected since the error was introduced and
+    recalculate their timelines.
+ """
+    # turns out, we have no reliable mechanism to determine which patients
+    # may have been affected, as the acceptance date was simply changed on
+    # the already withdrawn user_consent, and no audit of the modification
+    # was recorded. need to attempt a recalc and persist any changes for
+    # every user with a suspended user_consent and more than two rows (the
+    # original valid consent plus the suspended one) on any given research
+    # study.
+ bind = op.get_bind()
+ session = Session(bind=bind)
+
+ user_consent_manual_cleanup(session)
+ session.commit()
+
+
+def downgrade():
+ """no downgrade available"""
+ pass
diff --git a/portal/models/adherence_data.py b/portal/models/adherence_data.py
index a633d27709..f704736561 100644
--- a/portal/models/adherence_data.py
+++ b/portal/models/adherence_data.py
@@ -2,9 +2,10 @@
from datetime import datetime, timedelta
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy import UniqueConstraint
+import re
from ..database import db
-
+
+withdrawn = " post-withdrawn"
+
+
class AdherenceData(db.Model):
""" Cached adherence report data
@@ -35,16 +36,20 @@ class AdherenceData(db.Model):
'patient_id', 'rs_id_visit', name='_adherence_unique_patient_visit'),)
@staticmethod
- def rs_visit_string(rs_id, visit_string):
+ def rs_visit_string(rs_id, visit_string, post_withdrawn=False):
"""trivial helper to build rs_id_visit string into desired format"""
assert isinstance(rs_id, int)
assert visit_string
+ if post_withdrawn:
+ visit_string += withdrawn
return f"{rs_id}:{visit_string}"
def rs_visit_parse(self):
"""break parts of rs_id and visit_string out of rs_id_visit field"""
rs_id, visit_string = self.rs_id_visit.split(':')
assert visit_string
+ if visit_string.endswith(withdrawn):
+ visit_string = visit_string[:-(len(withdrawn))]
return int(rs_id), visit_string
@staticmethod
@@ -92,15 +97,24 @@ def sort_by_visit_key(d):
:returns: list of values sorted by keys
"""
+ pattern = re.compile(f"Month ([0-9]+)({withdrawn})?")
def sort_key(key):
if key == 'Baseline':
return 0, 0
+ elif key == f"Baseline{withdrawn}":
+ return 0, 1
elif key == 'Indefinite':
return 2, 0
+ elif key == f'Indefinite{withdrawn}':
+ return 2, 1
else:
- month, num = key.split(" ")
- assert month == "Month"
- return 1, int(num)
+ match = pattern.match(key)
+            if not match:
+ raise ValueError(f"couldn't parse key {key}")
+ month_num = int(match.groups()[0])
+ if match.groups()[1]:
+ month_num += 100
+ return 1, month_num
sorted_keys = sorted(d.keys(), key=sort_key)
sorted_values = [d[key] for key in sorted_keys]
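
The key function maps each visit label to a (section, position) tuple:
Baseline sorts first, months in the middle, Indefinite last. Post-withdrawn
Baseline and Indefinite rows land immediately after their regular
counterparts, while the +100 offset pushes post-withdrawn months past every
regular month. Expected ordering for a sample of keys:

    # sketch: ordering produced by sort_key
    keys = ["Month 12", "Baseline", "Indefinite",
            "Month 3 post-withdrawn", "Month 3"]
    # sorted(keys, key=sort_key) ==>
    # ['Baseline', 'Month 3', 'Month 12',
    #  'Month 3 post-withdrawn', 'Indefinite']
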
diff --git a/portal/models/communication.py b/portal/models/communication.py
index c37753b7c3..034d74f707 100644
--- a/portal/models/communication.py
+++ b/portal/models/communication.py
@@ -115,6 +115,13 @@ def _lookup_assessment_link():
             '<a href="{ae_link}">{label}</a>'.format(
ae_link=access_link(next_step='present_needed'), label=label))
+ def _lookup_main_study_registry_link():
+ label = _('Learn more about the IRONMAN registry')
+ registry_link = 'https://ironmanregistry.org/'
+ return (
+            '<a href="{registry_link}">{label}</a>'.format(
+ registry_link=registry_link, label=label))
+
def _lookup_clinic_name():
if user.organizations:
return _(user.organizations[0].name)
diff --git a/portal/models/message.py b/portal/models/message.py
index 5a2180a592..36209ca251 100644
--- a/portal/models/message.py
+++ b/portal/models/message.py
@@ -116,6 +116,11 @@ def recipients(self):
def recipients(self, value):
"""Set recipients_id if a user is found w/ matching email"""
+ if value is None:
+ self._recipients = None
+ self.recipient_id = None
+ return
+
# As the schema only tracks a single recipient_id, capture abuse;
# don't allow comma in recipients till schema can capture
if ',' in value:
@@ -152,6 +157,10 @@ def send_message(self, cc_address=None):
NB the cc isn't persisted with the rest of the record.
"""
+        if not self.recipients:
+            current_app.logger.error(
+                "can't email w/o recipients; failed to send "
+                f"'{self.subject}' to user {self.recipient_id}")
+            return
message = Message(
subject=self.subject,
extra_headers=extra_headers(),
diff --git a/portal/models/qb_status.py b/portal/models/qb_status.py
index 83711bb620..c6cf1e7740 100644
--- a/portal/models/qb_status.py
+++ b/portal/models/qb_status.py
@@ -77,6 +77,11 @@ def _sync_timeline(self):
# locate current qb - last found with start <= self.as_of_date
cur_index, cur_qbd = None, None
for i, qbd in zip(range(len(self.__ordered_qbs)), self.__ordered_qbs):
+ if self._withdrawal_date and (
+ qbd.relative_start > self._withdrawal_date):
+ # as we now keep timeline data beyond withdrawal, break
+                # out once a visit starts beyond the withdrawal date
+ break
if qbd.relative_start <= self.as_of_date:
cur_index = i
cur_qbd = qbd
@@ -84,22 +89,24 @@ def _sync_timeline(self):
break
# w/o a cur, probably hasn't started, set expired and leave
- if not cur_qbd and (
- self.__ordered_qbs[0].relative_start > self.as_of_date):
- trace(
- "no current QBD (too early); first qb doesn't start till"
- " {} vs as_of {}".format(
- self.__ordered_qbs[0].relative_start, self.as_of_date))
+ if not cur_qbd:
+ if self.__ordered_qbs[0].relative_start > self.as_of_date:
+ trace(
+ "no current QBD (too early); first qb doesn't start till"
+ " {} vs as_of {}".format(
+ self.__ordered_qbs[0].relative_start, self.as_of_date))
+ else:
+ current_app.logger.error(f"patient {self.user.id} w/o cur_qbd??")
self._overall_status = OverallStatus.expired
self.next_qbd = self.__ordered_qbs[0]
return
- if cur_index > 0:
+        if cur_index is not None and cur_index > 0:
self.prev_qbd = self.__ordered_qbs[cur_index-1]
else:
self.prev_qbd = None
- if cur_index < len(self.__ordered_qbs) - 1:
+        if cur_index is not None and (
+                cur_index < len(self.__ordered_qbs) - 1):
self.next_qbd = self.__ordered_qbs[cur_index+1]
else:
self.next_qbd = None
diff --git a/portal/models/qb_timeline.py b/portal/models/qb_timeline.py
index 08f34e6de7..aee9822afc 100644
--- a/portal/models/qb_timeline.py
+++ b/portal/models/qb_timeline.py
@@ -4,7 +4,6 @@
from dateutil.relativedelta import relativedelta
from flask import current_app
-import redis
from redis.exceptions import ConnectionError
from sqlalchemy.types import Enum as SQLA_Enum
from werkzeug.exceptions import BadRequest
@@ -13,13 +12,9 @@
from ..cache import cache, TWO_HOURS
from ..database import db
from ..date_tools import FHIR_datetime, RelativeDelta
-from ..factories.celery import create_celery
+from ..factories.redis import create_redis
from ..set_tools import left_center_right
-from ..timeout_lock import (
- ADHERENCE_DATA_KEY,
- CacheModeration,
- TimeoutLock,
-)
+from ..timeout_lock import ADHERENCE_DATA_KEY, CacheModeration, TimeoutLock
from ..trace import trace
from .adherence_data import AdherenceData
from .overall_status import OverallStatus
@@ -95,6 +90,8 @@ def timeline_state(user_id):
results = dict()
for i in tl:
qb = QuestionnaireBank.query.get(i.qb_id)
+ if qb is None:
+ continue
recur_id = qb.recurs[0].id if qb.recurs else None
vn = visit_name(QBD(
relative_start=None,
@@ -105,25 +102,6 @@ def timeline_state(user_id):
vn, name_map[i.qb_id], i.qb_iteration]
return results
- @staticmethod
- def withdrawn_qbd(user_id, research_study_id):
- """Returns active QBD at time of user's withdrawal if applicable
-
- :returns: a QBD representing the visit active at point of withdrawal
- from given study, using `relative_start` to hold date-time of
- withdrawal; or None if n/a
- """
- qbt = QBT.query.filter(QBT.user_id == user_id).filter(
- QBT.research_study_id == research_study_id).filter(
- QBT.status == OverallStatus.withdrawn).first()
- if not qbt:
- return None
- return QBD(
- relative_start=qbt.at,
- iteration=qbt.qb_iteration,
- recur_id=qbt.qb_recur_id,
- qb_id=qbt.qb_id)
-
class AtOrderedList(list):
"""Specialize ``list`` to maintain insertion order and ``at`` attribute
@@ -289,6 +267,8 @@ def calc_and_adjust_start(user, research_study_id, qbd, initial_trigger):
return qbd.relative_start
delta = users_trigger - initial_trigger
+ # this case should no longer be possible; raise the alarm
+    raise RuntimeError(f"found initial trigger to differ by: {delta}")
current_app.logger.debug("calc_and_adjust_start delta: %s", str(delta))
return qbd.relative_start + delta
@@ -604,7 +584,7 @@ def ordered_qbs(user, research_study_id, classification=None):
This does NOT include the indefinite classification unless requested,
as it plays by a different set of rules.
- :param user: the user to lookup
+ :param user: the user to look up
:param research_study_id: the research study being processed
:param classification: set to ``indefinite`` for that special handling
:returns: QBD for each (QB, iteration, recur)
@@ -723,11 +703,6 @@ def ordered_qbs(user, research_study_id, classification=None):
if transition_now:
rp_flyweight.transition()
- # done if user withdrew before QB starts
- if withdrawal_date and withdrawal_date < rp_flyweight.cur_start:
- trace("withdrawn as of {}".format(withdrawal_date))
- break
-
rp_flyweight.adjust_start()
yield rp_flyweight.cur_qbd
@@ -786,6 +761,17 @@ def invalidate_users_QBT(user_id, research_study_id):
for ad in adh_data:
db.session.delete(ad)
+ if not current_app.config.get("TESTING", False):
+        # clear the timeout lock as well, since a refresh is needed
+        # after deletion of the adherence data. skipped when TESTING,
+        # where the extra access leaves postgres believing a deadlock
+        # condition exists and tables can't be dropped between test runs
+ cache_moderation = CacheModeration(key=ADHERENCE_DATA_KEY.format(
+ patient_id=user_id,
+ research_study_id=research_study_id))
+ cache_moderation.reset()
+
+
# args have to match order and values - no wild carding avail
as_of = QB_StatusCacheKey().current()
if research_study_id != 'all':
@@ -886,6 +872,7 @@ def update_users_QBT(user_id, research_study_id, invalidate_existing=False):
def attempt_update(user_id, research_study_id, invalidate_existing):
"""Updates user's QBT or raises if lock is unattainable"""
from .qb_status import patient_research_study_status
+ from ..tasks import LOW_PRIORITY, cache_single_patient_adherence_data
# acquire a multiprocessing lock to prevent multiple requests
# from duplicating rows during this slow process
@@ -933,7 +920,7 @@ def attempt_update(user_id, research_study_id, invalidate_existing):
trace(f"user determined ineligible for {research_study_id}")
return
- # Create time line for user, from initial trigger date
+    # Create timeline for user, from initial trigger date
qb_generator = ordered_qbs(user, research_study_id)
user_qnrs = QNR_results(user, research_study_id)
@@ -979,7 +966,7 @@ def attempt_update(user_id, research_study_id, invalidate_existing):
# QBs - one needing to be removed (say the old
# month 36) in favor of the skipped new (say
# month 33), and the last legit old one (say
- # month 30) needing it's endpoint adjusted
+ # month 30) needing its endpoint adjusted
# further below.
remove_qb_id = pending_qbts[i].qb_id
remove_iteration = pending_qbts[i].qb_iteration
@@ -1054,7 +1041,7 @@ def attempt_update(user_id, research_study_id, invalidate_existing):
"Problematic qbd: %s", user_id, str(qbd))
continue
- # Must double check overlap; may no longer be true, if
+ # Must double-check overlap; may no longer be true, if
# last_posted_index was one before...
if pending_qbts[last_posted_index].at > start:
# For questionnaires with common instrument names that
@@ -1170,25 +1157,27 @@ def attempt_update(user_id, research_study_id, invalidate_existing):
pending_qbts.append(QBT(
at=expired_date, status='expired', **kwargs))
- # If user withdrew from study - remove any rows post withdrawal
+ # If user withdrew from study, add a row marking the withdrawal
+ # to the user's timeline, at the proper sequence.
num_stored = 0
_, withdrawal_date = consent_withdrawal_dates(
user, research_study_id=research_study_id)
if withdrawal_date:
trace("withdrawn as of {}".format(withdrawal_date))
- store_rows = [
- qbt for qbt in pending_qbts if qbt.at < withdrawal_date]
- if store_rows:
- # To satisfy the `Withdrawn sanity check` in qb_status
- # the withdrawn row needs to match the last valid qb
- kwargs['qb_id'] = store_rows[-1].qb_id
- kwargs['qb_iteration'] = store_rows[-1].qb_iteration
- kwargs['qb_recur_id'] = store_rows[-1].qb_recur_id
-
- store_rows.append(QBT(
- at=withdrawal_date,
- status='withdrawn',
- **kwargs))
+ j = 0
+ for qbt in pending_qbts:
+ if qbt.at > withdrawal_date:
+ break
+ j += 1
+ if j > 0:
+            # carry the last active visit on the withdrawn row for qb_status
+ kwargs['qb_id'] = pending_qbts[j-1].qb_id
+ kwargs['qb_iteration'] = pending_qbts[j-1].qb_iteration
+ kwargs['qb_recur_id'] = pending_qbts[j-1].qb_recur_id
+ store_rows = (
+ pending_qbts[0:j] +
+ [QBT(at=withdrawal_date, status='withdrawn', **kwargs)] +
+ pending_qbts[j:])
check_for_overlaps(store_rows)
db.session.add_all(store_rows)
num_stored = len(store_rows)
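
Since pending_qbts is already ordered by `at`, the split index the j-loop
computes is a textbook insertion point; an equivalent formulation with the
standard library, for reference:

    import bisect

    # sketch: same j as the loop above - rows with at <= withdrawal_date
    # stay ahead of the withdrawn marker
    j = bisect.bisect_right(
        [qbt.at for qbt in pending_qbts], withdrawal_date)
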
@@ -1204,19 +1193,12 @@ def attempt_update(user_id, research_study_id, invalidate_existing):
db.session.commit()
# With fresh calculation of a user's timeline, queue update of
- # user's adherence data as celery job, avoiding recursive issues
- # if this call happens to be part of an already running update
- cache_moderation = CacheModeration(key=ADHERENCE_DATA_KEY.format(
- patient_id=user_id,
- research_study_id=research_study_id))
- if not cache_moderation.run_recently():
- kwargs = {
- 'patient_id': user_id,
- 'research_study_id': research_study_id}
- celery = create_celery(current_app)
- celery.send_task(
- 'portal.tasks.cache_adherence_data_task',
- kwargs=kwargs)
+ # user's adherence data as celery job
+ kwargs = {
+ 'patient_id': user_id,
+ 'research_study_id': research_study_id}
+ cache_single_patient_adherence_data.apply_async(
+ kwargs=kwargs, queue=LOW_PRIORITY, retry=False)
success = False
for attempt in range(1, 6):
@@ -1262,8 +1244,7 @@ def __init__(self):
# Lookup the configured expiration of the matching cache
# container ("DOGPILE_CACHE_REGIONS" -> "assessment_cache_region")
if self.redis is None:
- self.redis = redis.StrictRedis.from_url(
- current_app.config['REDIS_URL'])
+ self.redis = create_redis(current_app.config['REDIS_URL'])
regions = current_app.config['DOGPILE_CACHE_REGIONS']
for region_name, duration in regions:
if region_name == self.region_name:
diff --git a/portal/models/questionnaire_response.py b/portal/models/questionnaire_response.py
index 2084135813..0353ffa6f8 100644
--- a/portal/models/questionnaire_response.py
+++ b/portal/models/questionnaire_response.py
@@ -842,7 +842,7 @@ def required_qs(self, qb_id):
def aggregate_responses(
instrument_ids, current_user, research_study_id, patch_dstu2=False,
- ignore_qb_requirement=False, celery_task=None):
+ ignore_qb_requirement=False, celery_task=None, patient_ids=None):
"""Build a bundle of QuestionnaireResponses
:param instrument_ids: list of instrument_ids to restrict results to
@@ -852,13 +852,19 @@ def aggregate_responses(
:param patch_dstu2: set to make bundle DSTU2 compliant
:param ignore_qb_requirement: set to include all questionnaire responses
:param celery_task: if defined, send occasional progress updates
+ :param patient_ids: if defined, limit result set to given patient list
+ NB: research_study_id not used to filter / restrict query set, but rather
+ for lookup of visit name. Use instrument_ids to restrict query set.
"""
from .qb_timeline import qb_status_visit_name # avoid cycle
# Gather up the patient IDs for whom current user has 'view' permission
user_ids = patients_query(
- current_user, include_test_role=False).with_entities(User.id)
+ current_user,
+ include_test_role=False,
+ filter_by_ids=patient_ids,
+ ).with_entities(User.id)
annotated_questionnaire_responses = []
questionnaire_responses = QuestionnaireResponse.query.filter(
@@ -920,7 +926,7 @@ def aggregate_responses(
'resource': document,
# Todo: return URL to individual QuestionnaireResponse resource
'fullUrl': url_for(
- '.assessment',
+ 'assessment_engine_api.assessment',
patient_id=subject.id,
_external=True,
),
@@ -1211,3 +1217,76 @@ def first_last_like_qnr(qnr):
continue
last = q
return initial, last
+
+
+def capture_patient_state(patient_id):
+ """Call to capture QBT and QNR state for patient, used for before/after"""
+ from .qb_timeline import QBT
+ qnrs = QuestionnaireResponse.qnr_state(patient_id)
+ tl = QBT.timeline_state(patient_id)
+ return {'qnrs': qnrs, 'timeline': tl}
+
+
+def present_before_after_state(user_id, external_study_id, before_state):
+ from .qb_timeline import QBT
+ from ..dict_tools import dict_compare
+ after_qnrs = QuestionnaireResponse.qnr_state(user_id)
+ after_timeline = QBT.timeline_state(user_id)
+ qnrs_lost_reference = []
+ any_change_noted = False
+
+ def visit_from_timeline(qb_name, qb_iteration, timeline_results):
+ """timeline results have computed visit name - quick lookup"""
+ for visit, name, iteration in timeline_results.values():
+ if qb_name == name and qb_iteration == iteration:
+ return visit
+ raise ValueError(f"no visit found for {qb_name}, {qb_iteration}")
+
+ # Compare results
+ added_q, removed_q, modified_q, same = dict_compare(
+ after_qnrs, before_state['qnrs'])
+ assert not added_q
+ assert not removed_q
+
+ added_t, removed_t, modified_t, same = dict_compare(
+ after_timeline, before_state['timeline'])
+
+ if any((added_t, removed_t, modified_t, modified_q)):
+ any_change_noted = True
+ print(f"\nPatient {user_id} ({external_study_id}):")
+ if modified_q:
+ any_change_noted = True
+ print("\tModified QNRs (old, new)")
+ for mod in sorted(modified_q):
+ print(f"\t\t{mod} {modified_q[mod][1]} ==>"
+ f" {modified_q[mod][0]}")
+ # If the QNR previously had a reference QB and since
+ # lost it, capture for reporting.
+ if (
+ modified_q[mod][1][0] != "none of the above" and
+ modified_q[mod][0][0] == "none of the above"):
+ visit_name = visit_from_timeline(
+ modified_q[mod][1][0],
+ modified_q[mod][1][1],
+ before_state["timeline"])
+ qnrs_lost_reference.append((visit_name, modified_q[mod][1][2]))
+ if added_t:
+ any_change_noted = True
+ print("\tAdditional timeline rows:")
+ for item in sorted(added_t):
+ print(f"\t\t{item} {after_timeline[item]}")
+ if removed_t:
+ any_change_noted = True
+ print("\tRemoved timeline rows:")
+ for item in sorted(removed_t):
+ print(
+ f"\t\t{item} "
+ f"{before_state['timeline'][item]}")
+ if modified_t:
+ any_change_noted = True
+ print(f"\tModified timeline rows: (old, new)")
+ for item in sorted(modified_t):
+ print(f"\t\t{item}")
+ print(f"\t\t\t{modified_t[item][1]} ==> {modified_t[item][0]}")
+
+ return after_qnrs, after_timeline, qnrs_lost_reference, any_change_noted
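
Together with capture_patient_state above, this forms the snapshot/mutate/diff
pattern shared by manage.py and migration 3c871e710277; a condensed sketch of
the calling convention:

    # sketch: the calling pattern, as used by the migration
    before = capture_patient_state(patient_id)
    update_users_QBT(
        patient_id, research_study_id=study_id, invalidate_existing=True)
    _, _, qnrs_lost_reference, any_changes = present_before_after_state(
        patient_id, study_id, before)
    if any_changes:
        ...  # e.g. purge stale adherence rows and QNR-QB relationships
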
diff --git a/portal/models/reporting.py b/portal/models/reporting.py
index af9ea6e40f..6292f383e8 100644
--- a/portal/models/reporting.py
+++ b/portal/models/reporting.py
@@ -22,7 +22,7 @@
from .overall_status import OverallStatus
from .questionnaire_response import aggregate_responses
from .qb_status import QB_Status
-from .qb_timeline import qb_status_visit_name
+from .qb_timeline import QBT, qb_status_visit_name
from .questionnaire_bank import visit_name
from .questionnaire_response import (
QNR_results,
@@ -35,7 +35,7 @@
from .user_consent import consent_withdrawal_dates
-def single_patient_adherence_data(patient, as_of_date, research_study_id):
+def single_patient_adherence_data(patient_id, research_study_id):
"""Update any missing (from cache) adherence data for patient
NB: all changes are side effects, persisted in adherence_data table.
@@ -48,8 +48,9 @@ def single_patient_adherence_data(patient, as_of_date, research_study_id):
:returns: number of added rows
"""
+ as_of_date = datetime.utcnow()
cache_moderation = CacheModeration(key=ADHERENCE_DATA_KEY.format(
- patient_id=patient.id,
+ patient_id=patient_id,
research_study_id=research_study_id))
if cache_moderation.run_recently():
return 0
@@ -86,19 +87,22 @@ def patient_data(patient):
def general_row_detail(row, patient, qbd):
"""Add general (either study) data for given (patient, qbd)"""
# purge values that may have previous row data set and aren't certain
- for key in "completion_date", "oow_completion_date", "entry_method":
+ for key in "oow_completion_date", "entry_method", "visit":
row.pop(key, None)
row['qb'] = qbd.questionnaire_bank.name
- row['visit'] = visit_name(qbd)
- if row['status'] == 'Completed':
- row['completion_date'] = report_format(
- qbd.completed_date(patient.id)) or ""
- row['oow_completion_date'] = report_format(
- qbd.oow_completed_date(patient.id)) or ""
+
+        # if withdrawn, emit a row with little more than that status
if row['status'] == 'Withdrawn':
- # visit unreliable when withdrawn - clear
- row['visit'] = ''
+ # use date of withdrawal for "completion date"
+ _, withdrawal_date = consent_withdrawal_dates(
+ user=patient, research_study_id=research_study_id)
+ row['completion_date'] = report_format(withdrawal_date)
+ return
+ row['visit'] = visit_name(qbd)
+ row['completion_date'] = (
+ report_format(qbd.completed_date(patient.id))
+ if row['status'] == 'Completed' else '')
entry_method = QNR_results(
patient,
research_study_id=research_study_id,
@@ -109,6 +113,9 @@ def general_row_detail(row, patient, qbd):
def empro_row_detail(row, ts_reporting):
"""Add EMPRO specifics"""
+ if not ts_reporting:
+ return
+
# Rename column header for EMPRO
if 'completion_date' in row:
row['EMPRO_questionnaire_completion_date'] = (
@@ -135,6 +142,7 @@ def empro_row_detail(row, ts_reporting):
row['content_domains_accessed'] = ', '.join(da) if da else ""
added_rows = 0
+ patient = User.query.get(patient_id)
qb_stats = QB_Status(
user=patient,
research_study_id=research_study_id,
@@ -152,20 +160,30 @@ def empro_row_detail(row, ts_reporting):
# build up data until we find valid cache for patient's history
status = str(qb_stats.overall_status)
row = patient_data(patient)
+ row["status"] = status
if status == "Expired" and research_study_id == EMPRO_RS_ID:
- row["status"] = "Not Yet Available"
- else:
- row["status"] = status
-
+ # Expired status ambiguous for EMPRO study.
+ # - If the last available questionnaire in the study is present in
+ # the user's timeline and the expired date has passed, it is
+ # legitimately "Expired".
+ # - Otherwise, due to complex business rules around delayed
+ # start/availability mark as "Not Yet Available"
+            # iteration 10 == the month 12 visit (baseline + 11
+            # zero-indexed iterations)
+            exp_row = QBT.query.filter(QBT.research_study_id == EMPRO_RS_ID).filter(
+                QBT.user_id == patient.id).filter(
+                QBT.status == 'expired').filter(
+                QBT.qb_iteration == 10).first()
+ if not exp_row or exp_row.at > as_of_date:
+ row["status"] = "Not Yet Available"
+
+ ts_reporting = (
+ TriggerStatesReporting(patient_id=patient.id)
+ if research_study_id == EMPRO_RS_ID else None)
if last_viable:
general_row_detail(row, patient, last_viable)
- if research_study_id == EMPRO_RS_ID:
- # Initialize trigger states reporting for patient
- ts_reporting = TriggerStatesReporting(patient_id=patient.id)
- empro_row_detail(row, ts_reporting)
+ empro_row_detail(row, ts_reporting)
- # latest is only valid for a week, unless the user withdrew
- valid_for = 500 if row['status'] in ('Expired', 'Withdrawn') else 7
+    # latest is only valid for a day, unless expired or withdrawn
+ valid_for = 30 if row['status'] in ('Expired', 'Withdrawn') else 1
AdherenceData.persist(
patient_id=patient.id,
rs_id_visit=rs_visit,
@@ -173,24 +191,60 @@ def empro_row_detail(row, ts_reporting):
data=row)
added_rows += 1
+ # if the last row was withdrawn, add any completed visits beyond
+ # date of withdrawal
+ if row["status"] == 'Withdrawn':
+ withdrawal_date = (
+ row['completion_date'] if 'completion_date' in row
+ else row['EMPRO_questionnaire_completion_date'])
+ missing_qbts = []
+ completed_after_withdrawn = QBT.query.filter(
+ QBT.at > withdrawal_date).filter(
+ QBT.status == OverallStatus.completed).filter(
+ QBT.research_study_id == research_study_id).filter(
+ QBT.user_id == patient.id).order_by(QBT.at)
+ for qbt in completed_after_withdrawn:
+ missing_qbts.append((qbt.at, qbt.qbd()))
+
+        # one more special case: the withdrawn visit was completed,
+        # but BEFORE the user withdrew. qb_status accurately sees the
+        # visit as withdrawn and wrote that to the last row, but never
+        # wrote out the completed status first.
+ pre_wd_visit_cd = last_viable.completed_date(patient.id)
+ if pre_wd_visit_cd and not [
+ x for x, y in missing_qbts if x == pre_wd_visit_cd]:
+ missing_qbts.append((pre_wd_visit_cd, last_viable))
+
+ for at, qbd in missing_qbts:
+ row['status'] = 'Completed' # overwrite withdrawn state
+ general_row_detail(row, patient, qbd)
+ empro_row_detail(row, ts_reporting)
+ rs_visit = AdherenceData.rs_visit_string(
+ research_study_id, row['visit'], post_withdrawn=True)
+ AdherenceData.persist(
+ patient_id=patient.id,
+ rs_id_visit=rs_visit,
+ valid_for_days=30,
+ data=row)
+ added_rows += 1
+
# as we require a full history, continue to add rows for each previous
for qbd, status in qb_stats.older_qbds(last_viable):
rs_visit = AdherenceData.rs_visit_string(
research_study_id, visit_name(qbd))
- # once we find cached_data, the rest of the user's history is good
+ # once we find cached_data, the rest of the user's history is likely
+ # good, but best to verify nothing is stale
if AdherenceData.fetch(patient_id=patient.id, rs_id_visit=rs_visit):
- break
+ continue
historic = row.copy()
historic['status'] = status
general_row_detail(historic, patient, qbd)
-
- if research_study_id == EMPRO_RS_ID:
- empro_row_detail(historic, ts_reporting)
+ empro_row_detail(historic, ts_reporting)
AdherenceData.persist(
patient_id=patient.id,
rs_id_visit=rs_visit,
- valid_for_days=500,
+ valid_for_days=30,
data=historic)
added_rows += 1
@@ -223,7 +277,7 @@ def empro_row_detail(row, ts_reporting):
AdherenceData.persist(
patient_id=patient.id,
rs_id_visit=rs_visit,
- valid_for_days=500,
+ valid_for_days=30,
data=indef)
added_rows += 1
@@ -256,12 +310,13 @@ def cache_adherence_data(
if limit was hit
"""
+ from ..tasks import cache_single_patient_adherence_data
# For building cache, use system account; skip privilege checks
acting_user = User.query.filter_by(email='__system__').one()
as_of_date = datetime.utcnow()
# Purge any rows that have or will soon expire
- valid = (as_of_date + timedelta(days=1))
+ valid = (as_of_date + timedelta(hours=1))
AdherenceData.query.filter(AdherenceData.valid_till < valid).delete()
db.session.commit()
@@ -284,11 +339,16 @@ def patient_generator():
added_rows = 0
for patient in patient_generator():
- if added_rows > limit:
+ if limit and added_rows > limit:
current_app.logger.info(
"pre-mature exit caching adherence data having hit limit")
break
- single_patient_adherence_data(patient, as_of_date, research_study_id)
+ # queue patient's adherence cache refresh as a separate job
+ kwargs = {
+ 'patient_id': patient.id,
+ 'research_study_id': research_study_id}
+ cache_single_patient_adherence_data.apply_async(
+ kwargs=kwargs, retry=False)
return {'added': added_rows, 'limit_hit': limit and added_rows > limit}
diff --git a/portal/models/user.py b/portal/models/user.py
index b7b073d1fc..b39de7116c 100644
--- a/portal/models/user.py
+++ b/portal/models/user.py
@@ -323,7 +323,7 @@ class User(db.Model, UserMixin):
cascade='delete')
_consents = db.relationship(
'UserConsent', lazy='joined', cascade='delete',
- order_by="desc(UserConsent.acceptance_date)")
+ order_by="desc(UserConsent.id)")
indigenous = db.relationship(Coding, lazy='dynamic',
secondary="user_indigenous")
encounters = db.relationship('Encounter', cascade='delete')
diff --git a/portal/models/user_consent.py b/portal/models/user_consent.py
index 7b3df880c0..c326e6bcbe 100644
--- a/portal/models/user_consent.py
+++ b/portal/models/user_consent.py
@@ -165,7 +165,7 @@ def latest_consent(user, research_study_id):
if no match is located
"""
- # consents are ordered desc(acceptance_date)
+ # consents are ordered desc(id), i.e. most recent action first
for consent in user.valid_consents:
if consent.research_study_id != research_study_id:
continue
@@ -197,7 +197,10 @@ def consent_withdrawal_dates(user, research_study_id):
return consent.acceptance_date, withdrawal_date
# Look for withdrawn case. If found, also look up the previous
- # consent date (prior to withdrawal)
+    # consent date (prior to withdrawal). As withdrawal dates can
+    # be moved, continue looking back beyond all `suspended` rows until
+    # one with status `deleted` is found, as that was the last one
+    # valid prior to withdrawal.
prior_acceptance = None
for consent in user.all_consents:
@@ -205,13 +208,15 @@ def consent_withdrawal_dates(user, research_study_id):
continue
if not withdrawal_date and (
consent.status == 'suspended' and not consent.deleted_id):
+            # the first found (most recent by id) withdrawal takes precedence.
withdrawal_date = consent.acceptance_date
if prior_acceptance:
raise ValueError(
"don't expect prior acceptance before withdrawal date")
if consent.status == 'deleted' and withdrawal_date:
+            # the first `deleted` row behind any number of `suspended` is
+            # taken as the most recent legit consent prior to withdrawal
prior_acceptance = consent.acceptance_date
- # situation where consent date was changed before withdrawal
- # requires we continue to look and use last found (don't break!)
+ break
return prior_acceptance, withdrawal_date
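
With _consents now ordered desc(id), the loop reads newest-first; a toy stack
for one study, newest to oldest, showing which row supplies each date:

    # sketch: consent rows as visited by the loop above
    #   id=9  suspended  -> withdrawal_date (first hit wins)
    #   id=8  suspended  -> skipped; withdrawal_date already set
    #   id=7  deleted    -> prior_acceptance, then break
    #   id=2  deleted    -> never reached
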
diff --git a/portal/tasks.py b/portal/tasks.py
index f53a9b20d4..be2317f5bc 100644
--- a/portal/tasks.py
+++ b/portal/tasks.py
@@ -14,13 +14,13 @@
from celery.utils.log import get_task_logger
from flask import current_app
-import redis
from requests import Request, Session
from requests.exceptions import RequestException
from sqlalchemy import and_
from .database import db
from .factories.app import create_app
+from .factories.redis import create_redis
from .factories.celery import create_celery
from .models.communication import Communication
from .models.communication_request import queue_outstanding_messages
@@ -32,6 +32,7 @@
cache_adherence_data,
generate_and_send_summaries,
research_report,
+ single_patient_adherence_data,
)
from .models.research_study import ResearchStudy
from .models.role import ROLE, Role
@@ -113,9 +114,16 @@ def info():
queue=LOW_PRIORITY)
@scheduled_task
def cache_adherence_data_task(**kwargs):
+ """Queues up all patients needing a cache refresh"""
return cache_adherence_data(**kwargs)
+@celery.task(queue=LOW_PRIORITY, ignore_result=True)
+def cache_single_patient_adherence_data(**kwargs):
+ """Populates adherence data for a single patient"""
+ return single_patient_adherence_data(**kwargs)
+
+
@celery.task(bind=True, track_started=True, queue=LOW_PRIORITY)
def adherence_report_task(self, **kwargs):
current_app.logger.debug("launch adherence report task: %s", self.request.id)
@@ -393,7 +401,7 @@ def token_watchdog(**kwargs):
def celery_beat_health_check(**kwargs):
"""Refreshes self-expiring redis value for /healthcheck of celerybeat"""
- rs = redis.StrictRedis.from_url(current_app.config['REDIS_URL'])
+ rs = create_redis(current_app.config['REDIS_URL'])
return rs.setex(
name='last_celery_beat_ping',
time=current_app.config['LAST_CELERY_BEAT_PING_EXPIRATION_TIME'],
@@ -406,7 +414,7 @@ def celery_beat_health_check(**kwargs):
def celery_beat_health_check_low_priority_queue(**kwargs):
"""Refreshes self-expiring redis value for /healthcheck of celerybeat"""
- rs = redis.StrictRedis.from_url(current_app.config['REDIS_URL'])
+ rs = create_redis(current_app.config['REDIS_URL'])
return rs.setex(
name='last_celery_beat_ping_low_priority_queue',
time=10*current_app.config['LAST_CELERY_BEAT_PING_EXPIRATION_TIME'],
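
Both beat tasks publish liveness as a self-expiring key, so the read side
(see the healthcheck views below) only needs to test for the key's presence;
a minimal sketch of that check:

    # sketch: read side of the self-expiring liveness key
    rs = create_redis(current_app.config['REDIS_URL'])
    beat_alive = rs.get('last_celery_beat_ping') is not None
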
diff --git a/portal/timeout_lock.py b/portal/timeout_lock.py
index 87cc611dbc..21ab8af39b 100644
--- a/portal/timeout_lock.py
+++ b/portal/timeout_lock.py
@@ -1,8 +1,8 @@
import time
from flask import current_app
-import redis
+from .factories.redis import create_redis
class LockTimeout(BaseException):
"""Exception raised when wait for TimeoutLock exceeds timeout"""
@@ -31,8 +31,7 @@ def __init__(self, key, expires=60, timeout=10):
self.key = key
self.timeout = timeout
self.expires = expires
- self.redis = redis.StrictRedis.from_url(
- current_app.config['REDIS_URL'])
+ self.redis = create_redis(current_app.config['REDIS_URL'])
def __enter__(self):
timeout = self.timeout
@@ -102,11 +101,10 @@ def guarded_task_launch(task, **kwargs):
class CacheModeration(object):
"""Redis key implementation to prevent same key from excessive updates"""
- def __init__(self, key, timeout=3600):
+ def __init__(self, key, timeout=300):
self.key = key
self.timeout = timeout
- self.redis = redis.StrictRedis.from_url(
- current_app.config['REDIS_URL'])
+ self.redis = create_redis(current_app.config['REDIS_URL'])
def run_recently(self):
"""if key has value in redis (i.e. didn't expire) return value"""
diff --git a/portal/trigger_states/empro_messages.py b/portal/trigger_states/empro_messages.py
index 65dcf26c6c..0ad1c5015f 100644
--- a/portal/trigger_states/empro_messages.py
+++ b/portal/trigger_states/empro_messages.py
@@ -37,6 +37,7 @@ def patient_email(patient, soft_triggers, hard_triggers):
mr = MailResource(
app_text(name), locale_code=patient.locale_code, variables=args)
em = EmailMessage(
+ recipient_id=patient.id,
recipients=patient.email,
sender=current_app.config['MAIL_DEFAULT_SENDER'],
subject=mr.subject,
@@ -112,11 +113,15 @@ def staff_emails(patient, hard_triggers, initial_notification):
}
emails = []
for staff in staff_list:
+ if not staff.email_ready():
+ current_app.logger.error(f"can't email staff {staff} without email")
+ continue
mr = MailResource(
app_text(app_text_name),
locale_code=staff.locale_code,
variables=args)
emails.append(EmailMessage(
+ recipient_id=staff.id,
recipients=staff.email,
sender=current_app.config['MAIL_DEFAULT_SENDER'],
subject=mr.subject,
diff --git a/portal/trigger_states/empro_states.py b/portal/trigger_states/empro_states.py
index 70e4907117..a2c73e52f3 100644
--- a/portal/trigger_states/empro_states.py
+++ b/portal/trigger_states/empro_states.py
@@ -292,9 +292,13 @@ def process_processed(ts):
patient = User.query.get(ts.user_id)
# Patient always gets mail
- pending_emails.append((
- patient_email(patient, soft_triggers, hard_triggers),
- "patient thank you"))
+ if patient.email_ready():
+ pending_emails.append((
+ patient_email(patient, soft_triggers, hard_triggers),
+ "patient thank you"))
+ else:
+ current_app.logger.error(
+ f"EMPRO Patient({patient.id}) w/o email! Can't send message")
if hard_triggers:
triggers['action_state'] = 'required'
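Both message paths now guard on `email_ready()` before queuing mail. That helper is not defined in this patch; a plausible minimal shape, assuming it simply verifies a deliverable address (the placeholder check is an assumption):

    def email_ready(self):
        """True when the user has an address mail can be delivered to."""
        return bool(self.email) and not self.email.startswith('__')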
diff --git a/portal/views/healthcheck.py b/portal/views/healthcheck.py
index af96c5db35..be84061a80 100644
--- a/portal/views/healthcheck.py
+++ b/portal/views/healthcheck.py
@@ -3,12 +3,12 @@
from celery.exceptions import TimeoutError
from celery.result import AsyncResult
from flask import Blueprint, current_app
-import redis
from redis.exceptions import ConnectionError
from sqlalchemy import text
from ..database import db
from ..factories.celery import create_celery
+from ..factories.redis import create_redis
HEALTHCHECK_FAILURE_STATUS_CODE = 200
@@ -23,7 +23,7 @@ def celery_beat_ping():
This allows us to monitor whether celery beat tasks are running
"""
try:
- rs = redis.StrictRedis.from_url(current_app.config['REDIS_URL'])
+ rs = create_redis(current_app.config['REDIS_URL'])
rs.setex(
name='last_celery_beat_ping',
time=current_app.config['LAST_CELERY_BEAT_PING_EXPIRATION_TIME'],
@@ -64,7 +64,7 @@ def celery_available():
def celery_beat_available():
"""Determines whether celery beat is available"""
try:
- rs = redis.from_url(current_app.config['REDIS_URL'])
+ rs = create_redis(current_app.config['REDIS_URL'])
# Celery beat feeds scheduled jobs (a la cron) to the respective
# job queues (standard and low priority). As a monitor, a job
@@ -109,7 +109,7 @@ def redis_available():
# is available. Otherwise we assume
# it's not available
try:
- rs = redis.from_url(current_app.config["REDIS_URL"])
+ rs = create_redis(current_app.config["REDIS_URL"])
rs.ping()
return True, 'Redis is available.'
except Exception as e:
diff --git a/portal/views/patient.py b/portal/views/patient.py
index 09a39ce89c..7ef1e9c166 100644
--- a/portal/views/patient.py
+++ b/portal/views/patient.py
@@ -30,9 +30,13 @@
from ..models.questionnaire_bank import QuestionnaireBank, trigger_date
from ..models.questionnaire_response import QuestionnaireResponse
from ..models.reference import Reference
-from ..models.research_study import ResearchStudy
+from ..models.research_study import (
+ ResearchStudy,
+ research_study_id_from_questionnaire
+)
from ..models.role import ROLE
from ..models.user import User, current_user, get_user
+from ..models.user_consent import consent_withdrawal_dates
from ..timeout_lock import ADHERENCE_DATA_KEY, CacheModeration
from .crossdomain import crossdomain
from .demographics import demographics
@@ -313,6 +317,7 @@ def patient_timeline(patient_id):
:param research_study_id: set to alternative research study ID - default 0
:param trace: set 'true' to view detailed logs generated, works best in
concert with purge
+ :param only: restrict results to the single named top-level attribute
"""
from ..date_tools import FHIR_datetime, RelativeDelta
@@ -323,7 +328,9 @@ def patient_timeline(patient_id):
from ..models.qbd import QBD
from ..models.qb_status import QB_Status
from ..models.questionnaire_bank import visit_name
+ from ..models.questionnaire_response import aggregate_responses
from ..models.research_protocol import ResearchProtocol
+ from ..tasks import cache_single_patient_adherence_data
from ..trace import dump_trace, establish_trace
user = get_user(patient_id, permission='view')
@@ -341,6 +348,7 @@ def patient_timeline(patient_id):
# questionnaire_response : qb relationships and remove cache lock
# on adherence data.
if purge == 'all':
+ # remove adherence cache key to allow fresh run
cache_moderation = CacheModeration(key=ADHERENCE_DATA_KEY.format(
patient_id=patient_id,
research_study_id=research_study_id))
@@ -455,18 +463,69 @@ def get_recur_id(qnr):
status['indefinite status'] = indef_status
adherence_data = sorted_adherence_data(patient_id, research_study_id)
+ if not adherence_data:
+ # immediately following a cache purge, adherence data is gone and
+ # needs to be recreated.
+ kwargs = {
+ "patient_id": user.id,
+ "research_study_id": research_study_id,
+ }
+ cache_single_patient_adherence_data(**kwargs)
+ adherence_data = sorted_adherence_data(patient_id, research_study_id)
+
+ qnr_responses = aggregate_responses(
+ instrument_ids=None,
+ current_user=current_user(),
+ research_study_id=research_study_id,
+ patch_dstu2=True,
+ ignore_qb_requirement=True,
+ patient_ids=[patient_id]
+ )
+ # filter qnr data to a manageable result data set
+ qnr_data = []
+ for row in qnr_responses['entry']:
+ i = {}
+ d = row['resource']
+ i['questionnaire'] = d['questionnaire']['reference'].split('/')[-1]
+
+ # qnr_responses returns all studies; filter to the requested research_study
+ study_id = research_study_id_from_questionnaire(i['questionnaire'])
+ if study_id != research_study_id:
+ continue
+ i['auth_method'] = d['encounter']['auth_method']
+ i['encounter_period'] = d['encounter']['period']
+ i['document_authored'] = d['authored']
+ try:
+ i['ae_session'] = d['identifier']['value']
+ except KeyError:
+ # happens with sub-study follow up, skip ae_session
+ pass
+ i['status'] = d['status']
+ i['org'] = d['subject']['careProvider'][0]['display']
+ i['visit'] = d['timepoint']
+ qnr_data.append(i)
+
+ consent_date, withdrawal_date = consent_withdrawal_dates(user, research_study_id)
+ consents = {"consent_date": consent_date, "withdrawal_date": withdrawal_date}
+ kwargs = {
+ "consents": consents,
+ "rps": rps,
+ "status": status,
+ "posted": posted,
+ "timeline": results,
+ "adherence_data": adherence_data,
+ "qnr_data": qnr_data
+ }
if trace:
- return jsonify(
- rps=rps,
- status=status,
- posted=posted,
- timeline=results,
- adherence_data=adherence_data,
- trace=dump_trace("END time line lookup"))
- return jsonify(
- rps=rps, status=status, posted=posted, timeline=results,
- adherence_data=adherence_data)
+ kwargs["trace"] = dump_trace("END time line lookup")
+
+ only = request.args.get('only', False)
+ if only:
+ if only not in kwargs:
+ raise ValueError(f"{only} not in {kwargs.keys()}")
+ return jsonify({only: kwargs[only]})
+ return jsonify(**kwargs)
@patient_api.route('/api/patient/<int:patient_id>/timewarp/<int:days>')
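With the payload now assembled in a single kwargs dict, the new `only` query parameter can trim the sizeable timeline response to one attribute, e.g. GET /api/patient/123/timeline?only=adherence_data (patient id illustrative).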
diff --git a/portal/views/user.py b/portal/views/user.py
index 45e0b7ccd9..5acce26ea4 100644
--- a/portal/views/user.py
+++ b/portal/views/user.py
@@ -765,7 +765,9 @@ def withdraw_user_consent(user_id):
Used to withdraw a consent agreements between a user and an organization.
If a consent exists for the given user/org, the consent will be marked
deleted, and a matching consent (with new status/option values) will be
- created in its place.
+ created in its place. If the user was already withdrawn, a new row
+ is likewise created with status `suspended`; the prior row retains
+ its `suspended` status but is marked deleted.
NB Invalid to request a withdrawal date prior to current consent.
@@ -891,8 +893,18 @@ def withdraw_consent(
if acceptance_date > datetime.utcnow() + timedelta(days=1):
raise ValueError(
"Can't suspend with acceptance date in the future")
+ prior_withdrawal_date = wc.acceptance_date
wc.acceptance_date = acceptance_date
db.session.commit()
+ # Audit the change
+ auditable_event(
+ f"Consent agreement {wc.id} updated from {prior_withdrawal_date} "
+ f"to {acceptance_date}",
+ user_id=current_user().id,
+ subject_id=user.id,
+ context="consent"
+ )
+
# As withdrawal time just changed, force recalculation
invalidate_users_QBT(
user_id=user.id, research_study_id=research_study_id)
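Combined with the user_consent scan earlier in this patch, moving a withdrawal date therefore leaves a row history like the following, newest first (an illustrative sketch restating the docstring, not output from the code):

    # status='suspended'                  <- current withdrawal (new date)
    # status='suspended', marked deleted  <- the prior withdrawal row
    # status='deleted'                    <- last legitimate consent; its
    #                                        acceptance_date is the consent
    #                                        date prior to withdrawal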
diff --git a/tests/test_consent.py b/tests/test_consent.py
index da0c4b8dd9..77000eba80 100644
--- a/tests/test_consent.py
+++ b/tests/test_consent.py
@@ -109,11 +109,11 @@ def test_consent_order(self):
'/api/user/{}/consent'.format(TEST_USER_ID))
assert response.status_code == 200
assert len(response.json['consent_agreements']) == 3
- # should be ordered by acceptance date, descending: (uc3, uc1, uc2)
+ # regardless of age, most recent action takes precedence
uc1, uc2, uc3 = map(db.session.merge, (uc1, uc2, uc3))
assert response.json['consent_agreements'][0] == uc3.as_json()
- assert response.json['consent_agreements'][1] == uc1.as_json()
- assert response.json['consent_agreements'][2] == uc2.as_json()
+ assert response.json['consent_agreements'][1] == uc2.as_json()
+ assert response.json['consent_agreements'][2] == uc1.as_json()
def test_post_user_consent(self):
self.shallow_org_tree()
diff --git a/tests/test_healthcheck.py b/tests/test_healthcheck.py
index 7ace20aa43..66bc368c62 100644
--- a/tests/test_healthcheck.py
+++ b/tests/test_healthcheck.py
@@ -29,21 +29,21 @@ def test_celery_available_fails_when_celery_ping_fails(
results = celery_available()
assert results[0] is False
- @patch('portal.views.healthcheck.redis')
+ @patch('portal.views.healthcheck.create_redis')
def test_celery_beat_available_fails_when_redis_var_none(
self,
- redis_mock
+ create_redis_mock
):
- redis_mock.from_url.return_value.get.return_value = None
+ create_redis_mock.return_value.get.return_value = None
results = celery_beat_available()
assert results[0] is False
- @patch('portal.views.healthcheck.redis')
+ @patch('portal.views.healthcheck.create_redis')
def test_celery_beat_available_succeeds_when_redis_var_set(
self,
- redis_mock
+ create_redis_mock
):
- redis_mock.from_url.return_value.get.return_value = \
+ create_redis_mock.return_value.get.return_value = \
str(datetime.now())
results = celery_beat_available()
assert results[0] is True
@@ -68,21 +68,21 @@ def test_postgresql_available_fails_when_query_exception(
results = postgresql_available()
assert results[0] is False
- @patch('portal.views.healthcheck.redis')
+ @patch('portal.views.healthcheck.create_redis')
def test_redis_available_succeeds_when_ping_successful(
self,
- redis_mock
+ create_redis_mock
):
- redis_mock.from_url.return_value.ping.return_value = True
+ create_redis_mock.return_value.ping.return_value = True
results = redis_available()
assert results[0] is True
- @patch('portal.views.healthcheck.redis')
+ @patch('portal.views.healthcheck.create_redis')
def test_redis_available_fails_when_ping_throws_exception(
self,
- redis_mock
+ create_redis_mock
):
- redis_mock.from_url.return_value.ping.side_effect = \
+ create_redis_mock.return_value.ping.side_effect = \
redis.ConnectionError()
results = redis_available()
assert results[0] is False
diff --git a/tests/test_intervention.py b/tests/test_intervention.py
index 69efd1070e..a29fd32142 100644
--- a/tests/test_intervention.py
+++ b/tests/test_intervention.py
@@ -555,6 +555,7 @@ def test_in_role(initialize_static, test_user):
assert sm.quick_access_check(user)
+@pytest.mark.skip("no longer supporting moving initial trigger dates")
def test_card_html_update(
client, initialize_static, initialized_patient_logged_in):
"""Confirm assessment status state affects AE card on /home view"""
diff --git a/tests/test_qb_timeline.py b/tests/test_qb_timeline.py
index 19e0b92935..2aba026f5a 100644
--- a/tests/test_qb_timeline.py
+++ b/tests/test_qb_timeline.py
@@ -309,7 +309,7 @@ def test_qb_post_consent_change(self):
assert qbstatus.overall_status == OverallStatus.completed
def test_withdrawn(self):
- # qbs should halt beyond withdrawal
+ # check qb_status post withdrawal
crv = self.setup_org_qbs()
crv_id = crv.id
# consent 17 months in past
@@ -334,8 +334,9 @@ def test_withdrawn(self):
for n in (3, 6, 9, 15):
assert visit_name(next(gen)) == 'Month {}'.format(n)
- with pytest.raises(StopIteration):
- next(gen)
+ # current should be withdrawn; subsequent visits remain available
+ # in case post-withdrawal results come in
+ assert visit_name(next(gen)) == 'Month 18'
# Confirm withdrawn user can still access "current"
# as needed for reporting
@@ -343,9 +344,10 @@ def test_withdrawn(self):
qb_stats = QB_Status(
user=user,
research_study_id=0,
- as_of_date=now)
+ as_of_date=now+relativedelta(days=1))
current = qb_stats.current_qbd(even_if_withdrawn=True)
assert current
+ assert qb_stats.overall_status == OverallStatus.withdrawn
def test_change_midstream_rp(self):
back7, nowish = associative_backdate(
diff --git a/tests/test_reporting.py b/tests/test_reporting.py
index a81c39ff5c..0d16752720 100644
--- a/tests/test_reporting.py
+++ b/tests/test_reporting.py
@@ -3,6 +3,7 @@
from datetime import datetime
from dateutil.relativedelta import relativedelta
from flask_webtest import SessionScope
+from time import sleep
from portal.cache import cache
from portal.extensions import db
@@ -150,16 +151,40 @@ def test_adherence_sort(self):
"completion_date ": "19 - Jun - 2023 07: 42:46 ",
"oow_completion_date": ""
},
+ "Month 12 post-withdrawn": {
+ "qb": "CRV Baseline v2",
+ "site": "CRV",
+ "visit": "Month 12",
+ "status": "Completed",
+ "consent": "20 - May - 2023 07: 42:46 ",
+ "completion_date ": "25 - Jun - 2023 00:00:00 ",
+ "country ": None,
+ "user_id ": 3,
+ "study_id": "study user 3",
+ "site_code": ""},
"Month 12": {
"qb": "CRV Baseline v2",
"site": "CRV",
"visit": "Month 12",
- "status": "Overdue",
+ "status": "Withdrawn",
+ "completion_date ": "22 - Jun - 2023 00:00:00 ",
"consent": "20 - May - 2023 07: 42:46 ",
"country ": None,
"user_id ": 3,
"study_id": "study user 3",
"site_code": ""},
+ "Baseline post-withdrawn": {
+ "qb": "CRV Baseline v2",
+ "site": "CRV",
+ "visit": "Baseline",
+ "status": "Completed",
+ "completion_date ": "22 - Jun - 2023 00:00:00 ",
+ "consent": "19 - Jun - 2023 07: 42:46",
+ "country ": None,
+ "user_id ": 2,
+ "study_id": "study user 2",
+ "site_code": ""
+ },
"Baseline": {
"qb": "CRV Baseline v2",
"site": "CRV",
@@ -173,10 +198,17 @@ def test_adherence_sort(self):
},
}
results = sort_by_visit_key(sort_me)
- assert len(results) == 3
+ assert len(results) == 5
assert results[0]["visit"] == "Baseline"
- assert results[1]["visit"] == "Month 3"
- assert results[2]["visit"] == "Month 12"
+ assert results[0]["status"] == "Due"
+ assert results[1]["visit"] == "Baseline"
+ assert results[1]["status"] == "Completed"
+ assert results[2]["visit"] == "Month 3"
+ assert results[2]["status"] == "Completed"
+ assert results[3]["visit"] == "Month 12"
+ assert results[3]["status"] == "Withdrawn"
+ assert results[4]["visit"] == "Month 12"
+ assert results[4]["status"] == "Completed"
def populate_adherence_cache(self, test_users):
"""helper method to bring current test user state into adherence cache"""
@@ -298,6 +330,7 @@ def test_results(self):
self.consent_with_org(org_id=org_id)
self.login()
self.populate_adherence_cache(test_users=(user2, user3, user4))
+ sleep(5) # adherence jobs run independently; give them time to finish
response = self.results_from_async_call(
"/api/report/questionnaire_status", timeout=10)