Convert share ID during snapshot task restore rockstor#2355
We previously re-created scheduled tasks of type snapshot verbatim
from the config backup file. As share IDs are highly likely to differ
between system installations, a config backup restore would re-create
such tasks against the wrong share.

This commit fixes this by:
  1. fetching the share name from the ID in the backup file (source)
  2. fetching the corresponding share ID in the new (target) system
  3. updating the restore API call accordingly (sketched below)
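
A minimal sketch of the remapping, using the helpers introduced in this
commit (see `validate_taskdef_meta()` in the diff below):

  source_id = int(taskdef_meta["share"])        # share ID recorded in the backup (source)
  source_name = get_sname(sa_ml, source_id)     # resolve it to a share name
  target_id = get_target_share_id(source_name)  # look the name up on the target system
  taskdef_meta["share"] = unicode(target_id)    # payload now points at the right share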

This commit also fixes some related issues by:
  - updating the API endpoint for scheduled tasks following rockstor#2365
  - continuing the restore process if a taskdef fails validation rockstor#2355

Moreover, the previous `restore_scheduled_tasks()` logic did both the
preparation/parsing of the API payload and the API call, which made the
former difficult to test. This commit thus splits it into (call pattern
sketched below):
  - parsing/preparation: `validate_task_definitions()`
  - API call: `restore_scheduled_tasks()`
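
A sketch of the resulting call pattern (names as implemented in the diff
below):

  tasks = validate_task_definitions(sm_ml, sa_ml)      # pure parsing/validation, unit-testable
  for t in tasks:
      generic_post("{}/sm/tasks".format(BASE_URL), t)  # side-effecting API call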

Accordingly, two new unit tests are included.
FroggyFlox committed Mar 3, 2023
1 parent f397bc9 commit 16edb07
Showing 3 changed files with 275 additions and 28 deletions.
62 changes: 62 additions & 0 deletions src/rockstor/storageadmin/fixtures/test_config_backup.json
@@ -0,0 +1,62 @@
[
{
"model": "storageadmin.pool",
"pk": 2,
"fields": {
"name": "rock-pool",
"uuid": "3d60105b-abae-456f-bbd1-a5f410295e9c",
"size": 5242880,
"raid": "single",
"toc": "2023-02-26T22:15:14.503Z",
"compression": "no",
"mnt_options": null,
"role": null
}
},
{
"model": "storageadmin.share",
"pk": 3,
"fields": {
"pool": 2,
"qgroup": "0/301",
"pqgroup": "2015/136",
"name": "test_share01",
"uuid": null,
"size": 5242880,
"owner": "root",
"group": "root",
"perms": "755",
"toc": "2023-02-26T22:15:14.581Z",
"subvol_name": "test_share01",
"replica": false,
"compression_algo": null,
"rusage": 16,
"eusage": 16,
"pqgroup_rusage": 16,
"pqgroup_eusage": 16
}
},
{
"model": "storageadmin.share",
"pk": 4,
"fields": {
"pool": 2,
"qgroup": "0/302",
"pqgroup": "2015/137",
"name": "test_share02",
"uuid": null,
"size": 5242880,
"owner": "root",
"group": "root",
"perms": "755",
"toc": "2023-02-26T22:15:14.606Z",
"subvol_name": "test_share02",
"replica": false,
"compression_algo": null,
"rusage": 16,
"eusage": 16,
"pqgroup_rusage": 16,
"pqgroup_eusage": 16
}
}
]
126 changes: 124 additions & 2 deletions src/rockstor/storageadmin/tests/test_config_backup.py
@@ -23,12 +23,30 @@
validate_install_config,
validate_update_config,
validate_service_status,
validate_taskdef_meta,
validate_task_definitions,
)

"""
Fixture creation instructions:
System needs 1 non-system pool 'rock-pool', at any raid level.
- Create 1 share named 'test_share01'
- Create 1 share named 'test_share02'
poetry run django-admin dumpdata storageadmin.pool storageadmin.share \
--natural-foreign --indent 4 > \
src/rockstor/storageadmin/fixtures/test_config_backup.json
To run the tests:
export DJANGO_SETTINGS_MODULE="settings"
cd src/rockstor && poetry run django-admin test -v 2 -p test_config_backup.py
"""


class ConfigBackupTests(APITestMixin):
# Proposed fixture = "test_config-backup" was "fix2.json"
fixtures = ["test_api.json"]
fixtures = ["test_api.json", "test_config_backup.json"]
BASE_URL = "/api/config-backup"
sa_ml = [
{
@@ -1138,6 +1156,42 @@ class ConfigBackupTests(APITestMixin):
"model": "smart_manager.servicestatus",
"pk": 32,
},
{
"fields": {
"name": "snap_daily_ts01",
"task_type": "snapshot",
"json_meta": '{"writable": true, "visible": true, "prefix": "snap_daily_ts01", "share": "2", "max_count": "4"}',
"enabled": False,
"crontab": "42 3 * * *",
"crontabwindow": "*-*-*-*-*-*",
},
"model": "smart_manager.taskdefinition",
"pk": 1,
},
{
"fields": {
"name": "snap_daily_ts02",
"task_type": "snapshot",
"json_meta": '{"writable": true, "visible": true, "prefix": "snap_daily_ts02", "share": "33", "max_count": "4"}',
"enabled": False,
"crontab": "42 3 * * *",
"crontabwindow": "*-*-*-*-*-*",
},
"model": "smart_manager.taskdefinition",
"pk": 2,
},
{
"fields": {
"name": "snap_daily_ts04",
"task_type": "snapshot",
"json_meta": '{"writable": true, "visible": true, "prefix": "snap_daily_ts04", "share": "5", "max_count": "4"}',
"enabled": False,
"crontab": "42 3 * * *",
"crontabwindow": "*-*-*-*-*-*",
},
"model": "smart_manager.taskdefinition",
"pk": 3,
},
]

@classmethod
Expand Down Expand Up @@ -1793,3 +1847,71 @@ def test_validate_service_status(self):
"returned = {}.\n "
"expected = {}.".format(ret, o),
)

def test_validate_taskdef_meta(self):
"""
Input as per sm_ml above:
The share in question is test_share01, which has:
- an ID of 2 in sa_ml above
- an ID of 3 in the test_config_backup.json fixture
"""
task_type = ["snapshot"]  # list, as it will receive appends later on
taskdef_meta = [
{
"writable": True,
"visible": True,
"prefix": "snap_daily_ts01",
"share": "2",
"max_count": "4",
}
]

out = [
{
"writable": True,
"visible": True,
"prefix": "snap_daily_ts01",
"share": "3",
"max_count": "4",
}
]

for t, m, o in zip(task_type, taskdef_meta, out):
ret = validate_taskdef_meta(self.sa_ml, m, t)
self.assertEqual(
ret,
o,
msg="Unexpected validate_taskdef_meta() result:\n "
"returned = {}.\n "
"expected = {}.".format(ret, o),
)

def test_validate_task_definitions(self):
"""
Test:
- valid metadata (snap_daily_ts01 in sm_ml)
- invalid metadata: wrong share ID in backup file (snap_daily_ts02 in sm_ml)
- invalid metadata: share does not exist on target system (snap_daily_ts04 in sm_ml)
"""
out = [{
"task_type": "snapshot",
"name": "snap_daily_ts01",
"crontabwindow": "*-*-*-*-*-*",
"enabled": False,
"crontab": "42 3 * * *",
"meta": {
"writable": True,
"visible": True,
"prefix": "snap_daily_ts01",
"share": "3",
"max_count": "4",
},
}]
ret = validate_task_definitions(self.sm_ml, self.sa_ml)
self.assertEqual(
ret,
out,
msg="Unexpected validate_task_definitions() result:\n "
"returned = {}.\n "
"expected = {}.".format(ret, out),
)
115 changes: 89 additions & 26 deletions src/rockstor/storageadmin/views/config_backup.py
@@ -33,7 +33,7 @@
import rest_framework_custom as rfc
from cli.rest_util import api_call
from smart_manager.models.service import Service, ServiceStatus
from storageadmin.models import ConfigBackup, RockOn
from storageadmin.models import ConfigBackup, RockOn, Share
from storageadmin.serializers import ConfigBackupSerializer
from storageadmin.util import handle_exception
from storageadmin.views.rockon_helpers import rockon_tasks_pending
@@ -185,45 +185,100 @@ def validate_service_status(ml, pkid):
return m["fields"]["status"]


def restore_scheduled_tasks(ml):
def validate_taskdef_meta(sa_ml, taskdef_meta, task_type):
"""
Task definitions of type snapshot include a share ID in their
json_meta field (taskdef_meta). That ID is most likely different on the
new system on which the config backup is to be restored, so we need to
fetch the ID of the share in question on the new system.
Example input taskdef_meta:
json_meta: {
"writable": true,
"visible": true,
"prefix": "snap_daily_ts01",
"share": "77",
"max_count": "4"
}
:param sa_ml: list of storageadmin models of interest as parsed by restore_config()
:param taskdef_meta: dict loaded from validate_task_definitions()
:param task_type: string, can be "snapshot" or "scrub"
:return: dict
"""
if task_type == "snapshot":
# get source share name from config backup based on its ID
source_id = int(taskdef_meta["share"])
source_name = get_sname(sa_ml, source_id)
# get ID of source share name in the target system
target_share_id = get_target_share_id(source_name)
# Update taskdef_meta (needs to be a unicode object)
taskdef_meta["share"] = unicode(target_share_id)
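# task_type "scrub" needs no conversion, so its meta passes through unchanged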
return taskdef_meta


def restore_scheduled_tasks(ml, sa_ml):
"""
Simple wrapper to trigger the preparation of the list of scheduled tasks
to be restored, followed by the actual API request.
:param ml: list of smart_manager models of interest as parsed by restore_config()
:param sa_ml: list of storageadmin models of interest as parsed by restore_config()
"""
logger.info("Started restoring scheduled tasks.")
tasks = validate_task_definitions(ml, sa_ml)
for t in tasks:
generic_post("{}/sm/tasks".format(BASE_URL), t)
logger.info("Finished restoring scheduled tasks.")


def validate_task_definitions(ml, sa_ml):
"""
Parses the config backup to re-create a valid POST request to be sent to the
sm/tasks API in order to re-create the scheduled task(s) in question.
If multiple tasks are to be re-created, the config for each one is stored
inside a list that is then looped through to send an api request for each task.
inside a list that is then looped through to send an API request for each task.
Need the following info for each request:
- name
- task_type
- crontab
- crontabwindow
- meta
- enabled
:param ml: list of smart_manager models of interest as parsed by restore_config()
:param sa_ml: list of storageadmin models of interest as parsed by restore_config()
:return: list of tasks to restore
"""
logger.info("Started restoring scheduled tasks.")
tasks = []
for m in ml:
if m["model"] == "smart_manager.taskdefinition":
name = m["fields"]["name"]
task_type = m["fields"]["task_type"]
crontab = m["fields"]["crontab"]
crontabwindow = m["fields"]["crontabwindow"]
enabled = m["fields"]["enabled"]
json_meta = m["fields"]["json_meta"]
if json_meta is not None:
jmeta = json.loads(json_meta)
taskdef = {
"name": name,
"task_type": task_type,
"crontab": crontab,
"crontabwindow": crontabwindow,
"enabled": enabled,
"meta": jmeta,
}
tasks.append(taskdef)
for t in tasks:
generic_post("{}/sm/tasks/".format(BASE_URL), t)
logger.info("Finished restoring scheduled tasks.")
try:
name = m["fields"]["name"]
task_type = m["fields"]["task_type"]
crontab = m["fields"]["crontab"]
crontabwindow = m["fields"]["crontabwindow"]
enabled = m["fields"]["enabled"]
json_meta = m["fields"]["json_meta"]
if json_meta is not None:
jmeta = json.loads(json_meta)
jmeta = validate_taskdef_meta(sa_ml, jmeta, task_type)
taskdef = {
"name": name,
"task_type": task_type,
"crontab": crontab,
"crontabwindow": crontabwindow,
"enabled": enabled,
"meta": jmeta,
}
tasks.append(taskdef)
except Exception as e:
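# Catch-all: a malformed json_meta or a share missing on the target system
# should not abort the restore of the remaining tasks (rockstor#2355)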
logger.info(
"An unexpected error occurred while trying to restore a task ({}): {}".format(
name, e
)
)
return tasks


@db_task()
Expand Down Expand Up @@ -495,6 +550,14 @@ def get_sname(ml, share_id):
return sname


def get_target_share_id(source_name):
"""
Takes a share name and returns its ID from the database.
"""
so = Share.objects.get(name=source_name)
return so.id


@db_task()
@lock_task("restore_config_lock")
def restore_config(cbid):
@@ -512,8 +575,8 @@ def restore_config(cbid):
# restore_dashboard(ml)
# restore_appliances(ml)
# restore_network(sa_ml)
restore_scheduled_tasks(sm_ml)
# N.B. the following is also a Huey task in it's own right.
restore_scheduled_tasks(sm_ml, sa_ml)
# N.B. the following is also a Huey task in its own right.
restore_rockons(sa_ml)


Expand Down

0 comments on commit 16edb07

Please sign in to comment.