Skip to content

Commit

Permalink
Merge pull request #751 from gkadillak/728_edit_scheduled_tasks
Browse files Browse the repository at this point in the history
Add detail view to scheduled tasks; fixes #728
  • Loading branch information
schakrava committed Jul 29, 2015
2 parents 371895f + 8096476 commit 7ce0206
Show file tree
Hide file tree
Showing 31 changed files with 725 additions and 687 deletions.
2 changes: 1 addition & 1 deletion base-buildout.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ output = ${buildout:directory}/conf/docker.service
[js-libraries]
recipe = hexagonit.recipe.download
url = http://rockstor.com/downloads/jslibs/lib.tgz
md5sum = 69e609368173a881bf170fde6a05f77c
md5sum = a0a0c0576d9e8dc0f954f3e411f34541
strip-top-level-dir = true
destination = ${buildout:directory}/static/js/lib
on-update = true
Expand Down
1 change: 0 additions & 1 deletion buildout.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,6 @@ logdir = ${buildout:directory}/var/log
gunicorn_cmd = ${buildout:directory}/bin/gunicorn --bind=${init-gunicorn:bind}:${init-gunicorn:port} --pid=${init-gunicorn:pidfile} --workers=${init-gunicorn:workers} --log-file=${init-gunicorn:logfile} --pythonpath=${buildout:directory}/src/rockstor --settings=settings --timeout=120 --graceful-timeout=120 wsgi:application
smart_manager_cmd = ${buildout:directory}/bin/sm
replicad_cmd = ${buildout:directory}/bin/replicad
ts_cmd = ${buildout:directory}/bin/task-scheduler
dc_cmd = ${buildout:directory}/bin/data-collector
sm_cmd = ${buildout:directory}/bin/service-monitor
jd_cmd = ${buildout:directory}/bin/job-dispatcher
Expand Down
15 changes: 14 additions & 1 deletion conf/settings.conf.in
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,14 @@ LOGGING = {
'handlers': ['file'],
'level': 'DEBUG',
},
'system': {
'handlers': ['file'],
'level': 'DEBUG',
},
'scripts': {
'handlers': ['file'],
'level': 'DEBUG',
},
}
}

Expand Down Expand Up @@ -333,5 +341,10 @@ COMPRESSION_TYPES = ('lzo', 'zlib', 'no',)

SUPPORTED_KERNEL_VERSION = ${django-settings-conf:kernel}

SNAP_TS_FORMAT = '%Y%m%d%H%M%S'
SNAP_TS_FORMAT = '%Y%m%d%H%M'
ROOT_POOL = 'rockstor_rockstor'


MODEL_DEFS = {
'pqgroup': '-1/-1',
}
24 changes: 0 additions & 24 deletions conf/supervisord-prod.conf.in
Original file line number Diff line number Diff line change
Expand Up @@ -118,30 +118,6 @@ stderr_logfile=${supervisord-conf:logdir}/supervisord_%(program_name)s_stderr.lo
stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)

; Task scheduler
[program:task-scheduler]
environment=DJANGO_SETTINGS_MODULE=settings
command=${supervisord-conf:ts_cmd} ; the program (relative uses PATH, can take
; args)
process_name=%(program_name)s ; process_name expr (default %(program_name)s)
numprocs=1 ; number of processes copies to start (def 1)
priority=200
autostart=false ; start at supervisord start (default: true)
autorestart=unexpected ; whether/when to restart (default: unexpected)
startsecs=2 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=TERM ; signal used to kill process (default TERM)
stopwaitsecs=5 ; max num secs to wait b4 SIGKILL (default 10)
stdout_logfile=${supervisord-conf:logdir}/supervisord_%(program_name)s_stdout.log
; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
stderr_logfile=${supervisord-conf:logdir}/supervisord_%(program_name)s_stderr.log
; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)

; Data Collector
[program:data-collector]
environment=DJANGO_SETTINGS_MODULE=settings
Expand Down
20 changes: 0 additions & 20 deletions conf/supervisord.conf.in
Original file line number Diff line number Diff line change
Expand Up @@ -119,26 +119,6 @@ stderr_logfile=${supervisord-conf:logdir}/supervisord_%(program_name)s_stderr.lo
stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)

; Task scheduler
[program:task-scheduler]
environment=DJANGO_SETTINGS_MODULE=settings
command=${supervisord-conf:ts_cmd} ; the program (relative uses PATH, can take args)
process_name=%(program_name)s ; process_name expr (default %(program_name)s)
numprocs=1 ; number of processes copies to start (def 1)
priority=200
autostart=false ; start at supervisord start (default: true)
autorestart=unexpected ; whether/when to restart (default: unexpected)
startsecs=2 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=TERM ; signal used to kill process (default TERM)
stopwaitsecs=5 ; max num secs to wait b4 SIGKILL (default 10)
stdout_logfile=${supervisord-conf:logdir}/supervisord_%(program_name)s_stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)
stderr_logfile=${supervisord-conf:logdir}/supervisord_%(program_name)s_stderr.log ; stderr log path, NONE for none; default AUTO
stderr_logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
stderr_logfile_backups=10 ; # of stderr logfile backups (default 10)

; Data Collector
[program:data-collector]
Expand Down
1 change: 0 additions & 1 deletion prod-buildout.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ logdir = ${buildout:depdir}/var/log
gunicorn_cmd = ${buildout:depdir}/bin/gunicorn --bind=${init-gunicorn:bind}:${init-gunicorn:port} --pid=${init-gunicorn:pidfile} --workers=${init-gunicorn:workers} --log-file=${init-gunicorn:logfile} --pythonpath=${buildout:depdir}/src/rockstor --settings=settings --timeout=120 --graceful-timeout=120 wsgi:application
smart_manager_cmd = ${buildout:depdir}/bin/sm
replicad_cmd = ${buildout:depdir}/bin/replicad
ts_cmd = ${buildout:depdir}/bin/task-scheduler
dc_cmd = ${buildout:depdir}/bin/data-collector
sm_cmd = ${buildout:depdir}/bin/service-monitor
ztask_cmd = ${buildout:depdir}/bin/django ztaskd --noreload --replayfailed -f ${supervisord-conf:logdir}/ztask.log
Expand Down
3 changes: 2 additions & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@
'pwreset = scripts.pwreset:main',
'backup-plugin = backup.scheduler:main',
'initrock = scripts.initrock:main',
'task-scheduler = smart_manager.scheduler.task_dispatcher:main',
'data-collector = smart_manager.data_collector:main',
'docker-wrapper = scripts.docker_wrapper:main',
'ovpn-initpki = scripts.ovpn_util:initpki',
Expand All @@ -50,6 +49,8 @@
'rockon-json = scripts.rockon_util:main',
'flash-optimize = scripts.flash_optimize:main',
'dc2 = smart_manager.dc2:main',
'st-snapshot = scripts.scheduled_tasks.snapshot:main',
'st-pool-scrub = scripts.scheduled_tasks.pool_scrub:main',
],
},

Expand Down
18 changes: 9 additions & 9 deletions src/rockstor/fs/btrfs.py
Original file line number Diff line number Diff line change
Expand Up @@ -352,7 +352,7 @@ def remove_share(pool, pool_device, share_name, pqgroup):
return
qgroup = ('0/%s' % share_id(pool, pool_device, share_name))
delete_cmd = [BTRFS, 'subvolume', 'delete', subvol_mnt_pt]
run_command(delete_cmd)
run_command(delete_cmd, log=True)
qgroup_destroy(qgroup, root_pool_mnt)
return qgroup_destroy(pqgroup, root_pool_mnt)

Expand All @@ -364,7 +364,7 @@ def remove_snap(pool, pool_device, share_name, snap_name):
umount_root(snap_path)
if (is_subvol(snap_path)):
qgroup = ('0/%s' % share_id(pool, pool_device, snap_name))
run_command([BTRFS, 'subvolume', 'delete', snap_path])
run_command([BTRFS, 'subvolume', 'delete', snap_path], log=True)
return qgroup_destroy(qgroup, root_mnt)
else:
o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', root_mnt])
Expand Down Expand Up @@ -465,7 +465,7 @@ def qgroup_id(pool, disk_name, share_name):
return '0/' + sid

def qgroup_max(mnt_pt):
o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt])
o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt], log=True)
res = 0
for l in o:
if (re.match('%s/' % QID, l) is not None):
Expand All @@ -479,7 +479,7 @@ def qgroup_create(pool):
pd = pool.disk_set.first().name
mnt_pt = mount_root(pool, ('/dev/%s' % pd))
qid = ('%s/%d' % (QID, qgroup_max(mnt_pt) + 1))
o, e, rc = run_command([BTRFS, 'qgroup', 'create', qid, mnt_pt])
o, e, rc = run_command([BTRFS, 'qgroup', 'create', qid, mnt_pt], log=True)
return qid


Expand All @@ -488,17 +488,17 @@ def qgroup_destroy(qid, mnt_pt):
for l in o:
if (re.match(qid, l) is not None and
l.split()[0] == qid):
return run_command([BTRFS, 'qgroup', 'destroy', qid, mnt_pt])
return run_command([BTRFS, 'qgroup', 'destroy', qid, mnt_pt], log=True)
return False

def qgroup_assign(qid, pqid, mnt_pt):
return run_command([BTRFS, 'qgroup', 'assign', qid, pqid, mnt_pt])
return run_command([BTRFS, 'qgroup', 'assign', qid, pqid, mnt_pt], log=True)

def update_quota(pool, pool_device, qgroup, size_bytes):
pool_device = '/dev/' + pool_device
root_pool_mnt = mount_root(pool, pool_device)
cmd = [BTRFS, 'qgroup', 'limit', str(size_bytes), qgroup, root_pool_mnt]
return run_command(cmd)
return run_command(cmd, log=True)


def convert_to_KiB(size):
Expand All @@ -525,7 +525,7 @@ def share_usage(pool, pool_device, share_id):
root_pool_mnt = mount_root(pool, pool_device)
run_command([BTRFS, 'quota', 'rescan', root_pool_mnt], throw=False)
cmd = [BTRFS, 'qgroup', 'show', root_pool_mnt]
out, err, rc = run_command(cmd)
out, err, rc = run_command(cmd, log=True)
rusage = eusage = None
for line in out:
fields = line.split()
Expand All @@ -551,7 +551,7 @@ def shares_usage(pool, pool_device, share_map, snap_map):
mnt_pt = mount_root(pool, '/dev/' + pool_device)
run_command([BTRFS, 'quota', 'rescan', mnt_pt], throw=False)
cmd = [BTRFS, 'qgroup', 'show', mnt_pt]
out, err, rc = run_command(cmd)
out, err, rc = run_command(cmd, log=True)
combined_map = dict(share_map, **snap_map)
for line in out:
fields = line.split()
Expand Down
105 changes: 105 additions & 0 deletions src/rockstor/scripts/scheduled_tasks/snapshot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
"""
Copyright (c) 2012-2015 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import sys
import json
from datetime import datetime
from storageadmin.models import (Share, Snapshot)
from smart_manager.models import (Task, TaskDefinition)
from cli.rest_util import api_call
from django.utils.timezone import utc
from django.conf import settings
import logging
logger = logging.getLogger(__name__)


def validate_snap_meta(meta):
    """
    Validate the metadata dict of a scheduled snapshot task.

    Checks that meta is a dict containing 'prefix', 'share' (the name of an
    existing Share) and 'max_count' (coercible to an integer >= 1).
    Normalizes 'visible' to False when it is missing or not a bool.

    Returns the (possibly updated) meta dict.
    Raises Exception describing the first validation failure found.
    """
    if (not isinstance(meta, dict)):
        raise Exception('meta must be a dictionary, not %s' % type(meta))
    if ('prefix' not in meta):
        raise Exception('prefix missing from meta. %s' % meta)
    if ('share' not in meta):
        raise Exception('share missing from meta. %s' % meta)
    if (not Share.objects.filter(name=meta['share']).exists()):
        raise Exception('Non-existent Share(%s) in meta. %s' %
                        (meta['share'], meta))
    if ('max_count' not in meta):
        raise Exception('max_count missing from meta. %s' % meta)
    try:
        # Accept numeric strings such as '5' or '5.0' as well as numbers.
        max_count = int(float(meta['max_count']))
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures mean
        # "not an integer"; anything else should propagate.
        raise Exception('max_count is not an integer. %s' % meta)
    if (max_count < 1):
        raise Exception('max_count must at least be 1, not %d' % max_count)
    if ('visible' not in meta or not isinstance(meta['visible'], bool)):
        meta['visible'] = False
    return meta

def main():
    """
    Entry point for a scheduled snapshot task.

    Takes one command line argument: the id of the TaskDefinition to run.
    First prunes snapshots beyond meta['max_count'] (oldest first), then
    creates a new snapshot via the local REST api, recording the outcome
    in a Task row ('finished' or 'error').
    """
    tid = int(sys.argv[1])
    tdo = TaskDefinition.objects.get(id=tid)
    stype = 'task_scheduler'
    # All api urls below are built against this; note the trailing slash.
    baseurl = 'https://localhost/api/'
    if (tdo.task_type != 'snapshot'):
        logger.error('task_type(%s) is not snapshot.' % tdo.task_type)
        return
    meta = json.loads(tdo.json_meta)
    validate_snap_meta(meta)

    max_count = int(float(meta['max_count']))
    share = Share.objects.get(name=meta['share'])
    prefix = ('%s_' % meta['prefix'])
    # Newest first, so everything past index max_count is prune-able.
    snapshots = Snapshot.objects.filter(share=share, snap_type=stype,
                                        name__startswith=prefix).order_by('-id')
    if (len(snapshots) > max_count):
        for snap in snapshots[max_count:]:
            # baseurl already ends with '/'; no separator needed here
            # (was '%s/shares/...', which produced a double slash).
            url = ('%sshares/%s/snapshots/%s' %
                   (baseurl, meta['share'], snap.name))
            try:
                api_call(url, data=None, calltype='delete', save_error=False)
                logger.debug('deleted old snapshot at %s' % url)
            except Exception as e:
                # Abort the whole run: creating new snapshots while pruning
                # fails would let the snapshot count grow unbounded.
                logger.error('Failed to delete old snapshot at %s' % url)
                logger.exception(e)
                return

    now = datetime.utcnow().replace(second=0, microsecond=0, tzinfo=utc)
    t = Task(task_def=tdo, state='started', start=now)
    # Build name/url before the try block so the except handler can always
    # reference url (previously a failure here raised NameError in except).
    name = ('%s_%s' % (meta['prefix'], datetime.utcnow().replace(
        tzinfo=utc).strftime(settings.SNAP_TS_FORMAT)))
    url = ('%sshares/%s/snapshots/%s' % (baseurl, meta['share'], name))
    try:
        data = {'snap_type': stype,
                'uvisible': meta['visible'], }
        headers = {'content-type': 'application/json'}
        api_call(url, data=data, calltype='post', headers=headers,
                 save_error=False)
        logger.debug('created snapshot at %s' % url)
        t.state = 'finished'
    except Exception as e:
        logger.error('Failed to create snapshot at %s' % url)
        t.state = 'error'
        logger.exception(e)
    finally:
        # Record the end time and persist the Task row regardless of outcome.
        t.end = datetime.utcnow().replace(tzinfo=utc)
        t.save()


if __name__ == '__main__':
    # Takes one argument: the TaskDefinition object id.
    main()
Loading

0 comments on commit 7ce0206

Please sign in to comment.