diff --git a/kolibri/core/auth/csv_utils.py b/kolibri/core/auth/csv_utils.py index 3a1dc8c8edf..a9cc9a80e42 100644 --- a/kolibri/core/auth/csv_utils.py +++ b/kolibri/core/auth/csv_utils.py @@ -1,9 +1,9 @@ import csv import logging -import os from collections import OrderedDict from functools import partial +from django.core.files.storage import default_storage from django.db.models import OuterRef from django.db.models import Q @@ -159,9 +159,9 @@ def map_input(obj): ) -def csv_file_generator(facility, filepath, overwrite=True, demographic=False): - if not overwrite and os.path.exists(filepath): - raise ValueError("{} already exists".format(filepath)) +def csv_file_generator(facility, filename, overwrite=True, demographic=False): + if not overwrite and default_storage.exists(filename): + raise ValueError("{} already exists".format(filename)) queryset = FacilityUser.objects.filter(facility=facility) header_labels = tuple( @@ -174,8 +174,6 @@ def csv_file_generator(facility, filepath, overwrite=True, demographic=False): column for column in db_columns if demographic or column not in DEMO_FIELDS ) - csv_file = open_csv_for_writing(filepath) - mappings = {} for key in output_mappings: @@ -184,9 +182,9 @@ def csv_file_generator(facility, filepath, overwrite=True, demographic=False): map_output = partial(output_mapper, labels=labels, output_mappings=mappings) - with csv_file as f: + with open_csv_for_writing(filename) as f: writer = csv.DictWriter(f, header_labels) - logger.info("Creating csv file {filename}".format(filename=filepath)) + logger.info("Creating csv file {filename}".format(filename=filename)) writer.writeheader() usernames = set() for item in ( diff --git a/kolibri/core/auth/management/commands/bulkexportusers.py b/kolibri/core/auth/management/commands/bulkexportusers.py index 84735e3407e..56d70995259 100644 --- a/kolibri/core/auth/management/commands/bulkexportusers.py +++ b/kolibri/core/auth/management/commands/bulkexportusers.py @@ -1,11 +1,10 @@ import 
csv import logging -import ntpath -import os from collections import OrderedDict from functools import partial from django.conf import settings +from django.core.files.storage import default_storage from django.core.management.base import CommandError from django.db.models import OuterRef from django.db.models import Subquery @@ -28,7 +27,6 @@ from kolibri.core.tasks.utils import get_current_job from kolibri.core.utils.csv import open_csv_for_writing from kolibri.core.utils.csv import output_mapper -from kolibri.utils import conf try: FileNotFoundError @@ -152,18 +150,16 @@ def translate_labels(): ) -def csv_file_generator(facility, filepath, overwrite=True): - if not overwrite and os.path.exists(filepath): - raise ValueError("{} already exists".format(filepath)) +def csv_file_generator(facility, filename, overwrite=True): + if not overwrite and default_storage.exists(filename): + raise ValueError("{} already exists".format(filename)) queryset = FacilityUser.objects.filter(facility=facility) header_labels = translate_labels().values() - csv_file = open_csv_for_writing(filepath) - - with csv_file as f: + with open_csv_for_writing(filename) as f: writer = csv.DictWriter(f, header_labels) - logger.info("Creating csv file {filename}".format(filename=filepath)) + logger.info("Creating csv file {filename}".format(filename=filename)) writer.writeheader() usernames = set() @@ -248,18 +244,14 @@ def get_facility(self, options): return default_facility - def get_filepath(self, options, facility): + def get_filename(self, options, facility): if options["output_file"] is None: - export_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") - if not os.path.isdir(export_dir): - os.mkdir(export_dir) - filepath = os.path.join( - export_dir, - CSV_EXPORT_FILENAMES["user"].format(facility.name, facility.id[:4]), + filename = default_storage.get_available_name( + CSV_EXPORT_FILENAMES["user"].format(facility.name, facility.id[:4]) ) else: - filepath = os.path.join(os.getcwd(), 
options["output_file"]) - return filepath + filename = options["output_file"] + return filename def handle_async(self, *args, **options): # set language for the translation of the messages @@ -268,14 +260,14 @@ def handle_async(self, *args, **options): self.overall_error = [] facility = self.get_facility(options) - filepath = self.get_filepath(options, facility) + filename = self.get_filename(options, facility) job = get_current_job() total_rows = FacilityUser.objects.filter(facility=facility).count() with self.start_progress(total=total_rows) as progress_update: try: for row in csv_file_generator( - facility, filepath, overwrite=options["overwrite"] + facility, filename, overwrite=options["overwrite"] ): progress_update(1) except (ValueError, IOError) as e: @@ -288,11 +280,11 @@ def handle_async(self, *args, **options): if job: job.extra_metadata["overall_error"] = self.overall_error job.extra_metadata["users"] = total_rows - job.extra_metadata["filename"] = ntpath.basename(filepath) + job.extra_metadata["filename"] = filename job.save_meta() else: logger.info( - "Created csv file {} with {} lines".format(filepath, total_rows) + "Created csv file {} with {} lines".format(filename, total_rows) ) translation.deactivate() diff --git a/kolibri/core/auth/management/commands/bulkimportusers.py b/kolibri/core/auth/management/commands/bulkimportusers.py index 5f6fafddff6..9509c9939c8 100644 --- a/kolibri/core/auth/management/commands/bulkimportusers.py +++ b/kolibri/core/auth/management/commands/bulkimportusers.py @@ -483,8 +483,7 @@ def append_error(self, msg): self.overall_error.append(str(msg)) def csv_headers_validation(self, filepath): - csv_file = open_csv_for_reading(filepath) - with csv_file as f: + with open_csv_for_reading(filepath) as f: header = next(csv.reader(f, strict=True)) has_header = False self.header_translation = { @@ -882,8 +881,7 @@ def handle_async(self, *args, **options): self.exit_if_error() self.progress_update(1) # state=csv_headers try: - 
csv_file = open_csv_for_reading(filepath) - with csv_file as f: + with open_csv_for_reading(filepath) as f: reader = csv.DictReader(f, strict=True) per_line_errors, classes, users, roles = self.csv_values_validation( reader, self.header_translation, self.default_facility diff --git a/kolibri/core/auth/management/commands/exportusers.py b/kolibri/core/auth/management/commands/exportusers.py index f65d065b084..57c3228080d 100644 --- a/kolibri/core/auth/management/commands/exportusers.py +++ b/kolibri/core/auth/management/commands/exportusers.py @@ -1,5 +1,4 @@ import logging -import os import sys from django.core.management.base import CommandError @@ -57,15 +56,13 @@ def handle_async(self, *args, **options): else: filename = options["output_file"] - filepath = os.path.join(os.getcwd(), filename) - total_rows = FacilityUser.objects.filter(facility=facility).count() with self.start_progress(total=total_rows) as progress_update: try: for row in csv_file_generator( facility, - filepath, + filename, overwrite=options["overwrite"], demographic=options["demographic"], ): diff --git a/kolibri/core/auth/management/commands/importusers.py b/kolibri/core/auth/management/commands/importusers.py index 747007e16aa..7df55ab1dec 100644 --- a/kolibri/core/auth/management/commands/importusers.py +++ b/kolibri/core/auth/management/commands/importusers.py @@ -197,8 +197,7 @@ def handle(self, *args, **options): fieldnames = input_fields + tuple(val for val in labels.values()) - csv_file = open_csv_for_reading(options["filepath"]) - with csv_file as f: + with open_csv_for_reading(options["filepath"]) as f: header = next(csv.reader(f, strict=True)) has_header = False if all(col in fieldnames for col in header): @@ -213,8 +212,7 @@ def handle(self, *args, **options): "Mix of valid and invalid header labels found in first row" ) - csv_file = open_csv_for_reading(options["filepath"]) - with csv_file as f: + with open_csv_for_reading(options["filepath"]) as f: if has_header: reader = 
csv.DictReader(f, strict=True) else: diff --git a/kolibri/core/auth/test/test_bulk_export.py b/kolibri/core/auth/test/test_bulk_export.py index ec13fd2d88b..bb8d0699158 100644 --- a/kolibri/core/auth/test/test_bulk_export.py +++ b/kolibri/core/auth/test/test_bulk_export.py @@ -1,5 +1,4 @@ import csv -import tempfile from django.test import override_settings from django.test import TestCase @@ -34,11 +33,10 @@ def setUpTestData(cls): classroom_count=CLASSROOMS, learnergroup_count=1 ) cls.facility = cls.data["facility"] - - _, cls.filepath = tempfile.mkstemp(suffix=".csv") + cls.filename = "temp.csv" cls.csv_rows = [] - for row in cls.b.csv_file_generator(cls.facility, cls.filepath, True): + for row in cls.b.csv_file_generator(cls.facility, cls.filename, True): cls.csv_rows.append(row) def test_not_specified(self): @@ -130,8 +128,7 @@ def test_passwords_as_asterisks(self): assert row["password"] == "*" def get_data_from_csv_file(self): - csv_file = open_csv_for_reading(self.filepath) - with csv_file as f: + with open_csv_for_reading(self.filename) as f: results = [row for row in csv.DictReader(f)] return results diff --git a/kolibri/core/auth/test/test_bulk_import.py b/kolibri/core/auth/test/test_bulk_import.py index a00b76d9c69..1ffcdd0eecb 100644 --- a/kolibri/core/auth/test/test_bulk_import.py +++ b/kolibri/core/auth/test/test_bulk_import.py @@ -1,5 +1,4 @@ import csv -import tempfile from io import StringIO from uuid import uuid4 @@ -104,10 +103,11 @@ def setUp(self): ) self.facility = self.data["facility"] - _, self.filepath = tempfile.mkstemp(suffix=".csv") + self.filename = "temp.csv" + call_command( "bulkexportusers", - output_file=self.filepath, + output_file=self.filename, overwrite=True, facility=self.facility.id, ) @@ -115,12 +115,10 @@ def setUp(self): FacilityUser.objects.all().delete() Classroom.objects.all().delete() - def create_csv(self, filepath, rows, remove_uuid=False): + def create_csv(self, filename, rows, remove_uuid=False): header_labels = 
list(labels.values()) - csv_file = open_csv_for_writing(filepath) - - with csv_file as f: + with open_csv_for_writing(filename) as f: writer = csv.writer(f) writer.writerow(header_labels) for item in rows: @@ -131,33 +129,32 @@ def create_csv(self, filepath, rows, remove_uuid=False): def import_exported_csv(self): # Replace asterisk in passwords to be able to import it # Remove UUID so new users are created - _, new_filepath = tempfile.mkstemp(suffix=".csv") + new_filename = "import_exported_csv_" + self.filename rows = [] - with open_csv_for_reading(self.filepath) as source: + with open_csv_for_reading(self.filename) as source: reader = csv.reader(source, strict=True) for row in reader: row[0] = None if row[2] == "*": row[2] = "temp_password" rows.append(row) - self.create_csv(new_filepath, rows[1:]) # remove header - self.filepath = new_filepath + self.create_csv(new_filename, rows[1:]) # remove header # import exported csv - call_command("bulkimportusers", self.filepath, facility=self.facility.id) + call_command("bulkimportusers", new_filename, facility=self.facility.id) current_classes = Classroom.objects.filter(parent_id=self.facility).all() for classroom in current_classes: assert len(classroom.get_members()) == CLASSROOMS assert len(classroom.get_coaches()) == 1 def test_dryrun_from_export_csv(self): - with open_csv_for_reading(self.filepath) as source: + with open_csv_for_reading(self.filename) as source: header = next(csv.reader(source, strict=True)) header_translation = { lbl.partition("(")[2].partition(")")[0]: lbl for lbl in header } cmd = b.Command() - with open_csv_for_reading(self.filepath) as source: + with open_csv_for_reading(self.filename) as source: reader = csv.DictReader(source, strict=True) per_line_errors, classes, users, roles = cmd.csv_values_validation( reader, header_translation, self.facility @@ -179,7 +176,7 @@ def test_dryrun_from_export_csv(self): assert assigned_classes["classroom1"] == ["classcoach1"] def 
test_password_is_required(self): - _, new_filepath = tempfile.mkstemp(suffix=".csv") + new_filename = "test_password_is_required_" + self.filename rows = [ [ None, @@ -218,16 +215,16 @@ def test_password_is_required(self): "new_class", ], ] - self.create_csv(new_filepath, rows) + self.create_csv(new_filename, rows) - with open_csv_for_reading(new_filepath) as source: + with open_csv_for_reading(new_filename) as source: header = next(csv.reader(source, strict=True)) header_translation = { lbl.partition("(")[2].partition(")")[0]: lbl for lbl in header } cmd = b.Command() - with open_csv_for_reading(new_filepath) as source: + with open_csv_for_reading(new_filename) as source: reader = csv.DictReader(source, strict=True) per_line_errors, classes, users, roles = cmd.csv_values_validation( reader, header_translation, self.facility @@ -240,7 +237,7 @@ def test_password_is_required(self): out_log = StringIO() call_command( "bulkimportusers", - new_filepath, + new_filename, facility=self.facility.id, errorlines=out_log, ) @@ -251,7 +248,7 @@ def test_password_is_required(self): assert "'row': 2" in result[1] def test_case_insensitive_usernames(self): - _, first_filepath = tempfile.mkstemp(suffix=".csv") + first_filename = "case_insensitive_usernames_" + self.filename rows = [ [ None, @@ -278,8 +275,8 @@ def test_case_insensitive_usernames(self): "new_class", ], ] - self.create_csv(first_filepath, rows) - call_command("bulkimportusers", first_filepath, facility=self.facility.id) + self.create_csv(first_filename, rows) + call_command("bulkimportusers", first_filename, facility=self.facility.id) # Retrieve the user(s) users = FacilityUser.objects.filter(username__iexact="peter") @@ -288,7 +285,7 @@ def test_case_insensitive_usernames(self): assert users.count() == 1 def test_username_already_exists(self): - _, first_filepath = tempfile.mkstemp(suffix=".csv") + first_filename = "username_exists_1_" + self.filename rows = [ [ None, @@ -303,9 +300,9 @@ def 
test_username_already_exists(self): None, ], ] - self.create_csv(first_filepath, rows) + self.create_csv(first_filename, rows) - call_command("bulkimportusers", first_filepath, facility=self.facility.id) + call_command("bulkimportusers", first_filename, facility=self.facility.id) # Get the initial count of users with the username "peter" initial_peter_count = FacilityUser.objects.filter(username="peter").count() @@ -315,7 +312,7 @@ def test_username_already_exists(self): assert initial_peter_count == 1 # Attempt to add another user with the same username "peter" - _, second_filepath = tempfile.mkstemp(suffix=".csv") + second_filename = "username_exists_2_" + self.filename rows = [ [ None, @@ -330,10 +327,10 @@ def test_username_already_exists(self): None, ], ] - self.create_csv(second_filepath, rows) + self.create_csv(second_filename, rows) # Check that the command raises an IntegrityError when trying to add a user with an existing username - call_command("bulkimportusers", second_filepath, facility=self.facility.id) + call_command("bulkimportusers", second_filename, facility=self.facility.id) # Check that the count of users with the username "peter" is still one assert FacilityUser.objects.filter(username="peter").count() == 1 @@ -343,7 +340,7 @@ def test_username_already_exists(self): assert passwd2 == passwd1 def test_username_already_exists_on_different_facility(self): - _, first_filepath = tempfile.mkstemp(suffix=".csv") + first_filename = "username_exists_first_" + self.filename rows = [ [ None, @@ -358,7 +355,7 @@ def test_username_already_exists_on_different_facility(self): None, ], ] - self.create_csv(first_filepath, rows) + self.create_csv(first_filename, rows) data = create_dummy_facility_data( classroom_count=CLASSROOMS, learnergroup_count=1 @@ -367,10 +364,10 @@ def test_username_already_exists_on_different_facility(self): facility2 = data["facility"] # First import this user into a different facility - call_command("bulkimportusers", first_filepath, 
facility=facility2.id) + call_command("bulkimportusers", first_filename, facility=facility2.id) # Then import into the main facility and confirm that it works! - call_command("bulkimportusers", first_filepath, facility=self.facility.id) + call_command("bulkimportusers", first_filename, facility=self.facility.id) # Assert that we have created a user like this in both facilities. assert FacilityUser.objects.filter( @@ -381,7 +378,7 @@ def test_username_already_exists_on_different_facility(self): ).exists() def test_asterisk_in_password(self): - _, first_filepath = tempfile.mkstemp(suffix=".csv") + first_filename = "asterisk_in_password_1_" + self.filename rows = [ [ None, @@ -408,8 +405,8 @@ def test_asterisk_in_password(self): "new_class", ], ] - self.create_csv(first_filepath, rows) - call_command("bulkimportusers", first_filepath, facility=self.facility.id) + self.create_csv(first_filename, rows) + call_command("bulkimportusers", first_filename, facility=self.facility.id) user1 = FacilityUser.objects.get(username="new_learner") passwd1 = user1.password uid1 = user1.id @@ -418,7 +415,7 @@ def test_asterisk_in_password(self): uid2 = user2.id # let's edit the users with a new import - _, second_filepath = tempfile.mkstemp(suffix=".csv") + second_filename = "asterisk_in_password_2_" + self.filename rows = [ [ uid1, @@ -445,8 +442,8 @@ def test_asterisk_in_password(self): "new_class", ], ] - self.create_csv(second_filepath, rows) - call_command("bulkimportusers", second_filepath, facility=self.facility.id) + self.create_csv(second_filename, rows) + call_command("bulkimportusers", second_filename, facility=self.facility.id) assert passwd1 != FacilityUser.objects.get(username="new_learner").password # When updating, an asterisk should keep the previous password: assert passwd2 == FacilityUser.objects.get(username="new_coach").password @@ -455,7 +452,7 @@ def test_delete_users_and_classes(self): self.import_exported_csv() # new csv to import and clear classes and delete 
non-admin users: - _, new_filepath = tempfile.mkstemp(suffix=".csv") + new_filename = "delete_users_and_classes_" + self.filename rows = [ [ None, @@ -482,9 +479,9 @@ def test_delete_users_and_classes(self): "new_class", ], ] - self.create_csv(new_filepath, rows) + self.create_csv(new_filename, rows) call_command( - "bulkimportusers", new_filepath, "--delete", facility=self.facility.id + "bulkimportusers", new_filename, "--delete", facility=self.facility.id ) # Previous users have been deleted, excepting the existing admin: @@ -520,7 +517,7 @@ def test_add_users_and_classes(self): self.import_exported_csv() old_users = FacilityUser.objects.count() # new csv to import and update classes, adding users and keeping previous not been in the csv: - _, new_filepath = tempfile.mkstemp(suffix=".csv") + new_filename = "add_users_and_classes_" + self.filename rows = [ [ None, @@ -546,8 +543,8 @@ def test_add_users_and_classes(self): "classroom0", ], ] - self.create_csv(new_filepath, rows) - call_command("bulkimportusers", new_filepath, facility=self.facility.id) + self.create_csv(new_filename, rows) + call_command("bulkimportusers", new_filename, facility=self.facility.id) assert FacilityUser.objects.count() == old_users + 2 current_classes = Classroom.objects.filter(parent_id=self.facility).all() for classroom in current_classes: @@ -568,7 +565,7 @@ def test_add_users_and_classes(self): assert new_coach.gender == demographics.MALE def test_classes_names_case_insensitive(self): - _, new_filepath = tempfile.mkstemp(suffix=".csv") + new_filename = "class_names_case_insensitive_" + self.filename # first inside the same csv file rows = [ [ @@ -595,8 +592,8 @@ def test_classes_names_case_insensitive(self): "My other class, AnotheR ClasS", ], ] - self.create_csv(new_filepath, rows) - call_command("bulkimportusers", new_filepath, facility=self.facility.id) + self.create_csv(new_filename, rows) + call_command("bulkimportusers", new_filename, facility=self.facility.id) classrooms = 
Classroom.objects.all() assert len(classrooms) == 3 @@ -615,28 +612,29 @@ def test_classes_names_case_insensitive(self): "Another CLASS ", ] ] - self.create_csv(new_filepath, rows) - call_command("bulkimportusers", new_filepath, facility=self.facility.id) + self.create_csv("new_" + new_filename, rows) + new_new_filename = "new_" + new_filename + call_command("bulkimportusers", new_new_filename, facility=self.facility.id) classrooms = Classroom.objects.all() assert len(classrooms) == 4 def test_non_existing_uuid(self): self.import_exported_csv() - _, new_filepath = tempfile.mkstemp(suffix=".csv") + new_filename = "new_" + self.filename rows = [] - with open_csv_for_reading(self.filepath) as source: + with open_csv_for_reading(self.filename) as source: reader = csv.reader(source, strict=True) for row in reader: row[0] = uuid4() row[2] = "*" rows.append(row) - self.create_csv(new_filepath, rows[1:]) # remove header + self.create_csv(new_filename, rows[1:]) # remove header number_of_rows = len(rows) - 1 # exclude header # import exported csv out_log = StringIO() call_command( "bulkimportusers", - new_filepath, + new_filename, facility=self.facility.id, errorlines=out_log, ) diff --git a/kolibri/core/auth/test/test_user_export.py b/kolibri/core/auth/test/test_user_export.py index 9195d83e1e4..cbe601da968 100644 --- a/kolibri/core/auth/test/test_user_export.py +++ b/kolibri/core/auth/test/test_user_export.py @@ -3,7 +3,6 @@ Also tests whether the users with permissions can create logs. 
""" import csv -import tempfile from django.core.management import call_command from django.test import TestCase @@ -57,12 +56,12 @@ def test_csv_export_with_demographics(self): for user in users: FacilityUser.objects.create(facility=facility, **user) expected_count = FacilityUser.objects.count() - _, filepath = tempfile.mkstemp(suffix=".csv") + filename = "csv_export_with_demogs.csv" call_command( - "exportusers", output_file=filepath, overwrite=True, demographic=True + "exportusers", output_file=filename, overwrite=True, demographic=True ) - csv_file = open_csv_for_reading(filepath) - with csv_file as f: + + with open_csv_for_reading(filename) as f: results = [row for row in csv.DictReader(f)] for row in results: @@ -91,12 +90,12 @@ def test_csv_export_no_demographics(self): for user in users: FacilityUser.objects.create(facility=facility, **user) expected_count = FacilityUser.objects.count() - _, filepath = tempfile.mkstemp(suffix=".csv") + filename = "csv_no_demogs.csv" call_command( - "exportusers", output_file=filepath, overwrite=True, demographic=False + "exportusers", output_file=filename, overwrite=True, demographic=False ) - csv_file = open_csv_for_reading(filepath) - with csv_file as f: + + with open_csv_for_reading(filename) as f: results = [row for row in csv.DictReader(f)] self.assertEqual(len(results), expected_count) @@ -112,12 +111,11 @@ def test_csv_export_user_in_multiple_classes(self): user_obj = FacilityUser.objects.create(facility=facility, **user) classroom1.add_member(user_obj) classroom2.add_member(user_obj) - _, filepath = tempfile.mkstemp(suffix=".csv") + filename = "csv_no_user_multiple_classes.csv" call_command( - "exportusers", output_file=filepath, overwrite=True, demographic=True + "exportusers", output_file=filename, overwrite=True, demographic=True ) - csv_file = open_csv_for_reading(filepath) - with csv_file as f: + with open_csv_for_reading(filename) as f: results = [row for row in csv.DictReader(f)] 
self.assertEqual(len(results), 2) @@ -130,12 +128,11 @@ def test_csv_export_user_in_one_class_one_group(self): user_obj = FacilityUser.objects.create(facility=facility, **user) classroom.add_member(user_obj) group.add_member(user_obj) - _, filepath = tempfile.mkstemp(suffix=".csv") + filename = "csv_no_1class1group.csv" call_command( - "exportusers", output_file=filepath, overwrite=True, demographic=True + "exportusers", output_file=filename, overwrite=True, demographic=True ) - csv_file = open_csv_for_reading(filepath) - with csv_file as f: + with open_csv_for_reading(filename) as f: results = [row for row in csv.DictReader(f)] self.assertEqual(len(results), 2) diff --git a/kolibri/core/auth/test/test_user_import.py b/kolibri/core/auth/test/test_user_import.py index 5eb5d8973d5..deb46728a07 100644 --- a/kolibri/core/auth/test/test_user_import.py +++ b/kolibri/core/auth/test/test_user_import.py @@ -1,7 +1,6 @@ import csv -import os -import tempfile +from django.core.files.storage import default_storage from django.core.management import call_command from django.core.management.base import CommandError from django.test import TestCase @@ -137,11 +136,8 @@ def test_create_user_not_exist_bad_username(self): class DeviceNotSetup(TestCase): def test_device_not_setup(self): - csvfile, csvpath = tempfile.mkstemp(suffix="csv") with self.assertRaisesRegex(CommandError, "No default facility exists"): - call_command("importusers", csvpath) - os.close(csvfile) - os.remove(csvpath) + call_command("importusers", "you_cannot_import_yet.csv") class UserImportCommandTestCase(TestCase): @@ -157,34 +153,32 @@ def setUpClass(self): self.facility, self.superuser = setup_device() def setUp(self): - self.csvfile, self.csvpath = tempfile.mkstemp(suffix="csv") + self.csvfilename = default_storage.get_available_name("test.csv") def tearDown(self): FacilityUser.objects.exclude(username=self.superuser.username).delete() - os.close(self.csvfile) - os.remove(self.csvpath) + 
default_storage.delete(self.csvfilename) def importFromRows(self, *args): - csv_file = open_csv_for_writing(self.csvpath) - with csv_file as f: + with open_csv_for_writing(self.csvfilename) as f: writer = csv.writer(f) writer.writerows([a for a in args]) - call_command("importusers", self.csvpath) + call_command("importusers", self.csvfilename) def test_setup_headers_no_username(self): with self.assertRaisesRegex(CommandError, "No usernames specified"): self.importFromRows(["class", "facility"]) - call_command("importusers", self.csvpath) + call_command("importusers", self.csvfilename) def test_setup_headers_invalid_header(self): with self.assertRaisesRegex(CommandError, "Mix of valid and invalid header"): self.importFromRows(["class", "facility", "dogfood"]) - call_command("importusers", self.csvpath) + call_command("importusers", self.csvfilename) def test_setup_headers_make_user(self): self.importFromRows(["username"], ["testuser"]) - call_command("importusers", self.csvpath) + call_command("importusers", self.csvfilename) self.assertTrue(FacilityUser.objects.filter(username="testuser").exists()) def test_setup_no_headers_make_user(self): @@ -271,10 +265,13 @@ def test_import_from_export_csv(self): for user in users: FacilityUser.objects.create(facility=self.facility, **user) call_command( - "exportusers", output_file=self.csvpath, overwrite=True, demographic=True + "exportusers", + output_file=self.csvfilename, + overwrite=True, + demographic=True, ) FacilityUser.objects.all().delete() - call_command("importusers", self.csvpath) + call_command("importusers", self.csvfilename) for user in users: user_model = FacilityUser.objects.get(username=user["username"]) self.assertEqual(user_model.gender, user["gender"]) @@ -284,16 +281,18 @@ def test_import_from_export_csv(self): def test_import_from_export_missing_headers(self): for user in users: FacilityUser.objects.create(facility=self.facility, **user) + call_command( - "exportusers", output_file=self.csvpath, 
overwrite=True, demographic=True + "exportusers", + output_file=self.csvfilename, + overwrite=True, + demographic=True, ) cols_to_remove = ["Facility id", "Gender"] - csv_file = open_csv_for_reading(self.csvpath) - with csv_file as source: + with open_csv_for_reading(self.csvfilename) as source: reader = csv.DictReader(source) rows = [row for row in reader] - csv_file = open_csv_for_writing(self.csvpath) - with csv_file as result: + with open_csv_for_writing("new" + self.csvfilename) as result: writer = csv.DictWriter( result, tuple( @@ -305,8 +304,9 @@ def test_import_from_export_missing_headers(self): for col in cols_to_remove: del row[col] writer.writerow(row) + FacilityUser.objects.all().delete() - call_command("importusers", self.csvpath) + call_command("importusers", "new" + self.csvfilename) for user in users: user_model = FacilityUser.objects.get(username=user["username"]) self.assertEqual(user_model.birth_year, user["birth_year"]) @@ -316,15 +316,16 @@ def test_import_from_export_mixed_headers(self): for user in users: FacilityUser.objects.create(facility=self.facility, **user) call_command( - "exportusers", output_file=self.csvpath, overwrite=True, demographic=True + "exportusers", + output_file=self.csvfilename, + overwrite=True, + demographic=True, ) cols_to_replace = {"Facility id": "facility", "Gender": "gender"} - csv_file = open_csv_for_reading(self.csvpath) - with csv_file as source: + with open_csv_for_reading(self.csvfilename) as source: reader = csv.DictReader(source) rows = [row for row in reader] - csv_file = open_csv_for_writing(self.csvpath) - with csv_file as result: + with open_csv_for_writing("new" + self.csvfilename) as result: writer = csv.DictWriter( result, tuple( @@ -339,7 +340,7 @@ def test_import_from_export_mixed_headers(self): del row[col] writer.writerow(row) FacilityUser.objects.all().delete() - call_command("importusers", self.csvpath) + call_command("importusers", "new" + self.csvfilename) for user in users: user_model = 
FacilityUser.objects.get(username=user["username"]) self.assertEqual(user_model.birth_year, user["birth_year"]) diff --git a/kolibri/core/logger/csv_export.py b/kolibri/core/logger/csv_export.py index 6ef35cc24f7..d875f18c276 100644 --- a/kolibri/core/logger/csv_export.py +++ b/kolibri/core/logger/csv_export.py @@ -2,11 +2,11 @@ import datetime import logging import math -import os from collections import OrderedDict from dateutil import parser from django.core.cache import cache +from django.core.files.storage import default_storage from django.db.models import F from django.db.models import Max from django.db.models import OuterRef @@ -256,7 +256,7 @@ def csv_file_generator( else parser.parse(end_date) + datetime.timedelta(days=1) ) - if not overwrite and os.path.exists(filepath): + if not overwrite and default_storage.exists(filepath): raise ValueError("{} already exists".format(filepath)) queryset = log_info["queryset"].filter( dataset_id=facility.dataset_id, @@ -285,9 +285,7 @@ def csv_file_generator( label for _, label in topic_headers ] - csv_file = open_csv_for_writing(filepath) - - with csv_file as f: + with open_csv_for_writing(filepath) as f: writer = csv.DictWriter(f, header_labels) logger.info("Creating csv file {filename}".format(filename=filepath)) writer.writeheader() diff --git a/kolibri/core/logger/tasks.py b/kolibri/core/logger/tasks.py index 4220c3520c5..b98018d0fe0 100644 --- a/kolibri/core/logger/tasks.py +++ b/kolibri/core/logger/tasks.py @@ -1,5 +1,6 @@ import os +from django.core.files.storage import default_storage from django.core.management import call_command from rest_framework import serializers @@ -12,18 +13,13 @@ from kolibri.core.tasks.decorators import register_task from kolibri.core.tasks.permissions import IsAdminForJob from kolibri.core.tasks.validation import JobValidator -from kolibri.utils import conf LOGS_CLEANUP_JOB_ID = "18" def get_filepath(log_type, facility_id, start_date, end_date): facility = 
Facility.objects.get(id=facility_id) - logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") - if not os.path.isdir(logs_dir): - os.mkdir(logs_dir) - filepath = os.path.join( - logs_dir, + filepath = default_storage.path( CSV_EXPORT_FILENAMES[log_type].format( facility.name, facility.id[:4], start_date[:10], end_date[:10] ), @@ -177,10 +173,8 @@ def log_exports_cleanup(): Cleanup log_exports csv files that does not have related reocord in GenerateCSVLogRequest model """ - logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") - if not os.path.isdir(logs_dir): - return valid_filenames_set = get_valid_filenames() - for filename in os.listdir(logs_dir): + _, files_in_storage = default_storage.listdir("") + for filename in files_in_storage: if filename not in valid_filenames_set: - os.remove(os.path.join(logs_dir, filename)) + default_storage.delete(filename) diff --git a/kolibri/core/logger/test/test_api.py b/kolibri/core/logger/test/test_api.py index e8c8ddc5bb7..5ae0968d942 100644 --- a/kolibri/core/logger/test/test_api.py +++ b/kolibri/core/logger/test/test_api.py @@ -6,11 +6,11 @@ import csv import datetime import os -import tempfile import uuid import mock import pytz +from django.core.files.storage import default_storage from django.core.management import call_command from django.urls import reverse from rest_framework.test import APITestCase @@ -33,7 +33,7 @@ from kolibri.core.logger.csv_export import labels from kolibri.core.logger.tasks import get_filepath from kolibri.core.logger.tasks import log_exports_cleanup -from kolibri.utils import conf +from kolibri.core.utils.csv import open_csv_for_reading from kolibri.utils.time_utils import local_now @@ -64,7 +64,7 @@ def setUpTestData(cls): def test_csv_download(self): expected_count = ContentSummaryLog.objects.count() - _, filepath = tempfile.mkstemp(suffix=".csv") + filepath = default_storage.path("{}.csv".format(uuid.uuid4())) call_command( "exportlogs", log_type="summary", @@ -73,7 +73,7 @@ def 
test_csv_download(self): start_date=self.start_date, end_date=self.end_date, ) - with open(filepath, "r", newline="") as f: + with open_csv_for_reading(filepath) as f: results = list(csv.reader(f)) for row in results[1:]: self.assertEqual(len(results[0]), len(row)) @@ -88,7 +88,7 @@ def test_csv_download_deleted_content(self): expected_count = ContentSummaryLog.objects.count() ContentNode.objects.all().delete() ChannelMetadata.objects.all().delete() - _, filepath = tempfile.mkstemp(suffix=".csv") + filepath = default_storage.path("{}.csv".format(uuid.uuid4())) call_command( "exportlogs", log_type="summary", @@ -97,7 +97,7 @@ def test_csv_download_deleted_content(self): start_date=self.start_date, end_date=self.end_date, ) - with open(filepath, "r", newline="") as f: + with open_csv_for_reading(filepath) as f: results = list(csv.reader(f)) for row in results[1:]: self.assertEqual(len(results[0]), len(row)) @@ -120,7 +120,7 @@ def test_csv_download_unicode_username(self): ) expected_count = ContentSummaryLog.objects.count() - _, filepath = tempfile.mkstemp(suffix=".csv") + filepath = default_storage.path("{}.csv".format(uuid.uuid4())) call_command( "exportlogs", log_type="summary", @@ -129,7 +129,7 @@ def test_csv_download_unicode_username(self): start_date=self.start_date, end_date=self.end_date, ) - with open(filepath, "r", newline="") as f: + with open_csv_for_reading(filepath) as f: results = list(csv.reader(f)) for row in results[1:]: self.assertEqual(len(results[0]), len(row)) @@ -178,11 +178,11 @@ def test_csv_cleanup(self, mock_enqueue): # latest should persist and the old one should be deleted log_exports_cleanup() - logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") - # currently there are two file. 
logs export and users csv export - assert len(os.listdir(logs_dir)) == 2 - assert os.path.basename(filepath_2) in os.listdir(logs_dir) - assert os.path.basename(filepath) not in os.listdir(logs_dir) + _, files_uploaded = default_storage.listdir("") + + # logs export and users csv export + assert os.path.basename(filepath_2) in files_uploaded + assert os.path.basename(filepath) not in files_uploaded # make sure the csv file for the record saved in the database exists log_request = GenerateCSVLogRequest.objects.get(log_type=log_type) @@ -196,8 +196,8 @@ def test_csv_cleanup(self, mock_enqueue): expected_users_csv_file_path = USER_CSV_EXPORT_FILENAMES["user"].format( self.facility.name, self.facility.id[:4] ) - assert os.path.basename(expected_file_path) in os.listdir(logs_dir) - assert expected_users_csv_file_path in os.listdir(logs_dir) + assert os.path.basename(expected_file_path) in files_uploaded + assert expected_users_csv_file_path in files_uploaded assert mock_enqueue.has_calls(2) @@ -227,7 +227,7 @@ def setUpTestData(cls): def test_csv_download(self): expected_count = ContentSessionLog.objects.count() - _, filepath = tempfile.mkstemp(suffix=".csv") + filepath = default_storage.path("{}.csv".format(uuid.uuid4())) call_command( "exportlogs", log_type="session", @@ -236,7 +236,7 @@ def test_csv_download(self): start_date=self.start_date, end_date=self.end_date, ) - with open(filepath, "r", newline="") as f: + with open_csv_for_reading(filepath) as f: results = list(csv.reader(f)) for row in results[1:]: self.assertEqual(len(results[0]), len(row)) @@ -251,7 +251,7 @@ def test_csv_download_deleted_content(self): expected_count = ContentSessionLog.objects.count() ContentNode.objects.all().delete() ChannelMetadata.objects.all().delete() - _, filepath = tempfile.mkstemp(suffix=".csv") + filepath = default_storage.path("{}.csv".format(uuid.uuid4())) call_command( "exportlogs", log_type="session", @@ -260,7 +260,7 @@ def test_csv_download_deleted_content(self): 
start_date=self.start_date, end_date=self.end_date, ) - with open(filepath, "r", newline="") as f: + with open_csv_for_reading(filepath) as f: results = list(csv.reader(f)) for row in results[1:]: self.assertEqual(len(results[0]), len(row)) @@ -283,7 +283,7 @@ def test_csv_download_unicode_username(self): ) expected_count = ContentSessionLog.objects.count() - _, filepath = tempfile.mkstemp(suffix=".csv") + filepath = default_storage.path("{}.csv".format(uuid.uuid4())) call_command( "exportlogs", log_type="session", @@ -292,7 +292,7 @@ def test_csv_download_unicode_username(self): start_date=self.start_date, end_date=self.end_date, ) - with open(filepath, "r", newline="") as f: + with open_csv_for_reading(filepath) as f: results = list(csv.reader(f)) for row in results[1:]: self.assertEqual(len(results[0]), len(row)) @@ -304,7 +304,7 @@ def test_csv_download_unicode_username(self): ) def test_csv_download_no_completion_timestamp(self): - _, filepath = tempfile.mkstemp(suffix=".csv") + filepath = default_storage.path("{}.csv".format(uuid.uuid4())) call_command( "exportlogs", log_type="session", @@ -313,7 +313,7 @@ def test_csv_download_no_completion_timestamp(self): start_date=self.start_date, end_date=self.end_date, ) - with open(filepath, "r", newline="") as f: + with open_csv_for_reading(filepath) as f: results = list(csv.reader(f)) for column_label in results[0]: self.assertNotEqual(column_label, labels["completion_timestamp"]) @@ -361,11 +361,10 @@ def test_csv_cleanup(self, mock_enqueue): # latest csv should persist and the old one should be deleted log_exports_cleanup() - logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") + _, files_uploaded = default_storage.listdir("") # currently there are two file. 
logs export and users csv export - assert len(os.listdir(logs_dir)) == 2 - assert os.path.basename(filepath_2) in os.listdir(logs_dir) - assert os.path.basename(filepath) not in os.listdir(logs_dir) + assert os.path.basename(filepath_2) in files_uploaded + assert os.path.basename(filepath) not in files_uploaded # make sure the csv file for the record saved in the database exists log_request = GenerateCSVLogRequest.objects.get(log_type=log_type) @@ -379,8 +378,8 @@ def test_csv_cleanup(self, mock_enqueue): expected_users_csv_file_path = USER_CSV_EXPORT_FILENAMES["user"].format( self.facility.name, self.facility.id[:4] ) - assert os.path.basename(expected_file_path) in os.listdir(logs_dir) - assert expected_users_csv_file_path in os.listdir(logs_dir) + assert os.path.basename(expected_file_path) in files_uploaded + assert expected_users_csv_file_path in files_uploaded assert mock_enqueue.has_calls(2) diff --git a/kolibri/core/utils/csv.py b/kolibri/core/utils/csv.py index 02d20c23a9c..dde98d8938d 100644 --- a/kolibri/core/utils/csv.py +++ b/kolibri/core/utils/csv.py @@ -1,14 +1,49 @@ import io import re +from contextlib import contextmanager from numbers import Number +from django.core.files.storage import default_storage -def open_csv_for_writing(filepath): - return io.open(filepath, "w", newline="", encoding="utf-8-sig") +@contextmanager +def open_csv_for_writing(filename): + if default_storage.exists(filename): + # If the file exists, we need to open it and return it wrapped in a TextIOWrapper + with default_storage.open(filename, "rb+") as f: + encoded_fh = io.TextIOWrapper( + f, newline="", encoding="utf-8-sig", write_through=True + ) + yield encoded_fh + encoded_fh.flush() + default_storage.save(filename, f) + else: + # If the file does not exist, we need to create it and return it wrapped in a TextIOWrapper + with io.BytesIO() as f: + encoded_fh = io.TextIOWrapper( + f, + newline="", + encoding="utf-8-sig", + write_through=True, + line_buffering=True, + ) + 
yield encoded_fh + encoded_fh.flush() + default_storage.save(filename, f) -def open_csv_for_reading(filepath): - return io.open(filepath, "r", newline="", encoding="utf-8-sig") + +@contextmanager +def open_csv_for_reading(filename): + with default_storage.open(filename, "rb") as f: + encoded_fh = io.TextIOWrapper( + f, + newline="", + encoding="utf-8-sig", + write_through=True, + line_buffering=True, + ) + yield encoded_fh + encoded_fh.flush() negative_number_regex = re.compile("^-?[0-9,\\.]+$") diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py index 66c91932a09..9437b0dbcf5 100644 --- a/kolibri/deployment/default/settings/base.py +++ b/kolibri/deployment/default/settings/base.py @@ -190,6 +190,18 @@ DEFAULT_AUTO_FIELD = "django.db.models.AutoField" +# File Storage Backend +# https://docs.djangoproject.com/en/3.2/ref/files/storage/ + +if not os.environ.get("DEFAULT_FILE_STORAGE"): + if conf.OPTIONS["FileStorage"]["STORAGE_BACKEND"] == "gcs": + # Options per https://django-storages.readthedocs.io/en/latest/backends/gcloud.html + DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage" + GS_BUCKET_NAME = conf.OPTIONS["FileStorage"]["GS_BUCKET_NAME"] + GS_PROJECT_ID = conf.OPTIONS["FileStorage"]["GS_PROJECT_ID"] + GS_CREDENTIALS = conf.OPTIONS["FileStorage"]["GS_CREDENTIALS"] + GS_DEFAULT_ACL = conf.OPTIONS["FileStorage"]["GS_DEFAULT_ACL"] + # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ diff --git a/kolibri/plugins/facility/test/test_api.py b/kolibri/plugins/facility/test/test_api.py index 007183be222..862fce549aa 100644 --- a/kolibri/plugins/facility/test/test_api.py +++ b/kolibri/plugins/facility/test/test_api.py @@ -4,11 +4,11 @@ Also tests whether the users with permissions can create logs. 
""" import datetime -import os import uuid import mock import pytz +from django.core.files.storage import default_storage from django.core.management import call_command from django.urls import reverse from rest_framework.test import APITestCase @@ -21,29 +21,22 @@ from kolibri.core.logger.test.factory_logger import ContentSummaryLogFactory from kolibri.core.logger.test.factory_logger import FacilityUserFactory from kolibri.plugins.facility.views import CSV_EXPORT_FILENAMES -from kolibri.utils import conf from kolibri.utils.time_utils import utc_now def output_filename(log_type, facility, **kwargs): - logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") - if not os.path.isdir(logs_dir): - os.mkdir(logs_dir) if log_type in ("summary", "session"): start_date = kwargs.get("start_date") end_date = kwargs.get("end_date") - log_path = os.path.join( - logs_dir, + return default_storage.path( CSV_EXPORT_FILENAMES[log_type].format( facility.name, facility.id[:4], start_date[:10], end_date[:10] - ), + ) ) else: - log_path = os.path.join( - logs_dir, - CSV_EXPORT_FILENAMES[log_type].format(facility.name, facility.id[:4]), + return default_storage.path( + CSV_EXPORT_FILENAMES[log_type].format(facility.name, facility.id[:4]) ) - return log_path class ContentSummaryLogCSVExportTestCase(APITestCase): diff --git a/kolibri/plugins/facility/views.py b/kolibri/plugins/facility/views.py index 8960ec75284..7ae92f53a27 100644 --- a/kolibri/plugins/facility/views.py +++ b/kolibri/plugins/facility/views.py @@ -1,9 +1,8 @@ -import io import json -import os from datetime import datetime as dt from django.core.exceptions import PermissionDenied +from django.core.files.storage import default_storage from django.http import Http404 from django.http import HttpResponse from django.http.response import FileResponse @@ -27,7 +26,6 @@ ) from kolibri.core.logger.models import ContentSessionLog from kolibri.core.logger.models import GenerateCSVLogRequest -from kolibri.utils import conf 
CSV_EXPORT_FILENAMES = {} @@ -97,7 +95,6 @@ def exported_csv_info(request, facility_id): """ facility = _get_facility_check_permissions(request, facility_id) - logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export") csv_statuses = {} for log_type in CSV_EXPORT_FILENAMES: @@ -109,19 +106,16 @@ def exported_csv_info(request, facility_id): else: start = "" end = "" - log_path = os.path.join( - logs_dir, - CSV_EXPORT_FILENAMES[log_type].format( + filename = CSV_EXPORT_FILENAMES[log_type].format( facility.name, facility.id[:4], start[:10], end[:10] - ), - ) + ) else: - log_path = os.path.join( - logs_dir, - CSV_EXPORT_FILENAMES[log_type].format(facility.name, facility.id[:4]), + filename = CSV_EXPORT_FILENAMES[log_type].format( + facility.name, facility.id[:4] ) - if os.path.exists(log_path): - csv_statuses[log_type] = os.path.getmtime(log_path) + + if default_storage.exists(filename): + csv_statuses[log_type] = default_storage.get_modified_time(filename) else: csv_statuses[log_type] = None @@ -189,32 +183,26 @@ def download_csv_file(request, csv_type, facility_id): if csv_type in CSV_EXPORT_FILENAMES.keys(): if csv_type == "user": - filepath = os.path.join( - conf.KOLIBRI_HOME, - "log_export", - CSV_EXPORT_FILENAMES[csv_type].format(facility.name, facility.id[:4]), + filename = CSV_EXPORT_FILENAMES[csv_type].format( + facility.name, facility.id[:4] ) else: log_request = _get_log_request(csv_type, facility_id) if log_request: start = log_request.selected_start_date.isoformat() end = log_request.selected_end_date.isoformat() - filepath = os.path.join( - conf.KOLIBRI_HOME, - "log_export", - CSV_EXPORT_FILENAMES[csv_type].format( + filename = CSV_EXPORT_FILENAMES[csv_type].format( facility.name, facility.id[:4], start[:10], end[:10] - ), - ) + ) else: - filepath = None + filename = None # if the file does not exist on disk, return a 404 - if filepath is None or not os.path.exists(filepath): + if filename is None or not default_storage.exists(filename): raise 
Http404("There is no csv export file for {} available".format(csv_type)) # generate a file response - response = FileResponse(io.open(filepath, "rb")) + response = FileResponse(default_storage.open(filename, "rb")) # set the content-type by guessing from the filename response.headers["Content-Type"] = "text/csv" @@ -234,6 +222,6 @@ translation.deactivate() # set the content-length to the file size - response.headers["Content-Length"] = os.path.getsize(filepath) + response.headers["Content-Length"] = default_storage.size(filename) return response diff --git a/kolibri/utils/options.py b/kolibri/utils/options.py index 7f2ea3327c3..ce94d91be2b 100644 --- a/kolibri/utils/options.py +++ b/kolibri/utils/options.py @@ -271,6 +271,26 @@ def multiprocess_bool(value): return False +def storage_option(value, *opts): + """ + Validate the storage options. + Check that the given option is valid, then check that needed external + libraries are available where relevant. + Returns the validated value. + """ + value = is_option(value, *opts) + if value == "gcs": + try: + from storages.backends.gcloud import GoogleCloudStorage # noqa + except ModuleNotFoundError: + logger.error( + "Google Cloud Storage backend is not available. " + "Are storage requirements installed?" + ) + raise VdtValueError(value) + return value + + def cache_option(value): """ Validate the cache options. @@ -385,6 +405,45 @@ def csp_source_list(value): base_option_spec = { + "FileStorage": { + "STORAGE_BACKEND": { + "type": "storage_option", + "options": ("file_system", "gcs"), + "default": "file_system", + "description": """ + The storage backend class that Django will use when managing files. The class given here must implement + the django.files.storage.Storage class. + """, + }, + "GS_BUCKET_NAME": { + "type": "string", + "default": "", + "description": """ + The name of the Google Cloud Storage bucket that will be used to store content files. 
+ """, + }, + "GS_PROJECT_ID": { + "type": "string", + "default": "", + "description": """ + The Google Cloud project ID that the bucket is associated with. + """, + }, + "GS_CREDENTIALS": { + "type": "path", + "default": "", + "description": """ + The path to the Google Cloud Storage credentials file that will be used to authenticate with the bucket. + """, + }, + "GS_DEFAULT_ACL": { + "type": "string", + "default": "publicRead", + "description": """ + The default access control list (ACL) to apply to new objects in the bucket. + """, + }, + }, "Cache": { "CACHE_BACKEND": { "type": "cache_option", @@ -779,6 +838,7 @@ def _get_validator(): "url_prefix": url_prefix, "bytes": validate_bytes, "multiprocess_bool": multiprocess_bool, + "storage_option": storage_option, "cache_option": cache_option, "lazy_import_callback_list": lazy_import_callback_list, "csp_source_list": csp_source_list, diff --git a/requirements/storages.txt b/requirements/storages.txt new file mode 100644 index 00000000000..a20ce2fe0c2 --- /dev/null +++ b/requirements/storages.txt @@ -0,0 +1,2 @@ +# Additional reqs for running kolibri with GCS file storage backend +django-storages[google]==1.14.2