Commit
Merge branch 'develop' of https://github.com/openvinotoolkit/cvat into dkru/cypress-case-115-ellipse-actions
dvkruchinin committed Jan 20, 2022
2 parents 1fc25fb + 129584b commit a9ab555
Showing 10 changed files with 253 additions and 57 deletions.
4 changes: 3 additions & 1 deletion CHANGELOG.md
@@ -57,12 +57,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Uncaught TypeError: this.el.node.getScreenCTM() is null in Firefox (<https://github.com/openvinotoolkit/cvat/pull/4175>)
- Bug: canvas is busy when start playing, start resizing a shape and do not release the mouse cursor (<https://github.com/openvinotoolkit/cvat/pull/4151>)
- Bug: could not receive frame N. TypeError: Cannot read properties of undefined (reding "filename") (<https://github.com/openvinotoolkit/cvat/pull/4187>)
- Cannot choose a dataset format for a linked repository if a task type is annotation (<https://github.com/openvinotoolkit/cvat/pull/4203>)
- Fixed tus upload error over https (<https://github.com/openvinotoolkit/cvat/pull/4154>)
- Issues disappear when rescale a browser (<https://github.com/openvinotoolkit/cvat/pull/4189>)
- Auth token key is not returned when registering without email verification (<https://github.com/openvinotoolkit/cvat/pull/4092>)
- Error in create project from backup for standard 3D annotation (<https://github.com/openvinotoolkit/cvat/pull/4160>)

### Security
- Updated ELK to 6.8.22 which uses log4j 2.17.0 (<https://github.com/openvinotoolkit/cvat/pull/4052>)
- Updated ELK to 6.8.23 which uses log4j 2.17.1 (<https://github.com/openvinotoolkit/cvat/pull/4206>)

## \[1.7.0] - 2021-11-15

6 changes: 3 additions & 3 deletions components/analytics/docker-compose.analytics.yml
@@ -8,7 +8,7 @@ services:
build:
context: ./components/analytics/elasticsearch
args:
ELK_VERSION: 6.8.22
ELK_VERSION: 6.8.23
volumes:
- cvat_events:/usr/share/elasticsearch/data
restart: always
@@ -21,7 +21,7 @@ services:
build:
context: ./components/analytics/kibana
args:
ELK_VERSION: 6.8.22
ELK_VERSION: 6.8.23
depends_on: ['elasticsearch']
restart: always

@@ -62,7 +62,7 @@ services:
build:
context: ./components/analytics/logstash
args:
ELK_VERSION: 6.8.22
ELK_VERSION: 6.8.23
http_proxy: ${http_proxy}
https_proxy: ${https_proxy}
environment:
9 changes: 5 additions & 4 deletions cvat/apps/dataset_manager/formats/registry.py
@@ -57,10 +57,11 @@ def __call__(self, *args, **kwargs):

def format_for(export_format, mode):
format_name = export_format
if mode == "annotation":
format_name = "CVAT for images 1.1"
elif export_format not in EXPORT_FORMATS:
format_name = "CVAT for video 1.1"
if export_format not in EXPORT_FORMATS:
if mode == "annotation":
format_name = "CVAT for images 1.1"
else:
format_name = "CVAT for video 1.1"
return format_name


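The change in format_for only falls back to a CVAT default when the requested format is not registered, instead of always forcing "CVAT for images 1.1" for annotation tasks; this appears to correspond to the CHANGELOG entry above about choosing a dataset format when the task type is annotation. A minimal, self-contained sketch of the corrected behaviour (the registry contents below are assumed for illustration; only the two default names come from the diff):

EXPORT_FORMATS = {
    "Datumaro 1.0": object(),          # assumed entry, stands in for a registered exporter
    "CVAT for images 1.1": object(),
    "CVAT for video 1.1": object(),
}

def format_for(export_format, mode):
    # Fall back to a CVAT default only when the requested format is unknown;
    # previously mode == "annotation" always overrode the requested format.
    format_name = export_format
    if export_format not in EXPORT_FORMATS:
        if mode == "annotation":
            format_name = "CVAT for images 1.1"
        else:
            format_name = "CVAT for video 1.1"
    return format_name

assert format_for("Datumaro 1.0", "annotation") == "Datumaro 1.0"
assert format_for("unknown format", "annotation") == "CVAT for images 1.1"
assert format_for("unknown format", "interpolation") == "CVAT for video 1.1"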
12 changes: 8 additions & 4 deletions cvat/apps/engine/media_extractors.py
@@ -1,4 +1,4 @@
# Copyright (C) 2019-2020 Intel Corporation
# Copyright (C) 2019-2022 Intel Corporation
#
# SPDX-License-Identifier: MIT

@@ -136,6 +136,9 @@ def __iter__(self):
for i in range(self._start, self._stop, self._step):
yield (self.get_image(i), self.get_path(i), i)

def __contains__(self, media_file):
return media_file in self._source_path

def filter(self, callback):
source_path = list(filter(callback, self._source_path))
ImageListReader.__init__(
@@ -172,14 +175,14 @@ def get_image_size(self, i):
img = Image.open(self._source_path[i])
return img.width, img.height

def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D, sorting_method=None):
# FIXME
ImageListReader.__init__(self,
source_path=source_files,
step=step,
start=start,
stop=stop,
sorting_method=self._sorting_method,
sorting_method=sorting_method if sorting_method else self._sorting_method,
)
self._dimension = dimension

@@ -328,13 +331,14 @@ def get_path(self, i):
else: # necessary for mime_type definition
return self._source_path[i]

def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D, sorting_method=None):
super().reconcile(
source_files=source_files,
step=step,
start=start,
stop=stop,
dimension=dimension,
sorting_method=sorting_method
)

def extract(self):
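The two additions to the media extractors - a __contains__ method on ImageListReader and the optional sorting_method argument threaded through both reconcile overrides - let later code (see task.py below) test membership with the in operator and re-sort an already built reader. A simplified, self-contained stand-in showing just that pattern (this is not the real ImageListReader, only an illustration of the two additions):

class ReaderSketch:
    """Toy stand-in for ImageListReader from cvat/apps/engine/media_extractors.py."""

    def __init__(self, source_path, sorting_method="lexicographical"):
        self._source_path = list(source_path)
        self._sorting_method = sorting_method

    def __contains__(self, media_file):
        # mirrors the new ImageListReader.__contains__: membership is
        # checked against the underlying source path list
        return media_file in self._source_path

    def reconcile(self, source_files, sorting_method=None):
        # keep the original sorting method unless the caller overrides it,
        # matching "sorting_method if sorting_method else self._sorting_method"
        self._sorting_method = sorting_method if sorting_method else self._sorting_method
        self._source_path = list(source_files)

reader = ReaderSketch(["img_001.jpg", "img_002.jpg"])
assert "img_001.jpg" in reader
reader.reconcile(["img_002.jpg", "img_001.jpg"], sorting_method="predefined")
assert reader._sorting_method == "predefined"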
58 changes: 38 additions & 20 deletions cvat/apps/engine/task.py
@@ -1,5 +1,5 @@

# Copyright (C) 2018-2021 Intel Corporation
# Copyright (C) 2018-2022 Intel Corporation
#
# SPDX-License-Identifier: MIT

@@ -326,22 +326,6 @@ def _create_thread(db_task, data, isBackupRestore=False, isDatasetImport=False):
db_data.start_frame = 0
data['stop_frame'] = None
db_data.frame_filter = ''
if isBackupRestore and media_type != 'video' and db_data.storage_method == models.StorageMethodChoice.CACHE:
# we should sort media_files according to the manifest content sequence
manifest = ImageManifestManager(db_data.get_manifest_path())
manifest.set_index()
sorted_media_files = []
for idx in range(len(media_files)):
properties = manifest[manifest_index(idx)]
image_name = properties.get('name', None)
image_extension = properties.get('extension', None)

full_image_path = f"{image_name}{image_extension}" if image_name and image_extension else None
if full_image_path and full_image_path in media_files:
sorted_media_files.append(full_image_path)
media_files = sorted_media_files.copy()
del sorted_media_files
data['sorting_method'] = models.SortingMethod.PREDEFINED
source_paths=[os.path.join(upload_dir, f) for f in media_files]
if manifest_file and not isBackupRestore and data['sorting_method'] in {models.SortingMethod.RANDOM, models.SortingMethod.PREDEFINED}:
raise Exception("It isn't supported to upload manifest file and use random sorting")
@@ -368,8 +352,8 @@ def _create_thread(db_task, data, isBackupRestore=False, isDatasetImport=False):
extractor.extract()

if db_data.storage == models.StorageChoice.LOCAL or \
(db_data.storage == models.StorageChoice.SHARE and \
isinstance(extractor, MEDIA_TYPES['zip']['extractor'])):
(db_data.storage == models.StorageChoice.SHARE and \
isinstance(extractor, MEDIA_TYPES['zip']['extractor'])):
validate_dimension.set_path(upload_dir)
validate_dimension.validate()

@@ -379,8 +363,15 @@ def _create_thread(db_task, data, isBackupRestore=False, isDatasetImport=False):
if validate_dimension.dimension == models.DimensionType.DIM_3D:
db_task.dimension = models.DimensionType.DIM_3D

keys_of_related_files = validate_dimension.related_files.keys()
absolute_keys_of_related_files = [os.path.join(upload_dir, f) for f in keys_of_related_files]
# When a task is created, the sorting method can be random and in this case, reinitialization will be with correct sorting
# but when a task is restored from a backup, a random sorting is changed to predefined and we need to manually sort files
# in the correct order.
source_files = absolute_keys_of_related_files if not isBackupRestore else \
[item for item in extractor.absolute_source_paths if item in absolute_keys_of_related_files]
extractor.reconcile(
source_files=[os.path.join(upload_dir, f) for f in validate_dimension.related_files.keys()],
source_files=source_files,
step=db_data.get_frame_step(),
start=db_data.start_frame,
stop=data['stop_frame'],
@@ -392,6 +383,33 @@ def _create_thread(db_task, data, isBackupRestore=False, isDatasetImport=False):
extractor.filter(lambda x: not re.search(r'(^|{0})related_images{0}'.format(os.sep), x))
related_images = detect_related_images(extractor.absolute_source_paths, upload_dir)

if isBackupRestore and not isinstance(extractor, MEDIA_TYPES['video']['extractor']) and db_data.storage_method == models.StorageMethodChoice.CACHE and \
db_data.sorting_method in {models.SortingMethod.RANDOM, models.SortingMethod.PREDEFINED} and validate_dimension.dimension != models.DimensionType.DIM_3D:
# we should sort media_files according to the manifest content sequence
# and we should do this in general after validation step for 3D data and after filtering from related_images
manifest = ImageManifestManager(db_data.get_manifest_path())
manifest.set_index()
sorted_media_files = []

for idx in range(len(extractor.absolute_source_paths)):
properties = manifest[idx]
image_name = properties.get('name', None)
image_extension = properties.get('extension', None)

full_image_path = os.path.join(upload_dir, f"{image_name}{image_extension}") if image_name and image_extension else None
if full_image_path and full_image_path in extractor:
sorted_media_files.append(full_image_path)
media_files = sorted_media_files.copy()
del sorted_media_files
data['sorting_method'] = models.SortingMethod.PREDEFINED
extractor.reconcile(
source_files=media_files,
step=db_data.get_frame_step(),
start=db_data.start_frame,
stop=data['stop_frame'],
sorting_method=data['sorting_method'],
)

db_task.mode = task_mode
db_data.compressed_chunk_type = models.DataChoice.VIDEO if task_mode == 'interpolation' and not data['use_zip_chunks'] else models.DataChoice.IMAGESET
db_data.original_chunk_type = models.DataChoice.VIDEO if task_mode == 'interpolation' else models.DataChoice.IMAGESET
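The block that was deleted near the top of _create_thread reappears lower down: when a cache-based task is restored from a backup, the file order is rebuilt from the manifest only after 3D validation and after related_images are filtered out, and the extractor is then reinitialized with SortingMethod.PREDEFINED. A self-contained sketch of just the reordering loop (the manifest records and paths below are invented; the real code reads them through ImageManifestManager and feeds the result to extractor.reconcile):

import os

upload_dir = "/tmp/upload"  # hypothetical upload directory

# Simplified manifest: an ordered list of name/extension records,
# standing in for ImageManifestManager indexing.
manifest = [
    {"name": "frame_000002", "extension": ".jpg"},
    {"name": "frame_000000", "extension": ".jpg"},
    {"name": "frame_000001", "extension": ".jpg"},
]

# Files as the extractor currently holds them (e.g. lexicographically sorted).
extracted_files = [
    os.path.join(upload_dir, "frame_000000.jpg"),
    os.path.join(upload_dir, "frame_000001.jpg"),
    os.path.join(upload_dir, "frame_000002.jpg"),
]

sorted_media_files = []
for properties in manifest:
    image_name = properties.get("name")
    image_extension = properties.get("extension")
    full_image_path = (
        os.path.join(upload_dir, f"{image_name}{image_extension}")
        if image_name and image_extension else None
    )
    # keep only files the extractor actually knows about, in manifest order
    if full_image_path and full_image_path in extracted_files:
        sorted_media_files.append(full_image_path)

# sorted_media_files now follows the manifest sequence and would be handed
# back via reconcile(..., sorting_method=SortingMethod.PREDEFINED)
print(sorted_media_files)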
14 changes: 12 additions & 2 deletions cvat/apps/engine/tests/test_rest_api.py
@@ -1,4 +1,4 @@
# Copyright (C) 2020-2021 Intel Corporation
# Copyright (C) 2020-2022 Intel Corporation
#
# SPDX-License-Identifier: MIT

@@ -2553,6 +2553,16 @@ def setUpTestData(cls):
}
)

for sorting, _ in SortingMethod.choices():
cls.media_data.append(
{
"image_quality": 75,
"server_files[0]": filename,
'use_cache': True,
'sorting_method': sorting,
}
)

filename = os.path.join("videos", "test_video_1.mp4")
path = os.path.join(settings.SHARE_ROOT, filename)
os.makedirs(os.path.dirname(path))
@@ -2617,7 +2627,7 @@ def setUpTestData(cls):
**use_cache_data,
'sorting_method': SortingMethod.RANDOM,
},
# predefined: test_1.jpg, test_2.jpg, test_10.jpg, test_2.jpg
# predefined: test_1.jpg, test_2.jpg, test_10.jpg, test_3.jpg
{
**use_cache_data,
'sorting_method': SortingMethod.PREDEFINED,
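The corrected comment lists the predefined order of the sample files; for reference, this is how lexicographic and natural ordering diverge for the same names (the natural-sort key below is a simplified stand-in, not CVAT's implementation):

import re

files = ["test_1.jpg", "test_2.jpg", "test_10.jpg", "test_3.jpg"]  # predefined order

lexicographic = sorted(files)
# ['test_1.jpg', 'test_10.jpg', 'test_2.jpg', 'test_3.jpg']

def natural_key(name):
    # split digit runs out so numeric parts compare as numbers
    return [int(part) if part.isdigit() else part for part in re.split(r"(\d+)", name)]

natural = sorted(files, key=natural_key)
# ['test_1.jpg', 'test_2.jpg', 'test_3.jpg', 'test_10.jpg']

print(lexicographic)
print(natural)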
13 changes: 12 additions & 1 deletion cvat/apps/iam/rules/projects.rego
@@ -88,12 +88,23 @@ filter = [] { # Django Q object to filter list of entries
utils.is_admin
utils.is_sandbox
} else = qobject {
utils.is_admin
utils.is_organization
qobject := [ {"organization": input.auth.organization.id} ]
} else = qobject {
utils.is_sandbox
user := input.auth.user
qobject := [ {"owner_id": user.id}, {"assignee_id": user.id}, "|"]
qobject := [ {"owner_id": user.id}, {"assignee_id": user.id}, "|" ]
} else = qobject {
utils.is_organization
utils.has_perm(utils.USER)
organizations.has_perm(organizations.MAINTAINER)
qobject := [ {"organization": input.auth.organization.id} ]
} else = qobject {
organizations.has_perm(organizations.WORKER)
user := input.auth.user
qobject := [ {"owner_id": user.id}, {"assignee_id": user.id}, "|",
{"organization": input.auth.organization.id}, "&" ]
}

allow {
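The qobject values returned by this rule read like postfix (reverse Polish) expressions: field-lookup dictionaries are operands, and "|" / "&" combine the two entries beneath them. A hedged Python sketch of how such a list could be folded into a Django Q object (illustrative only, not CVAT's actual evaluator):

from django.db.models import Q  # requires Django; Q objects can be combined without project settings

def build_q(qobject_list):
    # Evaluate the postfix list produced by the rego filter rule.
    stack = []
    for token in qobject_list:
        if token == "|":
            right, left = stack.pop(), stack.pop()
            stack.append(left | right)
        elif token == "&":
            right, left = stack.pop(), stack.pop()
            stack.append(left & right)
        else:
            stack.append(Q(**token))
    return stack.pop()

# The new worker branch above, e.g. for user 7 in organization 3:
qobject = [{"owner_id": 7}, {"assignee_id": 7}, "|", {"organization": 3}, "&"]
q = build_q(qobject)
# -> (Q(owner_id=7) | Q(assignee_id=7)) & Q(organization=3)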